Compare commits

12 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Junyan Qin | 45e4096a12 | Merge pull request #587 from RockChinQ/hotfix/openai-1.0-adaptation (Feat: adapt to openai>=1.0.0) | 2023-11-10 11:49:20 +08:00 |
| GitHub Actions | 8a1d4fe287 | Update override-all.json | 2023-11-10 03:47:30 +00:00 |
| RockChinQ | 98f880ebc2 | chore: group replies no longer quote the original message by default | 2023-11-10 11:47:10 +08:00 |
| RockChinQ | 2b852853f3 | feat: adapt completion and chat_completions | 2023-11-10 11:31:14 +08:00 |
| RockChinQ | c7a9988033 | feat: configure the forward proxy in a new way | 2023-11-10 10:54:03 +08:00 |
| RockChinQ | c475eebe1c | chore: stop pinning the openai version | 2023-11-10 10:14:11 +08:00 |
| RockChinQ | 0fe7355ae0 | hotfix: adapt to openai>=1.0.0 | 2023-11-10 10:13:50 +08:00 |
| Junyan Qin | 57de96e3a2 | chore(requirements.txt): pin openai version to 0.28.1 | 2023-11-10 09:31:27 +08:00 |
| Junyan Qin | 70571cef50 | Update README.md | 2023-10-02 17:31:08 +08:00 |
| Junyan Qin | 0b6deb3340 | Update README.md | 2023-10-02 17:23:36 +08:00 |
| Junyan Qin | dcda85a825 | Merge pull request #580 from RockChinQ/dependabot/pip/openai-approx-eq-0.28.1 (chore(deps): update openai requirement from ~=0.28.0 to ~=0.28.1) | 2023-10-02 16:10:37 +08:00 |
| dependabot[bot] | 9d3bff018b | chore(deps): update openai requirement from ~=0.28.0 to ~=0.28.1 | 2023-10-02 08:09:30 +00:00 |

Full commit message of 9d3bff018b:

Updates the requirements on [openai](https://github.com/openai/openai-python) to permit the latest version.

- [Release notes](https://github.com/openai/openai-python/releases)
- [Commits](https://github.com/openai/openai-python/compare/v0.28.0...v0.28.1)

```yaml
---
updated-dependencies:
- dependency-name: openai
  dependency-type: direct:production
...
```

Signed-off-by: dependabot[bot] <support@github.com>
13 changed files with 114 additions and 100 deletions

View File

```diff
@@ -8,7 +8,7 @@
 # QChatGPT
 <!-- High-stability, continuously iterated, cleanly architected, plugin-enabled, highly customizable ChatGPT QQ bot framework -->
-“Sure! Here is an example of a quicksort algorithm written in Java:”
+<!-- “Sure! Here is an example of a quicksort algorithm written in Java:” -->
 [![GitHub release (latest by date)](https://img.shields.io/github/v/release/RockChinQ/QChatGPT)](https://github.com/RockChinQ/QChatGPT/releases/latest)
 <a href="https://hub.docker.com/repository/docker/rockchin/qchatgpt">
@@ -25,7 +25,7 @@
 <a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=nC80H57wmKPwRDLFeQrDDjVl81XuC21P&authKey=2wTUTfoQ5v%2BD4C5zfpuR%2BSPMDqdXgDXA%2FS2wHI1NxTfWIG%2B%2FqK08dgyjMMOzhXa9&noverify=0&group_code=738382634">
 <img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-738382634-purple">
 </a>
-<a href="https://lazyfree.top/2023/08/16/QChatGPT%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C/">
+<a href="https://qchatgpt.rockchin.top">
 <img alt="Static Badge" src="https://img.shields.io/badge/%E6%9F%A5%E7%9C%8B-%E7%A4%BE%E5%8C%BA%E7%BC%96%E5%86%99%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C-blue">
 </a>
 <a href="https://www.bilibili.com/video/BV14h4y1w7TC">
```

View File

```diff
@@ -248,7 +248,7 @@ image_api_params = {
 trace_function_calls = False
 
 # whether to quote the original message when replying in a group
-quote_origin = True
+quote_origin = False
 
 # whether to @ the sender when replying in a group
 at_sender = False
```

View File

```diff
@@ -191,13 +191,16 @@ def start(first_time_init=False):
     # configure the OpenAI proxy
     import openai
-    openai.proxy = None  # reset first; the proxy may need to be cleared after a reload
+    openai.proxies = None  # reset first; the proxy may need to be cleared after a reload
 
     if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None:
-        openai.proxy = config.openai_config["http_proxy"]
+        openai.proxies = {
+            "http": config.openai_config["http_proxy"],
+            "https": config.openai_config["http_proxy"]
+        }
 
     # configure the openai api_base
     if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
-        openai.api_base = config.openai_config["reverse_proxy"]
+        openai.base_url = config.openai_config["reverse_proxy"]
 
     # main startup flow
     database = pkg.database.manager.DatabaseManager()
```
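For reference, a minimal sketch of the equivalent per-client configuration under openai>=1.0.0; the key and both URLs are placeholders, and `proxies=` assumes an httpx version contemporary with this PR (later httpx releases renamed the parameter to `proxy=`):

```python
import httpx
import openai

# Placeholders standing in for config.openai_config entries.
HTTP_PROXY = "http://127.0.0.1:7890"
REVERSE_PROXY = "https://my-reverse-proxy.example/v1"

client = openai.Client(
    api_key="sk-...",                              # placeholder key
    base_url=REVERSE_PROXY,                        # replaces the 0.x openai.api_base
    http_client=httpx.Client(proxies=HTTP_PROXY),  # routes http and https through the proxy
)
```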

View File

```diff
@@ -63,7 +63,7 @@
         "size": "256x256"
     },
     "trace_function_calls": false,
-    "quote_origin": true,
+    "quote_origin": false,
     "at_sender": false,
     "include_image_description": true,
     "process_message_timeout": 120,
```

View File

```diff
@@ -1,4 +1,5 @@
 import openai
+from openai.types.chat import chat_completion_message
 import json
 import logging
@@ -13,13 +14,14 @@ class ChatCompletionRequest(RequestBase):
     This class guarantees that every returned message with role assistant has finish_reason == stop.
     If there is a function-call response, the return cascade of this class is:
     function-call request -> function-call result -> ... -> assistant message -> stop.
     """
 
     model: str
     messages: list[dict[str, str]]
     kwargs: dict
 
     stopped: bool = False
 
-    pending_func_call: dict = None
+    pending_func_call: chat_completion_message.FunctionCall = None
 
     pending_msg: str
@@ -46,16 +48,18 @@ class ChatCompletionRequest(RequestBase):
     def __init__(
         self,
+        client: openai.Client,
         model: str,
         messages: list[dict[str, str]],
         **kwargs
     ):
+        self.client = client
         self.model = model
         self.messages = messages.copy()
         self.kwargs = kwargs
 
-        self.req_func = openai.ChatCompletion.acreate
+        self.req_func = self.client.chat.completions.create
 
         self.pending_func_call = None
@@ -84,39 +88,48 @@ class ChatCompletionRequest(RequestBase):
             # merge in kwargs
             args = {**args, **self.kwargs}
 
-            resp = self._req(**args)
+            from openai.types.chat import chat_completion
+            resp: chat_completion.ChatCompletion = self._req(**args)
 
-            choice0 = resp["choices"][0]
+            choice0 = resp.choices[0]
 
             # if this is not a function call and finish_reason is stop, stop iterating
-            if choice0['finish_reason'] == 'stop':
+            if choice0.finish_reason == 'stop':
                 self.stopped = True
 
-            if 'function_call' in choice0['message']:
-                self.pending_func_call = choice0['message']['function_call']
+            if hasattr(choice0.message, 'function_call') and choice0.message.function_call is not None:
+                self.pending_func_call = choice0.message.function_call
 
                 self.append_message(
                     role="assistant",
-                    content=choice0['message']['content'],
-                    function_call=choice0['message']['function_call']
+                    content=choice0.message.content,
+                    function_call=choice0.message.function_call
                 )
 
                 return {
-                    "id": resp["id"],
+                    "id": resp.id,
                     "choices": [
                         {
-                            "index": choice0["index"],
+                            "index": choice0.index,
                             "message": {
                                 "role": "assistant",
                                 "type": "function_call",
-                                "content": choice0['message']['content'],
-                                "function_call": choice0['message']['function_call']
+                                "content": choice0.message.content,
+                                "function_call": {
+                                    "name": choice0.message.function_call.name,
+                                    "arguments": choice0.message.function_call.arguments
+                                }
                             },
                             "finish_reason": "function_call"
                         }
                     ],
-                    "usage": resp["usage"]
+                    "usage": {
+                        "prompt_tokens": resp.usage.prompt_tokens,
+                        "completion_tokens": resp.usage.completion_tokens,
+                        "total_tokens": resp.usage.total_tokens
+                    }
                 }
             else:
@@ -124,19 +137,23 @@ class ChatCompletionRequest(RequestBase):
                 # a plain reply is always last, so it does not need to be appended to the internal messages
                 return {
-                    "id": resp["id"],
+                    "id": resp.id,
                     "choices": [
                         {
-                            "index": choice0["index"],
+                            "index": choice0.index,
                             "message": {
                                 "role": "assistant",
                                 "type": "text",
-                                "content": choice0['message']['content']
+                                "content": choice0.message.content
                             },
-                            "finish_reason": choice0["finish_reason"]
+                            "finish_reason": choice0.finish_reason
                         }
                     ],
-                    "usage": resp["usage"]
+                    "usage": {
+                        "prompt_tokens": resp.usage.prompt_tokens,
+                        "completion_tokens": resp.usage.completion_tokens,
+                        "total_tokens": resp.usage.total_tokens
+                    }
                 }
         else:  # handle the function-call request
@@ -144,20 +161,20 @@ class ChatCompletionRequest(RequestBase):
             self.pending_func_call = None
 
-            func_name = cp_pending_func_call['name']
+            func_name = cp_pending_func_call.name
             arguments = {}
 
             try:
-                arguments = json.loads(cp_pending_func_call['arguments'])
+                arguments = json.loads(cp_pending_func_call.arguments)
             # exception handling for non-JSON arguments
             except json.decoder.JSONDecodeError:
                 # get the function's parameter list
                 func_schema = get_func_schema(func_name)
 
                 arguments = {
-                    func_schema['parameters']['required'][0]: cp_pending_func_call['arguments']
+                    func_schema['parameters']['required'][0]: cp_pending_func_call.arguments
                }
 
             logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))
```
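Most of this hunk is mechanical: openai 0.x returned dict-like objects, while 1.x returns typed models (`ChatCompletion`) that are read with attribute access. A minimal sketch of the new access pattern, with a placeholder key and model:

```python
import openai

client = openai.Client(api_key="sk-...")  # placeholder key

resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
)

choice0 = resp.choices[0]           # was resp["choices"][0]
print(choice0.finish_reason)        # was choice0["finish_reason"]
print(choice0.message.content)      # was choice0["message"]["content"]

# function_call is now a typed object or None, not an optional dict key
if choice0.message.function_call is not None:
    print(choice0.message.function_call.name,
          choice0.message.function_call.arguments)

print(resp.usage.total_tokens)      # usage is typed as well
```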

View File

```diff
@@ -1,4 +1,5 @@
 import openai
+from openai.types import completion, completion_choice
 
 from .model import RequestBase
@@ -17,10 +18,12 @@ class CompletionRequest(RequestBase):
     def __init__(
         self,
+        client: openai.Client,
         model: str,
         messages: list[dict[str, str]],
         **kwargs
     ):
+        self.client = client
         self.model = model
         self.prompt = ""
@@ -31,7 +34,7 @@ class CompletionRequest(RequestBase):
         self.kwargs = kwargs
 
-        self.req_func = openai.Completion.acreate
+        self.req_func = self.client.completions.create
 
     def __iter__(self):
         return self
@@ -63,49 +66,35 @@ class CompletionRequest(RequestBase):
         if self.stopped:
             raise StopIteration()
 
-        resp = self._req(
+        resp: completion.Completion = self._req(
             model=self.model,
             prompt=self.prompt,
             **self.kwargs
         )
 
-        if resp["choices"][0]["finish_reason"] == "stop":
+        if resp.choices[0].finish_reason == "stop":
             self.stopped = True
 
-        choice0 = resp["choices"][0]
+        choice0: completion_choice.CompletionChoice = resp.choices[0]
 
-        self.prompt += choice0["text"]
+        self.prompt += choice0.text
 
         return {
-            "id": resp["id"],
+            "id": resp.id,
             "choices": [
                 {
-                    "index": choice0["index"],
+                    "index": choice0.index,
                     "message": {
                         "role": "assistant",
                         "type": "text",
-                        "content": choice0["text"]
+                        "content": choice0.text
                    },
-                    "finish_reason": choice0["finish_reason"]
+                    "finish_reason": choice0.finish_reason
                 }
             ],
-            "usage": resp["usage"]
-        }
-
-if __name__ == "__main__":
-    import os
-
-    openai.api_key = os.environ["OPENAI_API_KEY"]
-
-    for resp in CompletionRequest(
-        model="text-davinci-003",
-        messages=[
-            {
-                "role": "user",
-                "content": "Hello, who are you?"
-            }
-        ]
-    ):
-        print(resp)
-        if resp["choices"][0]["finish_reason"] == "stop":
-            break
+            "usage": {
+                "prompt_tokens": resp.usage.prompt_tokens,
+                "completion_tokens": resp.usage.completion_tokens,
+                "total_tokens": resp.usage.total_tokens
+            }
+        }
```
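The legacy completions endpoint migrates the same way. A short hedged sketch, reusing the `client` from the previous sketch and the `text-davinci-003` model named in the deleted `__main__` block:

```python
# Legacy text-completion call under openai>=1.0.0.
resp = client.completions.create(
    model="text-davinci-003",
    prompt="Hello, who are you?",
)
print(resp.choices[0].text, resp.choices[0].finish_reason)
```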

View File

```diff
@@ -8,6 +8,8 @@ import openai
 class RequestBase:
 
+    client: openai.Client
+
     req_func: callable
 
     def __init__(self, *args, **kwargs):
@@ -17,41 +19,17 @@ class RequestBase:
         import pkg.utils.context as context
         switched, name = context.get_openai_manager().key_mgr.auto_switch()
         logging.debug("切换api-key: switched={}, name={}".format(switched, name))
 
-        openai.api_key = context.get_openai_manager().key_mgr.get_using_key()
+        self.client.api_key = context.get_openai_manager().key_mgr.get_using_key()
 
     def _req(self, **kwargs):
         """Handle proxying for the request"""
         import config
 
-        ret: dict = {}
-        exception: Exception = None
-
-        async def awrapper(**kwargs):
-            nonlocal ret, exception
-
-            try:
-                ret = await self.req_func(**kwargs)
-                logging.debug("接口请求返回:%s", str(ret))
-
-                if config.switch_strategy == 'active':
-                    self._next_key()
-
-                return ret
-            except Exception as e:
-                exception = e
-
-        loop = asyncio.new_event_loop()
-
-        thr = threading.Thread(
-            target=loop.run_until_complete,
-            args=(awrapper(**kwargs),)
-        )
-
-        thr.start()
-        thr.join()
-
-        if exception is not None:
-            raise exception
+        ret = self.req_func(**kwargs)
+        logging.debug("接口请求返回:%s", str(ret))
+
+        if config.switch_strategy == 'active':
+            self._next_key()
 
         return ret
```
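Because the 1.x default client is synchronous, the thread-plus-event-loop shim around the old `acreate` coroutines can be dropped entirely: a request is now a plain function call. Projects that want to stay asynchronous could use the 1.x async client instead; a minimal sketch of that alternative (not what this PR does, and the key is a placeholder):

```python
import asyncio
import openai

async def main() -> None:
    # AsyncClient is the 1.x async counterpart of openai.Client
    aclient = openai.AsyncClient(api_key="sk-...")  # placeholder key
    resp = await aclient.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )
    print(resp.choices[0].message.content)

asyncio.run(main())
```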

View File

```diff
@@ -24,6 +24,8 @@ class OpenAIInteract:
         "size": "256x256",
     }
 
+    client: openai.Client = None
+
     def __init__(self, api_key: str):
 
         self.key_mgr = pkg.openai.keymgr.KeysManager(api_key)
@@ -31,7 +33,9 @@ class OpenAIInteract:
         # logging.info("total text usage: %d", self.audit_mgr.get_total_text_length())
 
-        openai.api_key = self.key_mgr.get_using_key()
+        self.client = openai.Client(
+            api_key=self.key_mgr.get_using_key()
+        )
 
         pkg.utils.context.set_openai_manager(self)
@@ -48,7 +52,7 @@ class OpenAIInteract:
         cp_parmas = config.completion_api_params.copy()
         del cp_parmas['model']
 
-        request = select_request_cls(model, messages, cp_parmas)
+        request = select_request_cls(self.client, model, messages, cp_parmas)
 
         # request the API
         for resp in request:
```

View File

```diff
@@ -5,9 +5,8 @@ ChatCompletion - gpt-3.5-turbo and other models
 Completion - text-davinci-003 and other models
 
 This module wraps the request implementations of these two APIs and provides a unified calling interface for the upper layers
 """
-import openai, logging, threading, asyncio
-import openai.error as aiE
 import tiktoken
+import openai
 
 from pkg.openai.api.model import RequestBase
 from pkg.openai.api.completion import CompletionRequest
@@ -53,11 +52,11 @@ IMAGE_MODELS = {
 }
 
-def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase:
+def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> RequestBase:
     if model_name in CHAT_COMPLETION_MODELS:
-        return ChatCompletionRequest(model_name, messages, **args)
+        return ChatCompletionRequest(client, model_name, messages, **args)
     elif model_name in COMPLETION_MODELS:
-        return CompletionRequest(model_name, messages, **args)
+        return CompletionRequest(client, model_name, messages, **args)
     raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
```

View File

```diff
@@ -278,7 +278,7 @@ class Session:
             if resp['choices'][0]['message']['role'] == "assistant" and resp['choices'][0]['message']['content'] != None:  # contains a plain-text response
                 if not trace_func_calls:
-                    res_text += resp['choices'][0]['message']['content'] + "\n"
+                    res_text += resp['choices'][0]['message']['content']
                 else:
                     res_text = resp['choices'][0]['message']['content']
                     pending_res_text = resp['choices'][0]['message']['content']
```

View File

```diff
@@ -65,14 +65,14 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
             if not event.is_prevented_default():
                 reply = [prefix + text]
-    except openai.error.APIConnectionError as e:
+    except openai.APIConnectionError as e:
         err_msg = str(e)
         if err_msg.__contains__('Error communicating with OpenAI'):
             reply = handle_exception("{}会话调用API失败:{}\n您的网络无法访问OpenAI接口或网络代理不正常".format(session_name, e),
                                      "[bot]err:调用API失败请重试或联系管理员或等待修复")
         else:
             reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败请重试或联系管理员或等待修复")
-    except openai.error.RateLimitError as e:
+    except openai.RateLimitError as e:
         logging.debug(type(e))
         logging.debug(e.error['message'])
@@ -116,14 +116,14 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
         else:
             reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
                                      "[bot]err:RateLimitError,请重试或联系作者,或等待修复")
-    except openai.error.InvalidRequestError as e:
+    except openai.BadRequestError as e:
         if config.auto_reset and "This model's maximum context length is" in str(e):
             session.reset(persist=True)
             reply = [tips_custom.session_auto_reset_message]
         else:
             reply = handle_exception("{}API调用参数错误:{}\n".format(
                 session_name, e), "[bot]err:API调用参数错误请联系管理员或等待修复")
-    except openai.error.ServiceUnavailableError as e:
+    except openai.APIStatusError as e:
         reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用请重试或联系管理员或等待修复")
     except Exception as e:
         logging.exception(e)
```
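The 0.x `openai.error` module is gone in 1.x, and the handlers above map onto the new top-level exception classes; `ServiceUnavailableError` has no exact 1.x counterpart, so the code falls back to the broader `APIStatusError`. A condensed sketch of the mapping, with a placeholder key:

```python
import openai

client = openai.Client(api_key="sk-...")  # placeholder key

try:
    client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )
except openai.APIConnectionError:  # was openai.error.APIConnectionError
    print("network problem")
except openai.RateLimitError:      # was openai.error.RateLimitError
    print("rate limited")
except openai.BadRequestError:     # was openai.error.InvalidRequestError
    print("bad request, e.g. context length exceeded")
except openai.APIStatusError:      # broad non-2xx fallback; stands in for ServiceUnavailableError
    print("other API error")
```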

View File

```diff
@@ -1,5 +1,5 @@
 requests
-openai~=0.28.0
+openai
 dulwich~=0.21.6
 colorlog~=6.6.0
 yiri-mirai
@@ -10,4 +10,4 @@ Pillow
 nakuru-project-idk
 CallingGPT
 tiktoken
-PyYaml
\ No newline at end of file
+PyYaml
```

View File

```diff
@@ -0,0 +1,24 @@
+import os
+
+import openai
+
+client = openai.Client(
+    api_key=os.environ["OPENAI_API_KEY"],
+)
+
+openai.proxies = {
+    'http': 'http://127.0.0.1:7890',
+    'https': 'http://127.0.0.1:7890',
+}
+
+resp = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[
+        {
+            "role": "user",
+            "content": "Hello, how are you?",
+        }
+    ]
+)
+
+print(resp)
```
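Running this smoke test requires `OPENAI_API_KEY` to be set in the environment; the module-level `openai.proxies` assignment mirrors the setting applied in the startup hunk above.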