Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-25 19:37:36 +08:00)
Compare commits
21 Commits
| SHA1 |
|---|
| 45e4096a12 |
| 8a1d4fe287 |
| 98f880ebc2 |
| 2b852853f3 |
| c7a9988033 |
| c475eebe1c |
| 0fe7355ae0 |
| 57de96e3a2 |
| 70571cef50 |
| 0b6deb3340 |
| dcda85a825 |
| 9d3bff018b |
| 051376e0d2 |
| a113785211 |
| 3f4ed4dc3c |
| ac80764fae |
| e43afd4891 |
| f1aea1d495 |
| 0e2a5db104 |
| 3a4c9771fa |
| f4f8ef9523 |
54 .github/workflows/build_docker_image.yml (vendored)
@@ -7,32 +7,32 @@ on:
    types: [published]

jobs:
  publish-docker-image:
    runs-on: ubuntu-latest
    name: Build image

    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: judge has env GITHUB_REF # 如果没有GITHUB_REF环境变量,则把github.ref变量赋值给GITHUB_REF
        run: |
          if [ -z "$GITHUB_REF" ]; then
            export GITHUB_REF=${{ github.ref }}
          fi
      - name: Check GITHUB_REF env
        run: echo $GITHUB_REF
      - name: Get version
        id: get_version
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
      - name: Build # image name: rockchin/qchatgpt:<VERSION>
        run: docker build --network=host -t rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }} -t rockchin/qchatgpt:latest .
      - name: Login to Registry
        run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}

      - name: Push image
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: docker push rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }}

-      - name: Push image
+      - name: Push latest image
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: docker push rockchin/qchatgpt:latest
22 README.md
@@ -8,7 +8,7 @@
# QChatGPT

<!-- 高稳定性/持续迭代/架构清晰/支持插件/高可自定义的 ChatGPT QQ机器人框架 -->
-“当然!下面是一个使用Java编写的快速排序算法的示例代码”
+<!-- “当然!下面是一个使用Java编写的快速排序算法的示例代码” -->

[](https://github.com/RockChinQ/QChatGPT/releases/latest)
<a href="https://hub.docker.com/repository/docker/rockchin/qchatgpt">
@@ -25,7 +25,7 @@
<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=nC80H57wmKPwRDLFeQrDDjVl81XuC21P&authKey=2wTUTfoQ5v%2BD4C5zfpuR%2BSPMDqdXgDXA%2FS2wHI1NxTfWIG%2B%2FqK08dgyjMMOzhXa9&noverify=0&group_code=738382634">
<img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-738382634-purple">
</a>
-<a href="https://lazyfree.top/2023/08/16/QChatGPT%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C/">
+<a href="https://qchatgpt.rockchin.top">
<img alt="Static Badge" src="https://img.shields.io/badge/%E6%9F%A5%E7%9C%8B-%E7%A4%BE%E5%8C%BA%E7%BC%96%E5%86%99%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C-blue">
</a>
<a href="https://www.bilibili.com/video/BV14h4y1w7TC">
@@ -117,24 +117,24 @@
</details>

<details>
-<summary>✅支持预设指令文字</summary>
+<summary>✅支持预设文字</summary>

- 支持以自然语言预设文字,自定义机器人人格等信息
- 详见`config.py`中的`default_prompt`部分
-- 支持设置多个预设情景,并通过!reset、!default等指令控制,详细请查看[wiki指令](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
+- 支持设置多个预设情景,并通过!reset、!default等命令控制,详细请查看[wiki命令](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
</details>

<details>
<summary>✅支持对话、绘图等模型,可玩性更高</summary>

- 现已支持OpenAI的对话`Completion API`和绘图`Image API`
-- 向机器人发送指令`!draw <prompt>`即可使用绘图模型
+- 向机器人发送命令`!draw <prompt>`即可使用绘图模型
</details>
<details>
-<summary>✅支持指令控制热重载、热更新</summary>
+<summary>✅支持命令控制热重载、热更新</summary>

-- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送指令`!reload`进行热重载,无需重启
-- 运行期间允许以管理员账号向机器人发送指令`!update`进行热更新,拉取远程最新代码并执行热重载
+- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送命令`!reload`进行热重载,无需重启
+- 运行期间允许以管理员账号向机器人发送命令`!update`进行热更新,拉取远程最新代码并执行热重载
</details>
<details>
<summary>✅支持插件加载🧩</summary>
@@ -307,7 +307,7 @@ python3 main.py

</details>

-**部署完成后必看: [指令说明](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**
+**部署完成后必看: [命令说明](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**

所有功能查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8)

@@ -332,10 +332,8 @@ python3 main.py

- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - 让机器人能联网!!
- [revLibs](https://github.com/RockChinQ/revLibs) - 将ChatGPT网页版、Claude、Bard、Hugging Chat等破解版接入此项目,关于[官方接口和网页版有什么区别](https://github.com/RockChinQ/QChatGPT/wiki/8-%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
-- [Switcher](https://github.com/RockChinQ/Switcher) - 支持通过指令切换使用的模型
+- [Switcher](https://github.com/RockChinQ/Switcher) - 支持通过命令切换使用的模型
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - `hello_plugin` 的储存库形式,插件开发模板
- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - dominoar编写的诸多新功能插件(语音输出、Ranimg、屏蔽词规则等)
- [dominoar/QCP-NovelAi](https://github.com/dominoar/QCP-NovelAi) - NovelAI 故事叙述与绘画
- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - 文字转语音输出,支持HuggingFace上的[VITS模型](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer),azure语音合成,vits本地语音合成,sovits语音合成
- [RockChinQ/WaitYiYan](https://github.com/RockChinQ/WaitYiYan) - 实时获取百度`文心一言`等待列表人数
- [chordfish-k/QChartGPT_Emoticon_Plugin](https://github.com/chordfish-k/QChartGPT_Emoticon_Plugin) - 使机器人根据回复内容发送表情包
@@ -248,7 +248,7 @@ image_api_params = {
trace_function_calls = False

# 群内回复消息时是否引用原消息
-quote_origin = True
+quote_origin = False

# 群内回复消息时是否at发送者
at_sender = False
@@ -257,7 +257,7 @@ at_sender = False
include_image_description = True

# 消息处理的超时时间,单位为秒
-process_message_timeout = 30
+process_message_timeout = 120

# 回复消息时是否显示[GPT]前缀
show_prefix = False
9 main.py
@@ -191,13 +191,16 @@ def start(first_time_init=False):

    # 配置OpenAI proxy
    import openai
-    openai.proxy = None # 先重置,因为重载后可能需要清除proxy
+    openai.proxies = None # 先重置,因为重载后可能需要清除proxy
    if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None:
-        openai.proxy = config.openai_config["http_proxy"]
+        openai.proxies = {
+            "http": config.openai_config["http_proxy"],
+            "https": config.openai_config["http_proxy"]
+        }

    # 配置openai api_base
    if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
-        openai.api_base = config.openai_config["reverse_proxy"]
+        openai.base_url = config.openai_config["reverse_proxy"]

    # 主启动流程
    database = pkg.database.manager.DatabaseManager()
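The hunk above renames the 0.x proxy and base-URL attributes to their 1.x counterparts. An alternative, per-client way to wire the same settings, sketched under the assumption that httpx is installed; the environment variable name `OPENAI_PROXY` is made up for this example:

```python
# Illustrative sketch only: per-client proxy + base_url with openai-python 1.x.
import os

import httpx
import openai

proxy_url = os.environ.get("OPENAI_PROXY")  # e.g. "http://127.0.0.1:7890" (assumed env var name)

client = openai.OpenAI(
    api_key=os.environ["OPENAI_API_KEY"],
    base_url=os.environ.get("OPENAI_BASE_URL"),  # None keeps the default endpoint
    # `proxies=` is the older httpx spelling; recent httpx releases use `proxy=` instead
    http_client=httpx.Client(proxies=proxy_url) if proxy_url else None,
)
```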
@@ -63,10 +63,10 @@
        "size": "256x256"
    },
    "trace_function_calls": false,
-    "quote_origin": true,
+    "quote_origin": false,
    "at_sender": false,
    "include_image_description": true,
-    "process_message_timeout": 30,
+    "process_message_timeout": 120,
    "show_prefix": false,
    "force_delay_range": [
        1.5,
@@ -5,6 +5,7 @@
import hashlib
import json
import logging
+import threading

import requests

@@ -42,15 +43,20 @@ class DataGatherer:
        只会报告此次请求的使用量,不会报告总量。
        不包含除版本号、使用类型、使用量以外的任何信息,仅供开发者分析使用情况。
        """
-        try:
-            config = pkg.utils.context.get_config()
-            if not config.report_usage:
-                return
-            res = requests.get("http://reports.rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}&msg_source={}".format(subservice_name, self.version_str, count, config.msg_source_adapter))
-            if res.status_code != 200 or res.text != "ok":
-                logging.warning("report to server failed, status_code: {}, text: {}".format(res.status_code, res.text))
-        except:
-            return
+        def thread_func():
+            try:
+                config = pkg.utils.context.get_config()
+                if not config.report_usage:
+                    return
+                res = requests.get("http://reports.rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}&msg_source={}".format(subservice_name, self.version_str, count, config.msg_source_adapter))
+                if res.status_code != 200 or res.text != "ok":
+                    logging.warning("report to server failed, status_code: {}, text: {}".format(res.status_code, res.text))
+            except:
+                return
+
+        threading.Thread(target=thread_func).start()

    def get_usage(self, key_md5):
        return self.usage[key_md5] if key_md5 in self.usage else {}
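The hunk above moves the usage report into a background thread so a slow reporting server cannot block message handling. A generic, self-contained illustration of that fire-and-forget pattern; the endpoint URL and parameters below are placeholders, not the project's real reporting service:

```python
import logging
import threading

import requests


def report_usage_async(endpoint: str, params: dict) -> None:
    """Send a best-effort usage report without blocking the caller."""

    def thread_func():
        try:
            # a short timeout keeps a slow server from hanging the worker thread forever
            res = requests.get(endpoint, params=params, timeout=10)
            if res.status_code != 200 or res.text != "ok":
                logging.warning("report failed: %s %s", res.status_code, res.text)
        except Exception:
            return  # reporting must never break the main flow

    threading.Thread(target=thread_func, daemon=True).start()


# usage (placeholder endpoint):
report_usage_async("http://example.invalid/usage", {"service_name": "qchatgpt.text", "count": 1})
```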
@@ -1,4 +1,5 @@
import openai
+from openai.types.chat import chat_completion_message
import json
import logging

@@ -13,13 +14,14 @@ class ChatCompletionRequest(RequestBase):
    此类保证每一次返回的角色为assistant的信息的finish_reason一定为stop。
    若有函数调用响应,本类的返回瀑布是:函数调用请求->函数调用结果->...->assistant的信息->stop。
    """

    model: str
    messages: list[dict[str, str]]
    kwargs: dict

    stopped: bool = False

-    pending_func_call: dict = None
+    pending_func_call: chat_completion_message.FunctionCall = None

    pending_msg: str

@@ -46,16 +48,18 @@ class ChatCompletionRequest(RequestBase):

    def __init__(
        self,
+        client: openai.Client,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ):
+        self.client = client
        self.model = model
        self.messages = messages.copy()

        self.kwargs = kwargs

-        self.req_func = openai.ChatCompletion.acreate
+        self.req_func = self.client.chat.completions.create

        self.pending_func_call = None

@@ -84,39 +88,48 @@ class ChatCompletionRequest(RequestBase):

        # 拼接kwargs
        args = {**args, **self.kwargs}

+        from openai.types.chat import chat_completion
+
-        resp = self._req(**args)
+        resp: chat_completion.ChatCompletion = self._req(**args)

-        choice0 = resp["choices"][0]
+        choice0 = resp.choices[0]

        # 如果不是函数调用,且finish_reason为stop,则停止迭代
-        if choice0['finish_reason'] == 'stop': # and choice0["finish_reason"] == "stop"
+        if choice0.finish_reason == 'stop': # and choice0["finish_reason"] == "stop"
            self.stopped = True

-        if 'function_call' in choice0['message']:
-            self.pending_func_call = choice0['message']['function_call']
+        if hasattr(choice0.message, 'function_call') and choice0.message.function_call is not None:
+            self.pending_func_call = choice0.message.function_call

            self.append_message(
                role="assistant",
-                content=choice0['message']['content'],
-                function_call=choice0['message']['function_call']
+                content=choice0.message.content,
+                function_call=choice0.message.function_call
            )

            return {
-                "id": resp["id"],
+                "id": resp.id,
                "choices": [
                    {
-                        "index": choice0["index"],
+                        "index": choice0.index,
                        "message": {
                            "role": "assistant",
                            "type": "function_call",
-                            "content": choice0['message']['content'],
-                            "function_call": choice0['message']['function_call']
+                            "content": choice0.message.content,
+                            "function_call": {
+                                "name": choice0.message.function_call.name,
+                                "arguments": choice0.message.function_call.arguments
+                            }
                        },
                        "finish_reason": "function_call"
                    }
                ],
-                "usage": resp["usage"]
+                "usage": {
+                    "prompt_tokens": resp.usage.prompt_tokens,
+                    "completion_tokens": resp.usage.completion_tokens,
+                    "total_tokens": resp.usage.total_tokens
+                }
            }
        else:

@@ -124,19 +137,23 @@ class ChatCompletionRequest(RequestBase):
            # 普通回复一定处于最后方,故不用再追加进内部messages

            return {
-                "id": resp["id"],
+                "id": resp.id,
                "choices": [
                    {
-                        "index": choice0["index"],
+                        "index": choice0.index,
                        "message": {
                            "role": "assistant",
                            "type": "text",
-                            "content": choice0['message']['content']
+                            "content": choice0.message.content
                        },
-                        "finish_reason": choice0["finish_reason"]
+                        "finish_reason": choice0.finish_reason
                    }
                ],
-                "usage": resp["usage"]
+                "usage": {
+                    "prompt_tokens": resp.usage.prompt_tokens,
+                    "completion_tokens": resp.usage.completion_tokens,
+                    "total_tokens": resp.usage.total_tokens
+                }
            }
        else: # 处理函数调用请求

@@ -144,20 +161,20 @@ class ChatCompletionRequest(RequestBase):

            self.pending_func_call = None

-            func_name = cp_pending_func_call['name']
+            func_name = cp_pending_func_call.name
            arguments = {}

            try:

                try:
-                    arguments = json.loads(cp_pending_func_call['arguments'])
+                    arguments = json.loads(cp_pending_func_call.arguments)
                # 若不是json格式的异常处理
                except json.decoder.JSONDecodeError:
                    # 获取函数的参数列表
                    func_schema = get_func_schema(func_name)

                    arguments = {
-                        func_schema['parameters']['required'][0]: cp_pending_func_call['arguments']
+                        func_schema['parameters']['required'][0]: cp_pending_func_call.arguments
                    }

                logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))
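The hunks above replace 0.x dict-style response access with the typed objects returned by the 1.x client. A minimal, self-contained sketch of the new access pattern; the model name and prompt are arbitrary examples:

```python
import os

import openai

client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])

resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one word."}],
)

choice0 = resp.choices[0]
# 1.x returns typed objects, so fields are attributes rather than dict keys
print(choice0.finish_reason)    # e.g. "stop"
print(choice0.message.content)  # the assistant reply text
print(resp.usage.total_tokens)  # prompt tokens plus completion tokens
```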
@@ -1,4 +1,5 @@
import openai
+from openai.types import completion, completion_choice

from .model import RequestBase

@@ -17,10 +18,12 @@ class CompletionRequest(RequestBase):

    def __init__(
        self,
+        client: openai.Client,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ):
+        self.client = client
        self.model = model
        self.prompt = ""

@@ -31,7 +34,7 @@ class CompletionRequest(RequestBase):

        self.kwargs = kwargs

-        self.req_func = openai.Completion.acreate
+        self.req_func = self.client.completions.create

    def __iter__(self):
        return self
@@ -63,49 +66,35 @@ class CompletionRequest(RequestBase):
        if self.stopped:
            raise StopIteration()

-        resp = self._req(
+        resp: completion.Completion = self._req(
            model=self.model,
            prompt=self.prompt,
            **self.kwargs
        )

-        if resp["choices"][0]["finish_reason"] == "stop":
+        if resp.choices[0].finish_reason == "stop":
            self.stopped = True

-        choice0 = resp["choices"][0]
+        choice0: completion_choice.CompletionChoice = resp.choices[0]

-        self.prompt += choice0["text"]
+        self.prompt += choice0.text

        return {
-            "id": resp["id"],
+            "id": resp.id,
            "choices": [
                {
-                    "index": choice0["index"],
+                    "index": choice0.index,
                    "message": {
                        "role": "assistant",
                        "type": "text",
-                        "content": choice0["text"]
+                        "content": choice0.text
                    },
-                    "finish_reason": choice0["finish_reason"]
+                    "finish_reason": choice0.finish_reason
                }
            ],
-            "usage": resp["usage"]
+            "usage": {
+                "prompt_tokens": resp.usage.prompt_tokens,
+                "completion_tokens": resp.usage.completion_tokens,
+                "total_tokens": resp.usage.total_tokens
+            }
        }
-
-
-if __name__ == "__main__":
-    import os
-
-    openai.api_key = os.environ["OPENAI_API_KEY"]
-
-    for resp in CompletionRequest(
-        model="text-davinci-003",
-        messages=[
-            {
-                "role": "user",
-                "content": "Hello, who are you?"
-            }
-        ]
-    ):
-        print(resp)
-        if resp["choices"][0]["finish_reason"] == "stop":
-            break
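The legacy text-completion endpoint follows the same pattern through `client.completions.create`; a small sketch, assuming a completions-capable model such as gpt-3.5-turbo-instruct:

```python
import os

import openai

client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])

resp = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # example model that still serves the completions endpoint
    prompt="Q: What is the capital of France?\nA:",
    max_tokens=16,
)

print(resp.choices[0].text)
print(resp.choices[0].finish_reason)
print(resp.usage.total_tokens)
```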
@@ -8,6 +8,8 @@ import openai

class RequestBase:

+    client: openai.Client
+
    req_func: callable

    def __init__(self, *args, **kwargs):
@@ -17,41 +19,17 @@ class RequestBase:
        import pkg.utils.context as context
        switched, name = context.get_openai_manager().key_mgr.auto_switch()
        logging.debug("切换api-key: switched={}, name={}".format(switched, name))
-        openai.api_key = context.get_openai_manager().key_mgr.get_using_key()
+        self.client.api_key = context.get_openai_manager().key_mgr.get_using_key()

    def _req(self, **kwargs):
        """处理代理问题"""
        import config

-        ret: dict = {}
-        exception: Exception = None
-
-        async def awrapper(**kwargs):
-            nonlocal ret, exception
-
-            try:
-                ret = await self.req_func(**kwargs)
-                logging.debug("接口请求返回:%s", str(ret))
-
-                if config.switch_strategy == 'active':
-                    self._next_key()
-
-                return ret
-            except Exception as e:
-                exception = e
-
-        loop = asyncio.new_event_loop()
-
-        thr = threading.Thread(
-            target=loop.run_until_complete,
-            args=(awrapper(**kwargs),)
-        )
-
-        thr.start()
-        thr.join()
-
-        if exception is not None:
-            raise exception
+        ret = self.req_func(**kwargs)
+        logging.debug("接口请求返回:%s", str(ret))
+
+        if config.switch_strategy == 'active':
+            self._next_key()

        return ret
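The `_req` rewrite above works because the 1.x client call is synchronous here, so the asyncio/threading wrapper is no longer needed. A generic sketch of the related key-rotation idea, passive failover on an authentication failure; the helper below is illustrative and not the project's actual key manager:

```python
import logging

import openai


def request_with_failover(client: openai.OpenAI, keys: list[str], **kwargs):
    """Try each API key in turn until one request succeeds (illustrative only)."""
    last_exc = None
    for key in keys:
        client.api_key = key  # the 1.x client allows swapping the key in place
        try:
            resp = client.chat.completions.create(**kwargs)
            logging.debug("request ok with key ...%s", key[-4:])
            return resp
        except openai.AuthenticationError as e:  # revoked or invalid key, move on
            last_exc = e
            logging.warning("key ...%s rejected, switching", key[-4:])
    raise last_exc
```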
@@ -24,6 +24,8 @@ class OpenAIInteract:
        "size": "256x256",
    }

+    client: openai.Client = None
+
    def __init__(self, api_key: str):

        self.key_mgr = pkg.openai.keymgr.KeysManager(api_key)
@@ -31,7 +33,9 @@ class OpenAIInteract:

        # logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())

-        openai.api_key = self.key_mgr.get_using_key()
+        self.client = openai.Client(
+            api_key=self.key_mgr.get_using_key()
+        )

        pkg.utils.context.set_openai_manager(self)
@@ -48,7 +52,7 @@ class OpenAIInteract:
        cp_parmas = config.completion_api_params.copy()
        del cp_parmas['model']

-        request = select_request_cls(model, messages, cp_parmas)
+        request = select_request_cls(self.client, model, messages, cp_parmas)

        # 请求接口
        for resp in request:
@@ -5,9 +5,8 @@ ChatCompletion - gpt-3.5-turbo 等模型
Completion - text-davinci-003 等模型
此模块封装此两个接口的请求实现,为上层提供统一的调用方式
"""
-import openai, logging, threading, asyncio
-import openai.error as aiE
import tiktoken
+import openai

from pkg.openai.api.model import RequestBase
from pkg.openai.api.completion import CompletionRequest
@@ -53,11 +52,11 @@ IMAGE_MODELS = {
}


-def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase:
+def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> RequestBase:
    if model_name in CHAT_COMPLETION_MODELS:
-        return ChatCompletionRequest(model_name, messages, **args)
+        return ChatCompletionRequest(client, model_name, messages, **args)
    elif model_name in COMPLETION_MODELS:
-        return CompletionRequest(model_name, messages, **args)
+        return CompletionRequest(client, model_name, messages, **args)
    raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
@@ -278,7 +278,7 @@ class Session:
        if resp['choices'][0]['message']['role'] == "assistant" and resp['choices'][0]['message']['content'] != None: # 包含纯文本响应

            if not trace_func_calls:
-                res_text += resp['choices'][0]['message']['content'] + "\n"
+                res_text += resp['choices'][0]['message']['content']
            else:
-                res_text = resp['choices'][0]['message']['content']
+                pending_res_text = resp['choices'][0]['message']['content']
@@ -65,14 +65,14 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
            if not event.is_prevented_default():
                reply = [prefix + text]

-    except openai.error.APIConnectionError as e:
+    except openai.APIConnectionError as e:
        err_msg = str(e)
        if err_msg.__contains__('Error communicating with OpenAI'):
            reply = handle_exception("{}会话调用API失败:{}\n您的网络无法访问OpenAI接口或网络代理不正常".format(session_name, e),
                                     "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
        else:
            reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
-    except openai.error.RateLimitError as e:
+    except openai.RateLimitError as e:
        logging.debug(type(e))
        logging.debug(e.error['message'])
@@ -116,14 +116,14 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
        else:
            reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
                                     "[bot]err:RateLimitError,请重试或联系作者,或等待修复")
-    except openai.error.InvalidRequestError as e:
+    except openai.BadRequestError as e:
        if config.auto_reset and "This model's maximum context length is" in str(e):
            session.reset(persist=True)
            reply = [tips_custom.session_auto_reset_message]
        else:
            reply = handle_exception("{}API调用参数错误:{}\n".format(
                session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
-    except openai.error.ServiceUnavailableError as e:
+    except openai.APIStatusError as e:
        reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
    except Exception as e:
        logging.exception(e)
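The `openai.error.*` hierarchy does not exist in 1.x; the hunks above map each old class to its replacement. A compact sketch of the same mapping in isolation (handler bodies are placeholders):

```python
import logging

import openai

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment


def safe_chat(messages: list[dict]) -> str:
    try:
        resp = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
        return resp.choices[0].message.content
    except openai.APIConnectionError:
        return "network / proxy problem"          # was openai.error.APIConnectionError
    except openai.RateLimitError:
        return "rate limited or quota exhausted"  # was openai.error.RateLimitError
    except openai.BadRequestError:
        return "invalid request parameters"       # was openai.error.InvalidRequestError
    except openai.APIStatusError as e:
        logging.warning("server returned %s", e.status_code)
        return "service unavailable"              # was openai.error.ServiceUnavailableError
    except Exception:
        logging.exception("unexpected error")
        return "unknown error"
```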
File diff suppressed because one or more lines are too long
@@ -1,5 +1,5 @@
requests
-openai~=0.28.0
+openai
dulwich~=0.21.6
colorlog~=6.6.0
yiri-mirai
@@ -10,4 +10,4 @@ Pillow
nakuru-project-idk
CallingGPT
tiktoken
PyYaml
@@ -48,12 +48,12 @@
</details>

<details>
-<summary>✅支持预设指令文字</summary>
+<summary>✅支持预设文字</summary>

- 支持以自然语言预设文字,自定义机器人人格等信息
- 详见`config.py`中的`default_prompt`部分
-- 支持设置多个预设情景,并通过!reset、!default等指令控制,详细请查看[wiki指令](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
-- 支持使用文件存储情景预设文字,并加载: 在`prompts/`目录新建文件写入预设文字,即可通过`!reset <文件名>`指令加载
+- 支持设置多个预设情景,并通过!reset、!default等命令控制,详细请查看[wiki命令](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E5%91%BD%E4%BB%A4)
+- 支持使用文件存储情景预设文字,并加载: 在`prompts/`目录新建文件写入预设文字,即可通过`!reset <文件名>`命令加载
</details>

<details>
@@ -61,25 +61,25 @@

- 使用SQLite进行会话内容持久化
- 最后一次对话一定时间后自动保存,请到`config.py`中修改`session_expire_time`的值以自定义时间
-- 运行期间可使用`!reset` `!list` `!last` `!next` `!prompt`等指令管理会话
+- 运行期间可使用`!reset` `!list` `!last` `!next` `!prompt`等命令管理会话
</details>
<details>
<summary>✅支持对话、绘图等模型,可玩性更高</summary>

- 现已支持OpenAI的对话`Completion API`和绘图`Image API`
-- 向机器人发送指令`!draw <prompt>`即可使用绘图模型
+- 向机器人发送命令`!draw <prompt>`即可使用绘图模型
</details>
<details>
-<summary>✅支持指令控制热重载、热更新</summary>
+<summary>✅支持命令控制热重载、热更新</summary>

-- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送指令`!reload`进行热重载,无需重启
-- 运行期间允许以管理员账号向机器人发送指令`!update`进行热更新,拉取远程最新代码并执行热重载
+- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送命令`!reload`进行热重载,无需重启
+- 运行期间允许以管理员账号向机器人发送命令`!update`进行热更新,拉取远程最新代码并执行热重载
</details>
<details>
<summary>✅支持插件加载🧩</summary>

- 自行实现插件加载器及相关支持
-- 详细查看[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
+- 详细查看[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
</details>
<details>
<summary>✅私聊、群聊黑名单机制</summary>
@@ -153,14 +153,14 @@

<img alt="绘图功能" src="https://github.com/RockChinQ/QChatGPT/blob/master/res/屏幕截图%202022-12-29%20194948.png" width="550" height="348"/>

-### 机器人指令
+### 机器人命令

-目前支持的指令
+目前支持的命令

> `<>` 中的为必填参数,使用时请不要包含`<>`
> `[]` 中的为可选参数,使用时请不要包含`[]`

-#### 用户级别指令
+#### 用户级别命令

> 可以使用`!help`命令来查看命令说明

@@ -174,7 +174,7 @@
!del all 删除本会话对象的所有历史记录
!last 切换到前一次会话
!next 切换到后一次会话
-!reset [使用预设] 重置对象的当前会话,可指定使用的情景预设值(通过!default指令查看可用的)
+!reset [使用预设] 重置对象的当前会话,可指定使用的情景预设值(通过!default命令查看可用的)
!prompt 查看对象当前会话的所有记录
!usage 查看api-key的使用量
!draw <提示语> 进行绘图
@@ -184,7 +184,7 @@
!default 查看可用的情景预设值
```

-#### 管理员指令
+#### 管理员命令

仅管理员私聊机器人时可使用,必须先在`config.py`中的`admin_qq`设置管理员QQ

@@ -197,9 +197,9 @@
!delhst all 删除所有会话的所有历史记录
```
<details>
-<summary>⚙ !cfg 指令及其简化形式详解</summary>
+<summary>⚙ !cfg 命令及其简化形式详解</summary>

-此指令可以在运行期间由管理员通过QQ私聊窗口修改配置信息,**重启之后会失效**。
+此命令可以在运行期间由管理员通过QQ私聊窗口修改配置信息,**重启之后会失效**。

用法:
1. 查看所有配置项及其值
@@ -239,7 +239,7 @@

格式:`!~<配置项名称>`
其中`!~`等价于`!cfg `
-则前述三个指令分别可以简化为:
+则前述三个命令分别可以简化为:
```
!~all
!~default_prompt
@@ -290,11 +290,11 @@ sensitive_word_filter = True
### 预设文字(default模式)

编辑`config.py`中的`default_prompt`字段,预设文字不宜过长(建议1000字以内),目前所有会话都会射到预设文字的影响。
-或将情景预设文字写入到`prompts/`目录下,运行期间即可使用`!reset <文件名>`指令加载,或使用`!default <文件名>`指令将其设为默认
+或将情景预设文字写入到`prompts/`目录下,运行期间即可使用`!reset <文件名>`命令加载,或使用`!default <文件名>`命令将其设为默认

### 预设文字(full_scenario模式)

-将JSON情景写入到`scenario/`目录下,运行期间即可使用`!reset <文件名>`指令加载,或使用`!default <文件名>`指令将其设为默认.
+将JSON情景写入到`scenario/`目录下,运行期间即可使用`!reset <文件名>`命令加载,或使用`!default <文件名>`命令将其设为默认.

JSON情景模板参考`scenario/default_template.json`。

@@ -367,7 +367,7 @@ prompt_submit_length = <模型单次请求token数上限> - 情景预设中token数量

在运行期间,使用管理员QQ账号私聊机器人,发送`!reload`加载修改后的`config.py`的值或编辑后的代码,无需重启
使用管理员账号私聊机器人,发送`!update`拉取最新代码并进行热更新,无需重启
-详见前述`管理员指令`段落
+详见前述`管理员命令`段落

### 群内无需@响应规则

@@ -4,7 +4,7 @@

#### 自动更新

-由管理员QQ私聊机器人QQ发送`!update`指令
+由管理员QQ私聊机器人QQ发送`!update`命令

#### 手动更新

@@ -1,7 +1,7 @@
QChatGPT 插件开发Wiki

-> 请先阅读[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
-> 请先阅读[技术信息页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8A%80%E6%9C%AF%E4%BF%A1%E6%81%AF)
+> 请先阅读[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
+> 请先阅读[技术信息页](https://github.com/RockChinQ/QChatGPT/wiki/4-%E6%8A%80%E6%9C%AF%E4%BF%A1%E6%81%AF)
> 建议先阅读本项目源码,了解项目架构

> 问题、需求请到仓库issue发起
@@ -354,18 +354,18 @@ PersonNormalMessageReceived = "person_normal_message_received"
"""

PersonCommandSent = "person_command_sent"
-"""判断为应该处理的私聊指令时触发
+"""判断为应该处理的私聊命令时触发
kwargs:
launcher_type: str 发起对象类型(group/person)
launcher_id: int 发起对象ID(群号/QQ号)
sender_id: int 发送者ID(QQ号)
-command: str 指令
+command: str 命令
params: list[str] 参数列表
-text_message: str 完整指令文本
+text_message: str 完整命令文本
is_admin: bool 是否为管理员

returns (optional):
-alter: str 修改后的完整指令文本
+alter: str 修改后的完整命令文本
reply: list 回复消息组件列表,元素为YiriMirai支持的消息组件
"""

@@ -383,18 +383,18 @@ GroupNormalMessageReceived = "group_normal_message_received"
"""

GroupCommandSent = "group_command_sent"
-"""判断为应该处理的群聊指令时触发
+"""判断为应该处理的群聊命令时触发
kwargs:
launcher_type: str 发起对象类型(group/person)
launcher_id: int 发起对象ID(群号/QQ号)
sender_id: int 发送者ID(QQ号)
-command: str 指令
+command: str 命令
params: list[str] 参数列表
-text_message: str 完整指令文本
+text_message: str 完整命令文本
is_admin: bool 是否为管理员

returns (optional):
-alter: str 修改后的完整指令文本
+alter: str 修改后的完整命令文本
reply: list 回复消息组件列表,元素为YiriMirai支持的消息组件
"""

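The two event blocks above document the kwargs a command-event handler receives. A minimal plugin sketch reacting to `PersonCommandSent`, written against the decorator-style API of the `hello_plugin` template; the imports, decorators, and method names are assumptions drawn from that template, not from this diff:

```python
# Hypothetical plugin sketch; assumes the hello_plugin-style plugin API.
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost


@register(name="PingCommand", description="reply 'pong' to !ping", version="0.1", author="example")
class PingCommandPlugin(Plugin):

    def __init__(self, plugin_host: PluginHost):
        pass

    # fired when a private-chat command is about to be processed
    @on(PersonCommandSent)
    def on_person_command(self, event: EventContext, **kwargs):
        command = kwargs["command"]  # command name without the leading "!"
        params = kwargs["params"]    # list[str] of arguments
        if command == "ping":
            event.add_return("reply", ["pong " + " ".join(params)])
            event.prevent_default()  # stop the built-in command handling
```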
24 tests/proxy_test/forward_proxy_test.py (new file)
@@ -0,0 +1,24 @@
import os

import openai

client = openai.Client(
    api_key=os.environ["OPENAI_API_KEY"],
)

openai.proxies = {
    'http': 'http://127.0.0.1:7890',
    'https': 'http://127.0.0.1:7890',
}

resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {
            "role": "user",
            "content": "Hello, how are you?",
        }
    ]
)

print(resp)