Compare commits

...

28 Commits

Author SHA1 Message Date
RockChinQ
fde6822b5c chore: release v3.1.1 2024-05-08 02:28:40 +00:00
Junyan Qin
930321bcf1 Merge pull request #762 from RockChinQ/feat/deepseek
Feat: support deepseek models
2024-05-07 22:48:37 +08:00
RockChinQ
c45931363a feat: deepseek config migration 2024-05-07 14:45:59 +00:00
RockChinQ
9c6491e5ee feat: support deepseek models 2024-05-07 14:28:52 +00:00
RockChinQ
9bc248f5bc feat: remove the submit-messages-tokens config entry 2024-05-07 12:32:54 +00:00
Junyan Qin
becac2fde5 doc(README.md): add GitHub Trending badge 2024-04-29 21:00:22 +08:00
RockChinQ
1e1a103882 feat: aiocqhttp allows image URLs as arguments 2024-04-11 03:26:12 +00:00
RockChinQ
e5cffb7c9b chore: release v3.1.0.4 2024-04-06 16:51:15 +08:00
RockChinQ
e2becf7777 feat: remove parent-process check (#750) 2024-04-06 16:50:35 +08:00
RockChinQ
a6b875a242 fix: wrong arguments for the GroupMessageReceived event 2024-04-04 16:50:45 +08:00
RockChinQ
b5e67f3df8 fix: RuntimeContainer wrongly passed on content function calls 2024-04-04 15:08:40 +08:00
RockChinQ
2093fb16a7 chore: release v3.1.0.3 2024-04-02 22:33:36 +08:00
RockChinQ
fc9a9d2386 fix: missing psutil dependency 2024-04-02 22:33:06 +08:00
RockChinQ
5e69f78f7e chore: drop support for Python 3.9 2024-04-01 18:16:49 +08:00
RockChinQ
6919bece77 chore: release v3.1.0.2 2024-03-31 14:41:32 +08:00
RockChinQ
8b003739f1 feat: message.content supports mirai.MessageChain objects (#741) 2024-03-31 14:38:15 +08:00
RockChinQ
2e9229a6ad fix: working directory must be the directory containing main.py 2024-03-30 21:34:22 +08:00
RockChinQ
5a3e7fe8ee perf: prevent launching by double-click 2024-03-30 21:28:42 +08:00
RockChinQ
7b3d7e7bd6 fix: incorrect loading flow for JSON config files 2024-03-30 19:01:59 +08:00
Junyan Qin
fdd7c1864d feat(chatcmpl): catch exceptions from function calls (#749) 2024-03-30 09:45:30 +00:00
Junyan Qin
cac5a5adff fix(qq-botpy): duplicate msg_seq when a single query produces multiple group replies 2024-03-30 02:58:37 +00:00
RockChinQ
63307633c2 feat: chatcmpl requests also ignore empty system prompt messages (#745) 2024-03-29 17:34:09 +08:00
RockChinQ
387dfa39ff fix: content filter not taking effect (#743) 2024-03-29 17:24:42 +08:00
Junyan Qin
1f797f899c doc(README.md): add usage count badge 2024-03-26 15:25:08 +08:00
RockChinQ
092bb0a1e2 chore: release v3.1.0.1 2024-03-23 22:50:54 +08:00
RockChinQ
2c3399e237 perf: double-condition check for sensitive-word migration 2024-03-23 22:41:21 +08:00
RockChinQ
835275b47f fix: improper comparisons against the launcher_type enum in several places (#736) 2024-03-23 22:39:42 +08:00
Junyan Qin
7b060ce3f9 doc(README.md): update wakapi path 2024-03-23 19:14:43 +08:00
29 changed files with 194 additions and 66 deletions

.gitignore

@@ -34,4 +34,4 @@ bard.json
 res/instance_id.json
 .DS_Store
 /data
-botpy.log
+botpy.log*


@@ -2,34 +2,30 @@
 <p align="center">
 <img src="https://qchatgpt.rockchin.top/logo.png" alt="QChatGPT" width="180" />
 </p>
 <div align="center">

 # QChatGPT

+<a href="https://trendshift.io/repositories/6187" target="_blank"><img src="https://trendshift.io/api/badge/repositories/6187" alt="RockChinQ%2FQChatGPT | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>

 [![GitHub release (latest by date)](https://img.shields.io/github/v/release/RockChinQ/QChatGPT)](https://github.com/RockChinQ/QChatGPT/releases/latest)
 <a href="https://hub.docker.com/repository/docker/rockchin/qchatgpt">
 <img src="https://img.shields.io/docker/pulls/rockchin/qchatgpt?color=blue" alt="docker pull">
 </a>
-![Wakapi Count](https://wakapi.dev/api/badge/RockChinQ/interval:any/project:QChatGPT)
-<a href="https://codecov.io/gh/RockChinQ/QChatGPT" >
-<img src="https://codecov.io/gh/RockChinQ/QChatGPT/graph/badge.svg?token=pjxYIL2kbC"/>
-</a>
+![Dynamic JSON Badge](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.qchatgpt.rockchin.top%2Fapi%2Fv2%2Fview%2Frealtime%2Fcount_query%3Fminute%3D10080&query=%24.data.count&label=%E4%BD%BF%E7%94%A8%E9%87%8F%EF%BC%887%E6%97%A5%EF%BC%89)
+![Wakapi Count](https://wakapi.rockchin.top/api/badge/RockChinQ/interval:any/project:QChatGPT)
 <br/>
-<img src="https://img.shields.io/badge/python-3.9 | 3.10 | 3.11-blue.svg" alt="python">
+<img src="https://img.shields.io/badge/python-3.10 | 3.11 | 3.12-blue.svg" alt="python">
 <a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=66-aWvn8cbP4c1ut_1YYkvvGVeEtyTH8&authKey=pTaKBK5C%2B8dFzQ4XlENf6MHTCLaHnlKcCRx7c14EeVVlpX2nRSaS8lJm8YeM4mCU&noverify=0&group_code=195992197">
 <img alt="Static Badge" src="https://img.shields.io/badge/%E5%AE%98%E6%96%B9%E7%BE%A4-195992197-purple">
 </a>
 <a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=nC80H57wmKPwRDLFeQrDDjVl81XuC21P&authKey=2wTUTfoQ5v%2BD4C5zfpuR%2BSPMDqdXgDXA%2FS2wHI1NxTfWIG%2B%2FqK08dgyjMMOzhXa9&noverify=0&group_code=248432104">
 <img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-248432104-purple">
 </a>
 <a href="https://www.bilibili.com/video/BV14h4y1w7TC">
 <img alt="Static Badge" src="https://img.shields.io/badge/%E8%A7%86%E9%A2%91%E6%95%99%E7%A8%8B-208647">
 </a>
 <a href="https://www.bilibili.com/video/BV11h4y1y74H">
 <img alt="Static Badge" src="https://img.shields.io/badge/Linux%E9%83%A8%E7%BD%B2%E8%A7%86%E9%A2%91-208647">
 </a>
+<a href="https://codecov.io/gh/RockChinQ/QChatGPT" >
+<img src="https://codecov.io/gh/RockChinQ/QChatGPT/graph/badge.svg?token=pjxYIL2kbC"/>
+</a>

 ## Documentation
 <a href="https://qchatgpt.rockchin.top">Project homepage</a>

main.py

@@ -1,5 +1,6 @@
 # QChatGPT terminal entry point
 # Dependency checks are resolved at this level.
+# QChatGPT/main.py

 asciiart = r"""
 ___ ___ _ _ ___ ___ _____
@@ -49,6 +50,23 @@ async def main_entry():

 if __name__ == '__main__':
     import os

+    # Check that this directory has a main.py containing the QChatGPT marker string
+    invalid_pwd = False
+
+    if not os.path.exists('main.py'):
+        invalid_pwd = True
+    else:
+        with open('main.py', 'r', encoding='utf-8') as f:
+            content = f.read()
+        if "QChatGPT/main.py" not in content:
+            invalid_pwd = True
+
+    if invalid_pwd:
+        print("Please run this program from the QChatGPT project root directory, via the command line.")
+        input("Press any key to exit...")
+        exit(0)
+
     import asyncio
     asyncio.run(main_entry())
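The guard above keys on the literal marker string "QChatGPT/main.py" (the comment added in the first hunk) rather than on the file name alone, since any directory could contain an unrelated main.py. A standalone sketch of the same check; the function name is hypothetical:

import os

MARKER = "QChatGPT/main.py"  # marker string the real main.py embeds as a comment

def cwd_is_project_root() -> bool:
    # True only if the working directory holds a main.py containing the marker.
    if not os.path.exists("main.py"):
        return False
    with open("main.py", "r", encoding="utf-8") as f:
        return MARKER in f.read()

print(cwd_is_project_root())  # False when run from any other directory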

View File

@@ -19,7 +19,13 @@ class JSONConfigFile(file_model.ConfigFile):
         return os.path.exists(self.config_file_name)

     async def create(self):
-        shutil.copyfile(self.template_file_name, self.config_file_name)
+        if self.template_file_name is not None:
+            shutil.copyfile(self.template_file_name, self.config_file_name)
+        elif self.template_data is not None:
+            with open(self.config_file_name, "w", encoding="utf-8") as f:
+                json.dump(self.template_data, f, indent=4, ensure_ascii=False)
+        else:
+            raise ValueError("template_file_name or template_data must be provided")

     async def load(self) -> dict:
@@ -27,12 +33,11 @@ class JSONConfigFile(file_model.ConfigFile):
             await self.create()

-        with open(self.config_file_name, "r", encoding="utf-8") as f:
-            cfg = json.load(f)
-
-        # Fill in missing keys from the template file
-        with open(self.template_file_name, "r", encoding="utf-8") as f:
-            self.template_data = json.load(f)
+        # Fill in missing keys from the template
+        if self.template_file_name is not None:
+            with open(self.template_file_name, "r", encoding="utf-8") as f:
+                self.template_data = json.load(f)
+
+        with open(self.config_file_name, "r", encoding="utf-8") as f:
+            cfg = json.load(f)

         for key in self.template_data:
             if key not in cfg:
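With this change a config file can be backed either by a template file or by an in-memory template dict. A minimal runnable sketch of create()'s fallback chain, extracted from the diff; the function name is hypothetical:

import json
import shutil

def create_config(config_path: str, template_path: str | None = None,
                  template_data: dict | None = None) -> None:
    # Same precedence as the diff: file template first, then dict template, else error.
    if template_path is not None:
        shutil.copyfile(template_path, config_path)
    elif template_data is not None:
        with open(config_path, "w", encoding="utf-8") as f:
            json.dump(template_data, f, indent=4, ensure_ascii=False)
    else:
        raise ValueError("template_path or template_data must be provided")

create_config("runtime.json", template_data={"timeout": 120})
with open("runtime.json", encoding="utf-8") as f:
    print(json.load(f))  # {'timeout': 120}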


@@ -14,7 +14,7 @@ class SensitiveWordMigration(migration.Migration):
     async def need_migrate(self) -> bool:
         """Determine whether this migration needs to run in the current environment
         """
-        return os.path.exists("data/config/sensitive-words.json")
+        return os.path.exists("data/config/sensitive-words.json") and not os.path.exists("data/metadata/sensitive-words.json")

     async def run(self):
         """Run the migration


@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from .. import migration
+
+
+@migration.migration_class("deepseek-config-completion", 5)
+class DeepseekConfigCompletionMigration(migration.Migration):
+    """Deepseek config completion migration
+    """
+
+    async def need_migrate(self) -> bool:
+        """Determine whether this migration needs to run in the current environment
+        """
+        return 'deepseek-chat-completions' not in self.ap.provider_cfg.data['requester'] \
+            or 'deepseek' not in self.ap.provider_cfg.data['keys']
+
+    async def run(self):
+        """Run the migration
+        """
+        if 'deepseek-chat-completions' not in self.ap.provider_cfg.data['requester']:
+            self.ap.provider_cfg.data['requester']['deepseek-chat-completions'] = {
+                'base-url': 'https://api.deepseek.com',
+                'args': {},
+                'timeout': 120,
+            }
+
+        if 'deepseek' not in self.ap.provider_cfg.data['keys']:
+            self.ap.provider_cfg.data['keys']['deepseek'] = []
+
+        await self.ap.provider_cfg.dump_config()
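Both completions are guarded by a key check, so the migration is idempotent: rerunning it on an already-completed config changes nothing. The same logic exercised on a plain dict; the helper name is hypothetical:

provider_cfg = {"requester": {}, "keys": {}}

def complete_deepseek(cfg: dict) -> None:
    # Mirrors the migration's two guarded completions.
    if "deepseek-chat-completions" not in cfg["requester"]:
        cfg["requester"]["deepseek-chat-completions"] = {
            "base-url": "https://api.deepseek.com",
            "args": {},
            "timeout": 120,
        }
    if "deepseek" not in cfg["keys"]:
        cfg["keys"]["deepseek"] = []

complete_deepseek(provider_cfg)
before = repr(provider_cfg)
complete_deepseek(provider_cfg)  # second run is a no-op
assert repr(provider_cfg) == before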


@@ -13,6 +13,7 @@ required_deps = {
     "tiktoken": "tiktoken",
     "yaml": "pyyaml",
     "aiohttp": "aiohttp",
+    "psutil": "psutil",
 }


@@ -94,7 +94,7 @@ class Conversation(pydantic.BaseModel):
 class Session(pydantic.BaseModel):
-    """Session; one Session corresponds to one {launcher_type}_{launcher_id}"""
+    """Session; one Session corresponds to one {launcher_type.value}_{launcher_id}"""

     launcher_type: LauncherTypes

     launcher_id: int


@@ -5,6 +5,7 @@ import importlib
 from .. import stage, app
 from ...config import migration
 from ...config.migrations import m001_sensitive_word_migration, m002_openai_config_migration, m003_anthropic_requester_cfg_completion, m004_moonshot_cfg_completion
+from ...config.migrations import m005_deepseek_cfg_completion

 @stage.stage_class("MigrationStage")


@@ -25,22 +25,24 @@ class BanSessionCheckStage(stage.PipelineStage):
         sess_list = self.ap.pipeline_cfg.data['access-control'][mode]

-        if (query.launcher_type == 'group' and 'group_*' in sess_list) \
-            or (query.launcher_type == 'person' and 'person_*' in sess_list):
+        if (query.launcher_type.value == 'group' and 'group_*' in sess_list) \
+            or (query.launcher_type.value == 'person' and 'person_*' in sess_list):
             found = True
         else:
             for sess in sess_list:
-                if sess == f"{query.launcher_type}_{query.launcher_id}":
+                if sess == f"{query.launcher_type.value}_{query.launcher_id}":
                     found = True
                     break

-        result = False
+        ctn = False

-        if mode == 'blacklist':
-            result = found
+        if mode == 'whitelist':
+            ctn = found
+        else:
+            ctn = not found

         return entities.StageProcessResult(
-            result_type=entities.ResultType.CONTINUE if not result else entities.ResultType.INTERRUPT,
+            result_type=entities.ResultType.CONTINUE if ctn else entities.ResultType.INTERRUPT,
             new_query=query,
-            debug_notice=f'Ignoring message per access control: {query.launcher_type}_{query.launcher_id}' if result else ''
+            console_notice=f'Ignoring message per access control: {query.launcher_type.value}_{query.launcher_id}' if not ctn else ''
         )
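The old code only ever set result in the blacklist branch, so whitelist mode could never interrupt a query; the rewrite folds both modes into a single continue flag. A standalone check of the corrected decision table; the function name is hypothetical:

def should_continue(mode: str, found: bool) -> bool:
    # ctn in the diff: continue on whitelist hits and on blacklist misses.
    return found if mode == "whitelist" else not found

for mode in ("whitelist", "blacklist"):
    for found in (True, False):
        print(f"{mode}, found={found} -> continue={should_continue(mode, found)}")
# whitelist: continue only when the session is listed
# blacklist: continue only when it is not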


@@ -25,7 +25,7 @@ class ContentFilterStage(stage.PipelineStage):
     async def initialize(self):
         filters_required = [
-            "content-filter"
+            "content-ignore",
         ]

         if self.ap.pipeline_cfg.data['check-sensitive-words']:
@@ -135,9 +135,17 @@ class ContentFilterStage(stage.PipelineStage):
                 query
             )
         elif stage_inst_name == 'PostContentFilterStage':
-            return await self._post_process(
-                query.resp_messages[-1].content,
-                query
-            )
+            # Only handle the case where query.resp_messages[-1].content is a str
+            if isinstance(query.resp_messages[-1].content, str):
+                return await self._post_process(
+                    query.resp_messages[-1].content,
+                    query
+                )
+            else:
+                self.ap.logger.debug("resp_messages[-1] is not a str; skipping content filter check.")
+                return entities.StageProcessResult(
+                    result_type=entities.ResultType.CONTINUE,
+                    new_query=query
+                )
         else:
             raise ValueError(f'Unknown stage_inst_name: {stage_inst_name}')


@@ -56,8 +56,19 @@ class LongTextProcessStage(stage.PipelineStage):
         await self.strategy_impl.initialize()

     async def process(self, query: core_entities.Query, stage_inst_name: str) -> entities.StageProcessResult:
-        if len(str(query.resp_message_chain)) > self.ap.platform_cfg.data['long-text-process']['threshold']:
+        # Check whether the chain contains any non-Plain component
+        contains_non_plain = False
+        for msg in query.resp_message_chain:
+            if not isinstance(msg, Plain):
+                contains_non_plain = True
+                break
+
+        if contains_non_plain:
+            self.ap.logger.debug("Message contains non-Plain components; skipping long-message processing.")
+        elif len(str(query.resp_message_chain)) > self.ap.platform_cfg.data['long-text-process']['threshold']:
             query.resp_message_chain = MessageChain(await self.strategy_impl.process(str(query.resp_message_chain), query))
+
         return entities.StageProcessResult(
             result_type=entities.ResultType.CONTINUE,
             new_query=query
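str() on a message chain flattens non-text components into placeholder text, so the stage now skips long-text conversion whenever the response contains anything besides Plain segments. A minimal sketch, assuming the yiri-mirai message types this project uses:

from mirai import MessageChain, Plain, Image  # assumes yiri-mirai is installed

chain = MessageChain([Plain("x" * 2000), Image(url="https://example.com/a.png")])

# Same walk as the diff, condensed to a generator expression.
contains_non_plain = any(not isinstance(msg, Plain) for msg in chain)
print(contains_non_plain)  # True -> long-text processing is skipped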


@@ -44,7 +44,7 @@ class ChatMessageHandler(handler.MessageHandler):
             query.resp_messages.append(
                 llm_entities.Message(
                     role='plugin',
-                    content=str(mc),
+                    content=mc,
                 )
             )


@@ -59,7 +59,7 @@ class RateLimit(stage.PipelineStage):
             )
         elif stage_inst_name == "ReleaseRateLimitOccupancy":
             await self.algo.release_access(
-                query.launcher_type,
+                query.launcher_type.value,
                 query.launcher_id,
             )
             return entities.StageProcessResult(


@@ -34,7 +34,10 @@ class ResponseWrapper(stage.PipelineStage):
                 new_query=query
             )
         elif query.resp_messages[-1].role == 'plugin':
-            query.resp_message_chain = mirai.MessageChain(query.resp_messages[-1].content)
+            if not isinstance(query.resp_messages[-1].content, mirai.MessageChain):
+                query.resp_message_chain = mirai.MessageChain(query.resp_messages[-1].content)
+            else:
+                query.resp_message_chain = query.resp_messages[-1].content

             yield entities.StageProcessResult(
                 result_type=entities.ResultType.CONTINUE,


@@ -82,8 +82,8 @@ class PlatformManager:
             event_ctx = await self.ap.plugin_mgr.emit_event(
                 event=events.GroupMessageReceived(
-                    launcher_type='person',
-                    launcher_id=event.sender.id,
+                    launcher_type='group',
+                    launcher_id=event.group.id,
                     sender_id=event.sender.id,
                     message_chain=event.message_chain,
                     query=None


@@ -30,7 +30,14 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
             msg_id = msg.id
             msg_time = msg.time
         elif type(msg) is mirai.Image:
-            msg_list.append(aiocqhttp.MessageSegment.image(msg.path))
+            arg = ''
+
+            if msg.url:
+                arg = msg.url
+            elif msg.path:
+                arg = msg.path
+
+            msg_list.append(aiocqhttp.MessageSegment.image(arg))
         elif type(msg) is mirai.At:
             msg_list.append(aiocqhttp.MessageSegment.at(msg.target))
         elif type(msg) is mirai.AtAll:


@@ -368,11 +368,15 @@ class OfficialAdapter(adapter_model.MessageSourceAdapter):
     member_openid_mapping: OpenIDMapping[str, int] = None
     group_openid_mapping: OpenIDMapping[str, int] = None

+    group_msg_seq = None
+
     def __init__(self, cfg: dict, ap: app.Application):
         """Initialize the adapter"""
         self.cfg = cfg
         self.ap = ap
+        self.group_msg_seq = 1

         switchs = {}

         for intent in cfg["intents"]:
@@ -419,8 +423,6 @@ class OfficialAdapter(adapter_model.MessageSourceAdapter):
         message_list = self.message_converter.yiri2target(message)

-        msg_seq = 1
-
         for msg in message_list:
             args = {}
@@ -462,8 +464,8 @@ class OfficialAdapter(adapter_model.MessageSourceAdapter):
                 args["msg_id"] = cached_message_ids[
                     str(message_source.message_chain.message_id)
                 ]
-                args["msg_seq"] = msg_seq
-                msg_seq += 1
+                args["msg_seq"] = self.group_msg_seq
+                self.group_msg_seq += 1

             await self.bot.api.post_group_message(**args)

     async def is_muted(self, group_id: int) -> bool:
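With the counter local to the send method it restarted at 1 on every call, so a second reply to the same group message reused msg_seq 1 — the duplicate-seq problem named in commit cac5a5adff. Hoisting it to the instance keeps it monotonic across calls. The pattern in isolation; class and method names are hypothetical:

class GroupSender:
    def __init__(self) -> None:
        self.group_msg_seq = 1  # instance state, survives across send() calls

    def send(self, parts: list[str]) -> list[int]:
        # Assign one strictly increasing seq per outgoing part.
        seqs = []
        for _ in parts:
            seqs.append(self.group_msg_seq)
            self.group_msg_seq += 1
        return seqs

s = GroupSender()
print(s.send(["reply 1"]))         # [1]
print(s.send(["reply 2a", "2b"]))  # [2, 3] -- seq 1 is not reused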


@@ -4,6 +4,8 @@ import typing
 import enum

 import pydantic
+import mirai


 class FunctionCall(pydantic.BaseModel):
     name: str
@@ -28,7 +30,7 @@ class Message(pydantic.BaseModel):
     name: typing.Optional[str] = None
     """Name; set only in function call returns"""

-    content: typing.Optional[str] = None
+    content: typing.Optional[str] | typing.Optional[mirai.MessageChain] = None
     """Content"""

     function_call: typing.Optional[FunctionCall] = None
@@ -41,7 +43,7 @@ class Message(pydantic.BaseModel):
     def readable_str(self) -> str:
         if self.content is not None:
-            return self.content
+            return str(self.content)
         elif self.function_call is not None:
             return f'{self.function_call.name}({self.function_call.arguments})'
         elif self.tool_calls is not None:
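The annotation `typing.Optional[str] | typing.Optional[mirai.MessageChain]` flattens to the same union as `typing.Union[str, mirai.MessageChain, None]`, since unions deduplicate their members; readable_str() switches to str(content) because content is no longer guaranteed to be a string. A quick equivalence check on Python 3.10+ (which matches this release dropping 3.9); list stands in for mirai.MessageChain:

import typing

a = typing.Optional[str] | typing.Optional[list]  # the diff's spelling
b = typing.Union[str, list, None]                 # the conventional spelling
print(a == b)  # True: nested Optionals flatten into one union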


@@ -37,7 +37,7 @@ class AnthropicMessages(api.LLMAPIRequester):
         args["model"] = query.use_model.name if query.use_model.model_name is None else query.use_model.model_name

         req_messages = [  # req_messages is class-internal only; external state is synced via query.messages
-            m.dict(exclude_none=True) for m in query.prompt.messages
+            m.dict(exclude_none=True) for m in query.prompt.messages if m.content.strip() != ""
         ] + [m.dict(exclude_none=True) for m in query.messages]

         # Remove all messages with role=system & content=''


@@ -93,11 +93,12 @@ class OpenAIChatCompletions(api.LLMAPIRequester):
         pending_tool_calls = []

         req_messages = [  # req_messages is class-internal only; external state is synced via query.messages
-            m.dict(exclude_none=True) for m in query.prompt.messages
+            m.dict(exclude_none=True) for m in query.prompt.messages if m.content.strip() != ""
         ] + [m.dict(exclude_none=True) for m in query.messages]

         # req_messages.append({"role": "user", "content": str(query.message_chain)})

         # First request
         msg = await self._closure(req_messages, query.use_model, query.use_funcs)

         yield msg
@@ -106,23 +107,36 @@ class OpenAIChatCompletions(api.LLMAPIRequester):
         req_messages.append(msg.dict(exclude_none=True))

         # Keep requesting as long as there are pending tool calls
         while pending_tool_calls:
             for tool_call in pending_tool_calls:
-                func = tool_call.function
-
-                parameters = json.loads(func.arguments)
-
-                func_ret = await self.ap.tool_mgr.execute_func_call(
-                    query, func.name, parameters
-                )
-
-                msg = llm_entities.Message(
-                    role="tool", content=json.dumps(func_ret, ensure_ascii=False), tool_call_id=tool_call.id
-                )
-
-                yield msg
-
-                req_messages.append(msg.dict(exclude_none=True))
+                try:
+                    func = tool_call.function
+
+                    parameters = json.loads(func.arguments)
+
+                    func_ret = await self.ap.tool_mgr.execute_func_call(
+                        query, func.name, parameters
+                    )
+
+                    msg = llm_entities.Message(
+                        role="tool", content=json.dumps(func_ret, ensure_ascii=False), tool_call_id=tool_call.id
+                    )
+
+                    yield msg
+
+                    req_messages.append(msg.dict(exclude_none=True))
+                except Exception as e:
+                    # On error, append an error message to req_messages
+                    err_msg = llm_entities.Message(
+                        role="tool", content=f"err: {e}", tool_call_id=tool_call.id
+                    )
+
+                    yield err_msg
+
+                    req_messages.append(
+                        err_msg.dict(exclude_none=True)
+                    )

         # All calls handled; request again
         msg = await self._closure(req_messages, query.use_model, query.use_funcs)
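Before this change, one plugin function raising (or returning arguments json.loads chokes on) aborted the whole tool-call loop; now the failure is fed back to the model as an ordinary role="tool" message so it can respond to the error. The fallback shape in isolation; the function name is hypothetical:

import json

def run_tool_call(execute, name: str, raw_args: str, call_id: str) -> dict:
    # Produce a tool message whether the call succeeds or raises.
    try:
        result = execute(name, json.loads(raw_args))
        return {"role": "tool", "content": json.dumps(result, ensure_ascii=False),
                "tool_call_id": call_id}
    except Exception as e:
        return {"role": "tool", "content": f"err: {e}", "tool_call_id": call_id}

print(run_tool_call(lambda n, a: {"ok": True}, "f", "{}", "1"))
print(run_tool_call(lambda n, a: 1 / 0, "f", "{}", "2"))  # content: 'err: division by zero'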


@@ -0,0 +1,15 @@
+from __future__ import annotations
+
+from ....core import app
+
+from . import chatcmpl
+from .. import api
+
+
+@api.requester_class("deepseek-chat-completions")
+class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
+    """Deepseek ChatCompletion API requester"""
+
+    def __init__(self, ap: app.Application):
+        self.requester_cfg = ap.provider_cfg.data['requester']['deepseek-chat-completions']
+        self.ap = ap


@@ -6,7 +6,7 @@ from . import entities
 from ...core import app

 from . import token, api
-from .apis import chatcmpl, anthropicmsgs, moonshotchatcmpl
+from .apis import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl

 FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"


@@ -35,7 +35,7 @@ class ToolManager:
         for plugin in self.ap.plugin_mgr.plugins:
             for function in plugin.content_functions:
                 if function.name == name:
-                    return function, plugin
+                    return function, plugin.plugin_inst

         return None, None

     async def get_all_functions(self) -> list[entities.LLMFunction]:


@@ -1 +1 @@
-semantic_version = "v3.1.0"
+semantic_version = "v3.1.1"


@@ -12,4 +12,5 @@ PyYaml
 aiohttp
 pydantic
 websockets
-urllib3
+urllib3
+psutil


@@ -59,6 +59,11 @@
             "name": "moonshot-v1-128k",
             "requester": "moonshot-chat-completions",
             "token_mgr": "moonshot"
+        },
+        {
+            "name": "deepseek-chat",
+            "requester": "deepseek-chat-completions",
+            "token_mgr": "deepseek"
         }
     ]
 }


@@ -25,7 +25,6 @@
         "api-key": "",
         "api-secret": ""
     },
-    "submit-messages-tokens": 3072,
     "rate-limit": {
         "strategy": "drop",
         "algo": "fixwin",


@@ -9,6 +9,9 @@
         ],
         "moonshot": [
             "sk-1234567890"
+        ],
+        "deepseek": [
+            "sk-1234567890"
         ]
     },
     "requester": {
@@ -28,6 +31,11 @@
             "base-url": "https://api.moonshot.cn/v1",
             "args": {},
             "timeout": 120
+        },
+        "deepseek-chat-completions": {
+            "base-url": "https://api.deepseek.com",
+            "args": {},
+            "timeout": 120
         }
     },
     "model": "gpt-3.5-turbo",