Compare commits

...

27 Commits

Author SHA1 Message Date
RockChinQ
eebd6a6ba3 chore: release v2.6.5 2023-11-14 23:16:02 +08:00
Junyan Qin
0407f3e4ac Merge pull request #599 from RockChinQ/refactor/modern-openai-api-style
Refactor: 修改 情景预设 置入风格
2023-11-14 21:36:25 +08:00
RockChinQ
5abca84437 debug: 添加请求参数输出 2023-11-14 21:35:02 +08:00
GitHub Actions
d2776cc1e6 Update override-all.json 2023-11-14 13:06:22 +00:00
RockChinQ
9fe0ee2b77 refactor: 使用system role置入default prompt 2023-11-14 21:06:00 +08:00
Junyan Qin
b68daac323 Merge pull request #598 from RockChinQ/perf/import-style
Refactor: 修改引入风格
2023-11-13 22:00:27 +08:00
RockChinQ
665de5dc43 refactor: 修改引入风格 2023-11-13 21:59:23 +08:00
RockChinQ
e3b280758c chore: 发布更新公告 2023-11-13 18:03:26 +08:00
RockChinQ
374ae25d9c fix: 启动时自动解决依赖后不正确的异常处理 2023-11-12 23:16:09 +08:00
RockChinQ
c86529ac99 feat: 启动时不再自动更新websockets依赖 2023-11-12 22:59:49 +08:00
RockChinQ
6309f1fb78 chore(deps): 更换为自有分支yiri-mirai-rc 2023-11-12 20:31:07 +08:00
RockChinQ
c246fb6d8e chore: release v2.6.4 2023-11-12 14:42:48 +08:00
RockChinQ
ec6c041bcf ci(Dockerfile): 修复依赖安装问题 2023-11-12 14:42:07 +08:00
RockChinQ
2da5a9f3c7 ci(Dockerfile): 显式更新httpcore httpx和openai库 2023-11-12 14:18:42 +08:00
Junyan Qin
4e0df52d7c Merge pull request #592 from RockChinQ/fix/plugin-downloading
Feat: 通过 GitHub API 进行插件安装和更新
2023-11-12 14:07:52 +08:00
RockChinQ
71b8bf13e4 fix: 插件加载bug 2023-11-12 13:52:04 +08:00
RockChinQ
a8b1e6ce91 ci: test 2023-11-12 12:05:04 +08:00
RockChinQ
1419d7611d ci(cmdpriv): 本地测试通过 2023-11-12 12:03:52 +08:00
RockChinQ
89c83ebf20 fix: 错误的判空变量 2023-11-12 11:30:10 +08:00
RockChinQ
76d7db88ea feat: 基于元数据记录的插件更新实现 2023-11-11 23:17:28 +08:00
RockChinQ
67a208bc90 feat: 添加插件元数据操作模块 2023-11-11 17:38:52 +08:00
RockChinQ
acbd55ded2 feat: 插件安装改为直接下载源码 2023-11-10 23:01:56 +08:00
Junyan Qin
11a240a6d1 Merge pull request #591 from RockChinQ/feat/new-model-names
Feat: 更新模型索引
2023-11-10 21:23:22 +08:00
RockChinQ
97c85abbe7 feat: 更新模型索引 2023-11-10 21:16:33 +08:00
RockChinQ
06a0cd2a3d chore: 发布兼容性问题公告 2023-11-10 12:20:29 +08:00
GitHub Actions
572b215df8 Update override-all.json 2023-11-10 04:04:45 +00:00
RockChinQ
2c542bf412 chore: 不再默认在启动时升级依赖库 2023-11-10 12:04:25 +08:00
62 changed files with 691 additions and 518 deletions

View File

@@ -10,6 +10,6 @@ updates:
schedule:
interval: "weekly"
allow:
- dependency-name: "yiri-mirai"
- dependency-name: "yiri-mirai-rc"
- dependency-name: "dulwich"
- dependency-name: "openai"

View File

@@ -21,12 +21,12 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.x
python-version: 3.10.13
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow CallingGPT tiktoken
python -m pip install --upgrade yiri-mirai-rc openai>=1.0.0 colorlog func_timeout dulwich Pillow CallingGPT tiktoken
python -m pip install -U openai>=1.0.0
- name: Copy Scripts
run: |

View File

@@ -29,7 +29,6 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
# 在此处添加您的项目所需的其他依赖
- name: Copy Scripts
run: |

View File

@@ -1,12 +1,13 @@
FROM python:3.10.13-alpine3.18
FROM python:3.10.13-bullseye
WORKDIR /QChatGPT
COPY . /QChatGPT/
RUN ls
RUN pip install -r requirements.txt
RUN pip install -U websockets==10.0
RUN python -m pip install -r requirements.txt && \
python -m pip install -U websockets==10.0 && \
python -m pip install -U httpcore httpx openai
# 生成配置文件
RUN python main.py

View File

@@ -278,7 +278,7 @@ cd QChatGPT
2. 安装依赖
```bash
pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk CallingGPT tiktoken
pip3 install requests -r requirements.txt
```
3. 运行一次主程序,生成配置文件

View File

@@ -141,7 +141,7 @@ cd QChatGPT
2. Install dependencies
```bash
pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk
pip3 install requests yiri-mirai-rc openai colorlog func_timeout dulwich Pillow nakuru-project-idk
```
3. Generate `config.py`

View File

@@ -114,7 +114,7 @@ admin_qq = 0
#
# 还可以加载文件中的预设文字使用方法请查看https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E9%A2%84%E8%AE%BE%E6%96%87%E5%AD%97
default_prompt = {
"default": "如果之后想获取帮助,请你说“输入!help获取帮助”",
"default": "如果用户之后想获取帮助,请你说“输入!help获取帮助”",
}
# 情景预设格式
@@ -208,29 +208,52 @@ auto_reset = True
# OpenAI补全API的参数
# 请在下方填写模型,程序自动选择接口
# 模型文档https://platform.openai.com/docs/models
# 现已支持的模型有:
#
# 'gpt-4'
# 'gpt-4-0613'
# 'gpt-4-32k'
# 'gpt-4-32k-0613'
# 'gpt-3.5-turbo'
# 'gpt-3.5-turbo-16k'
# 'gpt-3.5-turbo-0613'
# 'gpt-3.5-turbo-16k-0613'
# 'text-davinci-003'
# 'text-davinci-002'
# 'code-davinci-002'
# 'code-cushman-001'
# 'text-curie-001'
# 'text-babbage-001'
# 'text-ada-001'
# ChatCompletions 接口:
# # GPT 4 系列
# "gpt-4-1106-preview",
# "gpt-4-vision-preview",
# "gpt-4",
# "gpt-4-32k",
# "gpt-4-0613",
# "gpt-4-32k-0613",
# "gpt-4-0314", # legacy
# "gpt-4-32k-0314", # legacy
# # GPT 3.5 系列
# "gpt-3.5-turbo-1106",
# "gpt-3.5-turbo",
# "gpt-3.5-turbo-16k",
# "gpt-3.5-turbo-0613", # legacy
# "gpt-3.5-turbo-16k-0613", # legacy
# "gpt-3.5-turbo-0301", # legacy
#
# Completions接口
# "text-davinci-003", # legacy
# "text-davinci-002", # legacy
# "code-davinci-002", # legacy
# "code-cushman-001", # legacy
# "text-curie-001", # legacy
# "text-babbage-001", # legacy
# "text-ada-001", # legacy
# "gpt-3.5-turbo-instruct",
#
# 具体请查看OpenAI的文档: https://beta.openai.com/docs/api-reference/completions/create
# 请将内容修改到config.py中请勿修改config-template.py
#
# 支持通过 One API 接入多种模型请在上方的openai_config中设置One API的代理地址
# 并在此填写您要使用的模型名称详细请参考https://github.com/songquanpeng/one-api
#
# 支持的 One API 模型:
# "SparkDesk",
# "chatglm_pro",
# "chatglm_std",
# "chatglm_lite",
# "qwen-v1",
# "qwen-plus-v1",
# "ERNIE-Bot",
# "ERNIE-Bot-turbo",
completion_api_params = {
"model": "gpt-3.5-turbo",
"temperature": 0.9, # 数值越低得到的回答越理性,取值范围[0, 1]
@@ -351,7 +374,7 @@ rate_limitation = {
rate_limit_strategy = "drop"
# 是否在启动时进行依赖库更新
upgrade_dependencies = True
upgrade_dependencies = False
# 是否上报统计信息
# 用于统计机器人的使用情况,不会收集任何用户信息

84
main.py
View File

@@ -13,6 +13,48 @@ sys.path.append(".")
from pkg.utils.log import init_runtime_log_file, reset_logging
def check_file():
    """Create required config/data files and working directories on first run.

    Missing files are restored from their bundled templates. If config.py
    itself is missing, it is generated from the template and the process
    exits so the user can fill it in before the next start.
    """
    # (target, template) pairs — each target is restored from its template
    restore_map = [
        ('banlist.py', 'res/templates/banlist-template.py'),
        ('sensitive.json', 'res/templates/sensitive-template.json'),
        ('scenario/default.json', 'scenario/default-template.json'),
        ('cmdpriv.json', 'res/templates/cmdpriv-template.json'),
        ('tips.py', 'tips-custom-template.py'),
    ]
    for target, template in restore_map:
        if not os.path.exists(target):
            shutil.copy(template, target)

    # working directories
    if not os.path.exists("temp/"):
        os.mkdir("temp/")
    for directory in ("plugins", "prompts"):
        if not os.path.exists(directory):
            os.mkdir(directory)

    # config.py must exist (and be filled in) before the bot can start
    if not os.path.exists('config.py'):
        shutil.copy('config-template.py', 'config.py')
        print('请先在config.py中填写配置')
        sys.exit(0)
# 初始化相关文件
check_file()
try:
import colorlog
except ImportError:
@@ -20,7 +62,6 @@ except ImportError:
import pkg.utils.pkgmgr as pkgmgr
try:
pkgmgr.install_requirements("requirements.txt")
pkgmgr.install_upgrade("websockets")
import colorlog
except ImportError:
print("依赖不满足,请查看 https://github.com/RockChinQ/qcg-installer/issues/15")
@@ -368,53 +409,12 @@ def stop():
raise e
def check_file():
# 检查是否有banlist.py,如果没有就把banlist-template.py复制一份
if not os.path.exists('banlist.py'):
shutil.copy('res/templates/banlist-template.py', 'banlist.py')
# 检查是否有sensitive.json
if not os.path.exists("sensitive.json"):
shutil.copy("res/templates/sensitive-template.json", "sensitive.json")
# 检查是否有scenario/default.json
if not os.path.exists("scenario/default.json"):
shutil.copy("scenario/default-template.json", "scenario/default.json")
# 检查cmdpriv.json
if not os.path.exists("cmdpriv.json"):
shutil.copy("res/templates/cmdpriv-template.json", "cmdpriv.json")
# 检查tips_custom
if not os.path.exists("tips.py"):
shutil.copy("tips-custom-template.py", "tips.py")
# 检查temp目录
if not os.path.exists("temp/"):
os.mkdir("temp/")
# 检查并创建plugins、prompts目录
check_path = ["plugins", "prompts"]
for path in check_path:
if not os.path.exists(path):
os.mkdir(path)
# 配置文件存在性校验
if not os.path.exists('config.py'):
shutil.copy('config-template.py', 'config.py')
print('请先在config.py中填写配置')
sys.exit(0)
def main():
global use_override
# 检查是否携带了 --override 或 -r 参数
if '--override' in sys.argv or '-r' in sys.argv:
use_override = True
# 初始化相关文件
check_file()
# 初始化logging
init_runtime_log_file()
pkg.utils.context.context['logger_handler'] = reset_logging()

View File

@@ -24,7 +24,7 @@
"switch_strategy": "active",
"admin_qq": 0,
"default_prompt": {
"default": "如果之后想获取帮助,请你说“输入!help获取帮助”"
"default": "如果用户之后想获取帮助,请你说“输入!help获取帮助”"
},
"preset_mode": "normal",
"response_rules": {
@@ -86,7 +86,7 @@
"default": 60
},
"rate_limit_strategy": "drop",
"upgrade_dependencies": true,
"upgrade_dependencies": false,
"report_usage": true,
"logging_level": 20
}

View File

@@ -9,8 +9,8 @@ import threading
import requests
import pkg.utils.context
import pkg.utils.updater
from ..utils import context
from ..utils import updater
class DataGatherer:
@@ -33,7 +33,7 @@ class DataGatherer:
def __init__(self):
self.load_from_db()
try:
self.version_str = pkg.utils.updater.get_current_tag() # 从updater模块获取版本号
self.version_str = updater.get_current_tag() # 从updater模块获取版本号
except:
pass
@@ -47,7 +47,7 @@ class DataGatherer:
def thread_func():
try:
config = pkg.utils.context.get_config()
config = context.get_config()
if not config.report_usage:
return
res = requests.get("http://reports.rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}&msg_source={}".format(subservice_name, self.version_str, count, config.msg_source_adapter))
@@ -64,7 +64,7 @@ class DataGatherer:
def report_text_model_usage(self, model, total_tokens):
"""调用方报告文字模型请求文字使用量"""
key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5() # 以key的md5进行储存
key_md5 = context.get_openai_manager().key_mgr.get_using_key_md5() # 以key的md5进行储存
if key_md5 not in self.usage:
self.usage[key_md5] = {}
@@ -84,7 +84,7 @@ class DataGatherer:
def report_image_model_usage(self, size):
"""调用方报告图片模型请求图片使用量"""
key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()
key_md5 = context.get_openai_manager().key_mgr.get_using_key_md5()
if key_md5 not in self.usage:
self.usage[key_md5] = {}
@@ -131,9 +131,9 @@ class DataGatherer:
return total
def dump_to_db(self):
pkg.utils.context.get_database_manager().dump_usage_json(self.usage)
context.get_database_manager().dump_usage_json(self.usage)
def load_from_db(self):
json_str = pkg.utils.context.get_database_manager().load_usage_json()
json_str = context.get_database_manager().load_usage_json()
if json_str is not None:
self.usage = json.loads(json_str)

View File

@@ -5,11 +5,10 @@ import hashlib
import json
import logging
import time
from sqlite3 import Cursor
import sqlite3
import pkg.utils.context
from ..utils import context
class DatabaseManager:
@@ -22,7 +21,7 @@ class DatabaseManager:
self.reconnect()
pkg.utils.context.set_database_manager(self)
context.set_database_manager(self)
# 连接到数据库文件
def reconnect(self):
@@ -33,7 +32,7 @@ class DatabaseManager:
def close(self):
self.conn.close()
def __execute__(self, *args, **kwargs) -> Cursor:
def __execute__(self, *args, **kwargs) -> sqlite3.Cursor:
# logging.debug('SQL: {}'.format(sql))
logging.debug('SQL: {}'.format(args))
c = self.cursor.execute(*args, **kwargs)
@@ -145,7 +144,7 @@ class DatabaseManager:
# 从数据库加载还没过期的session数据
def load_valid_sessions(self) -> dict:
# 从数据库中加载所有还没过期的session
config = pkg.utils.context.get_config()
config = context.get_config()
self.__execute__("""
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
from `sessions` where `last_interact_timestamp` > {}

View File

@@ -1,11 +1,11 @@
import openai
from openai.types.chat import chat_completion_message
import json
import logging
from .model import RequestBase
import openai
from openai.types.chat import chat_completion_message
from ..funcmgr import get_func_schema_list, execute_function, get_func, get_func_schema, ContentFunctionNotFoundError
from .model import RequestBase
from .. import funcmgr
class ChatCompletionRequest(RequestBase):
@@ -81,7 +81,7 @@ class ChatCompletionRequest(RequestBase):
"messages": self.messages,
}
funcs = get_func_schema_list()
funcs = funcmgr.get_func_schema_list()
if len(funcs) > 0:
args['functions'] = funcs
@@ -171,7 +171,7 @@ class ChatCompletionRequest(RequestBase):
# 若不是json格式的异常处理
except json.decoder.JSONDecodeError:
# 获取函数的参数列表
func_schema = get_func_schema(func_name)
func_schema = funcmgr.get_func_schema(func_name)
arguments = {
func_schema['parameters']['required'][0]: cp_pending_func_call.arguments
@@ -182,7 +182,7 @@ class ChatCompletionRequest(RequestBase):
# 执行函数调用
ret = ""
try:
ret = execute_function(func_name, arguments)
ret = funcmgr.execute_function(func_name, arguments)
logging.info("函数执行完成。")
except Exception as e:
@@ -216,6 +216,5 @@ class ChatCompletionRequest(RequestBase):
}
}
except ContentFunctionNotFoundError:
except funcmgr.ContentFunctionNotFoundError:
raise Exception("没有找到函数: {}".format(func_name))

View File

@@ -1,10 +1,10 @@
import openai
from openai.types import completion, completion_choice
from .model import RequestBase
from . import model
class CompletionRequest(RequestBase):
class CompletionRequest(model.RequestBase):
"""调用Completion接口的请求类。
调用方可以一直next completion直到finish_reason为stop。

View File

@@ -1,6 +1,4 @@
# 定义不同接口请求的模型
import threading
import asyncio
import logging
import openai
@@ -23,6 +21,7 @@ class RequestBase:
def _req(self, **kwargs):
"""处理代理问题"""
logging.debug("请求接口参数: %s", str(kwargs))
import config
ret = self.req_func(**kwargs)

View File

@@ -16,10 +16,6 @@ import os
# __scenario_from_files__ = {}
__universal_first_reply__ = "ok, I'll follow your commands."
"""通用首次回复"""
class ScenarioMode:
"""情景预设模式抽象类"""
@@ -66,17 +62,13 @@ class NormalScenarioMode(ScenarioMode):
"""普通情景预设模式"""
def __init__(self):
global __universal_first_reply__
# 加载config中的default_prompt值
if type(config.default_prompt) == str:
self.using_prompt_name = "default"
self.prompts = {"default": [
{
"role": "user",
"role": "system",
"content": config.default_prompt
},{
"role": "assistant",
"content": __universal_first_reply__
}
]}
@@ -84,11 +76,8 @@ class NormalScenarioMode(ScenarioMode):
for key in config.default_prompt:
self.prompts[key] = [
{
"role": "user",
"role": "system",
"content": config.default_prompt[key]
},{
"role": "assistant",
"content": __universal_first_reply__
}
]
@@ -98,11 +87,8 @@ class NormalScenarioMode(ScenarioMode):
with open(os.path.join("prompts", file), encoding="utf-8") as f:
self.prompts[file] = [
{
"role": "user",
"role": "system",
"content": f.read()
},{
"role": "assistant",
"content": __universal_first_reply__
}
]

View File

@@ -1,8 +1,7 @@
# 封装了function calling的一些支持函数
import logging
from pkg.plugin import host
from ..plugin import host
class ContentFunctionNotFoundError(Exception):

View File

@@ -2,8 +2,8 @@
import hashlib
import logging
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models
from ..plugin import host as plugin_host
from ..plugin import models as plugin_models
class KeysManager:

View File

@@ -2,12 +2,11 @@ import logging
import openai
import pkg.openai.keymgr
import pkg.utils.context
import pkg.audit.gatherer
from pkg.openai.modelmgr import select_request_cls
from pkg.openai.api.model import RequestBase
from ..openai import keymgr
from ..utils import context
from ..audit import gatherer
from ..openai import modelmgr
from ..openai.api import model as api_model
class OpenAIInteract:
@@ -16,9 +15,9 @@ class OpenAIInteract:
将文字接口和图片接口封装供调用方使用
"""
key_mgr: pkg.openai.keymgr.KeysManager = None
key_mgr: keymgr.KeysManager = None
audit_mgr: pkg.audit.gatherer.DataGatherer = None
audit_mgr: gatherer.DataGatherer = None
default_image_api_params = {
"size": "256x256",
@@ -28,8 +27,8 @@ class OpenAIInteract:
def __init__(self, api_key: str):
self.key_mgr = pkg.openai.keymgr.KeysManager(api_key)
self.audit_mgr = pkg.audit.gatherer.DataGatherer()
self.key_mgr = keymgr.KeysManager(api_key)
self.audit_mgr = gatherer.DataGatherer()
# logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
@@ -37,22 +36,22 @@ class OpenAIInteract:
api_key=self.key_mgr.get_using_key()
)
pkg.utils.context.set_openai_manager(self)
context.set_openai_manager(self)
def request_completion(self, messages: list):
"""请求补全接口回复
"""
# 选择接口请求类
config = pkg.utils.context.get_config()
config = context.get_config()
request: RequestBase
request: api_model.RequestBase
model: str = config.completion_api_params['model']
cp_parmas = config.completion_api_params.copy()
del cp_parmas['model']
request = select_request_cls(self.client, model, messages, cp_parmas)
request = modelmgr.select_request_cls(self.client, model, messages, cp_parmas)
# 请求接口
for resp in request:
@@ -74,7 +73,7 @@ class OpenAIInteract:
Returns:
dict: 响应
"""
config = pkg.utils.context.get_config()
config = context.get_config()
params = config.image_api_params
response = openai.Image.create(

View File

@@ -8,39 +8,47 @@ Completion - text-davinci-003 等模型
import tiktoken
import openai
from pkg.openai.api.model import RequestBase
from pkg.openai.api.completion import CompletionRequest
from pkg.openai.api.chat_completion import ChatCompletionRequest
from ..openai.api import model as api_model
from ..openai.api import completion as api_completion
from ..openai.api import chat_completion as api_chat_completion
COMPLETION_MODELS = {
'text-davinci-003',
'text-davinci-002',
'code-davinci-002',
'code-cushman-001',
'text-curie-001',
'text-babbage-001',
'text-ada-001',
"text-davinci-003", # legacy
"text-davinci-002", # legacy
"code-davinci-002", # legacy
"code-cushman-001", # legacy
"text-curie-001", # legacy
"text-babbage-001", # legacy
"text-ada-001", # legacy
"gpt-3.5-turbo-instruct",
}
CHAT_COMPLETION_MODELS = {
'gpt-3.5-turbo',
'gpt-3.5-turbo-16k',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-16k-0613',
# 'gpt-3.5-turbo-0301',
'gpt-4',
'gpt-4-0613',
'gpt-4-32k',
'gpt-4-32k-0613',
# GPT 4 系列
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
"gpt-4-0314", # legacy
"gpt-4-32k-0314", # legacy
# GPT 3.5 系列
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613", # legacy
"gpt-3.5-turbo-16k-0613", # legacy
"gpt-3.5-turbo-0301", # legacy
# One-API 接入
'SparkDesk',
'chatglm_pro',
'chatglm_std',
'chatglm_lite',
'qwen-v1',
'qwen-plus-v1',
'ERNIE-Bot',
'ERNIE-Bot-turbo',
"SparkDesk",
"chatglm_pro",
"chatglm_std",
"chatglm_lite",
"qwen-v1",
"qwen-plus-v1",
"ERNIE-Bot",
"ERNIE-Bot-turbo",
}
EDIT_MODELS = {
@@ -52,11 +60,11 @@ IMAGE_MODELS = {
}
def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> RequestBase:
def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> api_model.RequestBase:
if model_name in CHAT_COMPLETION_MODELS:
return ChatCompletionRequest(client, model_name, messages, **args)
return api_chat_completion.ChatCompletionRequest(client, model_name, messages, **args)
elif model_name in COMPLETION_MODELS:
return CompletionRequest(client, model_name, messages, **args)
return api_completion.CompletionRequest(client, model_name, messages, **args)
raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))

View File

@@ -8,15 +8,13 @@ import threading
import time
import json
import pkg.openai.manager
import pkg.openai.modelmgr
import pkg.database.manager
import pkg.utils.context
from ..openai import manager as openai_manager
from ..openai import modelmgr as openai_modelmgr
from ..database import manager as database_manager
from ..utils import context as context
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models
from pkg.openai.modelmgr import count_tokens
from ..plugin import host as plugin_host
from ..plugin import models as plugin_models
# 运行时保存的所有session
sessions = {}
@@ -38,7 +36,7 @@ def reset_session_prompt(session_name, prompt):
f.write(prompt)
f.close()
# 生成新数据
config = pkg.utils.context.get_config()
config = context.get_config()
prompt = [
{
'role': 'system',
@@ -61,7 +59,7 @@ def load_sessions():
global sessions
db_inst = pkg.utils.context.get_database_manager()
db_inst = context.get_database_manager()
session_data = db_inst.load_valid_sessions()
@@ -172,7 +170,7 @@ class Session:
if self.create_timestamp != create_timestamp or self not in sessions.values():
return
config = pkg.utils.context.get_config()
config = context.get_config()
if int(time.time()) - self.last_interact_timestamp > config.session_expire_time:
logging.info('session {} 已过期'.format(self.name))
@@ -182,7 +180,7 @@ class Session:
'session': self,
'session_expire_time': config.session_expire_time
}
event = pkg.plugin.host.emit(plugin_models.SessionExpired, **args)
event = plugin_host.emit(plugin_models.SessionExpired, **args)
if event.is_prevented_default():
return
@@ -214,11 +212,11 @@ class Session:
'default_prompt': self.default_prompt,
}
event = pkg.plugin.host.emit(plugin_models.SessionFirstMessageReceived, **args)
event = plugin_host.emit(plugin_models.SessionFirstMessageReceived, **args)
if event.is_prevented_default():
return None, None, None
config = pkg.utils.context.get_config()
config = context.get_config()
max_length = config.prompt_submit_length
local_default_prompt = self.default_prompt.copy()
@@ -232,7 +230,7 @@ class Session:
'text_message': text,
}
event = pkg.plugin.host.emit(plugin_models.PromptPreProcessing, **args)
event = plugin_host.emit(plugin_models.PromptPreProcessing, **args)
if event.get_return_value('default_prompt') is not None:
local_default_prompt = event.get_return_value('default_prompt')
@@ -243,6 +241,7 @@ class Session:
if event.get_return_value('text_message') is not None:
text = event.get_return_value('text_message')
# 裁剪messages到合适长度
prompts, _ = self.cut_out(text, max_length, local_default_prompt, local_prompt)
res_text = ""
@@ -256,14 +255,14 @@ class Session:
funcs = []
trace_func_calls = config.trace_function_calls
botmgr = pkg.utils.context.get_qqbot_manager()
botmgr = context.get_qqbot_manager()
session_name_spt: list[str] = self.name.split("_")
pending_res_text = ""
# TODO 对不起,我知道这样非常非常屎山,但我之后会重构的
for resp in pkg.utils.context.get_openai_manager().request_completion(prompts):
for resp in context.get_openai_manager().request_completion(prompts):
if pending_res_text != "":
botmgr.adapter.send_message(
@@ -325,7 +324,6 @@ class Session:
)
pass
# 向API请求补全
# message, total_token = pkg.utils.context.get_openai_manager().request_completion(
# prompts,
@@ -383,13 +381,13 @@ class Session:
# 包装目前的对话回合内容
changable_prompts = []
use_model = pkg.utils.context.get_config().completion_api_params['model']
use_model = context.get_config().completion_api_params['model']
ptr = len(prompt) - 1
# 直接从后向前扫描拼接,不管是否是整回合
while ptr >= 0:
if count_tokens(prompt[ptr:ptr+1]+changable_prompts, use_model) > max_tokens:
if openai_modelmgr.count_tokens(prompt[ptr:ptr+1]+changable_prompts, use_model) > max_tokens:
break
changable_prompts.insert(0, prompt[ptr])
@@ -410,14 +408,14 @@ class Session:
logging.debug("cut_out: {}".format(json.dumps(result_prompt, ensure_ascii=False, indent=4)))
return result_prompt, count_tokens(changable_prompts, use_model)
return result_prompt, openai_modelmgr.count_tokens(changable_prompts, use_model)
# 持久化session
def persistence(self):
if self.prompt == self.get_default_prompt():
return
db_inst = pkg.utils.context.get_database_manager()
db_inst = context.get_database_manager()
name_spt = self.name.split('_')
@@ -439,12 +437,12 @@ class Session:
}
# 此事件不支持阻止默认行为
_ = pkg.plugin.host.emit(plugin_models.SessionExplicitReset, **args)
_ = plugin_host.emit(plugin_models.SessionExplicitReset, **args)
pkg.utils.context.get_database_manager().explicit_close_session(self.name, self.create_timestamp)
context.get_database_manager().explicit_close_session(self.name, self.create_timestamp)
if expired:
pkg.utils.context.get_database_manager().set_session_expired(self.name, self.create_timestamp)
context.get_database_manager().set_session_expired(self.name, self.create_timestamp)
if not persist: # 不要求保持default prompt
self.default_prompt = self.get_default_prompt(use_prompt)
@@ -461,11 +459,11 @@ class Session:
# 将本session的数据库状态设置为on_going
def set_ongoing(self):
pkg.utils.context.get_database_manager().set_session_ongoing(self.name, self.create_timestamp)
context.get_database_manager().set_session_ongoing(self.name, self.create_timestamp)
# 切换到上一个session
def last_session(self):
last_one = pkg.utils.context.get_database_manager().last_session(self.name, self.last_interact_timestamp)
last_one = context.get_database_manager().last_session(self.name, self.last_interact_timestamp)
if last_one is None:
return None
else:
@@ -486,7 +484,7 @@ class Session:
# 切换到下一个session
def next_session(self):
next_one = pkg.utils.context.get_database_manager().next_session(self.name, self.last_interact_timestamp)
next_one = context.get_database_manager().next_session(self.name, self.last_interact_timestamp)
if next_one is None:
return None
else:
@@ -506,13 +504,13 @@ class Session:
return self
def list_history(self, capacity: int = 10, page: int = 0):
return pkg.utils.context.get_database_manager().list_history(self.name, capacity, page)
return context.get_database_manager().list_history(self.name, capacity, page)
def delete_history(self, index: int) -> bool:
return pkg.utils.context.get_database_manager().delete_history(self.name, index)
return context.get_database_manager().delete_history(self.name, index)
def delete_all_history(self) -> bool:
return pkg.utils.context.get_database_manager().delete_all_history(self.name)
return context.get_database_manager().delete_all_history(self.name)
def draw_image(self, prompt: str):
return pkg.utils.context.get_openai_manager().request_image(prompt)
return context.get_openai_manager().request_image(prompt)

View File

@@ -7,14 +7,19 @@ import pkgutil
import sys
import shutil
import traceback
import time
import re
import pkg.utils.updater as updater
import pkg.utils.context as context
import pkg.plugin.switch as switch
import pkg.plugin.settings as settings
import pkg.qqbot.adapter as msadapter
from ..utils import updater as updater
from ..utils import network as network
from ..utils import context as context
from ..plugin import switch as switch
from ..plugin import settings as settings
from ..qqbot import adapter as msadapter
from ..plugin import metadata as metadata
from mirai import Mirai
import requests
from CallingGPT.session.session import Session
@@ -65,6 +70,8 @@ def generate_plugin_order():
def iter_plugins():
    """Yield registered plugin containers in their configured order."""
    for name in __plugins_order__:
        # the order list may reference plugins that are no longer installed
        if name in __plugins__:
            yield __plugins__[name]
@@ -113,10 +120,15 @@ def load_plugins():
# 加载插件顺序
settings.load_settings()
logging.debug("registered plugins: {}".format(__plugins__))
# 输出已注册的内容函数列表
logging.debug("registered content functions: {}".format(__callable_functions__))
logging.debug("function instance map: {}".format(__function_inst_map__))
# 迁移插件源地址记录
metadata.do_plugin_git_repo_migrate()
def initialize_plugins():
"""初始化插件"""
@@ -135,6 +147,7 @@ def initialize_plugins():
successfully_initialized_plugins.append(plugin['name'])
except:
logging.error("插件{}初始化时发生错误: {}".format(plugin['name'], sys.exc_info()))
logging.debug(traceback.format_exc())
logging.info("以下插件已初始化: {}".format(", ".join(successfully_initialized_plugins)))
@@ -155,34 +168,100 @@ def unload_plugins():
# logging.error("插件{}卸载时发生错误: {}".format(plugin['name'], sys.exc_info()))
def install_plugin(repo_url: str):
"""安装插件从git储存库获取并解决依赖"""
try:
import pkg.utils.pkgmgr
pkg.utils.pkgmgr.ensure_dulwich()
except:
pass
def get_github_plugin_repo_label(repo_url: str) -> list[str]:
    """Extract [username, repo] from a GitHub repository URL.

    Accepts https://github.com/user/repo(.git) and git@github.com:user/repo
    forms. Returns None when the URL is not recognized as a GitHub repo
    (note: despite the list[str] annotation).
    """
    matches = re.findall(
        r'(?:https?://github\.com/|git@github\.com:)([^/]+/[^/]+?)(?:\.git|/|$)',
        repo_url,
    )
    if not matches:
        # not a GitHub URL — caller treats None as "unsupported source"
        return None
    return matches[0].split("/")
def download_plugin_source_code(repo_url: str, target_path: str) -> str:
    """Download a plugin's source code under target_path.

    Only GitHub repositories are supported: the repo zipball is fetched via
    the GitHub API into temp/, extracted, then copied to
    target_path + <repo name>.

    :param repo_url: git/https URL of the plugin repository
    :param target_path: directory prefix (e.g. "plugins/") to install into
    :return: the repository name (the plugin's directory name)
    :raises Exception: for non-GitHub URLs or a failed download
    """
    # 检查源类型
    # 提取 username/repo , 正则表达式
    repo = get_github_plugin_repo_label(repo_url)

    # Fix: reject unsupported sources BEFORE dereferencing `repo`. The
    # original did `target_path += repo[1]` first, so a non-GitHub URL
    # crashed with TypeError instead of raising this friendly error.
    if repo is None:
        raise Exception("暂不支持的源类型,请使用 GitHub 仓库发行插件。")

    target_path += repo[1]

    logging.info("从 GitHub 下载插件源码...")
    zipball_url = f"https://api.github.com/repos/{'/'.join(repo)}/zipball/HEAD"

    zip_resp = requests.get(
        url=zipball_url,
        proxies=network.wrapper_proxies(),
        stream=True
    )
    if zip_resp.status_code != 200:
        raise Exception("下载源码失败: {}".format(zip_resp.text))

    # clear stale temp and target directories before extracting
    if os.path.exists("temp/" + target_path):
        shutil.rmtree("temp/" + target_path)
    if os.path.exists(target_path):
        shutil.rmtree(target_path)

    os.makedirs("temp/" + target_path)

    with open("temp/" + target_path + "/source.zip", "wb") as f:
        for chunk in zip_resp.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)

    logging.info("下载完成, 解压...")
    import zipfile
    with zipfile.ZipFile("temp/" + target_path + "/source.zip", 'r') as zip_ref:
        zip_ref.extractall("temp/" + target_path)
    os.remove("temp/" + target_path + "/source.zip")

    # 目标是 username-repo-hash — 取解压出的唯一文件夹,复制到 plugins/repo
    import glob
    unzip_dir = glob.glob("temp/" + target_path + "/*")[0]
    shutil.copytree(unzip_dir, target_path + "/")
    shutil.rmtree(unzip_dir)

    logging.info("解压完成")

    return repo[1]
def check_requirements(path: str):
# 检查此目录是否包含requirements.txt
if os.path.exists("plugins/"+repo_url.split(".git")[0].split("/")[-1]+"/requirements.txt"):
if os.path.exists(path+"/requirements.txt"):
logging.info("检测到requirements.txt正在安装依赖")
import pkg.utils.pkgmgr
pkg.utils.pkgmgr.install_requirements("plugins/"+repo_url.split(".git")[0].split("/")[-1]+"/requirements.txt")
pkg.utils.pkgmgr.install_requirements(path+"/requirements.txt")
import pkg.utils.log as log
log.reset_logging()
def install_plugin(repo_url: str):
    """Install a plugin: fetch its source from the repo URL and resolve deps."""
    plugin_dir = download_plugin_source_code(repo_url, "plugins/")
    check_requirements("plugins/" + plugin_dir)
    # record source URL and install time so the plugin can be updated later
    metadata.set_plugin_metadata(plugin_dir, repo_url, int(time.time()), "HEAD")
def uninstall_plugin(plugin_name: str) -> str:
"""卸载插件"""
if plugin_name not in __plugins__:
@@ -202,39 +281,43 @@ def uninstall_plugin(plugin_name: str) -> str:
def update_plugin(plugin_name: str):
"""更新插件"""
# 检查是否有远程地址记录
target_plugin_dir = "plugins/" + __plugins__[plugin_name]['path'].replace("\\", "/").split("plugins/")[1].split("/")[0]
plugin_path_name = get_plugin_path_name_by_plugin_name(plugin_name)
remote_url = updater.get_remote_url(target_plugin_dir)
meta = metadata.get_plugin_metadata(plugin_path_name)
if meta == {}:
raise Exception("没有此插件元数据信息,无法更新")
remote_url = meta['source']
if remote_url == "https://github.com/RockChinQ/QChatGPT" or remote_url == "https://gitee.com/RockChin/QChatGPT" \
or remote_url == "" or remote_url is None or remote_url == "http://github.com/RockChinQ/QChatGPT" or remote_url == "http://gitee.com/RockChin/QChatGPT":
raise Exception("插件没有远程地址记录,无法更新")
# 把远程clone到temp/plugins/update/插件
logging.info("克隆插件储存库: {}".format(remote_url))
# 重新安装插件
logging.info("正在重新安装插件以进行更新...")
from dulwich import porcelain
clone_target_dir = "temp/plugins/update/"+target_plugin_dir.split("/")[-1]+"/"
install_plugin(remote_url)
if os.path.exists(clone_target_dir):
shutil.rmtree(clone_target_dir)
if not os.path.exists(clone_target_dir):
os.makedirs(clone_target_dir)
repo = porcelain.clone(remote_url, clone_target_dir, checkout=True)
def get_plugin_name_by_path_name(plugin_path_name: str) -> str:
for k, v in __plugins__.items():
if v['path'] == "plugins/"+plugin_path_name+"/main.py":
return k
return None
# 检查此目录是否包含requirements.txt
if os.path.exists(clone_target_dir+"requirements.txt"):
logging.info("检测到requirements.txt正在安装依赖")
import pkg.utils.pkgmgr
pkg.utils.pkgmgr.install_requirements(clone_target_dir+"requirements.txt")
import pkg.utils.log as log
log.reset_logging()
def get_plugin_path_name_by_plugin_name(plugin_name: str) -> str:
if plugin_name not in __plugins__:
return None
plugin_main_module_path = __plugins__[plugin_name]['path']
# 将temp/plugins/update/插件名 覆盖到 plugins/插件名
shutil.rmtree(target_plugin_dir)
plugin_main_module_path = plugin_main_module_path.replace("\\", "/")
spt = plugin_main_module_path.split("/")
return spt[1]
shutil.copytree(clone_target_dir, target_plugin_dir)
class EventContext:
"""事件上下文"""

87
pkg/plugin/metadata.py Normal file
View File

@@ -0,0 +1,87 @@
import os
import shutil
import json
import time
import dulwich.errors as dulwich_err
from ..utils import updater
def read_metadata_file() -> dict:
# 读取 plugins/metadata.json 文件
if not os.path.exists('plugins/metadata.json'):
return {}
with open('plugins/metadata.json', 'r') as f:
return json.load(f)
def write_metadata_file(metadata: dict):
if not os.path.exists('plugins'):
os.mkdir('plugins')
with open('plugins/metadata.json', 'w') as f:
json.dump(metadata, f, indent=4, ensure_ascii=False)
def do_plugin_git_repo_migrate():
# 仅在 plugins/metadata.json 不存在时执行
if os.path.exists('plugins/metadata.json'):
return
metadata = read_metadata_file()
# 遍历 plugins 下所有目录获取目录的git远程地址
for plugin_name in os.listdir('plugins'):
plugin_path = os.path.join('plugins', plugin_name)
if not os.path.isdir(plugin_path):
continue
remote_url = None
try:
remote_url = updater.get_remote_url(plugin_path)
except dulwich_err.NotGitRepository:
continue
if remote_url == "https://github.com/RockChinQ/QChatGPT" or remote_url == "https://gitee.com/RockChin/QChatGPT" \
or remote_url == "" or remote_url is None or remote_url == "http://github.com/RockChinQ/QChatGPT" or remote_url == "http://gitee.com/RockChin/QChatGPT":
continue
from . import host
if plugin_name not in metadata:
metadata[plugin_name] = {
'source': remote_url,
'install_timestamp': int(time.time()),
'ref': 'HEAD',
}
write_metadata_file(metadata)
def set_plugin_metadata(
plugin_name: str,
source: str,
install_timestamp: int,
ref: str,
):
metadata = read_metadata_file()
metadata[plugin_name] = {
'source': source,
'install_timestamp': install_timestamp,
'ref': ref,
}
write_metadata_file(metadata)
def remove_plugin_metadata(plugin_name: str):
metadata = read_metadata_file()
if plugin_name in metadata:
del metadata[plugin_name]
write_metadata_file(metadata)
def get_plugin_metadata(plugin_name: str) -> dict:
metadata = read_metadata_file()
if plugin_name in metadata:
return metadata[plugin_name]
return {}

View File

@@ -1,7 +1,7 @@
import logging
import pkg.plugin.host as host
import pkg.utils.context
from ..plugin import host
from ..utils import context
PersonMessageReceived = "person_message_received"
"""收到私聊消息时,在判断是否应该响应前触发
@@ -285,7 +285,7 @@ def register(name: str, description: str, version: str, author: str):
cls.description = description
cls.version = version
cls.author = author
cls.host = pkg.utils.context.get_plugin_host()
cls.host = context.get_plugin_host()
cls.enabled = True
cls.path = host.__current_module_path__

View File

@@ -1,9 +1,9 @@
import json
import os
import pkg.plugin.host as host
import logging
from ..plugin import host
def wrapper_dict_from_runtime_context() -> dict:
"""从变量中包装settings.json的数据字典"""

View File

@@ -3,7 +3,7 @@ import json
import logging
import os
import pkg.plugin.host as host
from ..plugin import host
def wrapper_dict_from_plugin_list() -> dict:

View File

@@ -1,18 +1,18 @@
import pkg.utils.context
from ..utils import context
def is_banned(launcher_type: str, launcher_id: int, sender_id: int) -> bool:
if not pkg.utils.context.get_qqbot_manager().enable_banlist:
if not context.get_qqbot_manager().enable_banlist:
return False
result = False
if launcher_type == 'group':
# 检查是否显式声明发起人QQ要被person忽略
if sender_id in pkg.utils.context.get_qqbot_manager().ban_person:
if sender_id in context.get_qqbot_manager().ban_person:
result = True
else:
for group_rule in pkg.utils.context.get_qqbot_manager().ban_group:
for group_rule in context.get_qqbot_manager().ban_group:
if type(group_rule) == int:
if group_rule == launcher_id: # 此群群号被禁用
result = True
@@ -32,7 +32,7 @@ def is_banned(launcher_type: str, launcher_id: int, sender_id: int) -> bool:
else:
# ban_person, 与群规则相同
for person_rule in pkg.utils.context.get_qqbot_manager().ban_person:
for person_rule in context.get_qqbot_manager().ban_person:
if type(person_rule) == int:
if person_rule == launcher_id:
result = True

View File

@@ -2,21 +2,21 @@
import os
import time
import base64
import typing
import config
from mirai.models.message import MessageComponent, MessageChain, Image
from mirai.models.message import ForwardMessageNode
from mirai.models.base import MiraiBaseModel
from typing import List
import pkg.utils.context as context
import pkg.utils.text2img as text2img
from ..utils import text2img
import config
class ForwardMessageDiaplay(MiraiBaseModel):
title: str = "群聊的聊天记录"
brief: str = "[聊天记录]"
source: str = "聊天记录"
preview: List[str] = []
preview: typing.List[str] = []
summary: str = "查看x条转发消息"
@@ -26,7 +26,7 @@ class Forward(MessageComponent):
"""消息组件类型。"""
display: ForwardMessageDiaplay
"""显示信息"""
node_list: List[ForwardMessageNode]
node_list: typing.List[ForwardMessageNode]
"""转发消息节点列表。"""
def __init__(self, *args, **kwargs):
if len(args) == 1:

View File

@@ -1,10 +1,7 @@
import importlib
import inspect
import logging
import copy
import pkgutil
import traceback
import types
import json

View File

@@ -1,11 +1,12 @@
from ..aamgr import AbstractCommandNode, Context
import logging
from mirai import Image
import mirai
from .. import aamgr
import config
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="draw",
description="使用DALL·E生成图片",
@@ -13,9 +14,9 @@ import config
aliases=[],
privilege=1
)
class DrawCommand(AbstractCommandNode):
class DrawCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
reply = []
@@ -28,7 +29,7 @@ class DrawCommand(AbstractCommandNode):
res = session.draw_image(" ".join(ctx.params))
logging.debug("draw_image result:{}".format(res))
reply = [Image(url=res['data'][0]['url'])]
reply = [mirai.Image(url=res['data'][0]['url'])]
if not (hasattr(config, 'include_image_description')
and not config.include_image_description):
reply.append(" ".join(ctx.params))

View File

@@ -1,10 +1,9 @@
from ..aamgr import AbstractCommandNode, Context
import logging
import json
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="func",
description="管理内容函数",
@@ -12,9 +11,9 @@ import json
aliases=[],
privilege=1
)
class FuncCommand(AbstractCommandNode):
class FuncCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
from pkg.plugin.models import host
reply = []

View File

@@ -1,12 +1,9 @@
from ..aamgr import AbstractCommandNode, Context
import os
import pkg.plugin.host as plugin_host
import pkg.utils.updater as updater
from ....plugin import host as plugin_host
from ....utils import updater
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="plugin",
description="插件管理",
@@ -14,9 +11,9 @@ import pkg.utils.updater as updater
aliases=[],
privilege=1
)
class PluginCommand(AbstractCommandNode):
class PluginCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
reply = []
plugin_list = plugin_host.__plugins__
if len(ctx.params) == 0:
@@ -48,7 +45,7 @@ class PluginCommand(AbstractCommandNode):
return False, []
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=PluginCommand,
name="get",
description="安装插件",
@@ -56,9 +53,9 @@ class PluginCommand(AbstractCommandNode):
aliases=[],
privilege=2
)
class PluginGetCommand(AbstractCommandNode):
class PluginGetCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import threading
import logging
import pkg.utils.context
@@ -81,17 +78,17 @@ class PluginGetCommand(AbstractCommandNode):
return True, reply
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=PluginCommand,
name="update",
description="更新所有插件",
description="更新指定插件或全部插件",
usage="!plugin update",
aliases=[],
privilege=2
)
class PluginUpdateCommand(AbstractCommandNode):
class PluginUpdateCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import threading
import logging
plugin_list = plugin_host.__plugins__
@@ -110,7 +107,9 @@ class PluginUpdateCommand(AbstractCommandNode):
plugin_host.update_plugin(key)
updated.append(key)
else:
if ctx.crt_params[0] in plugin_list:
plugin_path_name = plugin_host.get_plugin_path_name_by_plugin_name(ctx.crt_params[0])
if plugin_path_name is not None:
plugin_host.update_plugin(ctx.crt_params[0])
updated.append(ctx.crt_params[0])
else:
@@ -119,7 +118,7 @@ class PluginUpdateCommand(AbstractCommandNode):
pkg.utils.context.get_qqbot_manager().notify_admin("已更新插件: {}, 请发送 !reload 重载插件".format(", ".join(updated)))
except Exception as e:
logging.error("插件更新失败:{}".format(e))
pkg.utils.context.get_qqbot_manager().notify_admin("插件更新失败:{} 请尝试手动更新插件".format(e))
pkg.utils.context.get_qqbot_manager().notify_admin("插件更新失败:{}使用 !plugin 命令确认插件名称或尝试手动更新插件".format(e))
reply = ["[bot]正在更新插件,请勿重复发起..."]
threading.Thread(target=closure).start()
@@ -128,7 +127,7 @@ class PluginUpdateCommand(AbstractCommandNode):
return True, reply
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=PluginCommand,
name="del",
description="删除插件",
@@ -136,9 +135,9 @@ class PluginUpdateCommand(AbstractCommandNode):
aliases=[],
privilege=2
)
class PluginDelCommand(AbstractCommandNode):
class PluginDelCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
plugin_list = plugin_host.__plugins__
reply = []
@@ -155,7 +154,7 @@ class PluginDelCommand(AbstractCommandNode):
return True, reply
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=PluginCommand,
name="on",
description="启用指定插件",
@@ -163,7 +162,7 @@ class PluginDelCommand(AbstractCommandNode):
aliases=[],
privilege=2
)
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=PluginCommand,
name="off",
description="禁用指定插件",
@@ -171,9 +170,9 @@ class PluginDelCommand(AbstractCommandNode):
aliases=[],
privilege=2
)
class PluginOnOffCommand(AbstractCommandNode):
class PluginOnOffCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.plugin.switch as plugin_switch
plugin_list = plugin_host.__plugins__

View File

@@ -1,7 +1,6 @@
from ..aamgr import AbstractCommandNode, Context
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="default",
description="操作情景预设",
@@ -9,9 +8,9 @@ from ..aamgr import AbstractCommandNode, Context
aliases=[],
privilege=1
)
class DefaultCommand(AbstractCommandNode):
class DefaultCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
session_name = ctx.session_name
params = ctx.params
@@ -45,7 +44,7 @@ class DefaultCommand(AbstractCommandNode):
return True, reply
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=DefaultCommand,
name="set",
description="设置默认情景预设",
@@ -53,9 +52,9 @@ class DefaultCommand(AbstractCommandNode):
aliases=[],
privilege=2
)
class DefaultSetCommand(AbstractCommandNode):
class DefaultSetCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
reply = []
if len(ctx.crt_params) == 0:

View File

@@ -1,8 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
import datetime
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="del",
description="删除当前会话的历史记录",
@@ -10,9 +9,9 @@ import datetime
aliases=[],
privilege=1
)
class DelCommand(AbstractCommandNode):
class DelCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
session_name = ctx.session_name
params = ctx.params
@@ -33,7 +32,7 @@ class DelCommand(AbstractCommandNode):
return True, reply
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=DelCommand,
name="all",
description="删除当前会话的全部历史记录",
@@ -41,9 +40,9 @@ class DelCommand(AbstractCommandNode):
aliases=[],
privilege=1
)
class DelAllCommand(AbstractCommandNode):
class DelAllCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
session_name = ctx.session_name
reply = []

View File

@@ -1,7 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="delhst",
description="删除指定会话的所有历史记录",
@@ -9,9 +9,9 @@ from ..aamgr import AbstractCommandNode, Context
aliases=[],
privilege=2
)
class DelHistoryCommand(AbstractCommandNode):
class DelHistoryCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
import pkg.utils.context
params = ctx.params
@@ -31,7 +31,7 @@ class DelHistoryCommand(AbstractCommandNode):
return True, reply
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=DelHistoryCommand,
name="all",
description="删除所有会话的全部历史记录",
@@ -39,9 +39,9 @@ class DelHistoryCommand(AbstractCommandNode):
aliases=[],
privilege=2
)
class DelAllHistoryCommand(AbstractCommandNode):
class DelAllHistoryCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.utils.context
reply = []
pkg.utils.context.get_database_manager().delete_all_session_history()

View File

@@ -1,8 +1,9 @@
from ..aamgr import AbstractCommandNode, Context
import datetime
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="last",
description="切换前一次对话",
@@ -10,9 +11,9 @@ import datetime
aliases=[],
privilege=1
)
class LastCommand(AbstractCommandNode):
class LastCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
session_name = ctx.session_name

View File

@@ -1,9 +1,10 @@
from ..aamgr import AbstractCommandNode, Context
import datetime
import json
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name='list',
description='列出当前会话的所有历史记录',
@@ -11,9 +12,9 @@ import json
aliases=[],
privilege=1
)
class ListCommand(AbstractCommandNode):
class ListCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
session_name = ctx.session_name
params = ctx.params

View File

@@ -1,8 +1,9 @@
from ..aamgr import AbstractCommandNode, Context
import datetime
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="next",
description="切换后一次对话",
@@ -10,9 +11,9 @@ import datetime
aliases=[],
privilege=1
)
class NextCommand(AbstractCommandNode):
class NextCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
session_name = ctx.session_name
reply = []

View File

@@ -1,8 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
import datetime
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="prompt",
description="获取当前会话的前文",
@@ -10,9 +9,9 @@ import datetime
aliases=[],
privilege=1
)
class PromptCommand(AbstractCommandNode):
class PromptCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import pkg.openai.session
session_name = ctx.session_name
params = ctx.params

View File

@@ -1,8 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
import datetime
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="resend",
description="重新获取上一次问题的回复",
@@ -10,20 +9,22 @@ import datetime
aliases=[],
privilege=1
)
class ResendCommand(AbstractCommandNode):
class ResendCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
import pkg.openai.session
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
from ....openai import session as openai_session
from ....utils import context
from ....qqbot import message
import config
session_name = ctx.session_name
reply = []
session = pkg.openai.session.get_session(session_name)
session = openai_session.get_session(session_name)
to_send = session.undo()
mgr = pkg.utils.context.get_qqbot_manager()
mgr = context.get_qqbot_manager()
reply = pkg.qqbot.message.process_normal_message(to_send, mgr, config,
reply = message.process_normal_message(to_send, mgr, config,
ctx.launcher_type, ctx.launcher_id,
ctx.sender_id)

View File

@@ -1,11 +1,11 @@
from ..aamgr import AbstractCommandNode, Context
import tips as tips_custom
import pkg.openai.session
import pkg.utils.context
from .. import aamgr
from ....openai import session
from ....utils import context
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name='reset',
description='重置当前会话',
@@ -13,21 +13,21 @@ import pkg.utils.context
aliases=[],
privilege=1
)
class ResetCommand(AbstractCommandNode):
class ResetCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
params = ctx.params
session_name = ctx.session_name
reply = ""
if len(params) == 0:
pkg.openai.session.get_session(session_name).reset(explicit=True)
session.get_session(session_name).reset(explicit=True)
reply = [tips_custom.command_reset_message]
else:
try:
import pkg.openai.dprompt as dprompt
pkg.openai.session.get_session(session_name).reset(explicit=True, use_prompt=params[0])
session.get_session(session_name).reset(explicit=True, use_prompt=params[0])
reply = [tips_custom.command_reset_name_message+"{}".format(dprompt.mode_inst().get_full_name(params[0]))]
except Exception as e:
reply = ["[bot]会话重置失败:{}".format(e)]

View File

@@ -1,6 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
import json
from .. import aamgr
def config_operation(cmd, params):
reply = []
@@ -85,7 +86,7 @@ def config_operation(cmd, params):
return reply
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="cfg",
description="配置项管理",
@@ -93,8 +94,8 @@ def config_operation(cmd, params):
aliases=[],
privilege=2
)
class CfgCommand(AbstractCommandNode):
class CfgCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
return True, config_operation(ctx.command, ctx.params)

View File

@@ -1,7 +1,7 @@
from ..aamgr import AbstractCommandNode, Context, __command_list__
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="cmd",
description="显示指令列表",
@@ -9,10 +9,10 @@ from ..aamgr import AbstractCommandNode, Context, __command_list__
aliases=[],
privilege=1
)
class CmdCommand(AbstractCommandNode):
class CmdCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
command_list = __command_list__
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
command_list = aamgr.__command_list__
reply = []

View File

@@ -1,7 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="help",
description="显示自定义的帮助信息",
@@ -9,9 +9,9 @@ from ..aamgr import AbstractCommandNode, Context
aliases=[],
privilege=1
)
class HelpCommand(AbstractCommandNode):
class HelpCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import tips
reply = ["[bot] "+tips.help_message + "\n请输入 !cmd 查看指令列表"]

View File

@@ -1,7 +1,9 @@
from ..aamgr import AbstractCommandNode, Context
import threading
@AbstractCommandNode.register(
from .. import aamgr
@aamgr.AbstractCommandNode.register(
parent=None,
name="reload",
description="执行热重载",
@@ -9,9 +11,9 @@ import threading
aliases=[],
privilege=2
)
class ReloadCommand(AbstractCommandNode):
class ReloadCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
reply = []
import pkg.utils.reloader

View File

@@ -1,9 +1,10 @@
from ..aamgr import AbstractCommandNode, Context
import threading
import traceback
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="update",
description="更新程序",
@@ -11,9 +12,9 @@ import traceback
aliases=[],
privilege=2
)
class UpdateCommand(AbstractCommandNode):
class UpdateCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
reply = []
import pkg.utils.updater
import pkg.utils.reloader

View File

@@ -1,8 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
import logging
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="usage",
description="获取使用情况",
@@ -10,9 +9,9 @@ import logging
aliases=[],
privilege=1
)
class UsageCommand(AbstractCommandNode):
class UsageCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
import config
import pkg.utils.credit as credit
import pkg.utils.context

View File

@@ -1,7 +1,7 @@
from ..aamgr import AbstractCommandNode, Context
from .. import aamgr
@AbstractCommandNode.register(
@aamgr.AbstractCommandNode.register(
parent=None,
name="version",
description="查看版本信息",
@@ -9,9 +9,9 @@ from ..aamgr import AbstractCommandNode, Context
aliases=[],
privilege=1
)
class VersionCommand(AbstractCommandNode):
class VersionCommand(aamgr.AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
reply = []
import pkg.utils.updater

View File

@@ -1,23 +1,7 @@
# 指令处理模块
import logging
import json
import datetime
import os
import threading
import traceback
import pkg.openai.session
import pkg.openai.manager
import pkg.utils.reloader
import pkg.utils.updater
import pkg.utils.context
import pkg.qqbot.message
import pkg.utils.credit as credit
# import pkg.qqbot.cmds.model as cmdmodel
import pkg.qqbot.cmds.aamgr as cmdmgr
from mirai import Image
from ..qqbot.cmds import aamgr as cmdmgr
def process_command(session_name: str, text_message: str, mgr, config,

View File

@@ -1,32 +1,25 @@
import asyncio
import json
import os
import threading
import logging
from mirai import At, GroupMessage, MessageEvent, Mirai, StrangerMessage, WebSocketAdapter, HTTPAdapter, \
FriendMessage, Image, MessageChain, Plain
from func_timeout import func_set_timeout
import func_timeout
import pkg.openai.session
import pkg.openai.manager
from func_timeout import FunctionTimedOut
import logging
from ..openai import session as openai_session
import pkg.qqbot.filter
import pkg.qqbot.process as processor
import pkg.utils.context
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models
from ..qqbot import filter as qqbot_filter
from ..qqbot import process as processor
from ..utils import context
from ..plugin import host as plugin_host
from ..plugin import models as plugin_models
import tips as tips_custom
import pkg.qqbot.adapter as msadapter
from ..qqbot import adapter as msadapter
# 检查消息是否符合泛响应匹配机制
def check_response_rule(group_id:int, text: str):
config = pkg.utils.context.get_config()
config = context.get_config()
rules = config.response_rules
@@ -55,7 +48,7 @@ def check_response_rule(group_id:int, text: str):
def response_at(group_id: int):
config = pkg.utils.context.get_config()
config = context.get_config()
use_response_rule = config.response_rules
@@ -73,7 +66,7 @@ def response_at(group_id: int):
def random_responding(group_id):
config = pkg.utils.context.get_config()
config = context.get_config()
use_response_rule = config.response_rules
@@ -130,10 +123,10 @@ class QQBotManager:
self.adapter = NakuruProjectAdapter(config.nakuru_config)
self.bot_account_id = self.adapter.bot_account_id
else:
self.adapter = pkg.utils.context.get_qqbot_manager().adapter
self.bot_account_id = pkg.utils.context.get_qqbot_manager().bot_account_id
self.adapter = context.get_qqbot_manager().adapter
self.bot_account_id = context.get_qqbot_manager().bot_account_id
pkg.utils.context.set_qqbot_manager(self)
context.set_qqbot_manager(self)
# 注册诸事件
# Caution: 注册新的事件处理器之后请务必在unsubscribe_all中编写相应的取消订阅代码
@@ -154,7 +147,7 @@ class QQBotManager:
self.on_person_message(event)
pkg.utils.context.get_thread_ctl().submit_user_task(
context.get_thread_ctl().submit_user_task(
friend_message_handler,
)
self.adapter.register_listener(
@@ -179,7 +172,7 @@ class QQBotManager:
self.on_person_message(event)
pkg.utils.context.get_thread_ctl().submit_user_task(
context.get_thread_ctl().submit_user_task(
stranger_message_handler,
)
# nakuru不区分好友和陌生人故仅为yirimirai注册陌生人事件
@@ -206,7 +199,7 @@ class QQBotManager:
self.on_group_message(event)
pkg.utils.context.get_thread_ctl().submit_user_task(
context.get_thread_ctl().submit_user_task(
group_message_handler,
event
)
@@ -250,22 +243,22 @@ class QQBotManager:
if hasattr(banlist, "enable_group"):
self.enable_group = banlist.enable_group
config = pkg.utils.context.get_config()
config = context.get_config()
if os.path.exists("sensitive.json") \
and config.sensitive_word_filter is not None \
and config.sensitive_word_filter:
with open("sensitive.json", "r", encoding="utf-8") as f:
sensitive_json = json.load(f)
self.reply_filter = pkg.qqbot.filter.ReplyFilter(
self.reply_filter = qqbot_filter.ReplyFilter(
sensitive_words=sensitive_json['words'],
mask=sensitive_json['mask'] if 'mask' in sensitive_json else '*',
mask_word=sensitive_json['mask_word'] if 'mask_word' in sensitive_json else ''
)
else:
self.reply_filter = pkg.qqbot.filter.ReplyFilter([])
self.reply_filter = qqbot_filter.ReplyFilter([])
def send(self, event, msg, check_quote=True, check_at_sender=True):
config = pkg.utils.context.get_config()
config = context.get_config()
if check_at_sender and config.at_sender:
msg.insert(
@@ -306,7 +299,7 @@ class QQBotManager:
for i in range(self.retry):
try:
@func_set_timeout(config.process_message_timeout)
@func_timeout.func_set_timeout(config.process_message_timeout)
def time_ctrl_wrapper():
reply = processor.process_message('person', event.sender.id, str(event.message_chain),
event.message_chain,
@@ -315,16 +308,16 @@ class QQBotManager:
reply = time_ctrl_wrapper()
break
except FunctionTimedOut:
except func_timeout.FunctionTimedOut:
logging.warning("person_{}: 超时,重试中({})".format(event.sender.id, i))
pkg.openai.session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
if "person_{}".format(event.sender.id) in pkg.qqbot.process.processing:
pkg.qqbot.process.processing.remove('person_{}'.format(event.sender.id))
openai_session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
if "person_{}".format(event.sender.id) in processor.processing:
processor.processing.remove('person_{}'.format(event.sender.id))
failed += 1
continue
if failed == self.retry:
pkg.openai.session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
openai_session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
self.notify_admin("{} 请求超时".format("person_{}".format(event.sender.id)))
reply = [tips_custom.reply_message]
@@ -344,7 +337,7 @@ class QQBotManager:
failed = 0
for i in range(self.retry):
try:
@func_set_timeout(config.process_message_timeout)
@func_timeout.func_set_timeout(config.process_message_timeout)
def time_ctrl_wrapper():
replys = processor.process_message('group', event.group.id,
str(event.message_chain).strip() if text is None else text,
@@ -354,16 +347,16 @@ class QQBotManager:
replys = time_ctrl_wrapper()
break
except FunctionTimedOut:
except func_timeout.FunctionTimedOut:
logging.warning("group_{}: 超时,重试中({})".format(event.group.id, i))
pkg.openai.session.get_session('group_{}'.format(event.group.id)).release_response_lock()
if "group_{}".format(event.group.id) in pkg.qqbot.process.processing:
pkg.qqbot.process.processing.remove('group_{}'.format(event.group.id))
openai_session.get_session('group_{}'.format(event.group.id)).release_response_lock()
if "group_{}".format(event.group.id) in processor.processing:
processor.processing.remove('group_{}'.format(event.group.id))
failed += 1
continue
if failed == self.retry:
pkg.openai.session.get_session('group_{}'.format(event.group.id)).release_response_lock()
openai_session.get_session('group_{}'.format(event.group.id)).release_response_lock()
self.notify_admin("{} 请求超时".format("group_{}".format(event.group.id)))
replys = [tips_custom.replys_message]
@@ -392,7 +385,7 @@ class QQBotManager:
# 通知系统管理员
def notify_admin(self, message: str):
config = pkg.utils.context.get_config()
config = context.get_config()
if config.admin_qq != 0 and config.admin_qq != []:
logging.info("通知管理员:{}".format(message))
if type(config.admin_qq) == int:
@@ -410,7 +403,7 @@ class QQBotManager:
)
def notify_admin_message_chain(self, message):
config = pkg.utils.context.get_config()
config = context.get_config()
if config.admin_qq != 0 and config.admin_qq != []:
logging.info("通知管理员:{}".format(message))
if type(config.admin_qq) == int:

View File

@@ -1,19 +1,20 @@
# 普通消息处理模块
import logging
import openai
import pkg.utils.context
import pkg.openai.session
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models
import pkg.qqbot.blob as blob
import openai
from ..utils import context
from ..openai import session as openai_session
from ..plugin import host as plugin_host
from ..plugin import models as plugin_models
import tips as tips_custom
def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
"""处理异常当notify_admin不为空时会通知管理员返回通知用户的消息"""
import config
pkg.utils.context.get_qqbot_manager().notify_admin(notify_admin)
context.get_qqbot_manager().notify_admin(notify_admin)
if config.hide_exce_info_to_user:
return [tips_custom.alter_tip_message] if tips_custom.alter_tip_message else []
else:
@@ -26,7 +27,7 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
logging.info("[{}]发送消息:{}".format(session_name, text_message[:min(20, len(text_message))] + (
"..." if len(text_message) > 20 else "")))
session = pkg.openai.session.get_session(session_name)
session = openai_session.get_session(session_name)
unexpected_exception_times = 0
@@ -54,7 +55,7 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
"funcs_called": funcs,
}
event = pkg.plugin.host.emit(plugin_models.NormalMessageResponded, **args)
event = plugin_host.emit(plugin_models.NormalMessageResponded, **args)
if event.get_return_value("prefix") is not None:
prefix = event.get_return_value("prefix")
@@ -78,29 +79,29 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
if 'message' in e.error and e.error['message'].__contains__('You exceeded your current quota'):
# 尝试切换api-key
current_key_name = pkg.utils.context.get_openai_manager().key_mgr.get_key_name(
pkg.utils.context.get_openai_manager().key_mgr.using_key
current_key_name = context.get_openai_manager().key_mgr.get_key_name(
context.get_openai_manager().key_mgr.using_key
)
pkg.utils.context.get_openai_manager().key_mgr.set_current_exceeded()
context.get_openai_manager().key_mgr.set_current_exceeded()
# 触发插件事件
args = {
'key_name': current_key_name,
'usage': pkg.utils.context.get_openai_manager().audit_mgr
.get_usage(pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()),
'exceeded_keys': pkg.utils.context.get_openai_manager().key_mgr.exceeded,
'usage': context.get_openai_manager().audit_mgr
.get_usage(context.get_openai_manager().key_mgr.get_using_key_md5()),
'exceeded_keys': context.get_openai_manager().key_mgr.exceeded,
}
event = plugin_host.emit(plugin_models.KeyExceeded, **args)
if not event.is_prevented_default():
switched, name = pkg.utils.context.get_openai_manager().key_mgr.auto_switch()
switched, name = context.get_openai_manager().key_mgr.auto_switch()
if not switched:
reply = handle_exception(
"api-key调用额度超限({}),无可用api_key,请向OpenAI账户充值或在config.py中更换api_key如果你认为这是误判请尝试重启程序。".format(
current_key_name), "[bot]err:API调用额度超额请联系管理员或等待修复")
else:
openai.api_key = pkg.utils.context.get_openai_manager().key_mgr.get_using_key()
openai.api_key = context.get_openai_manager().key_mgr.get_using_key()
mgr.notify_admin("api-key调用额度超限({}),接口报错,已切换到{}".format(current_key_name, name))
reply = ["[bot]err:API调用额度超额已自动切换请重新发送消息"]
continue

View File

@@ -5,28 +5,22 @@ import time
import mirai
import logging
from mirai import MessageChain, Plain
# 这里不使用动态引入config
# 因为在这里动态引入会卡死程序
# 而此模块静态引用config与动态引入的表现一致
# 已弃用,由于超时时间现已动态使用
# import config as config_init_import
import pkg.openai.session
import pkg.openai.manager
import pkg.utils.reloader
import pkg.utils.updater
import pkg.utils.context
import pkg.qqbot.message
import pkg.qqbot.command
import pkg.qqbot.ratelimit as ratelimit
from ..qqbot import ratelimit
from ..qqbot import command, message
from ..openai import session as openai_session
from ..utils import context
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models
import pkg.qqbot.ignore as ignore
import pkg.qqbot.banlist as banlist
import pkg.qqbot.blob as blob
from ..plugin import host as plugin_host
from ..plugin import models as plugin_models
from ..qqbot import ignore
from ..qqbot import banlist
from ..qqbot import blob
import tips as tips_custom
processing = []
@@ -41,11 +35,11 @@ def is_admin(qq: int) -> bool:
return qq == config.admin_qq
def process_message(launcher_type: str, launcher_id: int, text_message: str, message_chain: MessageChain,
sender_id: int) -> MessageChain:
def process_message(launcher_type: str, launcher_id: int, text_message: str, message_chain: mirai.MessageChain,
sender_id: int) -> mirai.MessageChain:
global processing
mgr = pkg.utils.context.get_qqbot_manager()
mgr = context.get_qqbot_manager()
reply = []
session_name = "{}_{}".format(launcher_type, launcher_id)
@@ -62,7 +56,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
import config
if not config.wait_last_done and session_name in processing:
return MessageChain([Plain(tips_custom.message_drop_tip)])
return mirai.MessageChain([mirai.Plain(tips_custom.message_drop_tip)])
# 检查是否被禁言
if launcher_type == 'group':
@@ -74,9 +68,9 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
import config
if config.income_msg_check:
if mgr.reply_filter.is_illegal(text_message):
return MessageChain(Plain("[bot] 消息中存在不合适的内容, 请更换措辞"))
return mirai.MessageChain(mirai.Plain("[bot] 消息中存在不合适的内容, 请更换措辞"))
pkg.openai.session.get_session(session_name).acquire_response_lock()
openai_session.get_session(session_name).acquire_response_lock()
text_message = text_message.strip()
@@ -87,7 +81,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
# 处理消息
try:
config = pkg.utils.context.get_config()
config = context.get_config()
processing.append(session_name)
try:
@@ -114,7 +108,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
reply = event.get_return_value("reply")
if not event.is_prevented_default():
reply = pkg.qqbot.command.process_command(session_name, text_message,
reply = command.process_command(session_name, text_message,
mgr, config, launcher_type, launcher_id, sender_id, is_admin(sender_id))
else: # 消息
@@ -124,7 +118,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
if ratelimit.is_reach_limit(session_name):
logging.info("根据限速策略丢弃[{}]消息: {}".format(session_name, text_message))
return MessageChain(["[bot]"+tips_custom.rate_limit_drop_tip]) if tips_custom.rate_limit_drop_tip != "" else []
return mirai.MessageChain(["[bot]"+tips_custom.rate_limit_drop_tip]) if tips_custom.rate_limit_drop_tip != "" else []
before = time.time()
# 触发插件事件
@@ -146,7 +140,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
reply = event.get_return_value("reply")
if not event.is_prevented_default():
reply = pkg.qqbot.message.process_normal_message(text_message,
reply = message.process_normal_message(text_message,
mgr, config, launcher_type, launcher_id, sender_id)
# 限速等待时间
@@ -170,7 +164,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
finally:
processing.remove(session_name)
finally:
pkg.openai.session.get_session(session_name).release_response_lock()
openai_session.get_session(session_name).release_response_lock()
# 检查延迟时间
if config.force_delay_range[1] == 0:
@@ -191,4 +185,4 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
logging.info("[风控] 强制延迟{:.2f}秒(如需关闭请到config.py修改force_delay_range字段)".format(delay_time))
time.sleep(delay_time)
return MessageChain(reply)
return mirai.MessageChain(reply)

View File

@@ -1,19 +1,18 @@
import mirai
from ..adapter import MessageSourceAdapter, MessageConverter, EventConverter
import nakuru
import nakuru.entities.components as nkc
import asyncio
import typing
import traceback
import logging
import json
from pkg.qqbot.blob import Forward, ForwardMessageNode, ForwardMessageDiaplay
import mirai
import nakuru
import nakuru.entities.components as nkc
from .. import adapter as adapter_model
from ...qqbot import blob
class NakuruProjectMessageConverter(MessageConverter):
class NakuruProjectMessageConverter(adapter_model.MessageConverter):
"""消息转换器"""
@staticmethod
def yiri2target(message_chain: mirai.MessageChain) -> list:
@@ -49,7 +48,7 @@ class NakuruProjectMessageConverter(MessageConverter):
nakuru_msg_list.append(nkc.Record.fromURL(component.url))
elif component.path is not None:
nakuru_msg_list.append(nkc.Record.fromFileSystem(component.path))
elif type(component) is Forward:
elif type(component) is blob.Forward:
# 转发消息
yiri_forward_node_list = component.node_list
nakuru_forward_node_list = []
@@ -102,7 +101,7 @@ class NakuruProjectMessageConverter(MessageConverter):
return chain
class NakuruProjectEventConverter(EventConverter):
class NakuruProjectEventConverter(adapter_model.EventConverter):
"""事件转换器"""
@staticmethod
def yiri2target(event: typing.Type[mirai.Event]):
@@ -157,7 +156,7 @@ class NakuruProjectEventConverter(EventConverter):
raise Exception("未支持转换的事件类型: " + str(event))
class NakuruProjectAdapter(MessageSourceAdapter):
class NakuruProjectAdapter(adapter_model.MessageSourceAdapter):
"""nakuru-project适配器"""
bot: nakuru.CQHTTP
bot_account_id: int
@@ -185,7 +184,11 @@ class NakuruProjectAdapter(MessageSourceAdapter):
if resp.status_code == 403:
logging.error("go-cqhttp拒绝访问请检查config.py中nakuru_config的token是否与go-cqhttp设置的access-token匹配")
raise Exception("go-cqhttp拒绝访问请检查config.py中nakuru_config的token是否与go-cqhttp设置的access-token匹配")
self.bot_account_id = int(resp.json()['data']['user_id'])
try:
self.bot_account_id = int(resp.json()['data']['user_id'])
except Exception as e:
logging.error("获取go-cqhttp账号信息失败: {}, 请检查是否已启动go-cqhttp并配置正确".format(e))
raise Exception("获取go-cqhttp账号信息失败: {}, 请检查是否已启动go-cqhttp并配置正确".format(e))
def send_message(
self,

View File

@@ -1,13 +1,14 @@
from ..adapter import MessageSourceAdapter
import asyncio
import typing
import mirai
import mirai.models.bus
from mirai.bot import MiraiRunner
import asyncio
import typing
from .. import adapter as adapter_model
class YiriMiraiAdapter(MessageSourceAdapter):
class YiriMiraiAdapter(adapter_model.MessageSourceAdapter):
"""YiriMirai适配器"""
bot: mirai.Mirai

File diff suppressed because one or more lines are too long

View File

@@ -1,5 +1,5 @@
import threading
from pkg.utils import ThreadCtl
from . import threadctl
context = {
@@ -87,8 +87,8 @@ def set_thread_ctl(inst):
context_lock.release()
def get_thread_ctl() -> ThreadCtl:
def get_thread_ctl() -> threadctl.ThreadCtl:
context_lock.acquire()
t: ThreadCtl = context['pool_ctl']
t: threadctl.ThreadCtl = context['pool_ctl']
context_lock.release()
return t

View File

@@ -1,6 +1,6 @@
from pip._internal import main as pipmain
import pkg.utils.log as log
from . import log
def install(package):
@@ -19,7 +19,7 @@ def run_pip(params: list):
def install_requirements(file):
pipmain(['install', '-r', file, "--upgrade", "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
pipmain(['install', '-r', file, "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
"--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
log.reset_logging()

View File

@@ -1,10 +1,9 @@
import logging
import threading
import importlib
import pkgutil
import pkg.utils.context as context
import pkg.plugin.host
from . import context
from ..plugin import host as plugin_host
def walk(module, prefix='', path_prefix=''):
@@ -15,7 +14,7 @@ def walk(module, prefix='', path_prefix=''):
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.', path_prefix + item.name + '/')
else:
logging.info('reload module: {}, path: {}'.format(prefix + item.name, path_prefix + item.name + '.py'))
pkg.plugin.host.__current_module_path__ = "plugins/" + path_prefix + item.name + '.py'
plugin_host.__current_module_path__ = "plugins/" + path_prefix + item.name + '.py'
importlib.reload(__import__(module.__name__ + '.' + item.name, fromlist=['']))

View File

@@ -1,11 +1,11 @@
import logging
from PIL import Image, ImageDraw, ImageFont
import re
import os
import config
import traceback
from PIL import Image, ImageDraw, ImageFont
text_render_font: ImageFont = None
if config.blob_message_strategy == "image": # 仅在启用了image时才加载字体

View File

@@ -3,10 +3,9 @@ import logging
import os.path
import requests
import json
import pkg.utils.constants
import pkg.utils.network as network
from . import constants
from . import network
def check_dulwich_closure():
@@ -70,7 +69,7 @@ def get_release_list() -> list:
def get_current_tag() -> str:
"""获取当前tag"""
current_tag = pkg.utils.constants.semantic_version
current_tag = constants.semantic_version
if os.path.exists("current_tag"):
with open("current_tag", "r") as f:
current_tag = f.read()

View File

@@ -2,7 +2,7 @@ requests
openai
dulwich~=0.21.6
colorlog~=6.6.0
yiri-mirai
yiri-mirai-rc
websockets
urllib3
func_timeout~=4.3.5

View File

@@ -4,5 +4,17 @@
"time": "2023-08-01 10:49:26",
"timestamp": 1690858166,
"content": "现已支持GPT函数调用功能欢迎了解https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0"
},
{
"id": 3,
"time": "2023-11-10 12:20:09",
"timestamp": 1699590009,
"content": "OpenAI 库1.0版本已发行,若出现 OpenAI 调用问题,请更新 QChatGPT 版本。详见项目主页https://github.com/RockChinQ/QChatGPT"
},
{
"id": 4,
"time": "2023-11-13 18:02:39",
"timestamp": 1699869759,
"content": "近期 OpenAI 接口改动频繁正在积极适配并添加新功能请尽快更新到最新版本更新方式https://github.com/RockChinQ/QChatGPT/discussions/595"
}
]

View File

@@ -0,0 +1,7 @@
import re
repo_url = "git@github.com:RockChinQ/WebwlkrPlugin.git"
repo = re.findall(r'(?:https?://github\.com/|git@github\.com:)([^/]+/[^/]+?)(?:\.git|/|$)', repo_url)
print(repo)