Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-25 19:37:36 +08:00)
Compare commits
94 Commits
| SHA1 |
|---|
| b0cca0a4c2 |
| a2bda85a9c |
| 20677cff86 |
| c8af5d8445 |
| 2dbe984539 |
| 6b8fa664f1 |
| 2b9612e933 |
| 749d0219fb |
| a11a152bd7 |
| fc803a3742 |
| 13a1e15f24 |
| 3f41b94da5 |
| 0fb5bfda20 |
| dc1fd73ebb |
| 161b694f71 |
| 45d1c89e45 |
| e26664aa51 |
| e29691efbd |
| 6d45327882 |
| fbd41eef49 |
| 0a30c88322 |
| 4f5af0e8c8 |
| df3f0fd159 |
| f2493c79dd |
| a86a035b6b |
| 7995793bfd |
| a56b340646 |
| 7473cdfe16 |
| 24273ac158 |
| fe6275000e |
| 5fbf369f82 |
| 4400475ffa |
| 796eb7c95d |
| 89a01378e7 |
| f4735e5e30 |
| f1bb3045aa |
| 96e474a555 |
| 833d29b101 |
| dce6734ba2 |
| 0481167dc6 |
| a002f93f7b |
| 3c894fe70e |
| 8c69b8a1d9 |
| a9dae05303 |
| ae6994e241 |
| caa72fa40c |
| 46cc9220c3 |
| ddb56d7a8e |
| a0267416d7 |
| 56e1ef3602 |
| b4fc1057d1 |
| 06037df607 |
| dce134d08d |
| cca471d068 |
| ddb211b74a |
| cef70751ff |
| 2d2219fc6e |
| 514a6b4192 |
| 7a552b3434 |
| ecebd1b0e0 |
| 8dc34d2a88 |
| d52644ceec |
| 3052510591 |
| 777a5617db |
| e17c1087e9 |
| 633695175a |
| 9e78bf3d21 |
| 43aa68a55d |
| b8308f8c57 |
| 466bfbddeb |
| b6da07b225 |
| 2f2159239a |
| 67d1ca8a65 |
| 497a393e83 |
| 782c0e22ea |
| 2932fc6dfd |
| 0a9eab2113 |
| 50a673a8ec |
| 9e25d0f9e4 |
| 23cd7be711 |
| 025b9e33f1 |
| bab2f64913 |
| b00e09aa9c |
| 0b109fdc7a |
| 018fea2ddb |
| f8a3cc4352 |
| 6ab853acc1 |
| e825dea02f |
| cf8740d16e |
| 9c4809e26f |
| 0a232fd9ef |
| 23016a0791 |
| cdcc67ff23 |
| 92274bfc34 |
@@ -1,34 +0,0 @@
-// For format details, see https://aka.ms/devcontainer.json. For config options, see the
-// README at: https://github.com/devcontainers/templates/tree/main/src/python
-{
-    "name": "QChatGPT 3.10",
-    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
-    "image": "mcr.microsoft.com/devcontainers/python:0-3.10",
-
-    // Features to add to the dev container. More info: https://containers.dev/features.
-    // "features": {},
-
-    // Use 'forwardPorts' to make a list of ports inside the container available locally.
-    // "forwardPorts": [],
-
-    // Use 'postCreateCommand' to run commands after the container is created.
-    // "postCreateCommand": "pip3 install --user -r requirements.txt",
-
-    // Configure tool-specific properties.
-    // "customizations": {},
-    "customizations": {
-        "codespaces": {
-            "repositories": {
-                "RockChinQ/QChatGPT": {
-                    "permissions": "write-all"
-                },
-                "RockChinQ/revLibs": {
-                    "permissions": "write-all"
-                }
-            }
-        }
-    }
-
-    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
-    // "remoteUser": "root"
-}
22  .github/workflows/sync-wiki.yml  vendored
@@ -1,7 +1,14 @@
name: Update Wiki

on:
+  pull_request:
+    branches:
+      - master
+    paths:
+      - 'res/wiki/**'
  push:
    branches:
      - master
+    paths:
+      - 'res/wiki/**'
@@ -23,11 +30,16 @@ jobs:
      - name: Copy res/wiki content to wiki
        run: |
          cp -r res/wiki/* wiki/
+      - name: Check for changes
+        run: |
+          cd wiki
+          if git diff --quiet; then
+            echo "No changes to commit."
+            exit 0
+          fi
      - name: Commit and Push Changes
        run: |
          cd wiki
+          if git diff --name-only; then
+            git add .
+            git commit -m "Update wiki"
+            git push
+          fi
-          git add .
-          git commit -m "Update wiki"
-          git push
@@ -26,7 +26,7 @@ jobs:
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
-          python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow
+          python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow CallingGPT tiktoken

      - name: Copy Scripts
        run: |
1  .gitignore  vendored
@@ -25,3 +25,4 @@ bin/
.vscode
test_*
venv/
+hugchat.json
34  README.md
@@ -7,18 +7,23 @@
[English](README_en.md) | Simplified Chinese

[](https://github.com/RockChinQ/QChatGPT/releases/latest)


> 2023/4/27 Running a proof of concept of a pure-Python QQ login framework, YAML config files, async programming and more; feel free to try the [LightQChat](https://github.com/RockChinQ/LightQChat) project
+> 2023/7/29 GPT's Function Calling is now supported, giving an effect similar to ChatGPT Plugins; see [Wiki: content functions](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)
> 2023/4/24 Logging in to QQ via go-cqhttp is supported; see [this document](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE)
> 2023/3/18 The GPT-4 API (beta) is now supported; see `completion_api_params` in `config-template.py`
> 2023/3/15 The reverse-engineering library now supports New Bing; see the [plugin docs](https://github.com/RockChinQ/revLibs) for usage

**QChatGPT requires Python version >= 3.9**
- See the [project Wiki](https://github.com/RockChinQ/QChatGPT/wiki) for project details
- Official discussion and support group: 656285629
  - **Before asking in the group, please `make sure` that you have searched the docs and issues without finding a solution**
-- Community group (with one-click deployment packages, GUI resources, etc.): 362515018
+- Community group (with one-click deployment packages, GUI resources, etc.): 891448839
- For a QQ channel bot, see [QQChannelChatGPT](https://github.com/Soulter/QQChannelChatGPT)
- Contributions of all kinds are welcome; see the [contribution guide](CONTRIBUTING.md)
- Buy a ChatGPT account: [this link](http://fk.kimi.asia)

## 🍺Supported models
@@ -33,7 +38,7 @@
- ChatGPT web version, GPT-3.5 model, connected via [plugin](https://github.com/RockChinQ/revLibs)
- ChatGPT web version, GPT-4 model (currently requires a ChatGPT Plus subscription), connected via [plugin](https://github.com/RockChinQ/revLibs)
- New Bing reverse-engineering library, connected via [plugin](https://github.com/RockChinQ/revLibs)
-- HuggingChat, connected via [plugin](https://github.com/RockChinQ/revLibs); no account needed, English only
+- HuggingChat, connected via [plugin](https://github.com/RockChinQ/revLibs); English only

### Story continuation
@@ -107,6 +112,7 @@
<summary>✅Plugin loading supported🧩</summary>

- Implements its own plugin loader and related support
+- Supports GPT's Function Calling feature
- See the [plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
</details>
<details>
@@ -161,6 +167,9 @@

### - Register an OpenAI account

<details>
<summary>Click to view the steps</summary>

+> If you plan to use a non-OpenAI model directly (such as New Bing), you can skip this step, proceed with the deployment below, and then configure things according to the relevant plugin's documentation

Register by following these articles
@@ -171,6 +180,8 @@
After registering, go to the [account page](https://beta.openai.com/account/api-keys) to view your api_key
+After registration is complete, use the automated or manual deployment steps below

</details>

### - Automated deployment

<details>
@@ -231,7 +242,7 @@ cd QChatGPT
2. Install dependencies

```bash
-pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk
+pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk CallingGPT tiktoken
```

3. Run the main program once to generate the config file
@@ -271,21 +282,16 @@ python3 main.py
See the [Wiki plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
For a development tutorial, see the [Wiki plugin development page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)

+⭐We now support [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); see [Wiki: content functions](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)

<details>
<summary>View the plugin list</summary>

-### Example plugins
+[Full plugin list](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6); feel free to open an issue to submit new plugins

-Located under the `tests/plugin_examples` directory; copy a plugin's whole directory into `plugins` to use it
-
-- `cmdcn` - Chinese form of the main program's commands
-- `hello_plugin` - replies with a corresponding message when `hello` is received
-- `urlikethisijustsix` - replies with a corresponding message when an offensive message is received
-
-### More
-
-New plugins are welcome
+### Selected plugins

- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - lets the bot access the internet!!
- [revLibs](https://github.com/RockChinQ/revLibs) - connects the ChatGPT web version to this project; see [the differences between the official API, the ChatGPT web version, and the ChatGPT API](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
- [Switcher](https://github.com/RockChinQ/Switcher) - switch the model in use via commands
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - repository form of `hello_plugin`, a plugin development template
@@ -7,6 +7,7 @@
English | [简体中文](README.md)

[](https://github.com/RockChinQ/QChatGPT/releases/latest)


- Refer to [Wiki](https://github.com/RockChinQ/QChatGPT/wiki) to get further information.
- Official QQ group: 656285629
@@ -27,7 +28,7 @@ English | [简体中文](README.md)
- ChatGPT website edition (GPT-3.5), see [revLibs plugin](https://github.com/RockChinQ/revLibs)
- ChatGPT website edition (GPT-4), ChatGPT plus subscription required, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
- New Bing, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
-- HuggingChat, see [revLibs plugin](https://github.com/RockChinQ/revLibs), no accounts required, English only
+- HuggingChat, see [revLibs plugin](https://github.com/RockChinQ/revLibs), English only

### Story
@@ -195,16 +195,22 @@ encourage_sponsor_at_start = True
# Note: a larger prompt_submit_length consumes your OpenAI account quota faster
prompt_submit_length = 2048

+# Whether to reset the session automatically when a token-over-limit error occurs
+# The prompt text can be edited in tips.py
+auto_reset = True

# Parameters for the OpenAI completion API
# Fill in the model below; the program selects the interface automatically
# Currently supported models:
#
# 'gpt-4'
# 'gpt-4-0314'
# 'gpt-4-0613'
# 'gpt-4-32k'
# 'gpt-4-32k-0314'
# 'gpt-4-32k-0613'
# 'gpt-3.5-turbo'
# 'gpt-3.5-turbo-0301'
# 'gpt-3.5-turbo-16k'
# 'gpt-3.5-turbo-0613'
# 'gpt-3.5-turbo-16k-0613'
# 'text-davinci-003'
# 'text-davinci-002'
# 'code-davinci-002'
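Concretely, the section this template generates is an ordinary Python assignment; a minimal sketch with illustrative values follows (the program selects the interface from `model`, and the remaining keys are passed through to the OpenAI API):

```python
# Illustrative values only; any other OpenAI API keyword argument
# can be added alongside "model".
completion_api_params = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.9,
}
```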
223  main.py
@@ -47,7 +47,7 @@ def init_db():

def ensure_dependencies():
    import pkg.utils.pkgmgr as pkgmgr
-    pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "--upgrade",
+    pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "tiktoken", "--upgrade",
                    "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
                    "--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
@@ -134,127 +134,144 @@ def start(first_time_init=False):

    known_exception_caught = False
    try:

        sh = reset_logging()
        pkg.utils.context.context['logger_handler'] = sh

        # Check whether an admin QQ has been configured
        if not (hasattr(config, 'admin_qq') and config.admin_qq != 0):
            # logging.warning("未设置管理员QQ,管理员权限指令及运行告警将无法使用,如需设置请修改config.py中的admin_qq字段")
            while True:
                try:
                    config.admin_qq = int(input("未设置管理员QQ,管理员权限指令及运行告警将无法使用,请输入管理员QQ号: "))
                    # Write it back to the file

                    # Read the file
                    config_file_str = ""
                    with open("config.py", "r", encoding="utf-8") as f:
                        config_file_str = f.read()
                    # Replace
                    config_file_str = config_file_str.replace("admin_qq = 0", "admin_qq = " + str(config.admin_qq))
                    # Write
                    with open("config.py", "w", encoding="utf-8") as f:
                        f.write(config_file_str)

                    print("管理员QQ已设置,如需修改请修改config.py中的admin_qq字段")
                    time.sleep(4)
                    break
                except ValueError:
                    print("请输入数字")

        import pkg.openai.manager
        import pkg.database.manager
        import pkg.openai.session
        import pkg.qqbot.manager
        import pkg.openai.dprompt
        import pkg.qqbot.cmds.aamgr

        try:
            pkg.openai.dprompt.register_all()
            pkg.qqbot.cmds.aamgr.register_all()
            pkg.qqbot.cmds.aamgr.apply_privileges()
        except Exception as e:
            logging.error(e)
            traceback.print_exc()

-        # Configure the openai api_base
-        if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
-            import openai
-            openai.api_base = config.openai_config["reverse_proxy"]
+        # Configure the OpenAI proxy
+        import openai
+        openai.proxy = None  # reset first, since a reload may need to clear the proxy
+        if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None:
+            openai.proxy = config.openai_config["http_proxy"]
+
+        # Configure the openai api_base
+        if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
+            openai.api_base = config.openai_config["reverse_proxy"]

        # Main startup flow
        database = pkg.database.manager.DatabaseManager()

        database.initialize_database()

        openai_interact = pkg.openai.manager.OpenAIInteract(config.openai_config['api_key'])

        # Load all sessions that have not timed out
        pkg.openai.session.load_sessions()

        # Initialize the QQ bot
        qqbot = pkg.qqbot.manager.QQBotManager(first_time_init=first_time_init)

        # Load plugins
        import pkg.plugin.host
        pkg.plugin.host.load_plugins()

        pkg.plugin.host.initialize_plugins()

        if first_time_init:  # not a start following a hot reload, so spawn a new bot thread

            import mirai.exceptions

            def run_bot_wrapper():
                global known_exception_caught
                try:
                    logging.info("使用账号: {}".format(qqbot.bot_account_id))
                    qqbot.adapter.run_sync()
                except TypeError as e:
                    if str(e).__contains__("argument 'debug'"):
                        logging.error(
                            "连接bot失败:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/82".format(e))
                        known_exception_caught = True
                    elif str(e).__contains__("As of 3.10, the *loop*"):
                        logging.error(
                            "Websockets版本过低:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/5".format(e))
                        known_exception_caught = True

                except websockets.exceptions.InvalidStatus as e:
                    logging.error(
                        "mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
                            e))
                    known_exception_caught = True
                except mirai.exceptions.NetworkError as e:
                    logging.error("连接mirai-api-http失败:{}, 请检查是否已按照文档启动mirai".format(e))
                    known_exception_caught = True
                except Exception as e:
                    if str(e).__contains__("404"):
                        logging.error(
                            "mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
                                e))
                        known_exception_caught = True
                    elif str(e).__contains__("signal only works in main thread"):
                        logging.error(
                            "hypercorn异常:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/86".format(
                                e))
                        known_exception_caught = True
                    elif str(e).__contains__("did not receive a valid HTTP"):
                        logging.error(
                            "mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
                                e))
                    else:
                        import traceback
                        traceback.print_exc()
                        logging.error(
                            "捕捉到未知异常:{}, 请前往 https://github.com/RockChinQ/QChatGPT/issues 查找或提issue".format(e))
                        known_exception_caught = True
                        raise e
                finally:
                    time.sleep(12)
            threading.Thread(
                target=run_bot_wrapper
            ).start()
    except Exception as e:
        traceback.print_exc()
        if isinstance(e, KeyboardInterrupt):
            logging.info("程序被用户中止")
            sys.exit(0)
        elif isinstance(e, SyntaxError):
            logging.error("配置文件存在语法错误,请检查配置文件:\n1. 是否存在中文符号\n2. 是否已按照文件中的说明填写正确")
            sys.exit(1)
        else:
            logging.error("初始化失败:{}".format(e))
            sys.exit(1)
    finally:
        # On Windows, warn that selection mode may pause the program
        if os.name == 'nt':
@@ -53,6 +53,7 @@
    "inappropriate_message_tips": "[百度云]请珍惜机器人,当前返回内容不合规",
    "encourage_sponsor_at_start": true,
    "prompt_submit_length": 2048,
+    "auto_reset": true,
    "completion_api_params": {
        "model": "gpt-3.5-turbo",
        "temperature": 0.9,
0  pkg/openai/api/__init__.py  Normal file
200  pkg/openai/api/chat_completion.py  Normal file
@@ -0,0 +1,200 @@
import openai
import json
import logging

from .model import RequestBase

from ..funcmgr import get_func_schema_list, execute_function, get_func, get_func_schema, ContentFunctionNotFoundError


class ChatCompletionRequest(RequestBase):
    """Request class for the ChatCompletion interface.

    This class guarantees that every message it returns with role assistant has finish_reason == stop.
    When function calls occur, the yielded sequence is:
    function call request -> function call result -> ... -> assistant message -> stop.
    """
    model: str
    messages: list[dict[str, str]]
    kwargs: dict

    stopped: bool = False

    pending_func_call: dict = None

    pending_msg: str

    def flush_pending_msg(self):
        self.append_message(
            role="assistant",
            content=self.pending_msg
        )
        self.pending_msg = ""

    def append_message(self, role: str, content: str, name: str=None):
        msg = {
            "role": role,
            "content": content
        }

        if name is not None:
            msg['name'] = name

        self.messages.append(msg)

    def __init__(
        self,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ):
        self.model = model
        self.messages = messages.copy()

        self.kwargs = kwargs

        self.req_func = openai.ChatCompletion.acreate

        self.pending_func_call = None

        self.stopped = False

        self.pending_msg = ""

    def __iter__(self):
        return self

    def __next__(self) -> dict:
        if self.stopped:
            raise StopIteration()

        if self.pending_func_call is None:  # no pending function call request

            args = {
                "model": self.model,
                "messages": self.messages,
            }

            funcs = get_func_schema_list()

            if len(funcs) > 0:
                args['functions'] = funcs

            # merge in kwargs
            args = {**args, **self.kwargs}

            resp = self._req(**args)

            choice0 = resp["choices"][0]

            # if this is not a function call and finish_reason is stop, stop iterating
            if 'function_call' not in choice0['message'] and choice0["finish_reason"] == "stop":
                self.stopped = True

            if 'function_call' in choice0['message']:
                self.pending_func_call = choice0['message']['function_call']

                # self.append_message(
                #     role="assistant",
                #     content="function call: "+json.dumps(self.pending_func_call, ensure_ascii=False)
                # )

                return {
                    "id": resp["id"],
                    "choices": [
                        {
                            "index": choice0["index"],
                            "message": {
                                "role": "assistant",
                                "type": "function_call",
                                "content": None,
                                "function_call": choice0['message']['function_call']
                            },
                            "finish_reason": "function_call"
                        }
                    ],
                    "usage": resp["usage"]
                }
            else:

                # self.pending_msg += choice0['message']['content']
                # a plain reply always comes last, so it need not be appended to the internal messages

                return {
                    "id": resp["id"],
                    "choices": [
                        {
                            "index": choice0["index"],
                            "message": {
                                "role": "assistant",
                                "type": "text",
                                "content": choice0['message']['content']
                            },
                            "finish_reason": "stop"
                        }
                    ],
                    "usage": resp["usage"]
                }
        else:  # handle the pending function call request

            cp_pending_func_call = self.pending_func_call.copy()

            self.pending_func_call = None

            func_name = cp_pending_func_call['name']
            arguments = {}

            try:

                try:
                    arguments = json.loads(cp_pending_func_call['arguments'])
                # handle arguments that are not valid JSON
                except json.decoder.JSONDecodeError:
                    # look up the function's parameter list
                    func_schema = get_func_schema(func_name)

                    arguments = {
                        func_schema['parameters']['required'][0]: cp_pending_func_call['arguments']
                    }

                logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))

                # execute the function call
                ret = ""
                try:
                    ret = execute_function(func_name, arguments)

                    logging.info("函数执行完成。")
                except Exception as e:
                    ret = "error: execute function failed: {}".format(str(e))
                    logging.error("函数执行失败: {}".format(str(e)))

                self.append_message(
                    role="function",
                    content=json.dumps(ret, ensure_ascii=False),
                    name=func_name
                )

                return {
                    "id": -1,
                    "choices": [
                        {
                            "index": -1,
                            "message": {
                                "role": "function",
                                "type": "function_return",
                                "function_name": func_name,
                                "content": json.dumps(ret, ensure_ascii=False)
                            },
                            "finish_reason": "function_return"
                        }
                    ],
                    "usage": {
                        "prompt_tokens": 0,
                        "completion_tokens": 0,
                        "total_tokens": 0
                    }
                }

            except ContentFunctionNotFoundError:
                raise Exception("没有找到函数: {}".format(func_name))
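Since the request classes are plain iterators, a caller simply drains one until the final text reply arrives. A minimal consumption sketch (the model and messages are illustrative; it assumes `openai.api_key` has already been set):

```python
from pkg.openai.api.chat_completion import ChatCompletionRequest

req = ChatCompletionRequest(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "What time is it in Berlin?"}],
)

for resp in req:  # yields: function_call -> function_return -> ... -> final text
    msg = resp["choices"][0]["message"]
    if msg["type"] == "text":
        print(msg["content"])          # the final assistant reply
    elif msg["type"] == "function_call":
        print("calling:", msg["function_call"]["name"])
    elif msg["type"] == "function_return":
        print("returned:", msg["content"])
```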
111  pkg/openai/api/completion.py  Normal file
@@ -0,0 +1,111 @@
import openai

from .model import RequestBase


class CompletionRequest(RequestBase):
    """Request class for the Completion interface.

    The caller can keep calling next() until finish_reason is stop.
    """

    model: str
    prompt: str
    kwargs: dict

    stopped: bool = False

    def __init__(
        self,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ):
        self.model = model
        self.prompt = ""

        for message in messages:
            self.prompt += message["role"] + ": " + message["content"] + "\n"

        self.prompt += "assistant: "

        self.kwargs = kwargs

        self.req_func = openai.Completion.acreate

    def __iter__(self):
        return self

    def __next__(self) -> dict:
        """Call the Completion interface and return the generated text

        {
            "id": "id",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "type": "text",
                        "content": "message"
                    },
                    "finish_reason": "reason"
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 20,
                "total_tokens": 30
            }
        }
        """

        if self.stopped:
            raise StopIteration()

        resp = self._req(
            model=self.model,
            prompt=self.prompt,
            **self.kwargs
        )

        if resp["choices"][0]["finish_reason"] == "stop":
            self.stopped = True

        choice0 = resp["choices"][0]

        self.prompt += choice0["text"]

        return {
            "id": resp["id"],
            "choices": [
                {
                    "index": choice0["index"],
                    "message": {
                        "role": "assistant",
                        "type": "text",
                        "content": choice0["text"]
                    },
                    "finish_reason": choice0["finish_reason"]
                }
            ],
            "usage": resp["usage"]
        }


if __name__ == "__main__":
    import os

    openai.api_key = os.environ["OPENAI_API_KEY"]

    for resp in CompletionRequest(
        model="text-davinci-003",
        messages=[
            {
                "role": "user",
                "content": "Hello, who are you?"
            }
        ]
    ):
        print(resp)
        if resp["choices"][0]["finish_reason"] == "stop":
            break
51  pkg/openai/api/model.py  Normal file
@@ -0,0 +1,51 @@
# Models for the different interface request types
import threading
import asyncio
import logging

import openai


class RequestBase:

    req_func: callable

    def __init__(self, *args, **kwargs):
        raise NotImplementedError

    def _req(self, **kwargs):
        """Handles proxy concerns"""

        ret: dict = {}
        exception: Exception = None

        async def awrapper(**kwargs):
            nonlocal ret, exception

            try:
                ret = await self.req_func(**kwargs)
                logging.debug("接口请求返回:%s", str(ret))
                return ret
            except Exception as e:
                exception = e

        loop = asyncio.new_event_loop()

        thr = threading.Thread(
            target=loop.run_until_complete,
            args=(awrapper(**kwargs),)
        )

        thr.start()
        thr.join()

        if exception is not None:
            raise exception

        return ret

    def __iter__(self):
        return self

    def __next__(self):
        raise NotImplementedError
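`_req` pushes the async `acreate` call onto a fresh event loop in a worker thread, so synchronous callers keep working and the exception still surfaces on the calling side. The same pattern in isolation, with a stand-in coroutine (the names here are hypothetical, not part of the codebase):

```python
import asyncio
import threading

async def fetch():  # stand-in for openai.ChatCompletion.acreate(...)
    await asyncio.sleep(0.1)
    return {"ok": True}

def call_sync(coro):
    """Run a coroutine to completion on a fresh event loop in a worker thread."""
    result, error = None, None

    def runner():
        nonlocal result, error
        loop = asyncio.new_event_loop()
        try:
            result = loop.run_until_complete(coro)
        except Exception as e:   # carry the exception back to the calling thread
            error = e
        finally:
            loop.close()

    thr = threading.Thread(target=runner)
    thr.start()
    thr.join()
    if error is not None:
        raise error
    return result

print(call_sync(fetch()))  # {'ok': True}
```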
47  pkg/openai/funcmgr.py  Normal file
@@ -0,0 +1,47 @@
# Support helpers wrapping function calling
import logging


from pkg.plugin import host


class ContentFunctionNotFoundError(Exception):
    pass


def get_func_schema_list() -> list:
    """Collect the function schemas from the plugin package and convert them into the format GPT supports"""
    if not host.__enable_content_functions__:
        return []

    schemas = []

    for func in host.__callable_functions__:
        if func['enabled']:
            fun_cp = func.copy()

            del fun_cp['enabled']

            schemas.append(fun_cp)

    return schemas

def get_func(name: str) -> callable:
    if name not in host.__function_inst_map__:
        raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))

    return host.__function_inst_map__[name]

def get_func_schema(name: str) -> dict:
    for func in host.__callable_functions__:
        if func['name'] == name:
            return func
    raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))

def execute_function(name: str, kwargs: dict) -> any:
    """Execute a function call"""

    logging.debug("executing function: name='{}', kwargs={}".format(name, kwargs))

    func = get_func(name)
    return func(**kwargs)
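Each entry of `host.__callable_functions__` is an OpenAI-style function schema plus an `enabled` flag, while `__function_inst_map__` maps the same name to the callable. A hand-built sketch of what these helpers operate on (the plugin and function names are illustrative):

```python
from pkg.plugin import host
from pkg.openai.funcmgr import execute_function, get_func_schema_list

# Roughly what Plugin.func() registers (illustrative entry):
host.__callable_functions__.append({
    "name": "demo_plugin-lookup_weather",   # "<plugin>-<function>"
    "description": "Look up the weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
    "enabled": True,
})
host.__function_inst_map__["demo_plugin-lookup_weather"] = lambda city: f"Sunny in {city}"

print(get_func_schema_list())  # schemas sent to GPT, with "enabled" stripped
print(execute_function("demo_plugin-lookup_weather", {"city": "Berlin"}))
```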
@@ -5,7 +5,9 @@ import openai
import pkg.openai.keymgr
import pkg.utils.context
import pkg.audit.gatherer
-from pkg.openai.modelmgr import ModelRequest, create_openai_model_request
+from pkg.openai.modelmgr import select_request_cls
+
+from pkg.openai.api.model import RequestBase


class OpenAIInteract:
@@ -33,45 +35,31 @@ class OpenAIInteract:

        pkg.utils.context.set_openai_manager(self)

-    # Request an OpenAI completion
-    def request_completion(self, prompts) -> tuple[str, int]:
-        """Request a reply from the completion interface
-
-        Parameters:
-            prompts (str): the prompt
-
-        Returns:
-            str: the reply
-        """
+    def request_completion(self, messages: list):
+        """Request a reply from the completion interface"""

+        # select the request class for the interface
        config = pkg.utils.context.get_config()

-        # choose the interface based on the model
-        ai: ModelRequest = create_openai_model_request(
-            config.completion_api_params['model'],
-            'user',
-            config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None
-        )
-        ai.request(
-            prompts,
-            **config.completion_api_params
-        )
-        response = ai.get_response()
+        request: RequestBase

-        logging.debug("OpenAI response: %s", response)
+        model: str = config.completion_api_params['model']

-        # record usage
-        current_round_token = 0
-        if 'model' in config.completion_api_params:
-            self.audit_mgr.report_text_model_usage(config.completion_api_params['model'],
-                                                   ai.get_total_tokens())
-            current_round_token = ai.get_total_tokens()
-        elif 'engine' in config.completion_api_params:
-            self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'],
-                                                   response['usage']['total_tokens'])
-            current_round_token = response['usage']['total_tokens']
+        cp_parmas = config.completion_api_params.copy()
+        del cp_parmas['model']

-        return ai.get_message(), current_round_token
+        request = select_request_cls(model, messages, cp_parmas)
+
+        # call the interface
+        for resp in request:
+
+            if resp['usage']['total_tokens'] > 0:
+                self.audit_mgr.report_text_model_usage(
+                    model,
+                    resp['usage']['total_tokens']
+                )
+
+            yield resp

    def request_image(self, prompt) -> dict:
        """Request a reply from the image interface
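`request_completion` is now a generator rather than a `(message, tokens)` tuple, so callers consume text, function calls, and function returns as they arrive. A minimal sketch of the new calling convention (it assumes the manager was initialized as in `main.py`):

```python
import pkg.utils.context

mgr = pkg.utils.context.get_openai_manager()

reply = ""
for resp in mgr.request_completion([{"role": "user", "content": "Hi"}]):
    msg = resp["choices"][0]["message"]
    if msg["type"] == "text":
        reply += msg["content"]  # usage is reported to the audit manager internally
print(reply)
```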
@@ -7,6 +7,11 @@ Completion - text-davinci-003 等模型
"""
import openai, logging, threading, asyncio
import openai.error as aiE
+import tiktoken
+
+from pkg.openai.api.model import RequestBase
+from pkg.openai.api.completion import CompletionRequest
+from pkg.openai.api.chat_completion import ChatCompletionRequest

COMPLETION_MODELS = {
    'text-davinci-003',
@@ -20,11 +25,14 @@ COMPLETION_MODELS = {

CHAT_COMPLETION_MODELS = {
    'gpt-3.5-turbo',
-    'gpt-3.5-turbo-0301',
+    'gpt-3.5-turbo-16k',
+    'gpt-3.5-turbo-0613',
+    'gpt-3.5-turbo-16k-0613',
+    # 'gpt-3.5-turbo-0301',
    'gpt-4',
    'gpt-4-0314',
+    'gpt-4-0613',
    'gpt-4-32k',
-    'gpt-4-32k-0314'
+    'gpt-4-32k-0314',
+    'gpt-4-32k-0613'
}

EDIT_MODELS = {
@@ -36,153 +44,76 @@ IMAGE_MODELS = {
}


-class ModelRequest:
-    """Parent class for model interface requests"""
-
-    can_chat = False
-    runtime: threading.Thread = None
-    ret = {}
-    proxy: str = None
-    request_ready = True
-    error_info: str = "若在没有任何错误的情况下看到这句话,请带着配置文件上报Issues"
-
-    def __init__(self, model_name, user_name, request_fun, http_proxy:str = None, time_out = None):
-        self.model_name = model_name
-        self.user_name = user_name
-        self.request_fun = request_fun
-        self.time_out = time_out
-        if http_proxy != None:
-            self.proxy = http_proxy
-            openai.proxy = self.proxy
-            self.request_ready = False
-
-    async def __a_request__(self, **kwargs):
-        """Asynchronous request"""
-
-        try:
-            self.ret: dict = await self.request_fun(**kwargs)
-            self.request_ready = True
-        except aiE.APIConnectionError as e:
-            self.error_info = "{}\n请检查网络连接或代理是否正常".format(e)
-            raise ConnectionError(self.error_info)
-        except ValueError as e:
-            self.error_info = "{}\n该错误可能是由于http_proxy格式设置错误引起的"
-        except Exception as e:
-            self.error_info = "{}\n由于请求异常产生的未知错误,请查看日志".format(e)
-            raise type(e)(self.error_info)
-
-    def request(self, **kwargs):
-        """Send a request to the interface"""
-
-        if self.proxy != None:  # asynchronous request
-            self.request_ready = False
-            loop = asyncio.new_event_loop()
-            self.runtime = threading.Thread(
-                target=loop.run_until_complete,
-                args=(self.__a_request__(**kwargs),)
-            )
-            self.runtime.start()
-        else:  # synchronous request
-            self.ret = self.request_fun(**kwargs)
-
-    def __msg_handle__(self, msg):
-        """Convert the prompt dict into the format the interface expects"""
-        return msg
-
-    def ret_handle(self):
-        '''
-        Handler for messages returned by the API.
-        If you override this method, check the async thread state, or call super where a check is needed.
-        '''
-        if self.runtime != None and isinstance(self.runtime, threading.Thread):
-            self.runtime.join(self.time_out)
-        if self.request_ready:
-            return
-        raise Exception(self.error_info)
-
-    def get_total_tokens(self):
-        try:
-            return self.ret['usage']['total_tokens']
-        except:
-            return 0
-
-    def get_message(self):
-        return self.message
-
-    def get_response(self):
-        return self.ret
-
-
-class ChatCompletionModel(ModelRequest):
-    """Request implementation for the ChatCompletion interface"""
-
-    Chat_role = ['system', 'user', 'assistant']
-    def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
-        if http_proxy == None:
-            request_fun = openai.ChatCompletion.create
-        else:
-            request_fun = openai.ChatCompletion.acreate
-        self.can_chat = True
-        super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
-
-    def request(self, prompts, **kwargs):
-        prompts = self.__msg_handle__(prompts)
-        kwargs['messages'] = prompts
-        super().request(**kwargs)
-        self.ret_handle()
-
-    def __msg_handle__(self, msgs):
-        temp_msgs = []
-        # copy msgs into temp_msgs
-        for msg in msgs:
-            temp_msgs.append(msg.copy())
-        return temp_msgs
-
-    def get_message(self):
-        return self.ret["choices"][0]["message"]['content']  # load on demand to speed up requests and reduce memory use
-
-
-class CompletionModel(ModelRequest):
-    """Request implementation for the Completion interface"""
-
-    def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
-        if http_proxy == None:
-            request_fun = openai.Completion.create
-        else:
-            request_fun = openai.Completion.acreate
-        super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
-
-    def request(self, prompts, **kwargs):
-        prompts = self.__msg_handle__(prompts)
-        kwargs['prompt'] = prompts
-        super().request(**kwargs)
-        self.ret_handle()
-
-    def __msg_handle__(self, msgs):
-        prompt = ''
-        for msg in msgs:
-            prompt = prompt + "{}: {}\n".format(msg['role'], msg['content'])
-        # for msg in msgs:
-        #     if msg['role'] == 'assistant':
-        #         prompt = prompt + "{}\n".format(msg['content'])
-        #     else:
-        #         prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content'])
-        prompt = prompt + "assistant: "
-        return prompt
-
-    def get_message(self):
-        return self.ret["choices"][0]["text"]
-
-
-def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest:
-    """Create a model request object for the given model name"""
-    if model_name in CHAT_COMPLETION_MODELS:
-        model = ChatCompletionModel(model_name, user_name, http_proxy)
-    elif model_name in COMPLETION_MODELS:
-        model = CompletionModel(model_name, user_name, http_proxy)
-    else :
-        log = "找不到模型[{}],请检查配置文件".format(model_name)
-        logging.error(log)
-        raise IndexError(log)
-    logging.debug("使用接口[{}]创建模型请求[{}]".format(model.__class__.__name__, model_name))
-    return model
+def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase:
+    if model_name in CHAT_COMPLETION_MODELS:
+        return ChatCompletionRequest(model_name, messages, **args)
+    elif model_name in COMPLETION_MODELS:
+        return CompletionRequest(model_name, messages, **args)
+    raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
+
+
+def count_chat_completion_tokens(messages: list, model: str) -> int:
+    """Return the number of tokens used by a list of messages."""
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        print("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+    if model in {
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-4-0314",
+        "gpt-4-32k-0314",
+        "gpt-4-0613",
+        "gpt-4-32k-0613",
+    }:
+        tokens_per_message = 3
+        tokens_per_name = 1
+    elif model == "gpt-3.5-turbo-0301":
+        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        tokens_per_name = -1  # if there's a name, the role is omitted
+    elif "gpt-3.5-turbo" in model:
+        # print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
+        return count_chat_completion_tokens(messages, model="gpt-3.5-turbo-0613")
+    elif "gpt-4" in model:
+        # print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
+        return count_chat_completion_tokens(messages, model="gpt-4-0613")
+    else:
+        raise NotImplementedError(
+            f"""count_chat_completion_tokens() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
+        )
+    num_tokens = 0
+    for message in messages:
+        num_tokens += tokens_per_message
+        for key, value in message.items():
+            num_tokens += len(encoding.encode(value))
+            if key == "name":
+                num_tokens += tokens_per_name
+    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
+    return num_tokens
+
+
+def count_completion_tokens(messages: list, model: str) -> int:
+
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        print("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+
+    text = ""
+
+    for message in messages:
+        text += message['role'] + message['content'] + "\n"
+
+    text += "assistant: "
+
+    return len(encoding.encode(text))
+
+
+def count_tokens(messages: list, model: str):
+    if model in CHAT_COMPLETION_MODELS:
+        return count_chat_completion_tokens(messages, model)
+    elif model in COMPLETION_MODELS:
+        return count_completion_tokens(messages, model)
+    raise ValueError("不支持模型[{}],请检查配置文件".format(model))
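A quick sanity check of the chat counting rule above, written against the public tiktoken API (exact totals depend on the tiktoken version installed):

```python
import tiktoken  # pip install tiktoken

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# Mirrors count_chat_completion_tokens for gpt-3.5-turbo-0613:
# 3 tokens of overhead per message, plus 3 to prime the assistant reply.
enc = tiktoken.get_encoding("cl100k_base")
n = 3  # reply priming
for m in messages:
    n += 3 + sum(len(enc.encode(v)) for v in m.values())
print(n)  # a small two-digit number for this example
```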
@@ -1,28 +0,0 @@
-# Billing module
-# Deprecated: https://github.com/RockChinQ/QChatGPT/issues/81
-
-import logging
-
-pricing = {
-    "base": {  # text models are billed per 1000 characters
-        "text-davinci-003": 0.02,
-    },
-    "image": {
-        "256x256": 0.016,
-        "512x512": 0.018,
-        "1024x1024": 0.02,
-    }
-}
-
-
-def language_base_price(model, text):
-    salt_rate = 0.93
-    length = ((len(text.encode('utf-8')) - len(text)) / 2 + len(text)) * salt_rate
-    logging.debug("text length: %d" % length)
-
-    return pricing["base"][model] * length / 1000
-
-
-def image_price(size):
-    logging.debug("image size: %s" % size)
-    return pricing["image"][size]
@@ -16,6 +16,8 @@ import pkg.utils.context
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models

+from pkg.openai.modelmgr import count_tokens
+
# All sessions kept at runtime
sessions = {}
@@ -83,7 +85,7 @@ def load_sessions():


# Get the session with the given name, creating a new one if it does not exist
-def get_session(session_name: str):
+def get_session(session_name: str) -> 'Session':
    global sessions
    if session_name not in sessions:
        sessions[session_name] = Session(session_name)
@@ -107,9 +109,6 @@ class Session:
    prompt = []
    """The rounds of the conversation, kept as a list"""

-    token_counts = []
-    """Token count for each round"""
-
    default_prompt = []
    """This session's default prompt"""
@@ -195,7 +194,7 @@ class Session:

    # Request a reply
    # This function blocks
-    def append(self, text: str) -> str:
+    def append(self, text: str=None) -> str:
        """Append a message to the session and return the interface's reply"""

        self.last_interact_timestamp = int(time.time())
@@ -215,29 +214,92 @@ class Session:
        config = pkg.utils.context.get_config()
        max_length = config.prompt_submit_length

-        prompts, counts = self.cut_out(text, max_length)
+        local_default_prompt = self.default_prompt.copy()
+        local_prompt = self.prompt.copy()
+
+        # fire the PromptPreProcessing event
+        args = {
+            'session_name': self.name,
+            'default_prompt': self.default_prompt,
+            'prompt': self.prompt,
+            'text_message': text,
+        }
+
+        event = pkg.plugin.host.emit(plugin_models.PromptPreProcessing, **args)
+
+        if event.get_return_value('default_prompt') is not None:
+            local_default_prompt = event.get_return_value('default_prompt')
+
+        if event.get_return_value('prompt') is not None:
+            local_prompt = event.get_return_value('prompt')
+
+        if event.get_return_value('text_message') is not None:
+            text = event.get_return_value('text_message')
+
+        prompts, _ = self.cut_out(text, max_length, local_default_prompt, local_prompt)
+
+        res_text = ""
+
+        pending_msgs = []
+
+        total_tokens = 0
+
+        for resp in pkg.utils.context.get_openai_manager().request_completion(prompts):
+            if resp['choices'][0]['message']['type'] == 'text':  # plain reply
+                res_text += resp['choices'][0]['message']['content']
+
+                total_tokens += resp['usage']['total_tokens']
+
+                pending_msgs.append(
+                    {
+                        "role": "assistant",
+                        "content": resp['choices'][0]['message']['content']
+                    }
+                )
+
+            elif resp['choices'][0]['message']['type'] == 'function_call':
+                # self.prompt.append(
+                #     {
+                #         "role": "assistant",
+                #         "content": "function call: "+json.dumps(resp['choices'][0]['message']['function_call'])
+                #     }
+                # )
+
+                total_tokens += resp['usage']['total_tokens']
+            elif resp['choices'][0]['message']['type'] == 'function_return':
+                # self.prompt.append(
+                #     {
+                #         "role": "function",
+                #         "name": resp['choices'][0]['message']['function_name'],
+                #         "content": json.dumps(resp['choices'][0]['message']['content'])
+                #     }
+                # )
+
+                # total_tokens += resp['usage']['total_tokens']
+                pass

-        # count the prompt tokens before the request
-        total_token_before_query = 0
-        for token_count in counts:
-            total_token_before_query += token_count
-
-        # request a completion from the API
-        message, total_token = pkg.utils.context.get_openai_manager().request_completion(
-            prompts,
-        )
+        # message, total_token = pkg.utils.context.get_openai_manager().request_completion(
+        #     prompts,
+        # )

        # on success, process the reply
-        res_test = message
-        res_ans = res_test.strip()
+        # res_test = message
+        res_ans = res_text.strip()

        # add both sides of this round to the prompt
-        self.prompt.append({'role': 'user', 'content': text})
-        self.prompt.append({'role': 'assistant', 'content': res_ans})
+        # self.prompt.append({'role': 'user', 'content': text})
+        # self.prompt.append({'role': 'assistant', 'content': res_ans})
+        if text:
+            self.prompt.append({'role': 'user', 'content': text})
+        # append pending_msgs
+        self.prompt += pending_msgs

-        # append this round's token count to token_counts
-        self.token_counts.append(total_token-total_token_before_query)
-        logging.debug("本回合使用token: {}, session counts: {}".format(total_token-total_token_before_query, self.token_counts))
+        # self.token_counts.append(total_tokens-total_token_before_query)
+        # logging.debug("本回合使用token: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts))

        if self.just_switched_to_exist_session:
            self.just_switched_to_exist_session = False
@@ -261,7 +323,7 @@ class Session:
        return question

    # Build the conversation body
-    def cut_out(self, msg: str, max_tokens: int) -> tuple[list, list]:
+    def cut_out(self, msg: str, max_tokens: int, default_prompt: list, prompt: list) -> tuple[list, list]:
        """Trim the existing prompt so the new prompt stays within max_tokens

        :return: (new prompt, new token_counts)
@@ -274,42 +336,35 @@ class Session:

        # wrap the current conversation rounds
        changable_prompts = []
-        changable_counts = []
-        # go backwards; step through prompt by 2 and through token_counts by 1
-        changable_index = len(self.prompt) - 1
-        token_count_index = len(self.token_counts) - 1
-
-        packed_tokens = 0
+
+        use_model = pkg.utils.context.get_config().completion_api_params['model']

-        while changable_index >= 0 and token_count_index >= 0:
-            if packed_tokens + self.token_counts[token_count_index] > max_tokens:
+        ptr = len(prompt) - 1
+
+        # scan from the end and splice directly, whether or not a round is complete
+        while ptr >= 0:
+            if count_tokens(prompt[ptr:ptr+1]+changable_prompts, use_model) > max_tokens:
                break

-            changable_prompts.insert(0, self.prompt[changable_index])
-            changable_prompts.insert(0, self.prompt[changable_index - 1])
-            changable_counts.insert(0, self.token_counts[token_count_index])
-            packed_tokens += self.token_counts[token_count_index]
+            changable_prompts.insert(0, prompt[ptr])

-            changable_index -= 2
-            token_count_index -= 1
+            ptr -= 1

        # merge default_prompt with changable_prompts
-        result_prompt = self.default_prompt + changable_prompts
+        result_prompt = default_prompt + changable_prompts

        # add the current question
-        result_prompt.append(
-            {
-                'role': 'user',
-                'content': msg
-            }
-        )
+        if msg:
+            result_prompt.append(
+                {
+                    'role': 'user',
+                    'content': msg
+                }
+            )

-        logging.debug('cut_out: {}\nchangable section tokens: {}\npacked counts: {}\nsession counts: {}'.format(json.dumps(result_prompt, ensure_ascii=False, indent=4),
-                                                                                                                packed_tokens,
-                                                                                                                changable_counts,
-                                                                                                                self.token_counts))
+        logging.debug("cut_out: {}".format(json.dumps(result_prompt, ensure_ascii=False, indent=4)))

-        return result_prompt, changable_counts
+        return result_prompt, count_tokens(changable_prompts, use_model)
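The rewritten `cut_out` packs history back to front: it keeps prepending the most recent messages until adding one more would exceed the token budget. The same loop in isolation, with a character-count stand-in for `count_tokens` (a hypothetical helper, for illustration only):

```python
def pack_recent(prompt, max_tokens, cost=lambda msgs: sum(len(m["content"]) for m in msgs)):
    """Keep the longest suffix of `prompt` whose cost stays within max_tokens."""
    packed = []
    ptr = len(prompt) - 1
    while ptr >= 0:
        if cost(prompt[ptr:ptr + 1] + packed) > max_tokens:
            break
        packed.insert(0, prompt[ptr])
        ptr -= 1
    return packed

history = [{"role": "user", "content": "x" * 40},
           {"role": "assistant", "content": "y" * 40},
           {"role": "user", "content": "z" * 10}]
print(pack_recent(history, max_tokens=60))  # keeps only the two most recent messages
```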
@@ -327,7 +382,7 @@ class Session:
                json.dumps(self.prompt), json.dumps(self.default_prompt), json.dumps(self.token_counts))

    # Reset the session
-    def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None):
+    def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None, persist: bool = False):
        if self.prompt:
            self.persistence()
        if explicit:
@@ -345,7 +400,8 @@ class Session:
        if expired:
            pkg.utils.context.get_database_manager().set_session_expired(self.name, self.create_timestamp)

-        self.default_prompt = self.get_default_prompt(use_prompt)
+        if not persist:  # not asked to keep the default prompt
+            self.default_prompt = self.get_default_prompt(use_prompt)
        self.prompt = []
        self.token_counts = []
        self.create_timestamp = int(time.time())
@@ -16,6 +16,8 @@ import pkg.qqbot.adapter as msadapter

from mirai import Mirai

+from CallingGPT.session.session import Session
+
__plugins__ = {}
"""Plugin list
@@ -42,6 +44,15 @@ __plugins__ = {}
__plugins_order__ = []
"""Plugin order"""

+__enable_content_functions__ = True
+"""Whether content functions are enabled"""
+
+__callable_functions__ = []
+"""Function schemas exposed for GPT to call"""
+
+__function_inst_map__: dict[str, callable] = {}
+"""Map of function name to instance"""
+

def generate_plugin_order():
    """Generate the initial plugin order from __plugins__, regardless of enabled state"""
@@ -102,6 +113,10 @@ def load_plugins():
    # load the plugin order
    settings.load_settings()

+    # log the registered content functions
+    logging.debug("registered content functions: {}".format(__callable_functions__))
+    logging.debug("function instance map: {}".format(__function_inst_map__))
+

def initialize_plugins():
    """Initialize plugins"""
@@ -251,7 +266,7 @@ class EventContext:
            self.__return_value__[key] = []
        self.__return_value__[key].append(ret)

-    def get_return(self, key: str):
+    def get_return(self, key: str) -> list:
        """Get all return values for key"""
        if key in self.__return_value__:
            return self.__return_value__[key]
@@ -300,7 +315,9 @@ class PluginHost:
    """Plugin host"""

    def __init__(self):
+        """Initialize the plugin host"""
        context.set_plugin_host(self)
+        self.calling_gpt_session = Session([])

    def get_runtime_context(self) -> context:
        """Get the runtime context (the pkg.utils.context module object)
@@ -132,18 +132,64 @@ KeySwitched = "key_switched"
    key_list: list[str] the api-key list
"""

+PromptPreProcessing = "prompt_pre_processing"
+"""Fired when the prompt is preprocessed before each round's API call; this event does not support blocking the default behavior
+    kwargs:
+        session_name: str session name (<launcher_type>_<launcher_id>)
+        default_prompt: list the scenario preset used by this session
+        prompt: list this session's existing prompt
+        text_message: str the text of the user's message
+
+    returns (optional):
+        default_prompt: list the modified scenario preset
+        prompt: list the modified prompt
+        text_message: str the modified message text
+"""

-def on(event: str):
+
+def on(*args, **kwargs):
    """Register an event listener
    :param
        event: str the event name
    """
-    return Plugin.on(event)
+    return Plugin.on(*args, **kwargs)

+def func(*args, **kwargs):
+    """Register a content function, declaring it as a function that is sent to GPT during conversations for it to call.
+    The function may take arbitrary parameters, but its docstring must follow the format described in
+    [this document](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format).
+    This feature is only available when using gpt-3.5 or gpt-4 series models.
+    """
+    return Plugin.func(*args, **kwargs)


__current_registering_plugin__ = ""


def require_ver(ge: str, le: str="v999.9.9") -> bool:
    """Plugin version requirement decorator

    Args:
        ge (str): minimum required version
        le (str, optional): maximum required version

    Returns:
        bool: whether the requirement is met; False means the version could not be determined, True means it is met, and an exception means it is not
    """
    qchatgpt_version = ""

    from pkg.utils.updater import get_current_tag, compare_version_str

    try:
        qchatgpt_version = get_current_tag()  # get the version from the updater module
    except:
        return False

    if compare_version_str(qchatgpt_version, ge) < 0 or \
        (compare_version_str(qchatgpt_version, le) > 0):
        raise Exception("QChatGPT 版本不满足要求,某些功能(可能是由插件提供的)无法正常使用。(要求版本:{}-{},但当前版本:{})".format(ge, le, qchatgpt_version))

    return True


class Plugin:
    """Plugin base class"""
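Putting the two registration paths together, a plugin might look like the sketch below. This is an assumption-laden illustration: the decorator usage follows the signatures above, the docstring follows the CallingGPT format linked there, and `add_return` is inferred from the `get_return` accessor and the event's documented optional returns.

```python
from pkg.plugin.models import Plugin, register, on, func, PromptPreProcessing


@register(name="DemoPlugin", description="demo", version="0.1", author="example")
class DemoPlugin(Plugin):

    @on(PromptPreProcessing)
    def tweak_prompt(self, event, **kwargs):
        # Hand back a modified text_message for this round; the return key
        # matches the event's documented optional returns (mechanism assumed).
        event.add_return("text_message", "[demo] " + kwargs["text_message"])


@func(name="get_answer")
def get_answer(question: str) -> str:
    """Answer a question.

    Args:
        question(str): the question

    Returns:
        str: the answer
    """
    return "42"
```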
@@ -176,6 +222,34 @@ class Plugin:

        return wrapper

+    @classmethod
+    def func(cls, name: str=None):
+        """Content function decorator"""
+        global __current_registering_plugin__
+        from CallingGPT.entities.namespace import get_func_schema
+
+        def wrapper(func):
+
+            function_schema = get_func_schema(func)
+            function_schema['name'] = __current_registering_plugin__ + '-' + (func.__name__ if name is None else name)
+
+            function_schema['enabled'] = True
+
+            host.__function_inst_map__[function_schema['name']] = function_schema['function']
+
+            del function_schema['function']
+
+            # logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema))
+
+            host.__callable_functions__.append(
+                function_schema
+            )
+
+            return func
+
+        return wrapper
+

def register(name: str, description: str, version: str, author: str):
    """Register a plugin; this function is used as a decorator
@@ -8,7 +8,10 @@ import logging
def wrapper_dict_from_runtime_context() -> dict:
    """Wrap the settings.json data dict from the runtime variables"""
    settings = {
-        "order": []
+        "order": [],
+        "functions": {
+            "enabled": host.__enable_content_functions__
+        }
    }

    for plugin_name in host.__plugins_order__:
@@ -22,6 +25,11 @@ def apply_settings(settings: dict):
    if "order" in settings:
        host.__plugins_order__ = settings["order"]

+    if "functions" in settings:
+        if "enabled" in settings["functions"]:
+            host.__enable_content_functions__ = settings["functions"]["enabled"]
+            # logging.debug("set content function enabled: {}".format(host.__enable_content_functions__))
+

def dump_settings():
    """Save the settings.json data"""
@@ -78,6 +86,17 @@ def load_settings():
            settings["order"].append(plugin_name)
            settings_modified = True

+    if "functions" not in settings:
+        settings["functions"] = {
+            "enabled": host.__enable_content_functions__
+        }
+        settings_modified = True
+    elif "enabled" not in settings["functions"]:
+        settings["functions"]["enabled"] = host.__enable_content_functions__
+        settings_modified = True
+
+    logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enabled"] else "禁用"))
+
    apply_settings(settings)

    if settings_modified:
@@ -28,6 +28,11 @@ def apply_switch(switch: dict):
    for plugin_name in switch:
        host.__plugins__[plugin_name]["enabled"] = switch[plugin_name]["enabled"]

+        # find all of this plugin's content functions
+        for func in host.__callable_functions__:
+            if func['name'].startswith(plugin_name + '-'):
+                func['enabled'] = switch[plugin_name]["enabled"]
+

def dump_switch():
    """Save the switch data"""
pkg/qqbot/cmds/funcs/func.py (new file, 28 lines)
@@ -0,0 +1,28 @@
from ..aamgr import AbstractCommandNode, Context
import logging


@AbstractCommandNode.register(
    parent=None,
    name="func",
    description="管理内容函数",
    usage="!func",
    aliases=[],
    privilege=1
)
class FuncCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        from pkg.plugin.models import host

        reply = []

        reply_str = "当前已加载的内容函数:\n\n"

        index = 1
        for func in host.__callable_functions__:
            reply_str += "{}. {}{}:\n{}\n\n".format(index, ("(已禁用) " if not func['enabled'] else ""), func['name'], func['description'])
            index += 1  # advance the list numbering

        reply = [reply_str]

        return True, reply
@@ -12,7 +12,7 @@ import pkg.utils.updater as updater
    description="插件管理",
    usage="!plugin\n!plugin get <插件仓库地址>\n!plugin update\n!plugin del <插件名>\n!plugin on <插件名>\n!plugin off <插件名>",
    aliases=[],
-    privilege=2
+    privilege=1
)
class PluginCommand(AbstractCommandNode):
    @classmethod
@@ -188,6 +188,11 @@ class PluginOnOffCommand(AbstractCommandNode):
        plugin_name = ctx.crt_params[0]
        if plugin_name in plugin_list:
            plugin_list[plugin_name]['enabled'] = new_status

            for func in plugin_host.__callable_functions__:
                if func['name'].startswith(plugin_name+"-"):
                    func['enabled'] = new_status

            plugin_switch.dump_switch()
            reply = ["[bot]已{}插件: {}".format("启用" if new_status else "禁用", plugin_name)]
        else:
pkg/qqbot/cmds/session/continue.py (new file, 27 lines)
@@ -0,0 +1,27 @@
from ..aamgr import AbstractCommandNode, Context


@AbstractCommandNode.register(
    parent=None,
    name="continue",
    description="继续未完成的响应",
    usage="!continue",
    aliases=[],
    privilege=1
)
class ContinueCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        import pkg.openai.session
        import config
        session_name = ctx.session_name

        reply = []

        session = pkg.openai.session.get_session(session_name)

        text = session.append()

        reply = [text]

        return True, reply
@@ -8,7 +8,10 @@ def config_operation(cmd, params):
    config = pkg.utils.context.get_config()
    reply_str = ""
    if len(params) == 0:
-        reply = ["[bot]err:请输入配置项"]
+        reply = ["[bot]err:请输入!cmd cfg查看使用方法"]
    else:
        cfg_name = params[0]
        if cfg_name == 'all':
@@ -26,45 +26,61 @@ def config_operation(cmd, params):
            else:
                reply_str += "{}: {}\n".format(cfg, getattr(config, cfg))
            reply = [reply_str]
-        elif cfg_name in dir(config):
-            if len(params) == 1:
-                # format according to the type of the config entry
-                if isinstance(getattr(config, cfg_name), str):
-                    reply_str = "[bot]配置项{}: \"{}\"\n".format(cfg_name, getattr(config, cfg_name))
-                elif isinstance(getattr(config, cfg_name), dict):
-                    reply_str = "[bot]配置项{}: {}\n".format(cfg_name,
-                                                        json.dumps(getattr(config, cfg_name),
-                                                                   ensure_ascii=False, indent=4))
-                else:
-                    reply_str = "[bot]配置项{}: {}\n".format(cfg_name, getattr(config, cfg_name))
-                reply = [reply_str]
-            else:
-                cfg_value = " ".join(params[1:])
-                # type conversion; JSON is parsed into a dict
-                if cfg_value == 'true':
-                    cfg_value = True
-                elif cfg_value == 'false':
-                    cfg_value = False
-                elif cfg_value.isdigit():
-                    cfg_value = int(cfg_value)
-                elif cfg_value.startswith('{') and cfg_value.endswith('}'):
-                    cfg_value = json.loads(cfg_value)
-                else:
-                    try:
-                        cfg_value = float(cfg_value)
-                    except ValueError:
-                        pass
-
-                # check that the types match
-                if isinstance(getattr(config, cfg_name), type(cfg_value)):
-                    setattr(config, cfg_name, cfg_value)
-                    pkg.utils.context.set_config(config)
-                    reply = ["[bot]配置项{}修改成功".format(cfg_name)]
-                else:
-                    reply = ["[bot]err:配置项{}类型不匹配".format(cfg_name)]
-
        else:
-            reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
+            cfg_entry_path = cfg_name.split('.')
+
+            try:
+                if len(params) == 1:
+                    cfg_entry = getattr(config, cfg_entry_path[0])
+                    if len(cfg_entry_path) > 1:
+                        for i in range(1, len(cfg_entry_path)):
+                            cfg_entry = cfg_entry[cfg_entry_path[i]]
+
+                    if isinstance(cfg_entry, str):
+                        reply_str = "[bot]配置项{}: \"{}\"\n".format(cfg_name, cfg_entry)
+                    elif isinstance(cfg_entry, dict):
+                        reply_str = "[bot]配置项{}: {}\n".format(cfg_name,
+                                                            json.dumps(cfg_entry,
+                                                                       ensure_ascii=False, indent=4))
+                    else:
+                        reply_str = "[bot]配置项{}: {}\n".format(cfg_name, cfg_entry)
+                    reply = [reply_str]
+                else:
+                    cfg_value = " ".join(params[1:])
+                    # type conversion; JSON is parsed into a dict
+                    # if cfg_value == 'true':
+                    #     cfg_value = True
+                    # elif cfg_value == 'false':
+                    #     cfg_value = False
+                    # elif cfg_value.isdigit():
+                    #     cfg_value = int(cfg_value)
+                    # elif cfg_value.startswith('{') and cfg_value.endswith('}'):
+                    #     cfg_value = json.loads(cfg_value)
+                    # else:
+                    #     try:
+                    #         cfg_value = float(cfg_value)
+                    #     except ValueError:
+                    #         pass
+                    cfg_value = eval(cfg_value)
+
+                    cfg_entry = getattr(config, cfg_entry_path[0])
+                    if len(cfg_entry_path) > 1:
+                        for i in range(1, len(cfg_entry_path) - 1):
+                            cfg_entry = cfg_entry[cfg_entry_path[i]]
+                        if isinstance(cfg_entry[cfg_entry_path[-1]], type(cfg_value)):
+                            cfg_entry[cfg_entry_path[-1]] = cfg_value
+                            reply = ["[bot]配置项{}修改成功".format(cfg_name)]
+                        else:
+                            reply = ["[bot]err:配置项{}类型不匹配".format(cfg_name)]
+                    else:
+                        setattr(config, cfg_entry_path[0], cfg_value)
+                        reply = ["[bot]配置项{}修改成功".format(cfg_name)]
+            except AttributeError:
+                reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
+            except ValueError:
+                reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
+            # else:
+            #     reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]

    return reply
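Note the switch from the ad-hoc string parsing (now commented out) to `eval`: a new value must be a valid Python literal, which is why the README examples later in this changeset quote string values. A short editorial sketch of the difference (illustrative inputs):

```python
# Values are now parsed with eval(), so they must be valid Python literals.
print(eval('True'))              # bool True ('true' would raise NameError)
print(eval('3.14'))              # float
print(eval('{"a": 1}'))          # dict
print(eval('"我是Rock Chin"'))   # quoted string, matching the README examples below
```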
@@ -102,6 +102,9 @@ class QQBotManager:

    enable_banlist = False

    enable_private = True
    enable_group = True

    ban_person = []
    ban_group = []

@@ -242,6 +245,11 @@ class QQBotManager:
            self.ban_group = banlist.group
            logging.info("加载禁用列表: person: {}, group: {}".format(self.ban_person, self.ban_group))

            if hasattr(banlist, "enable_private"):
                self.enable_private = banlist.enable_private
            if hasattr(banlist, "enable_group"):
                self.enable_group = banlist.enable_group

        config = pkg.utils.context.get_config()
        if os.path.exists("sensitive.json") \
            and config.sensitive_word_filter is not None \
@@ -269,7 +277,9 @@ class QQBotManager:
        import config
        reply = ''

-        if event.sender.id == self.bot_account_id:
+        if not self.enable_private:
+            logging.debug("已在banlist.py中禁用所有私聊")
+        elif event.sender.id == self.bot_account_id:
            pass
        else:
            if Image in event.message_chain:
@@ -342,8 +352,10 @@ class QQBotManager:
                replys = [tips_custom.replys_message]

            return replys

-        if Image in event.message_chain:
+        if not self.enable_group:
+            logging.debug("已在banlist.py中禁用所有群聊")
+        elif Image in event.message_chain:
            pass
        else:
            if At(self.bot_account_id) in event.message_chain and response_at(event.group.id):
@@ -114,8 +114,12 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
        reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
                                 "[bot]err:RateLimitError,请重试或联系作者,或等待修复")
    except openai.error.InvalidRequestError as e:
-        reply = handle_exception("{}API调用参数错误:{}\n".format(
-            session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
+        if config.auto_reset and "This model's maximum context length is" in str(e):
+            session.reset(persist=True)
+            reply = [tips_custom.session_auto_reset_message]
+        else:
+            reply = handle_exception("{}API调用参数错误:{}\n".format(
+                session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
    except openai.error.ServiceUnavailableError as e:
        reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
    except Exception as e:

@@ -74,7 +74,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
    import config
    if config.income_msg_check:
        if mgr.reply_filter.is_illegal(text_message):
-            return MessageChain(Plain("[bot] 你的提问中有不合适的内容, 请更换措辞~"))
+            return MessageChain(Plain("[bot] 消息中存在不合适的内容, 请更换措辞"))

    pkg.openai.session.get_session(session_name).acquire_response_lock()
File diff suppressed because one or more lines are too long

@@ -34,13 +34,18 @@ def pull_latest(repo_path: str) -> bool:
    return True


-def is_newer_ignored_bugfix_ver(new_tag: str, old_tag: str):
-    """Check whether a version is newer, ignoring the fourth (bugfix) component"""
+def is_newer(new_tag: str, old_tag: str):
+    """Check whether a version is newer, ignoring the fourth (bugfix) component and the first (major) component"""
    if new_tag == old_tag:
        return False

    new_tag = new_tag.split(".")
    old_tag = old_tag.split(".")

+    # the major versions must match
+    if new_tag[0] != old_tag[0]:
+        return False
+
    if len(new_tag) < 4:
        return True

@@ -73,6 +78,34 @@ def get_current_tag() -> str:
    return current_tag


def compare_version_str(v0: str, v1: str) -> int:
    """Compare two version strings"""

    # strip a leading "v" from each version string
    if v0.startswith("v"):
        v0 = v0[1:]
    if v1.startswith("v"):
        v1 = v1[1:]

    v0: list = v0.split(".")
    v1: list = v1.split(".")

    # if the two versions have different numbers of components, pad the shorter one with zeros
    if len(v0) < len(v1):
        v0.extend(["0"]*(len(v1)-len(v0)))
    elif len(v0) > len(v1):
        v1.extend(["0"]*(len(v0)-len(v1)))

    # compare from the most significant component downwards
    for i in range(len(v0)):
        if int(v0[i]) > int(v1[i]):
            return 1
        elif int(v0[i]) < int(v1[i]):
            return -1

    return 0
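A short editorial sketch of how the two helpers behave; the last case assumes the unshown tail of `is_newer` keeps ignoring the bugfix component, as its docstring states:

```python
from pkg.utils.updater import is_newer, compare_version_str

# compare_version_str strips a leading "v" and pads "2.6" to "2.6.0".
assert compare_version_str("v2.6.1", "v2.6") == 1
assert compare_version_str("2.6.0", "v2.6") == 0

assert is_newer("v2.6.2", "v2.5.9")        # minor bump within the same major: newer
assert not is_newer("v3.0.0", "v2.6.2")    # major bump: filtered out here
assert not is_newer("v2.6.2.1", "v2.6.2")  # only the fourth, bugfix component changed
```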
def update_all(cli: bool = False) -> bool:
    """Check for updates and download the source code"""
    current_tag = get_current_tag()
@@ -97,7 +130,7 @@ def update_all(cli: bool = False) -> bool:
    else:
        print("更新日志: {}".format(rls_notes))

-    if latest_rls == {} and not is_newer_ignored_bugfix_ver(latest_tag_name, current_tag):  # no new version
+    if latest_rls == {} and not is_newer(latest_tag_name, current_tag):  # no new version
        return False

    # download the latest version's zip into the temp directory
@@ -254,7 +287,7 @@ def is_new_version_available() -> bool:
        latest_tag_name = rls['tag_name']
        break

-    return is_newer_ignored_bugfix_ver(latest_tag_name, current_tag)
+    return is_newer(latest_tag_name, current_tag)


def get_rls_notes() -> list:
@@ -1,10 +1,12 @@
-requests~=2.28.1
-openai~=0.27.6
+requests~=2.31.0
+openai~=0.27.8
dulwich~=0.21.5
colorlog~=6.6.0
-yiri-mirai~=0.2.7
+yiri-mirai
websockets
urllib3~=1.26.10
func_timeout~=4.3.5
Pillow
nakuru-project-idk
+CallingGPT
+tiktoken
@@ -4,5 +4,11 @@
        "time": "2023-04-24 16:05:20",
        "timestamp": 1682323520,
        "content": "现已支持使用go-cqhttp替换mirai作为QQ登录框架, 请更新并查看 https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE"
-    }
+    },
+    {
+        "id": 1,
+        "time": "2023-05-21 17:33:18",
+        "timestamp": 1684661598,
+        "content": "NewBing不再需要鉴权,若您正在使用revLibs逆向库插件,请立即使用!plugin update revLibs命令更新插件到最新版。"
+    }
]
res/screenshots/webwlkr_plugin.png (new binary file, 22 KiB; not shown)
@@ -1,3 +1,13 @@
# Whether to process group messages
# When False, all group messages are ignored
# Takes precedence over the ban lists below
enable_group = True

# Whether to process private messages
# When False, all private messages are ignored
# Takes precedence over the ban lists below
enable_private = True

# Whether to enable the ban list
enable = True


@@ -1,12 +1,14 @@
{
    "comment": "以下为命令权限,请设置到cmdpriv.json中。关于此功能的说明,请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%91%BD%E4%BB%A4%E6%9D%83%E9%99%90%E6%8E%A7%E5%88%B6",
    "draw": 1,
-    "plugin": 2,
+    "func": 1,
+    "plugin": 1,
+    "plugin.get": 2,
+    "plugin.update": 2,
+    "plugin.del": 2,
+    "plugin.off": 2,
+    "plugin.on": 2,
+    "continue": 1,
    "default": 1,
    "default.set": 2,
    "del": 1,

@@ -180,6 +180,7 @@
!draw <prompt>   generate an image
!version         show the current version and check for updates
!resend          regenerate the reply to the previous question
!continue        continue an unfinished round (typically used to let a content function keep being called)
!plugin          see the `Management` section of the plugin-usage page for details
!default         list the available scenario presets
```
@@ -225,7 +226,7 @@
Format: `!cfg <config entry name> <new value>`
Example: modifying `default_prompt`
```
-!cfg default_prompt 我是Rock Chin
+!cfg default_prompt "我是Rock Chin"
```

Sample output
@@ -243,7 +244,15 @@
```
!~all
!~default_prompt
-!~default_prompt 我是Rock Chin
+!~default_prompt "我是Rock Chin"
```

5. Config entry names support dot (.) notation to index sub-entries

   For example, `openai_config.api_key` indexes the `api_key` field of the `openai_config` dict in `config`; a sub-entry can be viewed or modified this way

   ```
   !~openai_config.api_key
   ```

</details>
@@ -367,4 +376,5 @@ prompt_submit_length = <模型单次请求token数上限> - 情景预设中token数

### Blacklisting

-Edit `banlist.py`, set `enable = True`, and add the users or group chats to ban to its `person` or `group` lists; restart the program or hot-reload after the change
+- All `private` or `group` chats can be disabled; see the `enable_private` and `enable_group` fields in `banlist.py`
+- Edit `banlist.py`, set `enable = True`, and add the users or group chats to ban to its `person` or `group` lists; restart the program or hot-reload after the change
res/wiki/插件使用-内容函数.md (new file, 24 lines)
@@ -0,0 +1,24 @@
> Put plainly, these are the same kind of thing as official ChatGPT plugins.

Content functions are built on [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling): they are functions embedded in the conversation and invoked automatically by GPT.

For example, if we give GPT a function `access_the_web` along with a detailed description of it and of its parameters, then when a conversation with GPT touches on content like the following:

```
Q: Could you search GitHub for QQ bot projects?
Q: Please find me some good cloud-provider websites?
Q: Read and summarize this article: https://zhuanlan.zhihu.com/p/607570830
Q: What's the weather like in Qingyuan today?
```

GPT will reply with a function-call request for `access_the_web`; QChatGPT automatically executes the call and feeds the result back to GPT so that it can generate a new reply.

Of course, function calling is not limited to web access: it can also provide image processing, scientific computing, itinerary planning, and any other capability that requires calling a function. In principle, content functions can achieve the same functionality as `ChatGPT Plugins`.

- You need version `v2.5.0` or above to load plugins that contain content functions
- You must also set `model` in `completion_api_params` in `config.py` to a model that supports function calling; `gpt-3.5-turbo-16k` is recommended
- This feature may consume your account balance in hard-to-predict ways, so keep an eye on it

## Some good content-function plugins for QChatGPT

- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - gives the bot internet access!!
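To make the round trip concrete, here is an editorial sketch of the kind of assistant message the chat completions API returns when GPT decides to call such a function (values mirror the Webwlkr log excerpt later in this changeset; field contents are illustrative):

```python
# Shape of a function-call response from the chat completions API (illustrative values).
assistant_message = {
    "role": "assistant",
    "content": None,                       # no user-facing text yet
    "function_call": {
        "name": "Webwlkr-access_the_web",  # registered as <plugin name>-<function name>
        "arguments": '{"url": "https://github.com/RockChinQ/QChatGPT", "brief_len": 512}',
    },
}
# The host executes the named function, appends a {"role": "function", ...} message
# with its result, and requests a follow-up completion from the model.
```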
@@ -4,6 +4,8 @@ QChatGPT 插件使用Wiki

All `.py` programs under the `plugins` directory are loaded; every module except `__init__.py` supports hot reloading

> Plugins come in two kinds, `behavior plugins` and `content plugins`: behavior plugins are driven by events in the running host program, while content plugins are driven by GPT-generated content; see the content-plugin page

## Installation

### Repository clone (recommended)
@@ -33,6 +35,8 @@ QChatGPT 插件使用Wiki
!plugin del <plugin name>   delete a plugin (requires admin privilege)
!plugin on <plugin name>    enable a plugin (requires admin privilege)
!plugin off <plugin name>   disable a plugin (requires admin privilege)

!func                       list all content functions
```

### Controlling plugin execution order
@@ -42,4 +46,9 @@ QChatGPT 插件使用Wiki
### Enabling or disabling plugins

Plugins can be switched on and off without uninstalling them
Edit the `switch.json` file in the `plugins` directory and set the relevant plugin's `enabled` field to `true`/`false` (on/off); then restart the program or hot-reload to apply the switch
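A sketch of such a `switch.json`, following the structure read by `apply_switch` earlier in this changeset (plugin names are illustrative):

```json
{
    "Hello": {
        "enabled": true
    },
    "Webwlkr": {
        "enabled": false
    }
}
```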
### Global content-function switch

Content functions are built on [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling): they are functions embedded in the conversation and invoked automatically by GPT.
Each plugin can register its own content functions; set `enabled` under `functions` in `settings.json` in the `plugins` directory to `true` or `false` to enable or disable these content functions globally.
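A sketch of the corresponding `settings.json`, matching the structure written by the settings module in this changeset (the `order` entries are illustrative):

```json
{
    "order": ["Hello", "Webwlkr"],
    "functions": {
        "enabled": true
    }
}
```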
res/wiki/插件开发.md (207 lines changed)
@@ -113,6 +113,199 @@ class HelloPlugin(Plugin):
- A directory may contain multiple Python program files to separate a plugin's features for easier maintenance, but registering multiple plugins in one directory is not recommended
- Declare the dependencies a plugin needs in a `requirements.txt` in the plugin directory; they are installed automatically when the program fetches the plugin from a repository

## 🪝 Content functions

`Content functions` are implemented via [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling): they are functions embedded in the conversation and invoked automatically by GPT.

> Your plugin does not necessarily have to contain content functions; please read the content-functions page first to understand this feature

<details>
<summary>Example: a web-access plugin</summary>

Load [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin), a plugin whose content function provides web access, and ask the bot about online content

```
# console output
[2023-07-29 17:37:18.698] message.py (26) - [INFO] : [person_1010553892]发送消息:介绍一下这个项目:https://git...
[2023-07-29 17:37:21.292] util.py (67) - [INFO] : message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=1902 request_id=941afc13b2e1bba1e7877b92a970cdea response_code=200
[2023-07-29 17:37:21.293] chat_completion.py (159) - [INFO] : 执行函数调用: name=Webwlkr-access_the_web, arguments={'url': 'https://github.com/RockChinQ/QChatGPT', 'brief_len': 512}
[2023-07-29 17:37:21.848] chat_completion.py (164) - [INFO] : 函数执行完成。
```

![](res/screenshots/webwlkr_plugin.png)

</details>

### Steps to write a content function

1️⃣ First build your plugin's basic structure following the steps above; now delete the `@on`-decorated class methods from the plugin shown above (you may also keep them; deleting is only for brevity)

<details>
<summary>Structure after deletion</summary>

```python
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost

"""
When a private or group message "hello" is received, reply "hello, <sender id>!" or "hello, everyone!"
"""


# register the plugin
@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
class HelloPlugin(Plugin):

    # triggered when the plugin is loaded
    # plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the host program; see its source for details
    def __init__(self, plugin_host: PluginHost):
        pass

    # triggered when the plugin is unloaded
    def __del__(self):
        pass
```

</details>

2️⃣ Now add the following function where the deleted methods used to be

```Python

# the function to add

@func(name="access_the_web")  # set the function name
def _(url: str):
    """Call this function to search about the question before you answer any questions.
    - Do not search through baidu.com at any time.
    - If you need to search something, visit https://www.google.com/search?q=xxx.
    - If user ask you to open a url (start with http:// or https://), visit it directly.
    - Summary the plain content result by yourself, DO NOT directly output anything in the result you got.

    Args:
        url(str): url to visit

    Returns:
        str: plain text content of the web page
    """
    import requests
    import re
    from bs4 import BeautifulSoup
    # you need to install the dependency first:
    # pip install beautifulsoup4

    r = requests.get(
        url,
        timeout=10,
        headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
        }
    )
    soup = BeautifulSoup(r.text, 'html.parser')

    s = soup.get_text()

    # remove redundant blank lines and lines containing only tabs or spaces
    s = re.sub(r'\n\s*\n', '\n', s)

    if len(s) >= 512:  # truncate to the first 512 characters of the page's plain text
        return s[:512]

    return s

```
<details>
<summary>The file should now look like this</summary>

```python
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost

"""
When a private or group message "hello" is received, reply "hello, <sender id>!" or "hello, everyone!"
"""


# register the plugin
@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
class HelloPlugin(Plugin):

    # triggered when the plugin is loaded
    # plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the host program; see its source for details
    def __init__(self, plugin_host: PluginHost):
        pass

    @func(name="access_the_web")
    def _(url: str):
        """Call this function to search about the question before you answer any questions.
        - Do not search through baidu.com at any time.
        - If you need to search something, visit https://www.google.com/search?q=xxx.
        - If user ask you to open a url (start with http:// or https://), visit it directly.
        - Summary the plain content result by yourself, DO NOT directly output anything in the result you got.

        Args:
            url(str): url to visit

        Returns:
            str: plain text content of the web page
        """
        import requests
        import re
        from bs4 import BeautifulSoup
        # you need to install the dependency first:
        # pip install beautifulsoup4

        r = requests.get(
            url,
            timeout=10,
            headers={
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
            }
        )
        soup = BeautifulSoup(r.text, 'html.parser')

        s = soup.get_text()

        # remove redundant blank lines and lines containing only tabs or spaces
        s = re.sub(r'\n\s*\n', '\n', s)

        if len(s) >= 512:  # truncate to the first 512 characters of the page's plain text
            return s[:512]

        return s

    # triggered when the plugin is unloaded
    def __del__(self):
        pass
```

</details>

#### Please note:

- The function's docstring must strictly follow the required format; see [this document](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) for the specifics
- Content functions and `@on`-decorated behavior functions can coexist in the same plugin, and both are governed by the plugin switch in `switch.json`
- Make sure the model you use supports function calling; you can change the model in `completion_api_params` in `config.py`, and `gpt-3.5-turbo-16k` is recommended

3️⃣ Your program now has web access: restart it, then ask the bot about online content or send an article link and ask for a summary.

- This is only an example; for a plugin with more capable web access, see [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin)

## 🔒 Version requirements

If your plugin requires a particular host program version, use the following function as an assertion; if the version does not match, it raises an error and interrupts the flow it is called from:

```python
require_ver("v2.5.1")  # require at least v2.5.1
```

```python
require_ver("v2.5.1", "v2.6.0")  # require at least v2.5.1 and at most v2.6.0
```

- This function was added in host version `v2.5.1`
- It is declared in the `pkg.plugin.models` module; the plugin example code imports everything from that module at the top, so it can be used directly

## 📄 API reference

### Overview
@@ -257,6 +450,20 @@ KeySwitched = "key_switched"
    key_name: str  name of the api-key that was switched to
    key_list: list[str]  list of api-keys
"""

PromptPreProcessing = "prompt_pre_processing"  # added in v2.5.1
"""Triggered when the prompt is pre-processed before each round's API call; this event does not support preventing the default behavior
    kwargs:
        session_name: str  session name (<launcher_type>_<launcher_id>)
        default_prompt: list  the scenario preset used by this session
        prompt: list  the session's existing prompt content
        text_message: str  the text of the user's message

    returns (optional):
        default_prompt: list  the modified scenario preset
        prompt: list  the modified prompt content
        text_message: str  the modified message text
"""
```
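An editorial sketch of how a behavior plugin could hook this new event (plugin name and handler body are illustrative, and the use of `EventContext.add_return` as the return mechanism is an assumption based on the host's event API):

```python
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost


@register(name="PromptTagger", description="prepend a style hint", version="0.1", author="example")
class PromptTaggerPlugin(Plugin):

    def __init__(self, plugin_host: PluginHost):
        pass

    # Runs before each round's API call; returns a modified text_message.
    @on(PromptPreProcessing)
    def tag_prompt(self, event: EventContext, **kwargs):
        event.add_return("text_message", "[answer briefly] " + kwargs['text_message'])
```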
### host: PluginHost in detail

tests/bs_test/bs_test.py (new file, 42 lines)
@@ -0,0 +1,42 @@
import requests
from bs4 import BeautifulSoup
import os
import random
import sys


user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Version/14.1.2 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Version/14.1 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0'
]

r = requests.get(
    sys.argv[1],
    headers={
        "User-Agent": random.choice(user_agents)
    }
)
soup = BeautifulSoup(r.text, 'html.parser')
# print(soup.get_text())

raw = soup.get_text()

import re

# strip each line
# raw = '\n'.join([line.strip() for line in raw.split('\n')])

# # remove all blank lines and whitespace-only lines
# raw = re.sub(r'\n\s*\n', '\n', raw)


print(raw)
tests/ssh_client_test/ssh_client.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import os
import sys
import paramiko
import time
import select


class sshClient:
    # create an SSH client connected to the server, ready to send commands
    def __init__(self, host, port, user, password):
        self.trans = paramiko.Transport((host, port))
        self.trans.start_client()
        self.trans.auth_password(username=user, password=password)
        self.channel = self.trans.open_session()
        self.channel.get_pty()
        self.channel.invoke_shell()

    # send a command to the server
    def sendCmd(self, cmd):
        self.channel.sendall(cmd)

    # the server can be slow to respond, so wait for output with a timeout
    def recvResponse(self, timeout):
        data = b''
        while True:
            try:
                # keep reading with select until there is no more data; return on timeout
                readable, w, e = select.select([self.channel], [], [], timeout)
                if self.channel in readable:
                    data = self.channel.recv(1024)
                else:
                    sys.stdout.write(data.decode())
                    sys.stdout.flush()
                    return data.decode()
            except TimeoutError:
                sys.stdout.write(data.decode())
                sys.stdout.flush()
                return data.decode()

    # close the client
    def close(self):
        self.channel.close()
        self.trans.close()


host = 'host'
port = 22  # your port
user = 'root'
pwd = 'pass'

ssh = sshClient(host, port, user, pwd)
response = ssh.recvResponse(1)
response = ssh.sendCmd("ls\n")
ssh.sendCmd("cd /home\n")
response = ssh.recvResponse(1)
ssh.sendCmd("ls\n")
response = ssh.recvResponse(1)

ssh.close()
tests/token_test/tiktoken_test.py (new file, 124 lines)
@@ -0,0 +1,124 @@
import tiktoken
import openai
import json
import os


openai.api_key = os.getenv("OPENAI_API_KEY")


def encode(text: str, model: str):
    import tiktoken
    enc = tiktoken.get_encoding("cl100k_base")
    assert enc.decode(enc.encode("hello world")) == "hello world"

    # To get the tokeniser corresponding to a specific model in the OpenAI API:
    enc = tiktoken.encoding_for_model(model)

    return enc.encode(text)


# def ask(prompt: str, model: str = "gpt-3.5-turbo"):
#     # To get the tokeniser corresponding to a specific model in the OpenAI API:
#     enc = tiktoken.encoding_for_model(model)

#     resp = openai.ChatCompletion.create(
#         model=model,
#         messages=[
#             {
#                 "role": "user",
#                 "content": prompt
#             }
#         ]
#     )

#     return enc.encode(prompt), enc.encode(resp['choices'][0]['message']['content']), resp


def ask(
    messages: list,
    model: str = "gpt-3.5-turbo"
):
    enc = tiktoken.encoding_for_model(model)

    resp = openai.ChatCompletion.create(
        model=model,
        messages=messages
    )

    txt = ""

    for r in messages:
        txt += r['role'] + r['content'] + "\n"

    txt += "assistant: "

    return enc.encode(txt), enc.encode(resp['choices'][0]['message']['content']), resp


def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model in {
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4-0314",
        "gpt-4-32k-0314",
        "gpt-4-0613",
        "gpt-4-32k-0613",
    }:
        tokens_per_message = 3
        tokens_per_name = 1
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif "gpt-3.5-turbo" in model:
        print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
    elif "gpt-4" in model:
        print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
        return num_tokens_from_messages(messages, model="gpt-4-0613")
    else:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


messages = [
    {
        "role": "user",
        "content": "你叫什么名字?"
    }, {
        "role": "assistant",
        "content": "我是AI助手,没有具体的名字。你可以叫我GPT-3。有什么可以帮到你的吗?"
    }, {
        "role": "user",
        "content": "你是由谁开发的?"
    }, {
        "role": "assistant",
        "content": "我是由OpenAI开发的,一家人工智能研究实验室。OpenAI的使命是促进人工智能的发展,使其为全人类带来积极影响。我是由OpenAI团队使用GPT-3模型训练而成的。"
    }, {
        "role": "user",
        "content": "很高兴见到你。"
    }
]


pro, rep, resp = ask(messages)

print(len(pro), len(rep))
print(resp)
print(resp['choices'][0]['message']['content'])

print(num_tokens_from_messages(messages, model="gpt-3.5-turbo"))
@@ -30,5 +30,8 @@ command_admin_message = "[bot]err:权限不足: "
command_err_message = "[bot]err:指令不存在:"

# session reset notice
-command_reset_message = "[bot]:会话已重置"
-command_reset_name_message = "[bot]:会话已重置,使用场景预设:"
+command_reset_message = "[bot]会话已重置"
+command_reset_name_message = "[bot]会话已重置,使用场景预设:"

# notice shown when a session is auto-reset
session_auto_reset_message = "[bot]会话token超限,已自动重置,请重新发送消息"