Mirror of https://github.com/langbot-app/LangBot.git — synced 2025-11-25 11:29:39 +08:00

Compare commits (165 commits)
(Commit table not recovered from the compare view: only the SHA1 column survived, running from a724bfe155 through fd25d61b56; the Author, Date, and message cells are empty.)
.gitignore (vendored) — 11 changes
@@ -3,10 +3,17 @@ config.py
__pycache__/
database.db
qchatgpt.log
config.py
/banlist.py
plugins/
!plugins/__init__.py
/revcfg.py
prompts/
logs/
logs/
sensitive.json
temp/
current_tag
scenario/
!scenario/default-template.json
override.json
cookies.json
res/announcement_saved
CONTRIBUTING.md (new file) — 19 changes
@@ -0,0 +1,19 @@
## Contributing

Contributions of code and other kinds of support are welcome — they help turn your ideas, and the features the community is waiting for, into reality, and they help the community grow.

### Ways to contribute

- Submit a PR that fixes a bug or implements a feature requested in the issues
- Submit a PR that implements a feature of your own design (please open an issue first to discuss it with the author)
- Improve the code architecture so that the modules are organized more cleanly and elegantly
- Report bugs you find, or request features, in the issues
- Write articles, make videos, etc. about this project on other platforms
- Contribute to the project's derivative projects, or develop plugins that add features

### Getting started

- Join the project's chat group to discuss project matters together
- Work on pressing issues of this project or its derivative projects
- Read and improve the project documentation
- Write tutorials about this project on social media
README.md — 95 changes
@@ -1,36 +1,52 @@
# QChatGPT🤖

### 🎉 The ChatGPT web edition can now be connected — finish deployment, then see the **Plugins** section at the bottom or [this repo](https://github.com/RockChinQ/revLibs)
> 2023/3/15 The reverse-engineered library now supports New Bing; see the [plugin docs](https://github.com/RockChinQ/revLibs) for usage
> 2023/3/15 The reverse-engineered library now supports the GPT-4 model; see the [plugin](https://github.com/RockChinQ/revLibs) for usage
> 2023/3/3 The official API appears to be blocked in some regions; consider using a network proxy [#198](https://github.com/RockChinQ/QChatGPT/issues/198)
> 2023/3/3 The official ChatGPT API is now supported on the main line; see [#195](https://github.com/RockChinQ/QChatGPT/issues/195) for usage
> 2023/2/16 The ChatGPT web edition can now be connected — finish deployment, then see the **Plugins** section at the bottom or [this repo](https://github.com/RockChinQ/revLibs)

- See the [project wiki](https://github.com/RockChinQ/QChatGPT/wiki) for detailed project information
- [Video tutorial](https://www.bilibili.com/video/BV15v4y1X7aP) made by bilibili user TheLazy
- Test accounts: 2196084348 (reverse-library plugin loaded, rate-limited per minute), ~~1480613886 (reverse-library plugin loaded)~~ (banned)
- Discussion and support groups: ~~204785790~~ (full), 691226829
- **Before asking in the group, please `make sure` you have already searched the docs and issues without finding a solution**
- **Before asking in the group, please `make sure` you have already searched the docs and issues without finding a solution**
- Discussion and support groups: ~~204785790~~ (full), ~~691226829~~ (full), 656285629
- **Before asking in the group, please `make sure` you have already searched the docs and issues without finding a solution**
- For a QQ Channel bot, see [QQChannelChatGPT](https://github.com/Soulter/QQChannelChatGPT)

A smarter QQ bot built by calling the Completion API of OpenAI's GPT-3 models
A smarter QQ bot built by calling OpenAI language models such as ChatGPT

## 🍺Supported models at a glance

### Text conversation

- OpenAI GPT-3.5 models (ChatGPT API), supported natively, used by default
- OpenAI GPT-3 models, supported natively, switch in config.py after deployment
- ChatGPT web edition GPT-3.5 model, connected via the [plugin](https://github.com/RockChinQ/revLibs)
- ChatGPT web edition GPT-4 model, connected via the [plugin](https://github.com/RockChinQ/revLibs)
- New Bing reverse-engineered library, connected via the [plugin](https://github.com/RockChinQ/revLibs)

### Story continuation

- NovelAI API, connected via the [plugin](https://github.com/dominoar/QCPNovelAi)

### Image generation

- OpenAI DALL·E model, supported natively; see the [wiki feature page](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE) for usage
- NovelAI API, connected via the [plugin](https://github.com/dominoar/QCPNovelAi)

### Speech generation

- TTS+VITS, connected via the [plugin](https://github.com/dominoar/QChatPlugins)

## ✅Features

<details>
<summary>✅Replies follow the conversation context</summary>

- The program sends the last few exchanges to the model, which generates a reply based on that context
- You can edit `prompt_submit_length` in `config.py` to customize how much context is included

</details>

<details>
<summary>✅Sensitive-word filtering to avoid account risk</summary>

- It is hard to monitor what the bot and users talk about, so this feature was added to reduce the bot's risk
- Baidu Cloud content moderation is integrated: set `baidu_check` in `config.py` and fill in `baidu_api_key` and `baidu_secret_key` to enable it
- Edit `sensitive.json` and set `sensitive_word_filter` in `config.py` to enable it
</details>


<details>
<summary>✅Multiple in-group response rules, no need to @ the bot</summary>

@@ -38,14 +54,6 @@
- See the `response_rules` field in `config.py` for details
</details>

<details>
<summary>✅Uses the official API — no network proxy needed, stable and fast</summary>

- Uses the official Completion API rather than a reverse-engineered ChatGPT interface, so stability is high
- You can customize the `completion_api_params` field in `config.py` to set the parameters submitted to the official API and tune the bot's style

</details>

<details>
<summary>✅Full multi-api-key management with automatic switching when a key's quota is exceeded</summary>

@@ -55,13 +63,6 @@
- Send `!usage` to the bot at runtime to check current usage
</details>

<details>
<summary>✅Few components, easy to deploy, with a one-click installer and a Docker install</summary>

- Few manual deployment steps
- An automatic installer and a docker option are provided; see the installation steps below
</details>

<details>
<summary>✅Preset prompt text supported</summary>

@@ -70,13 +71,6 @@
- Multiple preset scenarios can be configured and controlled with commands such as !reset and !default; see the [wiki commands page](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4) for details
</details>

<details>
<summary>✅Robust session management, nothing lost on restart</summary>

- Session content is persisted with SQLite
- The session is saved automatically some time after the last exchange; change `session_expire_time` in `config.py` to customize the interval
- Sessions can be managed at runtime with commands such as `!reset` `!list` `!last` `!next` `!prompt`
</details>
<details>
<summary>✅Conversation, drawing, and other models supported for more fun</summary>

@@ -102,6 +96,12 @@
- See the wiki section `加入黑名单` (add to blacklist) for details
</details>
<details>
<summary>✅Long-message handling strategies</summary>

- Long messages can be converted into an image or a forwarded-message component to avoid flooding the chat
- See `blob_message_strategy` and related fields in `config.py`
</details>
<details>
<summary>✅Reply rate limiting</summary>

- The number of exchanges per minute can be limited per session
@@ -119,10 +119,9 @@

### - Register an OpenAI account

**You can buy one directly from the group owner in the chat group**
Or register yourself with the help of the articles below
Register yourself with the help of the articles below

> ~~[Register for ChatGPT for just 1 yuan](https://zhuanlan.zhihu.com/p/589470082)~~ (no longer works)
> [How to register for ChatGPT from mainland China (100% working)](https://www.pythonthree.com/register-openai-chatgpt/)
> [A hands-on, very detailed guide to registering for ChatGPT](https://guxiaobei.com/51461)

After registering, go to the [account page](https://beta.openai.com/account/api-keys) to get your api_key
@@ -148,10 +147,7 @@
<details>
<summary>Manual deployment, works on all platforms</summary>

- Use Python 3.9.x or later
- Mind your OpenAI account quota
- Each account only has 18 USD of free credit; without a bank card bound, requests error out once it is exceeded
- OpenAI pricing: the default `text-davinci-003` model costs 0.02 USD per 1,000 characters
- Use Python 3.9.x or later

#### Configure Mirai

@@ -170,8 +166,7 @@ cd QChatGPT
2. Install dependencies

```bash
pip3 install yiri-mirai openai colorlog func_timeout
pip3 install dulwich
pip3 install yiri-mirai openai colorlog func_timeout dulwich Pillow
```

3. Run the main program once to generate the config file

@@ -194,7 +189,7 @@ python3 main.py

**FAQ**

- If mirai reports `QQ版本过低` (QQ version too low) on login, see [this issue](https://github.com/RockChinQ/QChatGPT/issues/38)
- If mirai reports `QQ版本过低` (QQ version too low) on login, see [this issue](https://github.com/RockChinQ/QChatGPT/issues/137)
- If you are prompted to install `uvicorn` or `hypercorn`, do *not* install them; they are not required and currently hit a bug of unknown cause
- If you get `TypeError: As of 3.10, the *loop* parameter was removed from Lock() since it is no longer necessary`, see [here](https://github.com/RockChinQ/QChatGPT/issues/5)

@@ -224,7 +219,7 @@ python3 main.py

- [revLibs](https://github.com/RockChinQ/revLibs) - Connects the ChatGPT web edition to this project; see [how the official API differs from the web edition](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E4%B8%8EChatGPT%E7%BD%91%E9%A1%B5%E7%89%88)
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - `hello_plugin` as a standalone repository, a plugin development template
- [dominoar/QchatPlugins](https://github.com/dominoar/QchatPlugins) - Many feature plugins written by dominoar (speech output, Ranimg, blocked-word rules, etc.)
- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - Many feature plugins written by dominoar (speech output, Ranimg, blocked-word rules, etc.)
- [dominoar/QCP-NovelAi](https://github.com/dominoar/QCP-NovelAi) - NovelAI storytelling and drawing

## 😘Acknowledgements

@@ -232,9 +227,9 @@ python3 main.py
- [@the-lazy-me](https://github.com/the-lazy-me) made the [video tutorial](https://www.bilibili.com/video/BV15v4y1X7aP) for this project
- [@mikumifa](https://github.com/mikumifa) developer of this project's Docker deployment repository
- [@dominoar](https://github.com/dominoar) developed many plugins for this project
- [@hissincn](https://github.com/hissincn) contributor to this project
- [@万神的星空](https://github.com/qq255204159) publishes the bundled release package

And everyone else who has supported this project.
And all [contributors](https://github.com/RockChinQ/QChatGPT/graphs/contributors) and everyone else who has supported this project.

## 👍Sponsorship

config-template.py
@@ -20,6 +20,7 @@ mirai_http_api_config = {

# [Required] OpenAI configuration
# api_key: your OpenAI API key
# http_proxy: proxy used for OpenAI requests; None disables it (https and socks5 are not usable yet)
# If you only have one api-key, just replace "openai_api_key" below with your key
#
# If you have several api-keys, fill them in as a dict; the program picks a usable key automatically
@@ -30,11 +31,13 @@ mirai_http_api_config = {
#         "key1": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#         "key2": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#     },
#     "http_proxy": "http://127.0.0.1:12345"
# }
openai_config = {
    "api_key": {
        "default": "openai_api_key"
    },
    "http_proxy": None
}

# [Required] Administrator QQ number, used to receive error notifications and to run admin-level commands
@@ -76,15 +79,24 @@ default_prompt = {
    "default": "如果我之后想获取帮助,请你说“输入!help获取帮助”",
}

# Scenario preset format
# Accepted values — legacy style: default | full scenario: full_scenario
# The legacy format is the content of default_prompt above, or a file name under the prompts/ directory
# A full scenario preset is a JSON file that lists every round of the conversation; see scenario/default-template.json for how to write one
preset_mode = "default"

# In-group response rules
# Group messages matching these rules are answered even if they do not @ the bot
# Message-prefix matching and regular-expression matching are supported
# Whether to respond to @ messages, and a random response probability, can also be set
# Note: for messages matched by a prefix, the prefix is removed; for messages matched by a regexp, the matched part is NOT removed
# Prefix matching takes priority over regexp matching
# A short regular-expression tutorial: https://www.runoob.com/regexp/regexp-tutorial.html
response_rules = {
    "at": True,  # whether to respond to messages that @ the bot
    "prefix": ["/ai", "!ai", "!ai", "ai"],
    "regexp": []  # "为什么.*", "怎么?样.*", "怎么.*", "如何.*", "[Hh]ow to.*", "[Ww]hy not.*", "[Ww]hat is.*", ".*怎么办", ".*咋办"
    "regexp": [],  # "为什么.*", "怎么?样.*", "怎么.*", "如何.*", "[Hh]ow to.*", "[Ww]hy not.*", "[Ww]hat is.*", ".*怎么办", ".*咋办"
    "random_rate": 0.0,  # random response probability, 0.0-1.0; 0.0 = never respond randomly, 1.0 = respond to every message; only applies when the checks above fail
}

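For readers unfamiliar with these rules, here is a minimal sketch of the matching order the comments above describe (prefix first, then regexp, then the random fallback). The function name and return convention are illustrative and not taken from the project's code.

```python
import random
import re

def matches_response_rules(text: str, rules: dict):
    """Illustrative only: decide whether a group message gets a reply."""
    for prefix in rules.get("prefix", []):
        if text.startswith(prefix):
            return True, text[len(prefix):]   # prefix-matched: the prefix is stripped
    for pattern in rules.get("regexp", []):
        if re.search(pattern, text):
            return True, text                 # regexp-matched: nothing is stripped
    if random.random() < rules.get("random_rate", 0.0):
        return True, text                     # random fallback, only when the checks above fail
    return False, text
```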
# Message-ignore rules
@@ -99,10 +111,27 @@ ignore_rules = {
    "regexp": []
}

# Whether to check incoming messages for sensitive words
# If an incoming message fails the sensitive-word check strategy configured below, a notice is sent instead
income_msg_check = False

# Sensitive-word filter switch; sensitive words in replies are replaced by the same number of *
# Add sensitive words in sensitive.json
sensitive_word_filter = True

# Whether to enable Baidu Cloud content moderation
# See https://cloud.baidu.com/doc/ANTIPORN/s/Wkhu9d5iy for how to register
baidu_check = False

# Baidu Cloud API_KEY, a 24-character alphanumeric string
baidu_api_key = ""

# Baidu Cloud SECRET_KEY, a 32-character alphanumeric string
baidu_secret_key = ""

# Custom reply for non-compliant messages
inappropriate_message_tips = "[百度云]请珍惜机器人,当前返回内容不合规"

# Whether to send the sponsorship QR code at startup
# Only sent once usage has exceeded 2048 characters
encourage_sponsor_at_start = True

@@ -112,12 +141,25 @@ encourage_sponsor_at_start = True
# Note: a larger prompt_submit_length drains the OpenAI account quota faster
prompt_submit_length = 1024

# Parameters for OpenAI's completion API
# Parameters for the OpenAI completion APIs
# Fill in the model below; the program picks the matching interface automatically
# Currently supported models:
#
# 'gpt-3.5-turbo'
# 'gpt-3.5-turbo-0301'
# 'text-davinci-003'
# 'text-davinci-002'
# 'code-davinci-002'
# 'code-cushman-001'
# 'text-curie-001'
# 'text-babbage-001'
# 'text-ada-001'
#
# For details see the OpenAI documentation: https://beta.openai.com/docs/api-reference/completions/create
completion_api_params = {
    "model": "text-davinci-003",
    "model": "gpt-3.5-turbo",
    "temperature": 0.9,  # the lower the value, the more matter-of-fact the answers; range [0, 1]
    "max_tokens": 512,  # upper limit on the amount of text in each OpenAI response, no higher than 4096
    "max_tokens": 1024,  # upper limit on the amount of text in each OpenAI response, no higher than 4096
    "top_p": 1,  # how closely the generated text matches the request; range [0, 1]
    "frequency_penalty": 0.2,
    "presence_penalty": 1.0,
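The "picks the matching interface automatically" note above refers to routing by model name. A condensed sketch of that routing, consistent with the model sets that appear later in this diff (pkg/openai/modelmgr.py):

```python
CHAT_COMPLETION_MODELS = {"gpt-3.5-turbo", "gpt-3.5-turbo-0301"}

def pick_interface(model: str) -> str:
    """Return which OpenAI endpoint a configured model name is routed to."""
    if model in CHAT_COMPLETION_MODELS:
        return "ChatCompletion"   # chat-style request built from a messages list
    return "Completion"           # classic prompt-string completion
```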
@@ -138,21 +180,24 @@ include_image_description = True
# Timeout for message processing, in seconds
process_message_timeout = 30

# Session subject names; this setting relates to session subject management
# If you are not familiar with this feature, there is no need to change it
# For details see: https://github.com/RockChinQ/QChatGPT/wiki/%E6%8A%80%E6%9C%AF%E4%BF%A1%E6%81%AF#%E4%BC%9A%E8%AF%9Dsession
# user_name: name of the administrator (owner)
# bot_name: name of the bot
user_name = 'You'
bot_name = 'Bot'

# [Not implemented yet] Whether group sessions use per-member subject names
# If disabled, the prompt of a group session only uses user_name and bot_name
multi_subject = False

# Whether to show the [GPT] prefix in replies
show_prefix = False

# Threshold for applying the long-message strategy
# When a reply is longer than this value, the long-message strategy is used
blob_message_threshold = 256

# Long-message strategy
# - "image": convert long messages into an image before sending
# - "forward": convert long messages into a forwarded-message component before sending
blob_message_strategy = "forward"

# Path of the font file used when converting text to an image
# Only takes effect when the strategy is "image"
# On Windows, the program automatically uses the system's Microsoft YaHei font
# If left empty or missing on a non-Windows system, text-to-image is disabled and the forwarded-message component is used instead
font_path = ""

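A minimal sketch of how the long-message settings above interact; the send helpers are placeholders, not functions from this project.

```python
def deliver_reply(reply: str):
    # send_plain / send_image / render_text_to_image / send_forward_component
    # are placeholder names standing in for the bot's actual send helpers.
    if len(reply) <= blob_message_threshold:
        send_plain(reply)                                   # short replies go out unchanged
    elif blob_message_strategy == "image":
        send_image(render_text_to_image(reply, font_path))  # long reply rendered with font_path
    else:  # "forward"
        send_forward_component(reply)                       # long reply wrapped in a forward component
```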
# Number of retries when message processing times out
retry_times = 3

@@ -166,6 +211,11 @@ hide_exce_info_to_user = False
# When set to an empty string, no notice is sent
alter_tip_message = '出错了,请稍后再试'

# Bot thread-pool size
# This determines how many people's messages the bot can process at the same time; requests beyond the pool size are blocked, not dropped
# If you are not sure what this parameter means, do not change it
pool_num = 10

# Expiry time of each session, in seconds
# Defaults to 20 minutes
session_expire_time = 60 * 20

@@ -186,6 +236,9 @@ rate_limit_strategy = "wait"
# If set to an empty string, no notice is sent
rate_limit_drop_tip = "本分钟对话次数超过限速次数,此对话被丢弃"

# Whether to update dependency libraries at startup
upgrade_dependencies = True

# Whether to report usage statistics
# Used to gauge how the bot is used; no user information is collected
# Only the time, text usage, and drawing usage are reported; nothing else is sent
@@ -202,11 +255,4 @@ help_message = """此机器人通过调用OpenAI的GPT-3大型语言模型生成
每次会话最后一次交互后{}分钟后会自动结束,结束后将开启新会话,如需继续前一次会话请发送 !last 重新开启
欢迎到github.com/RockChinQ/QChatGPT 给个star

帮助信息:
!help - 显示帮助
!reset - 重置会话
!last - 切换到前一次的对话
!next - 切换到后一次的对话
!prompt - 显示当前对话所有内容
!list - 列出所有历史会话
!usage - 列出各个api-key的使用量""".format(session_expire_time // 60)
指令帮助信息请查看: https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4""".format(session_expire_time // 60)

main.py — 141 changes
@@ -1,4 +1,5 @@
|
||||
import importlib
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
@@ -32,7 +33,7 @@ log_colors_config = {
|
||||
'INFO': 'white',
|
||||
'WARNING': 'yellow',
|
||||
'ERROR': 'red',
|
||||
'CRITICAL': 'bold_red',
|
||||
'CRITICAL': 'cyan',
|
||||
}
|
||||
|
||||
|
||||
@@ -43,6 +44,13 @@ def init_db():
|
||||
database.initialize_database()
|
||||
|
||||
|
||||
def ensure_dependencies():
|
||||
import pkg.utils.pkgmgr as pkgmgr
|
||||
pkgmgr.run_pip(["install", "openai", "Pillow", "--upgrade",
|
||||
"-i", "https://pypi.douban.com/simple/",
|
||||
"--trusted-host", "pypi.douban.com"])
|
||||
|
||||
|
||||
known_exception_caught = False
|
||||
|
||||
log_file_name = "qchatgpt.log"
|
||||
@@ -100,13 +108,22 @@ def reset_logging():
|
||||
|
||||
|
||||
def main(first_time_init=False):
|
||||
"""启动流程,reload之后会被执行"""
|
||||
|
||||
global known_exception_caught
|
||||
|
||||
# 检查并创建plugins、prompts目录
|
||||
check_path = ["plugins", "prompts"]
|
||||
for path in check_path:
|
||||
if not os.path.exists(path):
|
||||
os.mkdir(path)
|
||||
import config
|
||||
# 更新openai库到最新版本
|
||||
if not hasattr(config, 'upgrade_dependencies') or config.upgrade_dependencies:
|
||||
print("正在更新依赖库,请等待...")
|
||||
if not hasattr(config, 'upgrade_dependencies'):
|
||||
print("这个操作不是必须的,如果不想更新,请在config.py中添加upgrade_dependencies=False")
|
||||
else:
|
||||
print("这个操作不是必须的,如果不想更新,请在config.py中将upgrade_dependencies设置为False")
|
||||
try:
|
||||
ensure_dependencies()
|
||||
except Exception as e:
|
||||
print("更新openai库失败:{}, 请忽略或自行更新".format(e))
|
||||
|
||||
known_exception_caught = False
|
||||
try:
|
||||
@@ -115,13 +132,39 @@ def main(first_time_init=False):
|
||||
|
||||
config = importlib.import_module('config')
|
||||
|
||||
import pkg.utils.context
|
||||
pkg.utils.context.set_config(config)
|
||||
|
||||
init_runtime_log_file()
|
||||
|
||||
sh = reset_logging()
|
||||
|
||||
# 配置完整性校验
|
||||
is_integrity = True
|
||||
config_template = importlib.import_module('config-template')
|
||||
for key in dir(config_template):
|
||||
if not key.startswith("__") and not hasattr(config, key):
|
||||
setattr(config, key, getattr(config_template, key))
|
||||
logging.warning("[{}]不存在".format(key))
|
||||
is_integrity = False
|
||||
|
||||
if not is_integrity:
|
||||
logging.warning("配置文件不完整,请依据config-template.py检查config.py")
|
||||
logging.warning("以上配置已被设为默认值,将在5秒后继续启动... ")
|
||||
|
||||
# 检查override.json覆盖
|
||||
if os.path.exists("override.json"):
|
||||
override_json = json.load(open("override.json", "r", encoding="utf-8"))
|
||||
for key in override_json:
|
||||
if hasattr(config, key):
|
||||
setattr(config, key, override_json[key])
|
||||
logging.info("覆写配置[{}]为[{}]".format(key, override_json[key]))
|
||||
else:
|
||||
logging.error("无法覆写配置[{}]为[{}],该配置不存在,请检查override.json是否正确".format(key, override_json[key]))
|
||||
|
||||
if not is_integrity:
|
||||
time.sleep(5)
|
||||
|
||||
import pkg.utils.context
|
||||
pkg.utils.context.set_config(config)
|
||||
|
||||
# 检查是否设置了管理员
|
||||
if not (hasattr(config, 'admin_qq') and config.admin_qq != 0):
|
||||
# logging.warning("未设置管理员QQ,管理员权限指令及运行告警将无法使用,如需设置请修改config.py中的admin_qq字段")
|
||||
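For reference, the override.json handling in the hunk above only accepts keys that already exist in config; a minimal, hypothetical example file and the check it goes through (`config` stands for the imported config module, as in the code above):

```python
import json

# Hypothetical override.json contents — keys must already be defined in config.py:
override_json = json.loads('{"prompt_submit_length": 2048, "show_prefix": true}')

for key, value in override_json.items():
    if hasattr(config, key):        # known key: value overrides the one from config.py
        setattr(config, key, value)
    else:                           # unknown key: rejected with an error log
        print("cannot override unknown config key:", key)
```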
@@ -153,6 +196,7 @@ def main(first_time_init=False):
|
||||
import pkg.openai.dprompt
|
||||
|
||||
pkg.openai.dprompt.read_prompt_from_file()
|
||||
pkg.openai.dprompt.read_scenario_from_file()
|
||||
|
||||
pkg.utils.context.context['logger_handler'] = sh
|
||||
# 主启动流程
|
||||
@@ -168,7 +212,7 @@ def main(first_time_init=False):
|
||||
# 初始化qq机器人
|
||||
qqbot = pkg.qqbot.manager.QQBotManager(mirai_http_api_config=config.mirai_http_api_config,
|
||||
timeout=config.process_message_timeout, retry=config.retry_times,
|
||||
first_time_init=first_time_init)
|
||||
first_time_init=first_time_init, pool_num=config.pool_num)
|
||||
|
||||
# 加载插件
|
||||
import pkg.plugin.host
|
||||
@@ -176,7 +220,7 @@ def main(first_time_init=False):
|
||||
|
||||
pkg.plugin.host.initialize_plugins()
|
||||
|
||||
if first_time_init: # 不是热重载之后的启动,则不启动新的bot线程
|
||||
if first_time_init: # 不是热重载之后的启动,则启动新的bot线程
|
||||
|
||||
import mirai.exceptions
|
||||
|
||||
@@ -226,6 +270,11 @@ def main(first_time_init=False):
|
||||
qq_bot_thread = threading.Thread(target=run_bot_wrapper, args=(), daemon=True)
|
||||
qq_bot_thread.start()
|
||||
finally:
|
||||
# 判断若是Windows,输出选择模式可能会暂停程序的警告
|
||||
if os.name == 'nt':
|
||||
time.sleep(2)
|
||||
logging.info("您正在使用Windows系统,若命令行窗口处于“选择”模式,程序可能会被暂停,此时请右键点击窗口空白区域使其取消选择模式。")
|
||||
|
||||
time.sleep(12)
|
||||
if first_time_init:
|
||||
if not known_exception_caught:
|
||||
@@ -258,24 +307,22 @@ def main(first_time_init=False):
|
||||
import pkg.utils.updater
|
||||
try:
|
||||
if pkg.utils.updater.is_new_version_available():
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("新版本可用,请发送 !update 进行自动更新")
|
||||
logging.info("新版本可用,请发送 !update 进行自动更新\n更新日志:\n{}".format("\n".join(pkg.utils.updater.get_rls_notes())))
|
||||
else:
|
||||
logging.info("当前已是最新版本")
|
||||
|
||||
except Exception as e:
|
||||
logging.warning("检查更新失败:{}".format(e))
|
||||
|
||||
while True:
|
||||
try:
|
||||
time.sleep(10)
|
||||
if qqbot != pkg.utils.context.get_qqbot_manager(): # 已经reload了
|
||||
logging.info("以前的main流程由于reload退出")
|
||||
break
|
||||
except KeyboardInterrupt:
|
||||
stop()
|
||||
try:
|
||||
import pkg.utils.announcement as announcement
|
||||
new_announcement = announcement.fetch_new()
|
||||
if new_announcement != "":
|
||||
logging.critical("[公告] {}".format(new_announcement))
|
||||
except Exception as e:
|
||||
logging.warning("获取公告失败:{}".format(e))
|
||||
|
||||
print("程序退出")
|
||||
sys.exit(0)
|
||||
return qqbot
|
||||
|
||||
|
||||
def stop():
|
||||
@@ -309,28 +356,44 @@ if __name__ == '__main__':
|
||||
if not os.path.exists('banlist.py'):
|
||||
shutil.copy('banlist-template.py', 'banlist.py')
|
||||
|
||||
# 检查是否有sensitive.json
|
||||
if not os.path.exists("sensitive.json"):
|
||||
shutil.copy("sensitive-template.json", "sensitive.json")
|
||||
|
||||
# 检查是否有scenario/default.json
|
||||
if not os.path.exists("scenario/default.json"):
|
||||
shutil.copy("scenario/default-template.json", "scenario/default.json")
|
||||
|
||||
# 检查temp目录
|
||||
if not os.path.exists("temp/"):
|
||||
os.mkdir("temp/")
|
||||
|
||||
# 检查并创建plugins、prompts目录
|
||||
check_path = ["plugins", "prompts"]
|
||||
for path in check_path:
|
||||
if not os.path.exists(path):
|
||||
os.mkdir(path)
|
||||
|
||||
if len(sys.argv) > 1 and sys.argv[1] == 'init_db':
|
||||
init_db()
|
||||
sys.exit(0)
|
||||
|
||||
elif len(sys.argv) > 1 and sys.argv[1] == 'update':
|
||||
try:
|
||||
try:
|
||||
import pkg.utils.pkgmgr
|
||||
pkg.utils.pkgmgr.ensure_dulwich()
|
||||
except:
|
||||
pass
|
||||
|
||||
from dulwich import porcelain
|
||||
|
||||
repo = porcelain.open_repo('.')
|
||||
porcelain.pull(repo)
|
||||
except ModuleNotFoundError:
|
||||
print("dulwich模块未安装,请查看 https://github.com/RockChinQ/QChatGPT/issues/77")
|
||||
print("正在进行程序更新...")
|
||||
import pkg.utils.updater as updater
|
||||
updater.update_all(cli=True)
|
||||
sys.exit(0)
|
||||
|
||||
# import pkg.utils.configmgr
|
||||
#
|
||||
# pkg.utils.configmgr.set_config_and_reload("quote_origin", False)
|
||||
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
|
||||
main(True)
|
||||
|
||||
qqbot = main(True)
|
||||
|
||||
import pkg.utils.context
|
||||
while True:
|
||||
try:
|
||||
time.sleep(10)
|
||||
except KeyboardInterrupt:
|
||||
stop()
|
||||
|
||||
print("程序退出")
|
||||
sys.exit(0)
|
||||
|
||||
@@ -0,0 +1,3 @@
"""
Audit-related operations
"""
@@ -1,3 +1,7 @@
"""
Usage statistics and data reporting
"""
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
@@ -10,8 +14,11 @@ import pkg.utils.updater
|
||||
|
||||
class DataGatherer:
|
||||
"""数据收集器"""
|
||||
|
||||
usage = {}
|
||||
"""以key值md5为key,{
|
||||
"""各api-key的使用量
|
||||
|
||||
以key值md5为key,{
|
||||
"text": {
|
||||
"text-davinci-003": 文字量:int,
|
||||
},
|
||||
@@ -20,16 +27,21 @@ class DataGatherer:
|
||||
}
|
||||
}为值的字典"""
|
||||
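# Illustrative example of the structure documented above (the hash and counts are made up):
#     usage = {
#         "1a79a4d60de6718e8e5b326e338ae533": {       # md5 of an api-key
#             "text": {"gpt-3.5-turbo": 12034},       # characters used per text model
#             "image": {"256x256": 3},                # images generated per size
#         },
#     }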
|
||||
version_str = "0.1.0"
|
||||
version_str = "undetermined"
|
||||
|
||||
def __init__(self):
|
||||
self.load_from_db()
|
||||
try:
|
||||
self.version_str = pkg.utils.updater.get_commit_id_and_time_and_msg()[:40 if len(pkg.utils.updater.get_commit_id_and_time_and_msg()) > 40 else len(pkg.utils.updater.get_commit_id_and_time_and_msg())]
|
||||
self.version_str = pkg.utils.updater.get_current_tag() # 从updater模块获取版本号
|
||||
except:
|
||||
pass
|
||||
|
||||
def report_to_server(self, subservice_name: str, count: int):
|
||||
"""向中央服务器报告使用量
|
||||
|
||||
只会报告此次请求的使用量,不会报告总量。
|
||||
不包含除版本号、使用类型、使用量以外的任何信息,仅供开发者分析使用情况。
|
||||
"""
|
||||
try:
|
||||
config = pkg.utils.context.get_config()
|
||||
if hasattr(config, "report_usage") and not config.report_usage:
|
||||
@@ -44,7 +56,9 @@ class DataGatherer:
|
||||
return self.usage[key_md5] if key_md5 in self.usage else {}
|
||||
|
||||
def report_text_model_usage(self, model, total_tokens):
|
||||
key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()
|
||||
"""调用方报告文字模型请求文字使用量"""
|
||||
|
||||
key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5() # 以key的md5进行储存
|
||||
|
||||
if key_md5 not in self.usage:
|
||||
self.usage[key_md5] = {}
|
||||
@@ -62,6 +76,8 @@ class DataGatherer:
|
||||
self.report_to_server("text", length)
|
||||
|
||||
def report_image_model_usage(self, size):
|
||||
"""调用方报告图片模型请求图片使用量"""
|
||||
|
||||
key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()
|
||||
|
||||
if key_md5 not in self.usage:
|
||||
@@ -79,6 +95,7 @@ class DataGatherer:
|
||||
self.report_to_server("image", 1)
|
||||
|
||||
def get_text_length_of_key(self, key):
|
||||
"""获取指定api-key (明文) 的文字总使用量(本地记录)"""
|
||||
key_md5 = hashlib.md5(key.encode('utf-8')).hexdigest()
|
||||
if key_md5 not in self.usage:
|
||||
return 0
|
||||
@@ -88,6 +105,8 @@ class DataGatherer:
|
||||
return sum(self.usage[key_md5]["text"].values())
|
||||
|
||||
def get_image_count_of_key(self, key):
|
||||
"""获取指定api-key (明文) 的图片总使用量(本地记录)"""
|
||||
|
||||
key_md5 = hashlib.md5(key.encode('utf-8')).hexdigest()
|
||||
if key_md5 not in self.usage:
|
||||
return 0
|
||||
@@ -97,6 +116,7 @@ class DataGatherer:
|
||||
return sum(self.usage[key_md5]["image"].values())
|
||||
|
||||
def get_total_text_length(self):
|
||||
"""获取所有api-key的文字总使用量(本地记录)"""
|
||||
total = 0
|
||||
for key in self.usage:
|
||||
if "text" not in self.usage[key]:
|
||||
|
||||
@@ -0,0 +1,3 @@
"""
Database operation wrappers
"""
@@ -1,3 +1,6 @@
"""
Database management module
"""
import hashlib
|
||||
import json
|
||||
import logging
|
||||
@@ -9,9 +12,9 @@ import sqlite3
|
||||
import pkg.utils.context
|
||||
|
||||
|
||||
# 数据库管理
|
||||
# 为其他模块提供数据库操作接口
|
||||
class DatabaseManager:
|
||||
"""封装数据库底层操作,并提供方法给上层使用"""
|
||||
|
||||
conn = None
|
||||
cursor = None
|
||||
|
||||
@@ -23,21 +26,25 @@ class DatabaseManager:
|
||||
|
||||
# 连接到数据库文件
|
||||
def reconnect(self):
|
||||
"""连接到数据库"""
|
||||
self.conn = sqlite3.connect('database.db', check_same_thread=False)
|
||||
self.cursor = self.conn.cursor()
|
||||
|
||||
def close(self):
|
||||
self.conn.close()
|
||||
|
||||
def execute(self, *args, **kwargs) -> Cursor:
|
||||
def __execute__(self, *args, **kwargs) -> Cursor:
|
||||
# logging.debug('SQL: {}'.format(sql))
|
||||
logging.debug('SQL: {}'.format(args))
|
||||
c = self.cursor.execute(*args, **kwargs)
|
||||
self.conn.commit()
|
||||
return c
|
||||
|
||||
# 初始化数据库的函数
|
||||
def initialize_database(self):
|
||||
self.execute("""
|
||||
"""创建数据表"""
|
||||
|
||||
self.__execute__("""
|
||||
create table if not exists `sessions` (
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`name` varchar(255) not null,
|
||||
@@ -46,11 +53,24 @@ class DatabaseManager:
|
||||
`create_timestamp` bigint not null,
|
||||
`last_interact_timestamp` bigint not null,
|
||||
`status` varchar(255) not null default 'on_going',
|
||||
`default_prompt` text not null default '',
|
||||
`prompt` text not null
|
||||
)
|
||||
""")
|
||||
|
||||
self.execute("""
|
||||
# 检查sessions表是否存在`default_prompt`字段
|
||||
self.__execute__("PRAGMA table_info('sessions')")
|
||||
columns = self.cursor.fetchall()
|
||||
has_default_prompt = False
|
||||
for field in columns:
|
||||
if field[1] == 'default_prompt':
|
||||
has_default_prompt = True
|
||||
break
|
||||
if not has_default_prompt:
|
||||
self.__execute__("alter table `sessions` add column `default_prompt` text not null default ''")
|
||||
|
||||
|
||||
self.__execute__("""
|
||||
create table if not exists `account_fee`(
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`key_md5` varchar(255) not null,
|
||||
@@ -59,7 +79,7 @@ class DatabaseManager:
|
||||
)
|
||||
""")
|
||||
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
create table if not exists `account_usage`(
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`json` text not null
|
||||
@@ -69,47 +89,49 @@ class DatabaseManager:
|
||||
|
||||
# session持久化
|
||||
def persistence_session(self, subject_type: str, subject_number: int, create_timestamp: int,
|
||||
last_interact_timestamp: int, prompt: str):
|
||||
last_interact_timestamp: int, prompt: str, default_prompt: str = ''):
|
||||
"""持久化指定session"""
|
||||
|
||||
# 检查是否已经有了此name和create_timestamp的session
|
||||
# 如果有,就更新prompt和last_interact_timestamp
|
||||
# 如果没有,就插入一条新的记录
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select count(*) from `sessions` where `type` = '{}' and `number` = {} and `create_timestamp` = {}
|
||||
""".format(subject_type, subject_number, create_timestamp))
|
||||
count = self.cursor.fetchone()[0]
|
||||
if count == 0:
|
||||
|
||||
sql = """
|
||||
insert into `sessions` (`name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`)
|
||||
values (?, ?, ?, ?, ?, ?)
|
||||
insert into `sessions` (`name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `default_prompt`)
|
||||
values (?, ?, ?, ?, ?, ?, ?)
|
||||
"""
|
||||
|
||||
self.execute(sql,
|
||||
("{}_{}".format(subject_type, subject_number), subject_type, subject_number, create_timestamp,
|
||||
last_interact_timestamp, prompt))
|
||||
self.__execute__(sql,
|
||||
("{}_{}".format(subject_type, subject_number), subject_type, subject_number, create_timestamp,
|
||||
last_interact_timestamp, prompt, default_prompt))
|
||||
else:
|
||||
sql = """
|
||||
update `sessions` set `last_interact_timestamp` = ?, `prompt` = ?
|
||||
where `type` = ? and `number` = ? and `create_timestamp` = ?
|
||||
"""
|
||||
|
||||
self.execute(sql, (last_interact_timestamp, prompt, subject_type,
|
||||
subject_number, create_timestamp))
|
||||
self.__execute__(sql, (last_interact_timestamp, prompt, subject_type,
|
||||
subject_number, create_timestamp))
|
||||
|
||||
# 显式关闭一个session
|
||||
def explicit_close_session(self, session_name: str, create_timestamp: int):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'explicitly_closed' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
def set_session_ongoing(self, session_name: str, create_timestamp: int):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'on_going' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
# 设置session为过期
|
||||
def set_session_expired(self, session_name: str, create_timestamp: int):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'expired' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
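A hypothetical call matching the widened persistence_session signature above; session.py (later in this diff) stores the prompt as JSON text, so both lists are serialized first. The variable names are illustrative.

```python
import json, time

db_mgr.persistence_session(
    "person", 10010,                    # subject type and QQ number (illustrative)
    create_timestamp,                   # timestamp recorded when the session was created
    int(time.time()),                   # last interaction time
    json.dumps(prompt_rounds),          # list of {"role", "content"} rounds, serialized
    json.dumps(default_prompt_rounds),  # the session's default prompt, serialized
)
```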
@@ -117,8 +139,8 @@ class DatabaseManager:
|
||||
def load_valid_sessions(self) -> dict:
|
||||
# 从数据库中加载所有还没过期的session
|
||||
config = pkg.utils.context.get_config()
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`
|
||||
from `sessions` where `last_interact_timestamp` > {}
|
||||
""".format(int(time.time()) - config.session_expire_time))
|
||||
results = self.cursor.fetchall()
|
||||
@@ -131,6 +153,7 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
|
||||
# 当且仅当最后一个该对象的会话是on_going状态时,才会被加载
|
||||
if status == 'on_going':
|
||||
@@ -139,7 +162,8 @@ class DatabaseManager:
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt
|
||||
}
|
||||
else:
|
||||
if session_name in sessions:
|
||||
@@ -150,8 +174,8 @@ class DatabaseManager:
|
||||
# 获取此session_name前一个session的数据
|
||||
def last_session(self, session_name: str, cursor_timestamp: int):
|
||||
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`
|
||||
from `sessions` where `name` = '{}' and `last_interact_timestamp` < {} order by `last_interact_timestamp` desc
|
||||
limit 1
|
||||
""".format(session_name, cursor_timestamp))
|
||||
@@ -167,20 +191,22 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
|
||||
return {
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt
|
||||
}
|
||||
|
||||
# 获取此session_name后一个session的数据
|
||||
def next_session(self, session_name: str, cursor_timestamp: int):
|
||||
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`
|
||||
from `sessions` where `name` = '{}' and `last_interact_timestamp` > {} order by `last_interact_timestamp` asc
|
||||
limit 1
|
||||
""".format(session_name, cursor_timestamp))
|
||||
@@ -196,19 +222,21 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
|
||||
return {
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt
|
||||
}
|
||||
|
||||
# 列出与某个对象的所有对话session
|
||||
def list_history(self, session_name: str, capacity: int, page: int, replace: str = ""):
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
def list_history(self, session_name: str, capacity: int, page: int):
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`
|
||||
from `sessions` where `name` = '{}' order by `last_interact_timestamp` desc limit {} offset {}
|
||||
""".format(session_name, capacity, capacity * page))
|
||||
results = self.cursor.fetchall()
|
||||
@@ -221,17 +249,40 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
|
||||
sessions.append({
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt if replace == "" else prompt.replace(replace, "")
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt
|
||||
})
|
||||
|
||||
return sessions
|
||||
|
||||
def delete_history(self, session_name: str, index: int) -> bool:
|
||||
# 删除倒序第index个session
|
||||
# 查找其id再删除
|
||||
self.__execute__("""
|
||||
delete from `sessions` where `id` in (select `id` from `sessions` where `name` = '{}' order by `last_interact_timestamp` desc limit 1 offset {})
|
||||
""".format(session_name, index))
|
||||
|
||||
return self.cursor.rowcount == 1
|
||||
|
||||
def delete_all_history(self, session_name: str) -> bool:
|
||||
self.__execute__("""
|
||||
delete from `sessions` where `name` = '{}'
|
||||
""".format(session_name))
|
||||
return self.cursor.rowcount > 0
|
||||
|
||||
def delete_all_session_history(self) -> bool:
|
||||
self.__execute__("""
|
||||
delete from `sessions`
|
||||
""")
|
||||
return self.cursor.rowcount > 0
|
||||
|
||||
# 将apikey的使用量存进数据库
|
||||
def dump_api_key_usage(self, api_keys: dict, usage: dict):
|
||||
logging.debug('dumping api key usage...')
|
||||
@@ -246,22 +297,22 @@ class DatabaseManager:
|
||||
usage_count = usage[key_md5]
|
||||
# 将使用量存进数据库
|
||||
# 先检查是否已存在
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select count(*) from `api_key_usage` where `key_md5` = '{}'""".format(key_md5))
|
||||
result = self.cursor.fetchone()
|
||||
if result[0] == 0:
|
||||
# 不存在则插入
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
insert into `api_key_usage` (`key_md5`, `usage`,`timestamp`) values ('{}', {}, {})
|
||||
""".format(key_md5, usage_count, int(time.time())))
|
||||
else:
|
||||
# 存在则更新,timestamp设置为当前
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `api_key_usage` set `usage` = {}, `timestamp` = {} where `key_md5` = '{}'
|
||||
""".format(usage_count, int(time.time()), key_md5))
|
||||
|
||||
def load_api_key_usage(self):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select `key_md5`, `usage` from `api_key_usage`
|
||||
""")
|
||||
results = self.cursor.fetchall()
|
||||
@@ -273,23 +324,24 @@ class DatabaseManager:
|
||||
return usage
|
||||
|
||||
def dump_usage_json(self, usage: dict):
|
||||
|
||||
json_str = json.dumps(usage)
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select count(*) from `account_usage`""")
|
||||
result = self.cursor.fetchone()
|
||||
if result[0] == 0:
|
||||
# 不存在则插入
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
insert into `account_usage` (`json`) values ('{}')
|
||||
""".format(json_str))
|
||||
else:
|
||||
# 存在则更新
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `account_usage` set `json` = '{}' where `id` = 1
|
||||
""".format(json_str))
|
||||
|
||||
def load_usage_json(self):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select `json` from `account_usage` order by id desc limit 1
|
||||
""")
|
||||
result = self.cursor.fetchone()
|
||||
|
||||
@@ -0,0 +1,2 @@
"""OpenAI interface handling and session management
"""
|
||||
@@ -1,11 +1,20 @@
|
||||
# 多情景预设值管理
|
||||
import json
|
||||
import logging
|
||||
|
||||
__current__ = "default"
|
||||
"""当前默认使用的情景预设的名称
|
||||
|
||||
由管理员使用`!default <名称>`指令切换
|
||||
"""
|
||||
|
||||
__prompts_from_files__ = {}
|
||||
"""从文件中读取的情景预设值"""
|
||||
|
||||
__scenario_from_files__ = {}
|
||||
|
||||
|
||||
def read_prompt_from_file() -> str:
|
||||
def read_prompt_from_file():
|
||||
"""从文件读取预设值"""
|
||||
# 读取prompts/目录下的所有文件,以文件名为键,文件内容为值
|
||||
# 保存在__prompts_from_files__中
|
||||
@@ -18,6 +27,19 @@ def read_prompt_from_file() -> str:
|
||||
__prompts_from_files__[file] = f.read()
|
||||
|
||||
|
||||
def read_scenario_from_file():
|
||||
"""从JSON文件读取情景预设"""
|
||||
global __scenario_from_files__
|
||||
import os
|
||||
|
||||
__scenario_from_files__ = {}
|
||||
for file in os.listdir("scenario"):
|
||||
if file == "default-template.json":
|
||||
continue
|
||||
with open(os.path.join("scenario", file), encoding="utf-8") as f:
|
||||
__scenario_from_files__[file] = json.load(f)
|
||||
|
||||
|
||||
def get_prompt_dict() -> dict:
|
||||
"""获取预设值字典"""
|
||||
import config
|
||||
@@ -60,15 +82,40 @@ def set_to_default():
|
||||
__current__ = list(default_dict.keys())[0]
|
||||
|
||||
|
||||
def get_prompt(name: str = None) -> str:
|
||||
def get_prompt(name: str = None) -> list:
|
||||
global __scenario_from_files__
|
||||
import config
|
||||
preset_mode = config.preset_mode
|
||||
|
||||
"""获取预设值"""
|
||||
if name is None:
|
||||
name = get_current()
|
||||
|
||||
default_dict = get_prompt_dict()
|
||||
# JSON预设方式
|
||||
if preset_mode == 'full_scenario':
|
||||
import os
|
||||
|
||||
for key in default_dict:
|
||||
if key.lower().startswith(name.lower()):
|
||||
return default_dict[key]
|
||||
for key in __scenario_from_files__:
|
||||
if key.lower().startswith(name.lower()):
|
||||
logging.debug('成功加载情景预设从JSON文件: {}'.format(key))
|
||||
return __scenario_from_files__[key]['prompt']
|
||||
|
||||
# 默认预设方式
|
||||
elif preset_mode == 'default':
|
||||
|
||||
raise KeyError("未找到情景预设: " + name)
|
||||
default_dict = get_prompt_dict()
|
||||
|
||||
for key in default_dict:
|
||||
if key.lower().startswith(name.lower()):
|
||||
return [
|
||||
{
|
||||
"role": "user",
|
||||
"content": default_dict[key]
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "好的。"
|
||||
}
|
||||
]
|
||||
|
||||
raise KeyError("未找到默认情景预设: " + name)
|
||||
|
||||
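For reference, a hypothetical scenario file in the shape this code expects — a top-level "prompt" key holding ChatCompletion-style rounds, as in scenario/default-template.json (shown here as the equivalent Python literal):

```python
# e.g. scenario/tutor.json (file name and contents are illustrative)
example_scenario = {
    "prompt": [
        {"role": "system", "content": "You are a patient programming tutor."},
        {"role": "user", "content": "Answer in short paragraphs."},
        {"role": "assistant", "content": "好的。"},
    ]
}
# read_scenario_from_file() loads such files with json.load(), and get_prompt()
# returns example_scenario["prompt"] when preset_mode is "full_scenario".
```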
@@ -5,18 +5,26 @@ import logging
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
|
||||
|
||||
class KeysManager:
|
||||
api_key = {}
|
||||
"""所有api-key"""
|
||||
|
||||
# api-key的使用量
|
||||
# 其中键为api-key的md5值,值为使用量
|
||||
using_key = ""
|
||||
"""当前使用的api-key
|
||||
"""
|
||||
|
||||
alerted = []
|
||||
"""已提示过超额的key
|
||||
|
||||
记录在此以避免重复提示
|
||||
"""
|
||||
|
||||
# 在此list中的都是经超额报错标记过的api-key
|
||||
# 记录的是key值,仅在运行时有效
|
||||
exceeded = []
|
||||
"""已超额的key
|
||||
|
||||
供自动切换功能识别
|
||||
"""
|
||||
|
||||
def get_using_key(self):
|
||||
return self.using_key
|
||||
@@ -25,8 +33,6 @@ class KeysManager:
|
||||
return hashlib.md5(self.using_key.encode('utf-8')).hexdigest()
|
||||
|
||||
def __init__(self, api_key):
|
||||
# if hasattr(config, 'api_key_usage_threshold'):
|
||||
# self.api_key_usage_threshold = config.api_key_usage_threshold
|
||||
|
||||
if type(api_key) is dict:
|
||||
self.api_key = api_key
|
||||
@@ -42,9 +48,13 @@ class KeysManager:
|
||||
|
||||
self.auto_switch()
|
||||
|
||||
# 根据tested自动切换到可用的api-key
|
||||
# 返回是否切换成功, 切换后的api-key的别名
|
||||
def auto_switch(self) -> (bool, str):
|
||||
"""尝试切换api-key
|
||||
|
||||
Returns:
|
||||
是否切换成功, 切换后的api-key的别名
|
||||
"""
|
||||
|
||||
for key_name in self.api_key:
|
||||
if self.api_key[key_name] not in self.exceeded:
|
||||
self.using_key = self.api_key[key_name]
|
||||
@@ -68,12 +78,9 @@ class KeysManager:
|
||||
def add(self, key_name, key):
|
||||
self.api_key[key_name] = key
|
||||
|
||||
# 设置当前使用的api-key使用量超限
|
||||
# 这是在尝试调用api时发生超限异常时调用的
|
||||
def set_current_exceeded(self):
|
||||
# md5 = hashlib.md5(self.using_key.encode('utf-8')).hexdigest()
|
||||
# self.usage[md5] = self.api_key_usage_threshold
|
||||
# self.fee[md5] = self.api_key_fee_threshold
|
||||
"""设置当前使用的api-key使用量超限
|
||||
"""
|
||||
self.exceeded.append(self.using_key)
|
||||
|
||||
def get_key_name(self, api_key):
|
||||
@@ -81,4 +88,4 @@ class KeysManager:
|
||||
for key_name in self.api_key:
|
||||
if self.api_key[key_name] == api_key:
|
||||
return key_name
|
||||
return ""
|
||||
return ""
|
||||
|
||||
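A short usage sketch of the switching flow these attributes support; the key values are placeholders.

```python
mgr = KeysManager({"default": "sk-aaaa...", "backup": "sk-bbbb..."})  # placeholder keys
mgr.set_current_exceeded()            # called when the API reports the current key's quota is exhausted
switched, alias = mgr.auto_switch()   # moves on to the next key whose value is not in `exceeded`
```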
@@ -5,11 +5,14 @@ import openai
|
||||
import pkg.openai.keymgr
|
||||
import pkg.utils.context
|
||||
import pkg.audit.gatherer
|
||||
from pkg.openai.modelmgr import ModelRequest, create_openai_model_request
|
||||
|
||||
|
||||
# 为其他模块提供与OpenAI交互的接口
|
||||
class OpenAIInteract:
|
||||
api_params = {}
|
||||
"""OpenAI 接口封装
|
||||
|
||||
将文字接口和图片接口封装供调用方使用
|
||||
"""
|
||||
|
||||
key_mgr: pkg.openai.keymgr.KeysManager = None
|
||||
|
||||
@@ -20,7 +23,6 @@ class OpenAIInteract:
|
||||
}
|
||||
|
||||
def __init__(self, api_key: str):
|
||||
# self.api_key = api_key
|
||||
|
||||
self.key_mgr = pkg.openai.keymgr.KeysManager(api_key)
|
||||
self.audit_mgr = pkg.audit.gatherer.DataGatherer()
|
||||
@@ -32,27 +34,50 @@ class OpenAIInteract:
|
||||
pkg.utils.context.set_openai_manager(self)
|
||||
|
||||
# 请求OpenAI Completion
|
||||
def request_completion(self, prompt, stop):
|
||||
def request_completion(self, prompts) -> str:
|
||||
"""请求补全接口回复
|
||||
|
||||
Parameters:
|
||||
prompts (str): 提示语
|
||||
|
||||
Returns:
|
||||
str: 回复
|
||||
"""
|
||||
|
||||
config = pkg.utils.context.get_config()
|
||||
response = openai.Completion.create(
|
||||
prompt=prompt,
|
||||
stop=stop,
|
||||
|
||||
# 根据模型选择使用的接口
|
||||
ai: ModelRequest = create_openai_model_request(
|
||||
config.completion_api_params['model'],
|
||||
'user',
|
||||
config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None
|
||||
)
|
||||
ai.request(
|
||||
prompts,
|
||||
**config.completion_api_params
|
||||
)
|
||||
response = ai.get_response()
|
||||
|
||||
logging.debug("OpenAI response: %s", response)
|
||||
|
||||
if 'model' in config.completion_api_params:
|
||||
self.audit_mgr.report_text_model_usage(config.completion_api_params['model'],
|
||||
response['usage']['total_tokens'])
|
||||
ai.get_total_tokens())
|
||||
elif 'engine' in config.completion_api_params:
|
||||
self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'],
|
||||
response['usage']['total_tokens'])
|
||||
|
||||
return response
|
||||
return ai.get_message()
|
||||
|
||||
def request_image(self, prompt):
|
||||
def request_image(self, prompt) -> dict:
|
||||
"""请求图片接口回复
|
||||
|
||||
Parameters:
|
||||
prompt (str): 提示语
|
||||
|
||||
Returns:
|
||||
dict: 响应
|
||||
"""
|
||||
config = pkg.utils.context.get_config()
|
||||
params = config.image_api_params if hasattr(config, "image_api_params") else self.default_image_api_params
|
||||
|
||||
|
||||
@@ -1,7 +1,26 @@
# Abstract interface for interacting with the models
"""Low-level wrapper around the OpenAI interfaces

The conversation interfaces currently used are:
    ChatCompletion - models such as gpt-3.5-turbo
    Completion - models such as text-davinci-003
This module wraps the request implementations of these two interfaces and gives the upper layers a single way to call them
"""
import openai, logging, threading, asyncio
import openai.error as aiE

COMPLETION_MODELS = {
    'text-davinci-003'
    'text-davinci-003',
    'text-davinci-002',
    'code-davinci-002',
    'code-cushman-001',
    'text-curie-001',
    'text-babbage-001',
    'text-ada-001',
}

CHAT_COMPLETION_MODELS = {
    'gpt-3.5-turbo',
    'gpt-3.5-turbo-0301',
}

EDIT_MODELS = {

@@ -13,22 +32,153 @@ IMAGE_MODELS = {
|
||||
}
|
||||
|
||||
|
||||
# ModelManager
|
||||
# 由session包含
|
||||
class ModelMgr(object):
|
||||
class ModelRequest:
|
||||
"""模型接口请求父类"""
|
||||
|
||||
using_completion_model = ""
|
||||
using_edit_model = ""
|
||||
using_image_model = ""
|
||||
can_chat = False
|
||||
runtime: threading.Thread = None
|
||||
ret = {}
|
||||
proxy: str = None
|
||||
request_ready = True
|
||||
error_info: str = "若在没有任何错误的情况下看到这句话,请带着配置文件上报Issues"
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
def __init__(self, model_name, user_name, request_fun, http_proxy:str = None, time_out = None):
|
||||
self.model_name = model_name
|
||||
self.user_name = user_name
|
||||
self.request_fun = request_fun
|
||||
self.time_out = time_out
|
||||
if http_proxy != None:
|
||||
self.proxy = http_proxy
|
||||
openai.proxy = self.proxy
|
||||
self.request_ready = False
|
||||
|
||||
def get_using_completion_model(self):
|
||||
return self.using_completion_model
|
||||
async def __a_request__(self, **kwargs):
|
||||
"""异步请求"""
|
||||
|
||||
def get_using_edit_model(self):
|
||||
return self.using_edit_model
|
||||
try:
|
||||
self.ret:dict = await self.request_fun(**kwargs)
|
||||
self.request_ready = True
|
||||
except aiE.APIConnectionError as e:
|
||||
self.error_info = "{}\n请检查网络连接或代理是否正常".format(e)
|
||||
raise ConnectionError(self.error_info)
|
||||
except ValueError as e:
|
||||
self.error_info = "{}\n该错误可能是由于http_proxy格式设置错误引起的"
|
||||
except Exception as e:
|
||||
self.error_info = "{}\n由于请求异常产生的未知错误,请查看日志".format(e)
|
||||
raise Exception(self.error_info)
|
||||
|
||||
def get_using_image_model(self):
|
||||
return self.using_image_model
|
||||
def request(self, **kwargs):
|
||||
"""向接口发起请求"""
|
||||
|
||||
if self.proxy != None: #异步请求
|
||||
self.request_ready = False
|
||||
loop = asyncio.new_event_loop()
|
||||
self.runtime = threading.Thread(
|
||||
target=loop.run_until_complete,
|
||||
args=(self.__a_request__(**kwargs),)
|
||||
)
|
||||
self.runtime.start()
|
||||
else: #同步请求
|
||||
self.ret = self.request_fun(**kwargs)
|
||||
|
||||
def __msg_handle__(self, msg):
|
||||
"""将prompt dict转换成接口需要的格式"""
|
||||
return msg
|
||||
|
||||
def ret_handle(self):
|
||||
'''
|
||||
API消息返回处理函数
|
||||
若重写该方法,应检查异步线程状态,或在需要检查处super该方法
|
||||
'''
|
||||
if self.runtime != None and isinstance(self.runtime, threading.Thread):
|
||||
self.runtime.join(self.time_out)
|
||||
if self.request_ready:
|
||||
return
|
||||
raise Exception(self.error_info)
|
||||
|
||||
def get_total_tokens(self):
|
||||
try:
|
||||
return self.ret['usage']['total_tokens']
|
||||
except:
|
||||
return 0
|
||||
|
||||
def get_message(self):
|
||||
return self.message
|
||||
|
||||
def get_response(self):
|
||||
return self.ret
|
||||
|
||||
|
||||
class ChatCompletionModel(ModelRequest):
|
||||
"""ChatCompletion接口的请求实现"""
|
||||
|
||||
Chat_role = ['system', 'user', 'assistant']
|
||||
def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
|
||||
if http_proxy == None:
|
||||
request_fun = openai.ChatCompletion.create
|
||||
else:
|
||||
request_fun = openai.ChatCompletion.acreate
|
||||
self.can_chat = True
|
||||
super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
|
||||
|
||||
def request(self, prompts, **kwargs):
|
||||
prompts = self.__msg_handle__(prompts)
|
||||
kwargs['messages'] = prompts
|
||||
super().request(**kwargs)
|
||||
self.ret_handle()
|
||||
|
||||
def __msg_handle__(self, msgs):
|
||||
temp_msgs = []
|
||||
# 把msgs拷贝进temp_msgs
|
||||
for msg in msgs:
|
||||
temp_msgs.append(msg.copy())
|
||||
return temp_msgs
|
||||
|
||||
def get_message(self):
|
||||
return self.ret["choices"][0]["message"]['content'] #需要时直接加载加快请求速度,降低内存消耗
|
||||
|
||||
|
||||
class CompletionModel(ModelRequest):
|
||||
"""Completion接口的请求实现"""
|
||||
|
||||
def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
|
||||
if http_proxy == None:
|
||||
request_fun = openai.Completion.create
|
||||
else:
|
||||
request_fun = openai.Completion.acreate
|
||||
super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
|
||||
|
||||
def request(self, prompts, **kwargs):
|
||||
prompts = self.__msg_handle__(prompts)
|
||||
kwargs['prompt'] = prompts
|
||||
super().request(**kwargs)
|
||||
self.ret_handle()
|
||||
|
||||
def __msg_handle__(self, msgs):
|
||||
prompt = ''
|
||||
for msg in msgs:
|
||||
prompt = prompt + "{}: {}\n".format(msg['role'], msg['content'])
|
||||
# for msg in msgs:
|
||||
# if msg['role'] == 'assistant':
|
||||
# prompt = prompt + "{}\n".format(msg['content'])
|
||||
# else:
|
||||
# prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content'])
|
||||
prompt = prompt + "assistant: "
|
||||
return prompt
|
||||
|
||||
def get_message(self):
|
||||
return self.ret["choices"][0]["text"]
|
||||
|
||||
|
||||
def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest:
|
||||
"""使用给定的模型名称创建模型请求对象"""
|
||||
if model_name in CHAT_COMPLETION_MODELS:
|
||||
model = ChatCompletionModel(model_name, user_name, http_proxy)
|
||||
elif model_name in COMPLETION_MODELS:
|
||||
model = CompletionModel(model_name, user_name, http_proxy)
|
||||
else :
|
||||
log = "找不到模型[{}],请检查配置文件".format(model_name)
|
||||
logging.error(log)
|
||||
raise IndexError(log)
|
||||
logging.debug("使用接口[{}]创建模型请求[{}]".format(model.__class__.__name__, model_name))
|
||||
return model
|
||||
|
||||
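A condensed usage sketch of the classes above, mirroring how OpenAIInteract.request_completion() drives them earlier in this diff; the prompt contents and parameters are illustrative.

```python
ai = create_openai_model_request("gpt-3.5-turbo", "user", http_proxy=None)
ai.request(
    [{"role": "user", "content": "Hello!"}],  # prompts in ChatCompletion messages format
    model="gpt-3.5-turbo",
    max_tokens=1024,
)
print(ai.get_message())        # reply text, e.g. choices[0].message.content for chat models
print(ai.get_total_tokens())   # token usage, later reported through DataGatherer
```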
@@ -1,8 +1,15 @@
|
||||
"""主线使用的会话管理模块
|
||||
|
||||
每个人、每个群单独一个session,session内部保留了对话的上下文,
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
import json
|
||||
|
||||
import pkg.openai.manager
|
||||
import pkg.openai.modelmgr
|
||||
import pkg.database.manager
|
||||
import pkg.utils.context
|
||||
|
||||
@@ -18,8 +25,38 @@ class SessionOfflineStatus:
|
||||
EXPLICITLY_CLOSED = 'explicitly_closed'
|
||||
|
||||
|
||||
# 重置session.prompt
|
||||
def reset_session_prompt(session_name, prompt):
|
||||
# 备份原始数据
|
||||
bak_path = 'logs/{}-{}.bak'.format(
|
||||
session_name,
|
||||
time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
|
||||
)
|
||||
f = open(bak_path, 'w+')
|
||||
f.write(prompt)
|
||||
f.close()
|
||||
# 生成新数据
|
||||
config = pkg.utils.context.get_config()
|
||||
prompt = [
|
||||
{
|
||||
'role': 'system',
|
||||
'content': config.default_prompt['default'] if type(config.default_prompt) == dict else config.default_prompt
|
||||
}
|
||||
]
|
||||
# 警告
|
||||
logging.warning(
|
||||
"""
|
||||
用户[{}]的数据已被重置,有可能是因为数据版本过旧或存储错误
|
||||
原始数据将备份在:
|
||||
{}""".format(session_name, bak_path)
|
||||
) # 为保证多行文本格式正确故无缩进
|
||||
return prompt
|
||||
|
||||
|
||||
# 从数据加载session
|
||||
def load_sessions():
|
||||
"""从数据库加载sessions"""
|
||||
|
||||
global sessions
|
||||
|
||||
db_inst = pkg.utils.context.get_database_manager()
|
||||
@@ -33,7 +70,13 @@ def load_sessions():
|
||||
temp_session.name = session_name
|
||||
temp_session.create_timestamp = session_data[session_name]['create_timestamp']
|
||||
temp_session.last_interact_timestamp = session_data[session_name]['last_interact_timestamp']
|
||||
temp_session.prompt = session_data[session_name]['prompt']
|
||||
try:
|
||||
temp_session.prompt = json.loads(session_data[session_name]['prompt'])
|
||||
except Exception:
|
||||
temp_session.prompt = reset_session_prompt(session_name, session_data[session_name]['prompt'])
|
||||
temp_session.persistence()
|
||||
temp_session.default_prompt = json.loads(session_data[session_name]['default_prompt']) if \
|
||||
session_data[session_name]['default_prompt'] else []
|
||||
|
||||
sessions[session_name] = temp_session
|
||||
|
||||
@@ -60,16 +103,17 @@ def dump_session(session_name: str):
|
||||
class Session:
|
||||
name = ''
|
||||
|
||||
prompt = ""
|
||||
prompt = []
|
||||
"""使用list来保存会话中的回合"""
|
||||
|
||||
import config
|
||||
|
||||
user_name = config.user_name if hasattr(config, 'user_name') and config.user_name != '' else 'You'
|
||||
bot_name = config.bot_name if hasattr(config, 'bot_name') and config.bot_name != '' else 'Bot'
|
||||
default_prompt = []
|
||||
"""本session的默认prompt"""
|
||||
|
||||
create_timestamp = 0
|
||||
"""会话创建时间"""
|
||||
|
||||
last_interact_timestamp = 0
|
||||
"""上次交互(产生回复)时间"""
|
||||
|
||||
just_switched_to_exist_session = False
|
||||
|
||||
@@ -89,21 +133,14 @@ class Session:
|
||||
logging.debug('{},lock release successfully,{}'.format(self.name, self.response_lock))
|
||||
|
||||
# 从配置文件获取会话预设信息
|
||||
def get_default_prompt(self, use_default: str=None):
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
def get_default_prompt(self, use_default: str = None):
|
||||
import pkg.openai.dprompt as dprompt
|
||||
|
||||
if use_default is None:
|
||||
current_default_prompt = dprompt.get_prompt(dprompt.get_current())
|
||||
else:
|
||||
current_default_prompt = dprompt.get_prompt(use_default)
|
||||
use_default = dprompt.get_current()
|
||||
|
||||
user_name = config.user_name if hasattr(config, 'user_name') and config.user_name != '' else 'You'
|
||||
bot_name = config.bot_name if hasattr(config, 'bot_name') and config.bot_name != '' else 'Bot'
|
||||
|
||||
return (user_name + ":{}\n".format(current_default_prompt) + bot_name + ":好的\n") \
|
||||
if current_default_prompt != '' else ''
|
||||
current_default_prompt = dprompt.get_prompt(use_default)
|
||||
return current_default_prompt
|
||||
|
||||
def __init__(self, name: str):
|
||||
self.name = name
|
||||
@@ -112,7 +149,9 @@ class Session:
|
||||
self.schedule()
|
||||
|
||||
self.response_lock = threading.Lock()
|
||||
self.prompt = self.get_default_prompt()
|
||||
|
||||
self.default_prompt = self.get_default_prompt()
|
||||
logging.debug("prompt is: {}".format(self.default_prompt))
|
||||
|
||||
# 设定检查session最后一次对话是否超过过期时间的计时器
|
||||
def schedule(self):
|
||||
@@ -151,36 +190,32 @@ class Session:
|
||||
# 请求回复
|
||||
# 这个函数是阻塞的
|
||||
def append(self, text: str) -> str:
|
||||
"""向session中添加一条消息,返回接口回复"""
|
||||
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
|
||||
# 触发插件事件
|
||||
if self.prompt == self.get_default_prompt():
|
||||
if not self.prompt:
|
||||
args = {
|
||||
'session_name': self.name,
|
||||
'session': self,
|
||||
'default_prompt': self.prompt,
|
||||
'default_prompt': self.default_prompt,
|
||||
}
|
||||
|
||||
event = pkg.plugin.host.emit(plugin_models.SessionFirstMessageReceived, **args)
|
||||
if event.is_prevented_default():
|
||||
return None
|
||||
|
||||
# max_rounds = config.prompt_submit_round_amount if hasattr(config, 'prompt_submit_round_amount') else 7
|
||||
config = pkg.utils.context.get_config()
|
||||
max_rounds = 1000 # 不再限制回合数
|
||||
max_length = config.prompt_submit_length if hasattr(config, "prompt_submit_length") else 1024
|
||||
|
||||
# 向API请求补全
|
||||
response = pkg.utils.context.get_openai_manager().request_completion(
|
||||
self.cut_out(self.prompt + self.user_name + ':' +
|
||||
text + '\n' + self.bot_name + ':',
|
||||
max_rounds, max_length),
|
||||
self.user_name + ':')
|
||||
message = pkg.utils.context.get_openai_manager().request_completion(
|
||||
self.cut_out(text, max_length),
|
||||
)
|
||||
|
||||
self.prompt += self.user_name + ':' + text + '\n' + self.bot_name + ':'
|
||||
# print(response)
|
||||
# 处理回复
|
||||
res_test = response["choices"][0]["text"]
|
||||
# 成功获取,处理回复
|
||||
res_test = message
|
||||
res_ans = res_test
|
||||
|
||||
# 去除开头可能的提示
|
||||
@@ -189,50 +224,59 @@ class Session:
|
||||
del (res_ans_spt[0])
|
||||
res_ans = '\n\n'.join(res_ans_spt)
|
||||
|
||||
self.prompt += "{}".format(res_ans) + '\n'
|
||||
# 将此次对话的双方内容加入到prompt中
|
||||
self.prompt.append({'role': 'user', 'content': text})
|
||||
self.prompt.append({'role': 'assistant', 'content': res_ans})
|
||||
|
||||
if self.just_switched_to_exist_session:
|
||||
self.just_switched_to_exist_session = False
|
||||
self.set_ongoing()
|
||||
|
||||
return res_ans
|
||||
return res_ans if res_ans[0] != '\n' else res_ans[1:]
|
||||
|
||||
# 删除上一回合并返回上一回合的问题
|
||||
def undo(self) -> str:
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
|
||||
# 删除上一回合
|
||||
to_delete = self.cut_out(self.prompt, 1, 1024)
|
||||
# 删除最后两个消息
|
||||
if len(self.prompt) < 2:
|
||||
raise Exception('之前无对话,无法撤销')
|
||||
|
||||
self.prompt = self.prompt.replace(to_delete, '')
|
||||
question = self.prompt[-2]['content']
|
||||
self.prompt = self.prompt[:-2]
|
||||
|
||||
# 返回上一回合的问题
|
||||
return to_delete.split(self.bot_name + ':')[0].split(self.user_name + ':')[1].strip()
|
||||
return question
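A tiny illustration of the list-based undo above: the final user/assistant pair is dropped and the user's question from that round is handed back (sample data, not project code).

prompt = [
    {'role': 'user', 'content': 'What is 2+2?'},
    {'role': 'assistant', 'content': '4'},
]
question = prompt[-2]['content']  # the user's last question
prompt = prompt[:-2]              # drop the whole round
print(question)                   # -> What is 2+2?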
|
||||
|
||||
# 从尾部截取prompt里不多于max_rounds个回合,长度不大于max_tokens的字符串
|
||||
# 保证都是完整的对话
|
||||
def cut_out(self, prompt: str, max_rounds: int, max_tokens: int) -> str:
|
||||
# 分隔出每个回合
|
||||
rounds_spt_by_user_name = prompt.split(self.user_name + ':')
|
||||
# 构建对话体
|
||||
def cut_out(self, msg: str, max_tokens: int) -> list:
|
||||
"""将现有prompt进行切割处理,使得新的prompt长度不超过max_tokens"""
|
||||
# 如果用户消息长度超过max_tokens,直接返回
|
||||
temp_prompt: list = []
|
||||
temp_prompt += self.default_prompt
|
||||
temp_prompt.append(
|
||||
{
|
||||
'role': 'user',
|
||||
'content': msg
|
||||
}
|
||||
)
|
||||
|
||||
result = ''
|
||||
token_count = 0
|
||||
for item in temp_prompt:
|
||||
token_count += len(item['content'])
|
||||
|
||||
checked_rounds = 0
|
||||
# 从后往前遍历,加到result前面,检查result是否符合要求
|
||||
for i in range(len(rounds_spt_by_user_name) - 1, 0, -1):
|
||||
result_temp = self.user_name + ':' + rounds_spt_by_user_name[i] + result
|
||||
checked_rounds += 1
|
||||
|
||||
if checked_rounds > max_rounds:
|
||||
# 倒序遍历prompt
|
||||
for i in range(len(self.prompt) - 1, -1, -1):
|
||||
if token_count >= max_tokens:
|
||||
break
|
||||
|
||||
if int((len(result_temp.encode('utf-8')) - len(result_temp)) / 2 + len(result_temp)) > max_tokens:
|
||||
break
|
||||
# 将prompt加到temp_prompt倒数第二个位置
|
||||
temp_prompt.insert(len(self.default_prompt), self.prompt[i])
|
||||
token_count += len(self.prompt[i]['content'])
|
||||
|
||||
result = result_temp
|
||||
logging.debug('cut_out: {}'.format(json.dumps(temp_prompt, ensure_ascii=False, indent=4)))
|
||||
|
||||
logging.debug('cut_out: {}'.format(result))
|
||||
return result
|
||||
return temp_prompt
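For clarity, a self-contained sketch of the trimming strategy introduced here (names simplified; character count stands in for tokens, exactly as the len(content) accounting above does).

def trim_prompt(default_prompt: list, history: list, msg: str, max_tokens: int) -> list:
    # Budget starts with the preset messages plus the new user message.
    result = list(default_prompt) + [{'role': 'user', 'content': msg}]
    used = sum(len(m['content']) for m in result)
    # Walk history newest-to-oldest; keep inserting rounds until the budget is spent.
    for m in reversed(history):
        if used >= max_tokens:
            break
        result.insert(len(default_prompt), m)  # keeps chronological order in the output
        used += len(m['content'])
    return result

history = [{'role': 'user', 'content': 'hi'}, {'role': 'assistant', 'content': 'hello there'}]
print(trim_prompt([], history, 'and now?', max_tokens=1024))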
|
||||
|
||||
# 持久化session
|
||||
def persistence(self):
|
||||
@@ -247,11 +291,11 @@ class Session:
|
||||
subject_number = int(name_spt[1])
|
||||
|
||||
db_inst.persistence_session(subject_type, subject_number, self.create_timestamp, self.last_interact_timestamp,
|
||||
self.prompt)
|
||||
json.dumps(self.prompt), json.dumps(self.default_prompt))
|
||||
|
||||
# 重置session
|
||||
def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None):
|
||||
if not self.prompt.endswith(':好的\n'):
|
||||
if self.prompt:
|
||||
self.persistence()
|
||||
if explicit:
|
||||
# 触发插件事件
|
||||
@@ -267,7 +311,9 @@ class Session:
|
||||
|
||||
if expired:
|
||||
pkg.utils.context.get_database_manager().set_session_expired(self.name, self.create_timestamp)
|
||||
self.prompt = self.get_default_prompt(use_prompt)
|
||||
|
||||
self.default_prompt = self.get_default_prompt(use_prompt)
|
||||
self.prompt = []
|
||||
self.create_timestamp = int(time.time())
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
self.just_switched_to_exist_session = False
|
||||
@@ -291,7 +337,12 @@ class Session:
|
||||
|
||||
self.create_timestamp = last_one['create_timestamp']
|
||||
self.last_interact_timestamp = last_one['last_interact_timestamp']
|
||||
self.prompt = last_one['prompt']
|
||||
try:
|
||||
self.prompt = json.loads(last_one['prompt'])
|
||||
except json.decoder.JSONDecodeError:
|
||||
self.prompt = reset_session_prompt(self.name, last_one['prompt'])
|
||||
self.persistence()
|
||||
self.default_prompt = json.loads(last_one['default_prompt']) if last_one['default_prompt'] else []
|
||||
|
||||
self.just_switched_to_exist_session = True
|
||||
return self
|
||||
@@ -306,14 +357,24 @@ class Session:
|
||||
|
||||
self.create_timestamp = next_one['create_timestamp']
|
||||
self.last_interact_timestamp = next_one['last_interact_timestamp']
|
||||
self.prompt = next_one['prompt']
|
||||
try:
|
||||
self.prompt = json.loads(next_one['prompt'])
|
||||
except json.decoder.JSONDecodeError:
|
||||
self.prompt = reset_session_prompt(self.name, next_one['prompt'])
|
||||
self.persistence()
|
||||
self.default_prompt = json.loads(next_one['default_prompt']) if next_one['default_prompt'] else []
|
||||
|
||||
self.just_switched_to_exist_session = True
|
||||
return self
|
||||
|
||||
def list_history(self, capacity: int = 10, page: int = 0):
|
||||
return pkg.utils.context.get_database_manager().list_history(self.name, capacity, page,
|
||||
self.get_default_prompt())
|
||||
return pkg.utils.context.get_database_manager().list_history(self.name, capacity, page)
|
||||
|
||||
def delete_history(self, index: int) -> bool:
|
||||
return pkg.utils.context.get_database_manager().delete_history(self.name, index)
|
||||
|
||||
def delete_all_history(self) -> bool:
|
||||
return pkg.utils.context.get_database_manager().delete_all_history(self.name)
|
||||
|
||||
def draw_image(self, prompt: str):
|
||||
return pkg.utils.context.get_openai_manager().request_image(prompt)
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
"""插件支持包
|
||||
|
||||
包含插件基类、插件宿主以及部分API接口
|
||||
"""
|
||||
@@ -116,7 +116,9 @@ def initialize_plugins():
|
||||
|
||||
|
||||
def unload_plugins():
|
||||
""" 卸载插件 """
|
||||
""" 卸载插件
|
||||
"""
|
||||
# 不再显式卸载插件,因为当程序结束时,插件的析构函数会被系统执行
|
||||
# for plugin in __plugins__.values():
|
||||
# if plugin['enabled'] and plugin['instance'] is not None:
|
||||
# if not hasattr(plugin['instance'], '__del__'):
|
||||
|
||||
@@ -145,6 +145,7 @@ __current_registering_plugin__ = ""
|
||||
|
||||
|
||||
class Plugin:
|
||||
"""插件基类"""
|
||||
|
||||
host: host.PluginHost
|
||||
"""插件宿主,提供插件的一些基础功能"""
|
||||
|
||||
@@ -1,30 +1,34 @@
|
||||
import pkg.utils.context
|
||||
|
||||
|
||||
def is_banned(launcher_type: str, launcher_id: int) -> bool:
|
||||
def is_banned(launcher_type: str, launcher_id: int, sender_id: int) -> bool:
|
||||
if not pkg.utils.context.get_qqbot_manager().enable_banlist:
|
||||
return False
|
||||
|
||||
result = False
|
||||
|
||||
if launcher_type == 'group':
|
||||
for group_rule in pkg.utils.context.get_qqbot_manager().ban_group:
|
||||
if type(group_rule) == int:
|
||||
if group_rule == launcher_id: # 此群群号被禁用
|
||||
result = True
|
||||
elif type(group_rule) == str:
|
||||
if group_rule.startswith('!'):
|
||||
# 截取!后面的字符串作为表达式,判断是否匹配
|
||||
reg_str = group_rule[1:]
|
||||
import re
|
||||
if re.match(reg_str, str(launcher_id)): # 被豁免,最高级别
|
||||
result = False
|
||||
break
|
||||
else:
|
||||
# 判断是否匹配regexp
|
||||
import re
|
||||
if re.match(group_rule, str(launcher_id)): # 此群群号被禁用
|
||||
# 检查是否显式声明发起人QQ要被person忽略
|
||||
if sender_id in pkg.utils.context.get_qqbot_manager().ban_person:
|
||||
result = True
|
||||
else:
|
||||
for group_rule in pkg.utils.context.get_qqbot_manager().ban_group:
|
||||
if type(group_rule) == int:
|
||||
if group_rule == launcher_id: # 此群群号被禁用
|
||||
result = True
|
||||
elif type(group_rule) == str:
|
||||
if group_rule.startswith('!'):
|
||||
# 截取!后面的字符串作为表达式,判断是否匹配
|
||||
reg_str = group_rule[1:]
|
||||
import re
|
||||
if re.match(reg_str, str(launcher_id)): # 被豁免,最高级别
|
||||
result = False
|
||||
break
|
||||
else:
|
||||
# 判断是否匹配regexp
|
||||
import re
|
||||
if re.match(group_rule, str(launcher_id)): # 此群群号被禁用
|
||||
result = True
|
||||
|
||||
else:
|
||||
# ban_person, 与群规则相同
|
||||
|
||||
pkg/qqbot/blob.py (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
# 长消息处理相关
|
||||
import os
|
||||
import time
|
||||
import base64
|
||||
|
||||
import config
|
||||
from mirai.models.message import MessageComponent, MessageChain, Image
|
||||
from mirai.models.message import ForwardMessageNode
|
||||
from mirai.models.base import MiraiBaseModel
|
||||
from typing import List
|
||||
import pkg.utils.context as context
|
||||
import pkg.utils.text2img as text2img
|
||||
|
||||
|
||||
class ForwardMessageDiaplay(MiraiBaseModel):
|
||||
title: str = "群聊的聊天记录"
|
||||
brief: str = "[聊天记录]"
|
||||
source: str = "聊天记录"
|
||||
preview: List[str] = []
|
||||
summary: str = "查看x条转发消息"
|
||||
|
||||
|
||||
class Forward(MessageComponent):
|
||||
"""合并转发。"""
|
||||
type: str = "Forward"
|
||||
"""消息组件类型。"""
|
||||
display: ForwardMessageDiaplay
|
||||
"""显示信息"""
|
||||
node_list: List[ForwardMessageNode]
|
||||
"""转发消息节点列表。"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
if len(args) == 1:
|
||||
self.node_list = args[0]
|
||||
super().__init__(**kwargs)
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return '[聊天记录]'
|
||||
|
||||
|
||||
def text_to_image(text: str) -> MessageComponent:
|
||||
"""将文本转换成图片"""
|
||||
# 检查temp文件夹是否存在
|
||||
if not os.path.exists('temp'):
|
||||
os.mkdir('temp')
|
||||
img_path = text2img.text_to_image(text_str=text, save_as='temp/{}.png'.format(int(time.time())))
|
||||
|
||||
compressed_path, size = text2img.compress_image(img_path, outfile="temp/{}_compressed.png".format(int(time.time())))
|
||||
# 读取图片,转换成base64
|
||||
with open(compressed_path, 'rb') as f:
|
||||
img = f.read()
|
||||
|
||||
b64 = base64.b64encode(img)
|
||||
|
||||
# 删除图片
|
||||
os.remove(img_path)
|
||||
|
||||
# 判断compressed_path是否存在
|
||||
if os.path.exists(compressed_path):
|
||||
os.remove(compressed_path)
|
||||
# 返回图片
|
||||
return Image(base64=b64.decode('utf-8'))
|
||||
|
||||
|
||||
def check_text(text: str) -> list:
|
||||
"""检查文本是否为长消息,并转换成该使用的消息链组件"""
|
||||
if not hasattr(config, 'blob_message_threshold'):
|
||||
return [text]
|
||||
|
||||
if len(text) > config.blob_message_threshold:
|
||||
if not hasattr(config, 'blob_message_strategy'):
|
||||
raise AttributeError('未定义长消息处理策略')
|
||||
|
||||
# logging.info("长消息: {}".format(text))
|
||||
if config.blob_message_strategy == 'image':
|
||||
# 转换成图片
|
||||
return [text_to_image(text)]
|
||||
elif config.blob_message_strategy == 'forward':
|
||||
# 敏感词屏蔽
|
||||
text = context.get_qqbot_manager().reply_filter.process(text)
|
||||
|
||||
# 包装转发消息
|
||||
display = ForwardMessageDiaplay(
|
||||
title='群聊的聊天记录',
|
||||
brief='[聊天记录]',
|
||||
source='聊天记录',
|
||||
preview=["bot: "+text],
|
||||
summary="查看1条转发消息"
|
||||
)
|
||||
|
||||
node = ForwardMessageNode(
|
||||
sender_id=config.mirai_http_api_config['qq'],
|
||||
sender_name='bot',
|
||||
message_chain=MessageChain([text])
|
||||
)
|
||||
|
||||
forward = Forward(
|
||||
display=display,
|
||||
node_list=[node]
|
||||
)
|
||||
|
||||
return [forward]
|
||||
else:
|
||||
return [text]
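A standalone sketch of the decision check_text makes; the threshold and strategy mirror the blob_message_threshold / blob_message_strategy config keys read above, but the helper itself is illustrative.

def choose_strategy(text: str, threshold: int = 256, strategy: str = 'forward') -> str:
    # Short replies are sent as-is.
    if len(text) <= threshold:
        return 'plain'
    # Long replies are either rendered to an image or wrapped as a forward message.
    if strategy == 'image':
        return 'image'
    if strategy == 'forward':
        return 'forward'
    return 'plain'

print(choose_strategy('x' * 1000))        # -> 'forward'
print(choose_strategy('short answer'))    # -> 'plain'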
|
||||
@@ -4,6 +4,7 @@ import json
|
||||
import datetime
|
||||
import os
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
import pkg.openai.session
|
||||
import pkg.openai.manager
|
||||
@@ -185,11 +186,7 @@ def process_command(session_name: str, text_message: str, mgr, config,
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到前一次的对话:\n创建时间:{}\n".format(
|
||||
datetime_str) + result.prompt[
|
||||
:min(100,
|
||||
len(result.prompt))] + \
|
||||
("..." if len(result.prompt) > 100 else "#END#")]
|
||||
reply = ["[bot]已切换到前一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
elif cmd == 'next':
|
||||
result = pkg.openai.session.get_session(session_name).next_session()
|
||||
if result is None:
|
||||
@@ -197,13 +194,18 @@ def process_command(session_name: str, text_message: str, mgr, config,
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到后一次的对话:\n创建时间:{}\n".format(
|
||||
datetime_str) + result.prompt[
|
||||
:min(100,
|
||||
len(result.prompt))] + \
|
||||
("..." if len(result.prompt) > 100 else "#END#")]
|
||||
reply = ["[bot]已切换到后一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
elif cmd == 'prompt':
|
||||
reply = ["[bot]当前对话所有内容:\n" + pkg.openai.session.get_session(session_name).prompt]
|
||||
msgs = ""
|
||||
session:list = pkg.openai.session.get_session(session_name).prompt
|
||||
for msg in session:
|
||||
if len(params) != 0 and params[0] in ['-all', '-a']:
|
||||
msgs = msgs + "{}: {}\n\n".format(msg['role'], msg['content'])
|
||||
elif len(msg['content']) > 30:
|
||||
msgs = msgs + "[{}]: {}...\n\n".format(msg['role'], msg['content'][:30])
|
||||
else:
|
||||
msgs = msgs + "[{}]: {}\n\n".format(msg['role'], msg['content'])
|
||||
reply = ["[bot]当前对话所有内容:\n{}".format(msgs)]
|
||||
elif cmd == 'list':
|
||||
pkg.openai.session.get_session(session_name).persistence()
|
||||
page = 0
|
||||
@@ -223,10 +225,21 @@ def process_command(session_name: str, text_message: str, mgr, config,
|
||||
for i in range(len(results)):
|
||||
# 时间(使用create_timestamp转换) 序号 部分内容
|
||||
datetime_obj = datetime.datetime.fromtimestamp(results[i]['create_timestamp'])
|
||||
reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
|
||||
datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
results[i]['prompt'][
|
||||
:min(20, len(results[i]['prompt']))])
|
||||
msg = ""
|
||||
try:
|
||||
msg = json.loads(results[i]['prompt'])
|
||||
except json.decoder.JSONDecodeError:
|
||||
msg = pkg.openai.session.reset_session_prompt(session_name, results[i]['prompt'])
|
||||
# 持久化
|
||||
pkg.openai.session.get_session(session_name).persistence()
|
||||
if len(msg) >= 2:
|
||||
reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
|
||||
datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
msg[0]['content'])
|
||||
else:
|
||||
reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
|
||||
datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"无内容")
|
||||
if results[i]['create_timestamp'] == pkg.openai.session.get_session(
|
||||
session_name).create_timestamp:
|
||||
current = i + page * 10
|
||||
@@ -244,6 +257,20 @@ def process_command(session_name: str, text_message: str, mgr, config,
|
||||
|
||||
reply = pkg.qqbot.message.process_normal_message(to_send, mgr, config,
|
||||
launcher_type, launcher_id, sender_id)
|
||||
elif cmd == 'del': # 删除指定会话历史记录
|
||||
if len(params) == 0:
|
||||
reply = ["[bot]参数不足, 格式: !del <序号>\n可以通过!list查看序号"]
|
||||
else:
|
||||
if params[0] == 'all':
|
||||
pkg.openai.session.get_session(session_name).delete_all_history()
|
||||
reply = ["[bot]已删除所有历史会话"]
|
||||
elif params[0].isdigit():
|
||||
if pkg.openai.session.get_session(session_name).delete_history(int(params[0])):
|
||||
reply = ["[bot]已删除历史会话 #{}".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]没有历史会话 #{}".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]参数错误, 格式: !del <序号>\n可以通过!list查看序号"]
|
||||
elif cmd == 'usage':
|
||||
reply_str = "[bot]各api-key使用情况:\n\n"
|
||||
|
||||
@@ -310,6 +337,18 @@ def process_command(session_name: str, text_message: str, mgr, config,
|
||||
reply = ["[bot]err: 未找到情景预设:{}".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]err: 仅管理员可设置默认情景预设"]
|
||||
elif cmd == "delhst" and is_admin:
|
||||
if len(params) == 0:
|
||||
reply = ["[bot]err:请输入要删除的会话名: group_<群号> 或者 person_<QQ号>, 或使用 !delhst all 删除所有会话的历史记录"]
|
||||
else:
|
||||
if params[0] == "all":
|
||||
pkg.utils.context.get_database_manager().delete_all_session_history()
|
||||
reply = ["[bot]已删除所有会话的历史记录"]
|
||||
else:
|
||||
if pkg.utils.context.get_database_manager().delete_all_history(params[0]):
|
||||
reply = ["[bot]已删除会话 {} 的所有历史记录".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]未找到会话 {} 的历史记录".format(params[0])]
|
||||
elif cmd == 'reload' and is_admin:
|
||||
def reload_task():
|
||||
pkg.utils.reloader.reload_all()
|
||||
@@ -324,6 +363,7 @@ def process_command(session_name: str, text_message: str, mgr, config,
|
||||
else:
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("无新版本")
|
||||
except Exception as e0:
|
||||
traceback.print_exc()
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("更新失败:{}".format(e0))
|
||||
return
|
||||
|
||||
|
||||
@@ -1,19 +1,84 @@
|
||||
# 敏感词过滤模块
|
||||
import re
|
||||
import requests
|
||||
import json
|
||||
import logging
|
||||
|
||||
|
||||
class ReplyFilter:
|
||||
|
||||
sensitive_words = []
|
||||
mask = "*"
|
||||
mask_word = ""
|
||||
|
||||
def __init__(self, sensitive_words: list):
|
||||
# 默认值( 兼容性考虑 )
|
||||
baidu_check = False
|
||||
baidu_api_key = ""
|
||||
baidu_secret_key = ""
|
||||
inappropriate_message_tips = "[百度云]请珍惜机器人,当前返回内容不合规"
|
||||
|
||||
def __init__(self, sensitive_words: list, mask: str = "*", mask_word: str = ""):
|
||||
self.sensitive_words = sensitive_words
|
||||
self.mask = mask
|
||||
self.mask_word = mask_word
|
||||
import config
|
||||
if hasattr(config, 'baidu_check') and hasattr(config, 'baidu_api_key') and hasattr(config, 'baidu_secret_key'):
|
||||
self.baidu_check = config.baidu_check
|
||||
self.baidu_api_key = config.baidu_api_key
|
||||
self.baidu_secret_key = config.baidu_secret_key
|
||||
self.inappropriate_message_tips = config.inappropriate_message_tips
|
||||
|
||||
def is_illegal(self, message: str) -> bool:
|
||||
processed = self.process(message)
|
||||
if processed != message:
|
||||
return True
|
||||
return False
|
||||
|
||||
def process(self, message: str) -> str:
|
||||
|
||||
# 本地关键词屏蔽
|
||||
for word in self.sensitive_words:
|
||||
match = re.findall(word, message)
|
||||
if len(match) > 0:
|
||||
for i in range(len(match)):
|
||||
message = message.replace(match[i], "*" * len(match[i]))
|
||||
if self.mask_word == "":
|
||||
message = message.replace(match[i], self.mask * len(match[i]))
|
||||
else:
|
||||
message = message.replace(match[i], self.mask_word)
|
||||
|
||||
# 百度云审核
|
||||
if self.baidu_check:
|
||||
|
||||
# 百度云审核URL
|
||||
baidu_url = "https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined?access_token=" + \
|
||||
str(requests.post("https://aip.baidubce.com/oauth/2.0/token",
|
||||
params={"grant_type": "client_credentials",
|
||||
"client_id": self.baidu_api_key,
|
||||
"client_secret": self.baidu_secret_key}).json().get("access_token"))
|
||||
|
||||
# 百度云审核
|
||||
payload = "text=" + message
|
||||
logging.info("向百度云发送:" + payload)
|
||||
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'}
|
||||
|
||||
if isinstance(payload, str):
|
||||
payload = payload.encode('utf-8')
|
||||
|
||||
response = requests.request("POST", baidu_url, headers=headers, data=payload)
|
||||
response_dict = json.loads(response.text)
|
||||
|
||||
if "error_code" in response_dict:
|
||||
error_msg = response_dict.get("error_msg")
|
||||
logging.warning(f"百度云判定出错,错误信息:{error_msg}")
|
||||
conclusion = f"百度云判定出错,错误信息:{error_msg}\n以下是原消息:{message}"
|
||||
else:
|
||||
conclusion = response_dict["conclusion"]
|
||||
if conclusion in ("合规"):
|
||||
logging.info(f"百度云判定结果:{conclusion}")
|
||||
return message
|
||||
else:
|
||||
logging.warning(f"百度云判定结果:{conclusion}")
|
||||
conclusion = self.inappropriate_message_tips
|
||||
# 返回百度云审核结果
|
||||
return conclusion
|
||||
|
||||
return message
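The local masking rule added here, restated as a standalone sketch (the word list is illustrative; the Baidu review call is omitted).

import re

def mask_sensitive(message: str, words: list, mask: str = '*', mask_word: str = '') -> str:
    # Each regex hit is replaced character-by-character with `mask`,
    # or replaced wholesale with `mask_word` when one is configured.
    for word in words:
        for match in re.findall(word, message):
            replacement = mask_word if mask_word else mask * len(match)
            message = message.replace(match, replacement)
    return message

print(mask_sensitive('foo secret bar', ['secret']))                      # foo ****** bar
print(mask_sensitive('foo secret bar', ['secret'], mask_word='[屏蔽]'))  # foo [屏蔽] bar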
|
||||
|
||||
@@ -2,6 +2,7 @@ import asyncio
|
||||
import json
|
||||
import os
|
||||
import threading
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
import mirai.models.bus
|
||||
from mirai import At, GroupMessage, MessageEvent, Mirai, StrangerMessage, WebSocketAdapter, HTTPAdapter, \
|
||||
@@ -21,12 +22,6 @@ import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
|
||||
|
||||
# 并行运行
|
||||
def go(func, args=()):
|
||||
thread = threading.Thread(target=func, args=args, daemon=True)
|
||||
thread.start()
|
||||
|
||||
|
||||
# 检查消息是否符合泛响应匹配机制
|
||||
def check_response_rule(text: str):
|
||||
config = pkg.utils.context.get_config()
|
||||
@@ -51,10 +46,29 @@ def check_response_rule(text: str):
|
||||
return False, ""
|
||||
|
||||
|
||||
def response_at():
|
||||
config = pkg.utils.context.get_config()
|
||||
if 'at' not in config.response_rules:
|
||||
return True
|
||||
|
||||
return config.response_rules['at']
|
||||
|
||||
|
||||
def random_responding():
|
||||
config = pkg.utils.context.get_config()
|
||||
if 'random_rate' in config.response_rules:
|
||||
import random
|
||||
return random.random() < config.response_rules['random_rate']
|
||||
return False
|
||||
|
||||
|
||||
# 控制QQ消息输入输出的类
|
||||
class QQBotManager:
|
||||
retry = 3
|
||||
|
||||
#线程池控制
|
||||
pool = None
|
||||
|
||||
bot: Mirai = None
|
||||
|
||||
reply_filter = None
|
||||
@@ -64,11 +78,14 @@ class QQBotManager:
|
||||
ban_person = []
|
||||
ban_group = []
|
||||
|
||||
def __init__(self, mirai_http_api_config: dict, timeout: int = 60, retry: int = 3, first_time_init=True):
|
||||
|
||||
def __init__(self, mirai_http_api_config: dict, timeout: int = 60, retry: int = 3, pool_num: int = 10, first_time_init=True):
|
||||
self.timeout = timeout
|
||||
self.retry = retry
|
||||
|
||||
self.pool_num = pool_num
|
||||
self.pool = ThreadPoolExecutor(max_workers=self.pool_num)
|
||||
logging.debug("Registered thread pool Size:{}".format(pool_num))
|
||||
|
||||
# 加载禁用列表
|
||||
if os.path.exists("banlist.py"):
|
||||
import banlist
|
||||
@@ -82,7 +99,12 @@ class QQBotManager:
|
||||
and config.sensitive_word_filter is not None \
|
||||
and config.sensitive_word_filter:
|
||||
with open("sensitive.json", "r", encoding="utf-8") as f:
|
||||
self.reply_filter = pkg.qqbot.filter.ReplyFilter(json.load(f)['words'])
|
||||
sensitive_json = json.load(f)
|
||||
self.reply_filter = pkg.qqbot.filter.ReplyFilter(
|
||||
sensitive_words=sensitive_json['words'],
|
||||
mask=sensitive_json['mask'] if 'mask' in sensitive_json else '*',
|
||||
mask_word=sensitive_json['mask_word'] if 'mask_word' in sensitive_json else ''
|
||||
)
|
||||
else:
|
||||
self.reply_filter = pkg.qqbot.filter.ReplyFilter([])
|
||||
|
||||
@@ -116,7 +138,7 @@ class QQBotManager:
|
||||
|
||||
self.on_person_message(event)
|
||||
|
||||
go(friend_message_handler, (event,))
|
||||
self.go(friend_message_handler, event)
|
||||
|
||||
@self.bot.on(StrangerMessage)
|
||||
async def on_stranger_message(event: StrangerMessage):
|
||||
@@ -136,7 +158,7 @@ class QQBotManager:
|
||||
|
||||
self.on_person_message(event)
|
||||
|
||||
go(stranger_message_handler, (event,))
|
||||
self.go(stranger_message_handler, event)
|
||||
|
||||
@self.bot.on(GroupMessage)
|
||||
async def on_group_message(event: GroupMessage):
|
||||
@@ -156,7 +178,7 @@ class QQBotManager:
|
||||
|
||||
self.on_group_message(event)
|
||||
|
||||
go(group_message_handler, (event,))
|
||||
self.go(group_message_handler, event)
|
||||
|
||||
def unsubscribe_all():
|
||||
"""取消所有订阅
|
||||
@@ -173,6 +195,9 @@ class QQBotManager:
|
||||
|
||||
self.unsubscribe_all = unsubscribe_all
|
||||
|
||||
def go(self, func, *args, **kwargs):
|
||||
self.pool.submit(func, *args, **kwargs)
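In effect the per-message daemon threads are replaced by one bounded pool; a minimal standalone sketch of that pattern (handler and pool size are placeholders).

from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=10)  # pool_num in the manager

def handle_event(event_id: int):
    print('handling event', event_id)

for i in range(3):
    pool.submit(handle_event, i)  # what self.go(...) now does

pool.shutdown(wait=True)  # in the bot the pool lives for the whole process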
|
||||
|
||||
def first_time_init(self, mirai_http_api_config: dict):
|
||||
"""热重载后不再运行此函数"""
|
||||
|
||||
@@ -288,14 +313,19 @@ class QQBotManager:
|
||||
|
||||
if Image in event.message_chain:
|
||||
pass
|
||||
elif At(self.bot.qq) not in event.message_chain:
|
||||
check, result = check_response_rule(str(event.message_chain).strip())
|
||||
|
||||
if check:
|
||||
reply = process(result.strip())
|
||||
else:
|
||||
# 直接调用
|
||||
reply = process()
|
||||
if At(self.bot.qq) in event.message_chain and response_at():
|
||||
# 直接调用
|
||||
reply = process()
|
||||
else:
|
||||
check, result = check_response_rule(str(event.message_chain).strip())
|
||||
|
||||
if check:
|
||||
reply = process(result.strip())
|
||||
# 检查是否随机响应
|
||||
elif random_responding():
|
||||
logging.info("随机响应group_{}消息".format(event.group.id))
|
||||
reply = process()
|
||||
|
||||
if reply:
|
||||
return self.send(event, reply)
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
# 普通消息处理模块
|
||||
import logging
|
||||
import time
|
||||
import openai
|
||||
import pkg.utils.context
|
||||
import pkg.openai.session
|
||||
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
import pkg.qqbot.blob as blob
|
||||
|
||||
|
||||
def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
|
||||
@@ -63,7 +63,7 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
||||
reply = event.get_return_value("reply")
|
||||
|
||||
if not event.is_prevented_default():
|
||||
reply = [prefix + text]
|
||||
reply = prefix + text
|
||||
except openai.error.APIConnectionError as e:
|
||||
err_msg = str(e)
|
||||
if err_msg.__contains__('Error communicating with OpenAI'):
|
||||
|
||||
@@ -26,6 +26,7 @@ import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
import pkg.qqbot.ignore as ignore
|
||||
import pkg.qqbot.banlist as banlist
|
||||
import pkg.qqbot.blob as blob
|
||||
|
||||
processing = []
|
||||
|
||||
@@ -49,7 +50,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
session_name = "{}_{}".format(launcher_type, launcher_id)
|
||||
|
||||
# 检查发送方是否被禁用
|
||||
if banlist.is_banned(launcher_type, launcher_id):
|
||||
if banlist.is_banned(launcher_type, launcher_id, sender_id):
|
||||
logging.info("根据禁用列表忽略{}_{}的消息".format(launcher_type, launcher_id))
|
||||
return []
|
||||
|
||||
@@ -66,6 +67,11 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
result.mute_time_remaining))
|
||||
return reply
|
||||
|
||||
import config
|
||||
if hasattr(config, 'income_msg_check') and config.income_msg_check:
|
||||
if mgr.reply_filter.is_illegal(text_message):
|
||||
return MessageChain(Plain("[bot] 你的提问中有不合适的内容, 请更换措辞~"))
|
||||
|
||||
pkg.openai.session.get_session(session_name).acquire_response_lock()
|
||||
|
||||
text_message = text_message.strip()
|
||||
@@ -152,8 +158,9 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
reply[0][:min(100, len(reply[0]))] + (
|
||||
"..." if len(reply[0]) > 100 else "")))
|
||||
reply = [mgr.reply_filter.process(reply[0])]
|
||||
reply = blob.check_text(reply[0])
|
||||
else:
|
||||
logging.info("回复[{}]图片消息:{}".format(session_name, reply))
|
||||
logging.info("回复[{}]消息".format(session_name))
|
||||
|
||||
finally:
|
||||
processing.remove(session_name)
|
||||
|
||||
pkg/utils/announcement.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import base64
import os

import requests


def read_latest() -> str:
    resp = requests.get(
        url="https://api.github.com/repos/RockChinQ/QChatGPT/contents/res/announcement",
    )
    obj_json = resp.json()
    b64_content = obj_json["content"]
    # 解码
    content = base64.b64decode(b64_content).decode("utf-8")
    return content


def read_saved() -> str:
    # 已保存的在res/announcement_saved
    # 检查是否存在
    if not os.path.exists("res/announcement_saved"):
        with open("res/announcement_saved", "w") as f:
            f.write("")

    with open("res/announcement_saved", "r") as f:
        content = f.read()

    return content


def write_saved(content: str):
    # 已保存的在res/announcement_saved
    with open("res/announcement_saved", "w") as f:
        f.write(content)


def fetch_new() -> str:
    latest = read_latest()
    saved = read_saved()
    if latest.replace(saved, "").strip() == "":
        return ""
    else:
        write_saved(latest)
        return latest.replace(saved, "").strip()
File diff suppressed because one or more lines are too long
@@ -8,6 +8,11 @@ def install(package):
    main.reset_logging()


def run_pip(params: list):
    pipmain(params)
    main.reset_logging()


def install_requirements(file):
    pipmain(['install', '-r', file, "--upgrade"])
    main.reset_logging()

@@ -7,13 +7,15 @@ import pkg.utils.context
|
||||
import pkg.plugin.host
|
||||
|
||||
|
||||
def walk(module, prefix=''):
|
||||
def walk(module, prefix='', path_prefix=''):
|
||||
"""遍历并重载所有模块"""
|
||||
for item in pkgutil.iter_modules(module.__path__):
|
||||
if item.ispkg:
|
||||
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.')
|
||||
|
||||
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.', path_prefix + item.name + '/')
|
||||
else:
|
||||
logging.info('reload module: {}'.format(prefix + item.name))
|
||||
logging.info('reload module: {}, path: {}'.format(prefix + item.name, path_prefix + item.name + '.py'))
|
||||
pkg.plugin.host.__current_module_path__ = "plugins/" + path_prefix + item.name + '.py'
|
||||
importlib.reload(__import__(module.__name__ + '.' + item.name, fromlist=['']))
|
||||
|
||||
|
||||
|
||||
pkg/utils/text2img.py (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
import logging
|
||||
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
import re
|
||||
import os
|
||||
import config
|
||||
import traceback
|
||||
|
||||
text_render_font: ImageFont = None
|
||||
|
||||
if hasattr(config, "blob_message_strategy") and config.blob_message_strategy == "image": # 仅在启用了image时才加载字体
|
||||
use_font = config.font_path if hasattr(config, "font_path") else ""
|
||||
try:
|
||||
|
||||
# 检查是否存在
|
||||
if not os.path.exists(use_font):
|
||||
# 若是windows系统,使用微软雅黑
|
||||
if os.name == "nt":
|
||||
use_font = "C:/Windows/Fonts/msyh.ttc"
|
||||
if not os.path.exists(use_font):
|
||||
logging.warn("未找到字体文件,且无法使用Windows自带字体,更换为转发消息组件以发送长消息,您可以在config.py中调整相关设置。")
|
||||
config.blob_message_strategy = "forward"
|
||||
else:
|
||||
logging.info("使用Windows自带字体:" + use_font)
|
||||
text_render_font = ImageFont.truetype(use_font, 32, encoding="utf-8")
|
||||
else:
|
||||
logging.warn("未找到字体文件,且无法使用Windows自带字体,更换为转发消息组件以发送长消息,您可以在config.py中调整相关设置。")
|
||||
config.blob_message_strategy = "forward"
|
||||
else:
|
||||
text_render_font = ImageFont.truetype(use_font, 32, encoding="utf-8")
|
||||
except:
|
||||
traceback.print_exc()
|
||||
logging.error("加载字体文件失败({}),更换为转发消息组件以发送长消息,您可以在config.py中调整相关设置。".format(use_font))
|
||||
config.blob_message_strategy = "forward"
|
||||
|
||||
|
||||
def indexNumber(path=''):
|
||||
"""
|
||||
查找字符串中数字所在串中的位置
|
||||
:param path:目标字符串
|
||||
:return:<class 'list'>: <class 'list'>: [['1', 16], ['2', 35], ['1', 51]]
|
||||
"""
|
||||
kv = []
|
||||
nums = []
|
||||
beforeDatas = re.findall('[\d]+', path)
|
||||
for num in beforeDatas:
|
||||
indexV = []
|
||||
times = path.count(num)
|
||||
if times > 1:
|
||||
if num not in nums:
|
||||
indexs = re.finditer(num, path)
|
||||
for index in indexs:
|
||||
iV = []
|
||||
i = index.span()[0]
|
||||
iV.append(num)
|
||||
iV.append(i)
|
||||
kv.append(iV)
|
||||
nums.append(num)
|
||||
else:
|
||||
index = path.find(num)
|
||||
indexV.append(num)
|
||||
indexV.append(index)
|
||||
kv.append(indexV)
|
||||
# 根据数字位置排序
|
||||
indexSort = []
|
||||
resultIndex = []
|
||||
for vi in kv:
|
||||
indexSort.append(vi[1])
|
||||
indexSort.sort()
|
||||
for i in indexSort:
|
||||
for v in kv:
|
||||
if i == v[1]:
|
||||
resultIndex.append(v)
|
||||
return resultIndex
|
||||
|
||||
|
||||
def get_size(file):
|
||||
# 获取文件大小:KB
|
||||
size = os.path.getsize(file)
|
||||
return size / 1024
|
||||
|
||||
|
||||
def get_outfile(infile, outfile):
|
||||
if outfile:
|
||||
return outfile
|
||||
dir, suffix = os.path.splitext(infile)
|
||||
outfile = '{}-out{}'.format(dir, suffix)
|
||||
return outfile
|
||||
|
||||
|
||||
def compress_image(infile, outfile='', kb=100, step=20, quality=90):
|
||||
"""不改变图片尺寸压缩到指定大小
|
||||
:param infile: 压缩源文件
|
||||
:param outfile: 压缩文件保存地址
|
||||
:param mb: 压缩目标,KB
|
||||
:param step: 每次调整的压缩比率
|
||||
:param quality: 初始压缩比率
|
||||
:return: 压缩文件地址,压缩文件大小
|
||||
"""
|
||||
o_size = get_size(infile)
|
||||
if o_size <= kb:
|
||||
return infile, o_size
|
||||
outfile = get_outfile(infile, outfile)
|
||||
while o_size > kb:
|
||||
im = Image.open(infile)
|
||||
im.save(outfile, quality=quality)
|
||||
if quality - step < 0:
|
||||
break
|
||||
quality -= step
|
||||
o_size = get_size(outfile)
|
||||
return outfile, get_size(outfile)
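Hypothetical usage of the two helpers above (assumes they are importable from this module; the demo file names are made up). Note the quality loop is only effective for formats that honor the quality parameter, such as JPEG.

from PIL import Image

# Write a throwaway JPEG, then ask compress_image to shrink it toward ~5 KB.
Image.new('RGB', (1200, 1200), (200, 30, 30)).save('temp_demo.jpg', quality=95)
out_path, out_kb = compress_image('temp_demo.jpg', outfile='temp_demo_small.jpg', kb=5)
print(out_path, round(out_kb, 1), 'KB')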
|
||||
|
||||
|
||||
def text_to_image(text_str: str, save_as="temp.png", width=800):
|
||||
global text_render_font
|
||||
|
||||
text_str = text_str.replace("\t", " ")
|
||||
|
||||
# 分行
|
||||
lines = text_str.split('\n')
|
||||
|
||||
# 计算并分割
|
||||
final_lines = []
|
||||
|
||||
text_width = width-80
|
||||
for line in lines:
|
||||
# 如果长了就分割
|
||||
line_width = text_render_font.getlength(line)
|
||||
if line_width < text_width:
|
||||
final_lines.append(line)
|
||||
continue
|
||||
else:
|
||||
rest_text = line
|
||||
while True:
|
||||
# 分割最前面的一行
|
||||
point = int(len(rest_text) * (text_width / line_width))
|
||||
|
||||
# 检查断点是否在数字中间
|
||||
numbers = indexNumber(rest_text)
|
||||
|
||||
for number in numbers:
|
||||
if number[1] < point < number[1] + len(number[0]) and number[1] != 0:
|
||||
point = number[1]
|
||||
break
|
||||
|
||||
final_lines.append(rest_text[:point])
|
||||
rest_text = rest_text[point:]
|
||||
line_width = text_render_font.getlength(rest_text)
|
||||
if line_width < text_width:
|
||||
final_lines.append(rest_text)
|
||||
break
|
||||
else:
|
||||
continue
|
||||
# 准备画布
|
||||
img = Image.new('RGBA', (width, max(280, len(final_lines) * 35 + 65)), (255, 255, 255, 255))
|
||||
draw = ImageDraw.Draw(img, mode='RGBA')
|
||||
|
||||
|
||||
# 绘制正文
|
||||
line_number = 0
|
||||
offset_x = 20
|
||||
offset_y = 30
|
||||
for final_line in final_lines:
|
||||
draw.text((offset_x, offset_y + 35 * line_number), final_line, fill=(0, 0, 0), font=text_render_font)
|
||||
# 遍历此行,检查是否有emoji
|
||||
idx_in_line = 0
|
||||
for ch in final_line:
|
||||
# if self.is_emoji(ch):
|
||||
# emoji_img_valid = ensure_emoji(hex(ord(ch))[2:])
|
||||
# if emoji_img_valid: # emoji图像可用,绘制到指定位置
|
||||
# emoji_image = Image.open("emojis/{}.png".format(hex(ord(ch))[2:]), mode='r').convert('RGBA')
|
||||
# emoji_image = emoji_image.resize((32, 32))
|
||||
|
||||
# x, y = emoji_image.size
|
||||
|
||||
# final_emoji_img = Image.new('RGBA', emoji_image.size, (255, 255, 255))
|
||||
# final_emoji_img.paste(emoji_image, (0, 0, x, y), emoji_image)
|
||||
|
||||
# img.paste(final_emoji_img, box=(int(offset_x + idx_in_line * 32), offset_y + 35 * line_number))
|
||||
|
||||
# 检查字符占位宽
|
||||
char_code = ord(ch)
|
||||
if char_code >= 127:
|
||||
idx_in_line += 1
|
||||
else:
|
||||
idx_in_line += 0.5
|
||||
|
||||
line_number += 1
|
||||
|
||||
|
||||
img.save(save_as)
|
||||
|
||||
return save_as
|
||||
@@ -1,6 +1,11 @@
|
||||
import datetime
|
||||
import logging
|
||||
import os.path
|
||||
|
||||
import pkg.utils.context
|
||||
import requests
|
||||
import json
|
||||
|
||||
import pkg.utils.constants
|
||||
|
||||
|
||||
def check_dulwich_closure():
|
||||
@@ -28,34 +33,122 @@ def pull_latest(repo_path: str) -> bool:
|
||||
return True
|
||||
|
||||
|
||||
def update_all() -> bool:
|
||||
"""使用dulwich更新源码"""
|
||||
check_dulwich_closure()
|
||||
import dulwich
|
||||
try:
|
||||
before_commit_id = get_current_commit_id()
|
||||
from dulwich import porcelain
|
||||
repo = porcelain.open_repo('.')
|
||||
porcelain.pull(repo)
|
||||
def get_release_list() -> list:
|
||||
"""获取发行列表"""
|
||||
rls_list_resp = requests.get(
|
||||
url="https://api.github.com/repos/RockChinQ/QChatGPT/releases"
|
||||
)
|
||||
|
||||
change_log = ""
|
||||
rls_list = rls_list_resp.json()
|
||||
|
||||
for entry in repo.get_walker():
|
||||
if str(entry.commit.id)[2:-1] == before_commit_id:
|
||||
break
|
||||
tz = datetime.timezone(datetime.timedelta(hours=entry.commit.commit_timezone // 3600))
|
||||
dt = datetime.datetime.fromtimestamp(entry.commit.commit_time, tz)
|
||||
change_log += dt.strftime('%Y-%m-%d %H:%M:%S') + " [" + str(entry.commit.message, encoding="utf-8").strip()+"]\n"
|
||||
return rls_list
|
||||
|
||||
if change_log != "":
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("代码拉取完成,更新内容如下:\n"+change_log)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
except ModuleNotFoundError:
|
||||
raise Exception("dulwich模块未安装,请查看 https://github.com/RockChinQ/QChatGPT/issues/77")
|
||||
except dulwich.porcelain.DivergedBranches:
|
||||
raise Exception("分支不一致,自动更新仅支持master分支,请手动更新(https://github.com/RockChinQ/QChatGPT/issues/76)")
|
||||
|
||||
def get_current_tag() -> str:
|
||||
"""获取当前tag"""
|
||||
current_tag = pkg.utils.constants.semantic_version
|
||||
if os.path.exists("current_tag"):
|
||||
with open("current_tag", "r") as f:
|
||||
current_tag = f.read()
|
||||
|
||||
return current_tag
|
||||
|
||||
|
||||
def update_all(cli: bool = False) -> bool:
|
||||
"""检查更新并下载源码"""
|
||||
current_tag = get_current_tag()
|
||||
|
||||
rls_list = get_release_list()
|
||||
|
||||
latest_rls = {}
|
||||
rls_notes = []
|
||||
for rls in rls_list:
|
||||
rls_notes.append(rls['name']) # 使用发行名称作为note
|
||||
if rls['tag_name'] == current_tag:
|
||||
break
|
||||
|
||||
if latest_rls == {}:
|
||||
latest_rls = rls
|
||||
if not cli:
|
||||
logging.info("更新日志: {}".format(rls_notes))
|
||||
else:
|
||||
print("更新日志: {}".format(rls_notes))
|
||||
|
||||
if latest_rls == {}: # 没有新版本
|
||||
return False
|
||||
|
||||
# 下载最新版本的zip到temp目录
|
||||
if not cli:
|
||||
logging.info("开始下载最新版本: {}".format(latest_rls['zipball_url']))
|
||||
else:
|
||||
print("开始下载最新版本: {}".format(latest_rls['zipball_url']))
|
||||
zip_url = latest_rls['zipball_url']
|
||||
zip_resp = requests.get(url=zip_url)
|
||||
zip_data = zip_resp.content
|
||||
|
||||
# 检查temp/updater目录
|
||||
if not os.path.exists("temp"):
|
||||
os.mkdir("temp")
|
||||
if not os.path.exists("temp/updater"):
|
||||
os.mkdir("temp/updater")
|
||||
with open("temp/updater/{}.zip".format(latest_rls['tag_name']), "wb") as f:
|
||||
f.write(zip_data)
|
||||
|
||||
if not cli:
|
||||
logging.info("下载最新版本完成: {}".format("temp/updater/{}.zip".format(latest_rls['tag_name'])))
|
||||
else:
|
||||
print("下载最新版本完成: {}".format("temp/updater/{}.zip".format(latest_rls['tag_name'])))
|
||||
|
||||
# 解压zip到temp/updater/<tag_name>/
|
||||
import zipfile
|
||||
# 检查目标文件夹
|
||||
if os.path.exists("temp/updater/{}".format(latest_rls['tag_name'])):
|
||||
import shutil
|
||||
shutil.rmtree("temp/updater/{}".format(latest_rls['tag_name']))
|
||||
os.mkdir("temp/updater/{}".format(latest_rls['tag_name']))
|
||||
with zipfile.ZipFile("temp/updater/{}.zip".format(latest_rls['tag_name']), 'r') as zip_ref:
|
||||
zip_ref.extractall("temp/updater/{}".format(latest_rls['tag_name']))
|
||||
|
||||
# 覆盖源码
|
||||
source_root = ""
|
||||
# 找到temp/updater/<tag_name>/中的第一个子目录路径
|
||||
for root, dirs, files in os.walk("temp/updater/{}".format(latest_rls['tag_name'])):
|
||||
if root != "temp/updater/{}".format(latest_rls['tag_name']):
|
||||
source_root = root
|
||||
break
|
||||
|
||||
# 覆盖源码
|
||||
import shutil
|
||||
for root, dirs, files in os.walk(source_root):
|
||||
# 覆盖所有子文件子目录
|
||||
for file in files:
|
||||
src = os.path.join(root, file)
|
||||
dst = src.replace(source_root, ".")
|
||||
if os.path.exists(dst):
|
||||
os.remove(dst)
|
||||
|
||||
# 检查目标文件夹是否存在
|
||||
if not os.path.exists(os.path.dirname(dst)):
|
||||
os.makedirs(os.path.dirname(dst))
|
||||
# 检查目标文件是否存在
|
||||
if not os.path.exists(dst):
|
||||
# 创建目标文件
|
||||
open(dst, "w").close()
|
||||
|
||||
shutil.copy(src, dst)
|
||||
|
||||
# 把current_tag写入文件
|
||||
current_tag = latest_rls['tag_name']
|
||||
with open("current_tag", "w") as f:
|
||||
f.write(current_tag)
|
||||
|
||||
# 通知管理员
|
||||
if not cli:
|
||||
import pkg.utils.context
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("已更新到最新版本: {}\n更新日志:\n{}\n新功能通常可以在config-template.py中看到,完整的更新日志请前往 https://github.com/RockChinQ/QChatGPT/releases 查看".format(current_tag, "\n".join(rls_notes)))
|
||||
else:
|
||||
print("已更新到最新版本: {}\n更新日志:\n{}\n新功能通常可以在config-template.py中看到,完整的更新日志请前往 https://github.com/RockChinQ/QChatGPT/releases 查看".format(current_tag, "\n".join(rls_notes)))
|
||||
return True
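A self-contained sketch of the release-selection walk above: releases come newest-first from the GitHub API, names are collected for the changelog until the installed tag shows up, and the first entry becomes the download target (sample data only).

def select_update(release_list: list, current_tag: str):
    notes, target = [], None
    for rls in release_list:                 # newest release first
        notes.append(rls['name'])            # collected for the admin changelog
        if rls['tag_name'] == current_tag:   # reached the installed version: stop
            break
        if target is None:
            target = rls                     # newest release = download target
    return target, notes

releases = [
    {'tag_name': 'v2.2.1', 'name': 'v2.2.1: fixes'},
    {'tag_name': 'v2.2.0', 'name': 'v2.2.0: blob messages'},
    {'tag_name': 'v2.1.0', 'name': 'v2.1.0: base'},
]
target, notes = select_update(releases, 'v2.1.0')
print(target['tag_name'], notes)  # v2.2.1 plus the two newer release names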
|
||||
|
||||
|
||||
def is_repo(path: str) -> bool:
|
||||
@@ -81,24 +174,12 @@ def get_remote_url(repo_path: str) -> str:
|
||||
|
||||
def get_current_version_info() -> str:
|
||||
"""获取当前版本信息"""
|
||||
check_dulwich_closure()
|
||||
|
||||
from dulwich import porcelain
|
||||
|
||||
repo = porcelain.open_repo('.')
|
||||
|
||||
version_str = ""
|
||||
|
||||
for entry in repo.get_walker():
|
||||
version_str += "提交编号: "+str(entry.commit.id)[2:9] + "\n"
|
||||
tz = datetime.timezone(datetime.timedelta(hours=entry.commit.commit_timezone // 3600))
|
||||
dt = datetime.datetime.fromtimestamp(entry.commit.commit_time, tz)
|
||||
version_str += "时间: "+dt.strftime('%m-%d %H:%M:%S') + "\n"
|
||||
version_str += "说明: "+str(entry.commit.message, encoding="utf-8").strip() + "\n"
|
||||
version_str += "提交作者: '" + str(entry.commit.author)[2:-1] + "'"
|
||||
break
|
||||
|
||||
return version_str
|
||||
rls_list = get_release_list()
|
||||
current_tag = get_current_tag()
|
||||
for rls in rls_list:
|
||||
if rls['tag_name'] == current_tag:
|
||||
return rls['name'] + "\n" + rls['body']
|
||||
return "未知版本"
|
||||
|
||||
|
||||
def get_commit_id_and_time_and_msg() -> str:
|
||||
@@ -132,15 +213,42 @@ def get_current_commit_id() -> str:
|
||||
|
||||
def is_new_version_available() -> bool:
|
||||
"""检查是否有新版本"""
|
||||
check_dulwich_closure()
|
||||
# 从github获取release列表
|
||||
rls_list = get_release_list()
|
||||
if rls_list is None:
|
||||
return False
|
||||
|
||||
from dulwich import porcelain
|
||||
# 获取当前版本
|
||||
current_tag = get_current_tag()
|
||||
|
||||
repo = porcelain.open_repo('.')
|
||||
fetch_res = porcelain.ls_remote(porcelain.get_remote_repo(repo, "origin")[1])
|
||||
# 检查是否有新版本
|
||||
for rls in rls_list:
|
||||
if rls['tag_name'] == current_tag:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
current_commit_id = get_current_commit_id()
|
||||
|
||||
latest_commit_id = str(fetch_res[b'HEAD'])[2:-1]
|
||||
def get_rls_notes() -> list:
|
||||
"""获取更新日志"""
|
||||
# 从github获取release列表
|
||||
rls_list = get_release_list()
|
||||
if rls_list is None:
|
||||
return None
|
||||
|
||||
return current_commit_id != latest_commit_id
|
||||
# 获取当前版本
|
||||
current_tag = get_current_tag()
|
||||
|
||||
# 检查是否有新版本
|
||||
rls_notes = []
|
||||
for rls in rls_list:
|
||||
if rls['tag_name'] == current_tag:
|
||||
break
|
||||
|
||||
rls_notes.append(rls['name'])
|
||||
|
||||
return rls_notes
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
update_all()
|
||||
|
||||
@@ -1,9 +1,9 @@
requests~=2.28.1
openai~=0.26.5
pip~=22.3.1
openai~=0.27.2
dulwich~=0.21.3
colorlog~=6.6.0
yiri-mirai~=0.2.6.1
websockets~=10.4
urllib3~=1.26.10
func_timeout~=4.3.5
func_timeout~=4.3.5
Pillow
res/announcement (new file, 1 line)
@@ -0,0 +1 @@
|
||||
|
||||
scenario/default-template.json (new file, 12 lines)
@@ -0,0 +1,12 @@
{
    "prompt": [
        {
            "role": "system",
            "content": "You are a helpful assistant. 如果我需要帮助,你要说“输入!help获得帮助”"
        },
        {
            "role": "assistant",
            "content": "好的,我是一个能干的AI助手。 如果你需要帮助,我会说“输入!help获得帮助”"
        }
    ]
}
@@ -1,4 +1,7 @@
|
||||
{
|
||||
"说明": "mask将替换敏感词中的每一个字,若mask_word值不为空,则将敏感词整个替换为mask_word的值",
|
||||
"mask": "*",
|
||||
"mask_word": "",
|
||||
"words": [
|
||||
"习近平",
|
||||
"胡锦涛",
|
||||
@@ -9,6 +12,7 @@
|
||||
"毛泽东",
|
||||
"邓小平",
|
||||
"周恩来",
|
||||
"马克思",
|
||||
"社会主义",
|
||||
"共产党",
|
||||
"共产主义",
|
||||
@@ -21,6 +25,8 @@
|
||||
"天安门",
|
||||
"六四",
|
||||
"政治局常委",
|
||||
"两会",
|
||||
"共青团",
|
||||
"学潮",
|
||||
"八九",
|
||||
"二十大",
|
||||
@@ -48,6 +54,7 @@
|
||||
"作爱",
|
||||
"做爱",
|
||||
"性交",
|
||||
"性爱",
|
||||
"自慰",
|
||||
"阴茎",
|
||||
"淫妇",
|
||||
tests/compatibility_tests/models_and_interfaces.py (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
import openai
|
||||
import time
|
||||
|
||||
# 测试completion api
|
||||
models = [
|
||||
'gpt-3.5-turbo',
|
||||
'gpt-3.5-turbo-0301',
|
||||
'text-davinci-003',
|
||||
'text-davinci-002',
|
||||
'code-davinci-002',
|
||||
'code-cushman-001',
|
||||
'text-curie-001',
|
||||
'text-babbage-001',
|
||||
'text-ada-001',
|
||||
]
|
||||
|
||||
openai.api_key = "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"
|
||||
|
||||
for model in models:
|
||||
print('Testing model: ', model)
|
||||
|
||||
# completion api
|
||||
try:
|
||||
response = openai.Completion.create(
|
||||
model=model,
|
||||
prompt="Say this is a test",
|
||||
max_tokens=7,
|
||||
temperature=0
|
||||
)
|
||||
print(' completion api: ', response['choices'][0]['text'].strip())
|
||||
except Exception as e:
|
||||
print(' completion api err: ', e)
|
||||
|
||||
# chat completion api
|
||||
try:
|
||||
completion = openai.ChatCompletion.create(
|
||||
model="gpt-3.5-turbo",
|
||||
messages=[
|
||||
{"role": "user", "content": "Hello!"}
|
||||
]
|
||||
)
|
||||
print(" chat api: ",completion.choices[0].message['content'].strip())
|
||||
except Exception as e:
|
||||
print(' chat api err: ', e)
|
||||
|
||||
time.sleep(60)