Mirror of https://github.com/langbot-app/LangBot.git, synced 2025-11-25 11:29:39 +08:00

Compare commits
143 Commits

b9ace69a72, aef0b2a26e, f7712d71ec, e94b44e3b8, 524e863c78, bbc80ac901, f969ddd6ca, 1cc9781333,
a609801bae, d8b606d372, 572a440e65, 6e4eeae9b7, 1a73669df8, 91ebaf1122, 46703eb906, b9dd9d5193,
884481a4ec, 9040b37a63, 99d47b2fa2, 6575359a94, a2fc726372, 3bfce8ab51, ff9a9830f2, e2b59e8efe,
04dad9757f, 75ea1080ad, e25b064319, 5d0dbc40ce, beae8de5eb, c4ff30c722, 6f4ecb101b, 9f9b0ef846,
de6957062c, 0a9b43e6fa, 5b0edd9937, 8a400d202a, 5a1e9f7fb2, e03af75cf8, 0da4919255, 914e566d1f,
6ec2b653fe, ba0a088b9c, 478e83bcd9, 386124a3b9, ff5e7c16d1, 7ff7a66012, c99dfb8a86, 10f9d4c6b3,
d347813411, 7a93898b3f, c057ea900f, 512266e74f, e36aee11c7, 97421299f5, bc41e5aa80, 2fa30e7def,
1c6a7d9ba5, 47435c42a5, 39a1b421e6, b5edf2295b, fb650a3d7a, 521541f311, 7020abadbf, d95fb3b5be,
3e524dc790, a64940bff8, c739290f0b, af292fe050, 634c7fb302, 33efb94013, 549e4dc02e, 3d40909c02,
1aef81e38f, 1b0ae8da58, 7979a8e97f, 080e53d9a9, 89bb364b16, 3586cd941f, 054d0839ac, dd75f98d85,
ec23bb5268, bc99db4fc1, c8275fcfbf, a345043c30, 382d37d479, 32c144a75d, 7ca2aa5e39, 86cc4a23ac,
08d1e138bd, a9fe86542f, 4e29776fcd, ee3eae8f4d, a84575858a, ac472291c7, f304873c6a, 18caf8face,
d21115aaa8, a05ecd2e7f, 32a725126d, 0528690622, 819339142e, 1d0573e7ff, 00623bc431, c872264456,
1336d3cb9a, d1459578cd, 8a67fcf40f, 7930370aa9, 0b854bdcf1, cba6aab48d, 12a9ca7a77, a6cbd226e1,
3577e62b41, f86e69fcd1, 292e00b078, 2a91497bcf, b0cca0a4c2, a2bda85a9c, 20677cff86, c8af5d8445,
2dbe984539, 6b8fa664f1, 2b9612e933, 749d0219fb, a11a152bd7, fc803a3742, 13a1e15f24, 3f41b94da5,
0fb5bfda20, dc1fd73ebb, 161b694f71, 45d1c89e45, e26664aa51, e29691efbd, 6d45327882, fbd41eef49,
0a30c88322, 4f5af0e8c8, df3f0fd159, f2493c79dd, a86a035b6b, 7995793bfd, 8c67d3c58f
@@ -1,34 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
	"name": "QChatGPT 3.10",
	// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
	"image": "mcr.microsoft.com/devcontainers/python:0-3.10",

	// Features to add to the dev container. More info: https://containers.dev/features.
	// "features": {},

	// Use 'forwardPorts' to make a list of ports inside the container available locally.
	// "forwardPorts": [],

	// Use 'postCreateCommand' to run commands after the container is created.
	// "postCreateCommand": "pip3 install --user -r requirements.txt",

	// Configure tool-specific properties.
	// "customizations": {},
	"customizations": {
		"codespaces": {
			"repositories": {
				"RockChinQ/QChatGPT": {
					"permissions": "write-all"
				},
				"RockChinQ/revLibs": {
					"permissions": "write-all"
				}
			}
		}
	}

	// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
	// "remoteUser": "root"
}
38	.github/workflows/build_docker_image.yml vendored Normal file
@@ -0,0 +1,38 @@
name: Build Docker Image
on:
  # To keep forks from abusing Actions, builds can only be triggered manually
  workflow_dispatch:
  ## A build also runs automatically when a release is published
  release:
    types: [published]
jobs:
  publish-docker-image:
    runs-on: ubuntu-latest
    name: Build image

    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: judge has env GITHUB_REF # If the GITHUB_REF environment variable is unset, assign github.ref to it
        run: |
          if [ -z "$GITHUB_REF" ]; then
            export GITHUB_REF=${{ github.ref }}
          fi
      - name: Check GITHUB_REF env
        run: echo $GITHUB_REF
      - name: Get version
        id: get_version
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
      - name: Build # image name: rockchin/qchatgpt:<VERSION>
        run: docker build --network=host -t rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }} -t rockchin/qchatgpt:latest .
      - name: Login to Registry
        run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}

      - name: Push image
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: docker push rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }}

      - name: Push image
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: docker push rockchin/qchatgpt:latest
18	.github/workflows/sync-wiki.yml vendored
@@ -27,14 +27,22 @@ jobs:
    with:
      repository: RockChinQ/QChatGPT.wiki
      path: wiki
  - name: Delete old wiki content
    run: |
      rm -rf wiki/*
  - name: Copy res/wiki content to wiki
    run: |
      cp -r res/wiki/* wiki/
  - name: Check for changes
    run: |
      cd wiki
      if git diff --quiet; then
        echo "No changes to commit."
        exit 0
      fi
  - name: Commit and Push Changes
    run: |
      cd wiki
      if git diff --name-only; then
        git add .
        git commit -m "Update wiki"
        git push
      fi
      git add .
      git commit -m "Update wiki"
      git push
@@ -26,7 +26,7 @@ jobs:
  - name: Install dependencies
    run: |
      python -m pip install --upgrade pip
      python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow CallingGPT
      python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow CallingGPT tiktoken

  - name: Copy Scripts
    run: |
7	.gitignore vendored
@@ -25,4 +25,9 @@ bin/
.vscode
test_*
venv/
hugchat.json
hugchat.json
qcapi
claude.json
bard.json
/*yaml
!/docker-compose.yaml
@@ -17,3 +17,10 @@
- Resolve pressing problems in the issues of this project or derivative projects
- Read and improve this project's documentation
- Write tutorials and similar material for this project on social media

### Code Style

- Comments and docstrings `must` follow the Google style guide
- Imports at the top of a module should follow the order `standard library`, `third-party`, `project-local`
- `Do not` import a specific attribute of a module directly; import the module and access the attribute as `xxx.yyy`
- Fields in any scope `must` be declared before use, with a type hint at the declaration
21	Dockerfile
@@ -1,17 +1,14 @@
FROM python:3.9-slim
FROM python:3.10.13-alpine3.18
WORKDIR /QChatGPT

RUN sed -i "s/deb.debian.org/mirrors.tencent.com/g" /etc/apt/sources.list \
    && sed -i 's|security.debian.org/debian-security|mirrors.tencent.com/debian-security|g' /etc/apt/sources.list \
    && apt-get clean \
    && apt-get update \
    && apt-get -y upgrade \
    && apt-get install -y git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

COPY . /QChatGPT/

RUN pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
RUN ls

CMD [ "python", "main.py" ]
RUN pip install -r requirements.txt
RUN pip install -U websockets==10.0

# Generate the config files
RUN python main.py

CMD [ "python", "main.py" ]
172	README.md
@@ -1,34 +1,59 @@
# QChatGPT🤖

<p align="center">
<img src="res/social.png" alt="QChatGPT" width="640" />
<img src="res/logo.png" alt="QChatGPT" width="120" />
</p>

[English](README_en.md) | Simplified Chinese
<div align="center">

[](https://github.com/RockChinQ/QChatGPT/releases/latest)

# QChatGPT

<!-- A highly stable, continuously iterated, cleanly architected, plugin-capable, highly customizable ChatGPT QQ bot framework -->
"Sure! Here is a sample implementation of the quicksort algorithm in Java"

[](https://github.com/RockChinQ/QChatGPT/releases/latest)
<a href="https://hub.docker.com/repository/docker/rockchin/qchatgpt">
<img src="https://img.shields.io/docker/pulls/rockchin/qchatgpt?color=blue" alt="docker pull">
</a>

<img src="https://img.shields.io/badge/python-3.9+-blue.svg" alt="python">
<a href="https://github.com/RockChinQ/QChatGPT/wiki">
<img alt="Static Badge" src="https://img.shields.io/badge/%E6%9F%A5%E7%9C%8B-%E9%A1%B9%E7%9B%AEWiki-blue">
</a><br/>
<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=66-aWvn8cbP4c1ut_1YYkvvGVeEtyTH8&authKey=pTaKBK5C%2B8dFzQ4XlENf6MHTCLaHnlKcCRx7c14EeVVlpX2nRSaS8lJm8YeM4mCU&noverify=0&group_code=195992197">
<img alt="Static Badge" src="https://img.shields.io/badge/%E5%AE%98%E6%96%B9%E7%BE%A4-195992197-purple">
</a>
<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=nC80H57wmKPwRDLFeQrDDjVl81XuC21P&authKey=2wTUTfoQ5v%2BD4C5zfpuR%2BSPMDqdXgDXA%2FS2wHI1NxTfWIG%2B%2FqK08dgyjMMOzhXa9&noverify=0&group_code=738382634">
<img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-738382634-purple">
</a>
<a href="https://lazyfree.top/2023/08/16/QChatGPT%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C/">
<img alt="Static Badge" src="https://img.shields.io/badge/%E6%9F%A5%E7%9C%8B-%E7%A4%BE%E5%8C%BA%E7%BC%96%E5%86%99%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C-blue">
</a>
<a href="https://www.bilibili.com/video/BV14h4y1w7TC">
<img alt="Static Badge" src="https://img.shields.io/badge/%E8%A7%86%E9%A2%91%E6%95%99%E7%A8%8B-208647">
</a>
<a href="https://www.bilibili.com/video/BV11h4y1y74H">
<img alt="Static Badge" src="https://img.shields.io/badge/Linux%E9%83%A8%E7%BD%B2%E8%A7%86%E9%A2%91-208647">
</a>


> 2023/7/29 GPT's Function Calling is now supported, enabling ChatGPT-Plugin-like behavior; see [the content functions section of the Wiki](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91#%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)
> 2023/4/24 Logging in to QQ via go-cqhttp is now supported; see [this document](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE)
> 2023/3/18 The GPT-4 API (beta) is now supported; see `completion_api_params` in `config-template.py`
> 2023/3/15 The reverse-engineering library now supports New Bing; see the [plugin documentation](https://github.com/RockChinQ/revLibs) for usage


**QChatGPT requires Python >= 3.9**
- See the [project Wiki](https://github.com/RockChinQ/QChatGPT/wiki) for detailed project information
- Official discussion and support group: 656285629
  - **Before asking in the group, please `make sure` you have searched the docs and issues and found no solution**
- Community group (with one-click deployment packages, GUI resources, etc.): 891448839
- For a QQ Channel bot, see [QQChannelChatGPT](https://github.com/Soulter/QQChannelChatGPT)
- Contributions of all kinds are welcome; see the [contribution guide](CONTRIBUTING.md)
- Purchase a ChatGPT account: [this link](http://fk.kimi.asia)

## 🍺Supported Models at a Glance

<details>
<summary>Click to expand</summary>
<summary>Reply demo (with the web-access plugin)</summary>
<img alt="Web-access demo GIF" src="res/webwlkr-demo.gif" width="300px">
</details>
</div>

> **NOTE**
> 2023/9/13 Azure, Anthropic Claude, Google PaLM 2, Zhipu ChatGLM, Baidu ERNIE Bot, iFLYTEK Spark, Alibaba Tongyi Qianwen, 360 Zhinao, and other models can now be connected via [One API](https://github.com/songquanpeng/one-api); testing and feedback are welcome.
> 2023/8/29 The [reverse-library plugin](https://github.com/RockChinQ/revLibs) now supports gpt4free
> 2023/8/14 The [reverse-library plugin](https://github.com/RockChinQ/revLibs) now supports Claude and Bard
> 2023/7/29 GPT's Function Calling is now supported, enabling ChatGPT-Plugin-like behavior; see [the Wiki content functions page](https://github.com/RockChinQ/QChatGPT/wiki/6-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)

<details>
<summary>

## 🍺Models and Features

</summary>

### Text Chat

@@ -39,6 +64,14 @@
- ChatGPT web GPT-4 model, currently requires a ChatGPT Plus subscription, connected via [plugin](https://github.com/RockChinQ/revLibs)
- New Bing reverse-engineered library, connected via [plugin](https://github.com/RockChinQ/revLibs)
- HuggingChat, connected via [plugin](https://github.com/RockChinQ/revLibs), English only
- Claude, connected via [plugin](https://github.com/RockChinQ/revLibs)
- Google Bard, connected via [plugin](https://github.com/RockChinQ/revLibs)

### Model Aggregation Platforms

- [One API](https://github.com/songquanpeng/one-api): converts the official APIs of Azure, Anthropic Claude, Google PaLM 2, Zhipu ChatGLM, Baidu ERNIE Bot, iFLYTEK Spark, Alibaba Tongyi Qianwen, 360 Zhinao, and other models into the OpenAI API. Natively supported by QChatGPT: configure One API first, then set the reverse proxy and your One API key in `config.py` (see the sketch after this list).
- [gpt4free](https://github.com/xtekky/gpt4free): cracks various platforms for free access to multiple text models, connected via [plugin](https://github.com/RockChinQ/revLibs); no authentication needed, but stability is poor.
- [Poe](https://poe.com): cracked free access to models on Poe, connected via [oliverkirk-sudo/ChatPoeBot](https://github.com/oliverkirk-sudo/ChatPoeBot) (no longer maintained, since most models available on Poe are now reachable via the [revLibs plugin](https://github.com/RockChinQ/revLubs) or other means).
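A minimal sketch of the `config.py` settings the One API route relies on (the endpoint and key below are hypothetical placeholders; adjust them to your own deployment):

```python
# config.py — hypothetical One API endpoint and key
openai_config = {
    "api_key": {
        "default": "sk-your-one-api-key"   # the key issued by your One API instance
    },
    "reverse_proxy": "http://your-one-api-host:3000/v1"  # One API's OpenAI-compatible endpoint
}

# pick a model name that your One API channel exposes
completion_api_params = {
    "model": "SparkDesk",
}
```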
### Story Continuation

@@ -46,7 +79,7 @@

### Image Generation

- OpenAI DALL·E model, natively supported; see the [Wiki feature page](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
- OpenAI DALL·E model, natively supported; see the [Wiki feature page](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
- NovelAI API, connected via [plugin](https://github.com/dominoar/QCPNovelAi)

### Voice Generation

@@ -55,14 +88,9 @@
- Plachta/VITS-Umamusume-voice-synthesizer, connected via [plugin](https://github.com/oliverkirk-sudo/chat_voice)


</details>

Install [this plugin](https://github.com/RockChinQ/Switcher) to switch between text models during use.

## ✅Features

<details>
<summary>Click to expand the overview</summary>
### Feature Highlights

<details>
<summary>✅Sensitive-word filtering, to reduce account risk</summary>
@@ -93,7 +121,7 @@

- Preset text in natural language, to customize the bot's persona and other details
  - See the `default_prompt` section in `config.py`
  - Multiple preset scenarios are supported, controlled via commands such as !reset and !default; see the [wiki command page](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
  - Multiple preset scenarios are supported, controlled via commands such as !reset and !default; see the [wiki command page](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
</details>

<details>
@@ -113,7 +141,7 @@

- Homegrown plugin loader and related support
  - Supports GPT's Function Calling feature
  - See the [plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
  - See the [plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
</details>
<details>
<summary>✅Blacklists for private and group chats</summary>
@@ -157,13 +185,22 @@
<br/>
<img alt="New Bing" src="res/screenshots/person_newbing.png" width="400"/>

See the [Wiki feature page](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8) for details

</details>

See the [Wiki feature page](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE) for details
<details>

## 🔩Deployment
<summary>

**If you run into any problem during deployment, search the issues of [QChatGPT](https://github.com/RockChinQ/QChatGPT/issues) or [qcg-installer](https://github.com/RockChinQ/qcg-installer/issues) first**

## 🚀Deployment and Usage

</summary>

> **NOTE**
> - If you run into any problem during deployment, search the issues of [QChatGPT](https://github.com/RockChinQ/QChatGPT/issues) or [qcg-installer](https://github.com/RockChinQ/qcg-installer/issues) first
> - QChatGPT requires Python >= 3.9
> - The official and community group numbers are at the top of this document

### - Register an OpenAI account

@@ -182,17 +219,16 @@

</details>

### - Automated deployment
### - Docker or automated deployment

<details>
<summary>Expand to view; pick one of the two methods below — Docker is preferred on Linux, the installer on Windows</summary>

#### Docker

> The Docker method currently only supports logging in with mirai; if you are not **familiar** with Docker and related concepts, we strongly recommend deploying another way, as we **will not and can hardly** resolve connectivity issues between the containers on your host.
> The Docker method is relatively involved; if you are not **familiar** with Docker and related concepts, we strongly recommend deploying another way, as we **will not and can hardly** resolve connectivity issues between the containers on your host.

See [this document](res/docs/docker_deploy.md)
Contributed by [@mikumifa](https://github.com/mikumifa)
See [this document](res/docs/docker_deployment.md)

#### Installer

@@ -224,7 +260,7 @@
<details>
<summary>go-cqhttp</summary>

1. Configure go-cqhttp following [this document](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE)
1. Configure go-cqhttp following [this document](https://github.com/RockChinQ/QChatGPT/wiki/9-go-cqhttp%E9%85%8D%E7%BD%AE)
2. Start go-cqhttp, make sure it logs in successfully, and keep it running
3. When configuring the main program in the next step, set `msg_source_adapter` to `nakuru` in config.py

@@ -242,7 +278,7 @@ cd QChatGPT
2. Install dependencies

```bash
pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk
pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk CallingGPT tiktoken
```

3. Run the main program once to generate the config files

@@ -271,51 +307,59 @@ python3 main.py

</details>

## 🚀Usage
**Must-read after deployment: [command reference](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**

**Must-read after deployment: [command reference](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**
See the [Wiki feature page](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E4%BD%BF%E7%94%A8%E6%96%B9%E5%BC%8F) for all features
See the [Wiki feature page](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8) for all features

</details>

<details>
<summary>

## 🧩Plugin Ecosystem

You can now develop your own plugins to extend functionality or customize the program's behavior
See the [Wiki plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
For a development tutorial, see the [Wiki plugin development page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)
</summary>

⭐We already support [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); see the wiki's plugin development page for how to use it in QChatGPT
⭐We already support [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); see the [Wiki content functions page](https://github.com/RockChinQ/QChatGPT/wiki/6-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)

<details>
<summary>View the plugin list</summary>
> Usage: [Wiki plugin usage](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
> Development tutorial: [Wiki plugin development](https://github.com/RockChinQ/QChatGPT/wiki/7-%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)

### Example Plugins

Located under `tests/plugin_examples`; copy a plugin's whole directory into `plugins` to use it
[Full plugin list](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6) — issues proposing new plugins are welcome

- `cmdcn` - Chinese forms of the main program's commands
- `hello_plugin` - replies with a preset message upon receiving `hello`
- `urlikethisijustsix` - replies accordingly to offensive messages

### More

[Plugin list](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6) — issues proposing new plugins are welcome
### Selected Plugins

- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - gives the bot internet access!!
- [revLibs](https://github.com/RockChinQ/revLibs) - connects the ChatGPT web version to this project; on [the difference between the official API and the web version](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
- [revLibs](https://github.com/RockChinQ/revLibs) - connects ChatGPT web, Claude, Bard, Hugging Chat, and other cracked services to this project; on [the difference between the official API and the web version](https://github.com/RockChinQ/QChatGPT/wiki/8-%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
- [Switcher](https://github.com/RockChinQ/Switcher) - switch the model in use via commands
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - repository form of `hello_plugin`, a plugin development template
- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - assorted feature plugins by dominoar (voice output, Ranimg, word-blocking rules, etc.)
- [dominoar/QCP-NovelAi](https://github.com/dominoar/QCP-NovelAi) - NovelAI storytelling and drawing
- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - text-to-speech output using the [VITS-Umamusume-voice-synthesizer model](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer) on HuggingFace
- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - text-to-speech output; supports the [VITS model](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer) on HuggingFace, Azure speech synthesis, local VITS synthesis, and SoVITS synthesis
- [RockChinQ/WaitYiYan](https://github.com/RockChinQ/WaitYiYan) - fetches the waitlist count of Baidu `ERNIE Bot` in real time
- [chordfish-k/QChartGPT_Emoticon_Plugin](https://github.com/chordfish-k/QChartGPT_Emoticon_Plugin) - makes the bot send memes matching its reply content
- [oliverkirk-sudo/ChatPoeBot](https://github.com/oliverkirk-sudo/ChatPoeBot) - connects bots on [Poe](https://poe.com/)
- [lieyanqzu/WeatherPlugin](https://github.com/lieyanqzu/WeatherPlugin) - weather lookup plugin
- [SysStatPlugin](https://github.com/RockChinQ/SysStatPlugin) - view system status
- [oliverkirk-sudo/qchat_system_status](https://github.com/oliverkirk-sudo/qchat_system_status) - outputs system status as an image
- [oliverkirk-sudo/QChatAIPaint](https://github.com/oliverkirk-sudo/QChatAIPaint) - AI drawing plugin based on [Holara](https://holara.ai/)
- [oliverkirk-sudo/QChatCodeRunner](https://github.com/oliverkirk-sudo/QChatCodeRunner) - code execution and chart generation plugin based on [CodeRunner-Plugin](https://github.com/oliverkirk-sudo/CodeRunner-Plugin)
- [oliverkirk-sudo/QChatWeather](https://github.com/oliverkirk-sudo/QChatWeather) - generates good-looking weather images, based on QWeather
- [oliverkirk-sudo/QChatMarkdown](https://github.com/oliverkirk-sudo/QChatMarkdown) - renders the bot's markdown output as images, based on [playwright](https://playwright.dev/python/docs/intro)
- [ruuuux/WikipediaSearch](https://github.com/ruuuux/WikipediaSearch) - Wikipedia search plugin

</details>

## 😘Acknowledgements
<details>

- [@the-lazy-me](https://github.com/the-lazy-me) made a [video tutorial](https://www.bilibili.com/video/BV15v4y1X7aP) for this project
<summary>

## 😘Acknowledgements and Rewards

</summary>

- [@the-lazy-me](https://github.com/the-lazy-me) made a [video tutorial](https://www.bilibili.com/video/BV1Y14y1Q7kQ) for this project
- [@mikumifa](https://github.com/mikumifa) developer of this project's Docker deployment repository
- [@dominoar](https://github.com/dominoar) developed various plugins for this project
- [@万神的星空](https://github.com/qq255204159) integrated-package distribution
@@ -323,6 +367,6 @@ python3 main.py

And all [contributors](https://github.com/RockChinQ/QChatGPT/graphs/contributors) and other friends who have supported this project.

## 👍Rewards

<img alt="赞赏码" src="res/mm_reward_qrcode_1672840549070.png" width="400" height="400"/>

</details>
@@ -62,6 +62,9 @@ nakuru_config = {
#     },
#     "reverse_proxy": "http://example.com:12345/v1"
# }
#
# The author runs a public reverse proxy: https://api.openai.rockchin.top/v1
# It may be shut down at any time and is for testing only; if possible, prefer a forward proxy or your own reverse proxy
openai_config = {
    "api_key": {
        "default": "openai_api_key"
@@ -70,6 +73,11 @@ openai_config = {
    "reverse_proxy": None
}

# api-key switching strategy
# active: switch the api-key on every request
# passive: switch the api-key only when the current one has exceeded its quota
switch_strategy = "active"
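To make the two strategies concrete, a config with several keys might look like this minimal sketch (the key names and values are hypothetical):

```python
# hypothetical example — with "passive", the second key is only used
# after "default" has exceeded its quota
openai_config = {
    "api_key": {
        "default": "sk-aaaa",
        "backup": "sk-bbbb",
    },
    "http_proxy": None,
    "reverse_proxy": None,
}
switch_strategy = "passive"
```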
# [Required] Admin QQ number, used to receive error notifications and run admin-level commands
# Multiple admins are supported; set them as a list, e.g.:
# admin_qq = [12345678, 87654321]
@@ -152,7 +160,6 @@ response_rules = {
}


# Message ignore rules
# Apply to both private and group chats
# Messages matching these rules will not be responded to
@@ -221,12 +228,12 @@ auto_reset = True
#
# See the OpenAI documentation for details: https://beta.openai.com/docs/api-reference/completions/create
# Make your changes in config.py; do not modify config-template.py
#
# Multiple models can be connected through One API; set the One API proxy address in openai_config above,
# and enter the model name you want to use here; see https://github.com/songquanpeng/one-api for details
completion_api_params = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.9,  # lower values give more measured answers; range [0, 1]
    "top_p": 1,  # how closely the generated text follows the request; range [0, 1]
    "frequency_penalty": 0.2,
    "presence_penalty": 1.0,
}

# Parameters for OpenAI's Image API
@@ -235,9 +242,17 @@ image_api_params = {
    "size": "256x256",  # image size; 256x256, 512x512, and 1024x1024 are supported
}

# Trace function calls
# When True, a reply is sent to the user every time GPT performs a Function Call,
# and every Function Call and normal reply within one query is sent to the user separately
trace_function_calls = False

# Whether to quote the original message when replying in a group
quote_origin = True

# Whether to @ the sender when replying in a group
at_sender = False

# Whether to include the image description when replying with a drawing
include_image_description = True
18	docker-compose.yaml Normal file
@@ -0,0 +1,18 @@
version: "3"

services:
  qchatgpt:
    image: rockchin/qchatgpt:latest
    volumes:
      - ./config.py:/QChatGPT/config.py
      - ./banlist.py:/QChatGPT/banlist.py
      - ./cmdpriv.json:/QChatGPT/cmdpriv.json
      - ./sensitive.json:/QChatGPT/sensitive.json
      - ./tips.py:/QChatGPT/tips.py
      # directory mappings
      - ./plugins:/QChatGPT/plugins
      - ./scenario:/QChatGPT/scenario
      - ./temp:/QChatGPT/temp
      - ./logs:/QChatGPT/logs
    restart: always
    # configure networking according to your environment
28	main.py
@@ -47,7 +47,7 @@ def init_db():

def ensure_dependencies():
    import pkg.utils.pkgmgr as pkgmgr
    pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "--upgrade",
    pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "tiktoken", "--upgrade",
                    "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
                    "--trusted-host", "pypi.tuna.tsinghua.edu.cn"])

@@ -60,35 +60,42 @@ def override_config():
    # Check for override.json overrides
    if os.path.exists("override.json") and use_override:
        override_json = json.load(open("override.json", "r", encoding="utf-8"))
        overrided = []
        for key in override_json:
            if hasattr(config, key):
                setattr(config, key, override_json[key])
                logging.info("覆写配置[{}]为[{}]".format(key, override_json[key]))
                # logging.info("覆写配置[{}]为[{}]".format(key, override_json[key]))
                overrided.append(key)
            else:
                logging.error("无法覆写配置[{}]为[{}],该配置不存在,请检查override.json是否正确".format(key, override_json[key]))
        if len(overrided) > 0:
            logging.info("已根据override.json覆写配置项: {}".format(", ".join(overrided)))
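For reference, a minimal sketch of what `override.json` could contain, written here as the dict that `json.load` would produce (keys must already exist in config.py; the values are hypothetical):

```python
# hypothetical override.json content; unknown keys are rejected with an error above
override_json = {
    "admin_qq": 12345678,    # hypothetical admin QQ number
    "quote_origin": False,   # flip a config flag without editing config.py
}
```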
# Temporary function for loading config and context; these will be unified in a config class later
def load_config():
    logging.info("检查config模块完整性.")
    # integrity check
    non_exist_keys = []

    is_integrity = True
    config_template = importlib.import_module('config-template')
    config = importlib.import_module('config')
    for key in dir(config_template):
        if not key.startswith("__") and not hasattr(config, key):
            setattr(config, key, getattr(config_template, key))
            logging.warning("[{}]不存在".format(key))
            # logging.warning("[{}]不存在".format(key))
            non_exist_keys.append(key)
            is_integrity = False

    if not is_integrity:
        logging.warning("配置文件不完整,您可以依据config-template.py检查config.py")
        logging.warning("以下配置字段不存在: {}".format(", ".join(non_exist_keys)))

    # Check for override.json overrides
    override_config()

    if not is_integrity:
        logging.warning("以上不存在的配置已被设为默认值,将在3秒后继续启动... ")
        logging.warning("以上不存在的配置已被设为默认值,您可以依据config-template.py检查config.py,将在3秒后继续启动... ")
        time.sleep(3)

    # store into context
@@ -97,6 +104,8 @@ def load_config():

def complete_tips():
    """Complete the tips module's attributes from the tips-custom-template module"""
    non_exist_keys = []

    is_integrity = True
    logging.info("检查tips模块完整性.")
    tips_template = importlib.import_module('tips-custom-template')
@@ -104,10 +113,12 @@ def complete_tips():
    for key in dir(tips_template):
        if not key.startswith("__") and not hasattr(tips, key):
            setattr(tips, key, getattr(tips_template, key))
            logging.warning("[{}]不存在".format(key))
            # logging.warning("[{}]不存在".format(key))
            non_exist_keys.append(key)
            is_integrity = False

    if not is_integrity:
        logging.warning("以下提示语字段不存在: {}".format(", ".join(non_exist_keys)))
        logging.warning("tips模块不完整,您可以依据tips-custom-template.py检查tips.py")
        logging.warning("以上配置已被设为默认值,将在3秒后继续启动... ")
        time.sleep(3)

@@ -214,7 +225,7 @@ def start(first_time_init=False):
    def run_bot_wrapper():
        global known_exception_caught
        try:
            logging.info("使用账号: {}".format(qqbot.bot_account_id))
            logging.debug("使用账号: {}".format(qqbot.bot_account_id))
            qqbot.adapter.run_sync()
        except TypeError as e:
            if str(e).__contains__("argument 'debug'"):
@@ -318,7 +329,8 @@ def start(first_time_init=False):
        if pkg.utils.updater.is_new_version_available():
            logging.info("新版本可用,请发送 !update 进行自动更新\n更新日志:\n{}".format("\n".join(pkg.utils.updater.get_rls_notes())))
        else:
            logging.info("当前已是最新版本")
            # logging.info("当前已是最新版本")
            pass

    except Exception as e:
        logging.warning("检查更新失败:{}".format(e))
@@ -21,6 +21,7 @@
    "http_proxy": null,
    "reverse_proxy": null
  },
  "switch_strategy": "active",
  "admin_qq": 0,
  "default_prompt": {
    "default": "如果我之后想获取帮助,请你说“输入!help获取帮助”"
@@ -56,15 +57,14 @@
  "auto_reset": true,
  "completion_api_params": {
    "model": "gpt-3.5-turbo",
    "temperature": 0.9,
    "top_p": 1,
    "frequency_penalty": 0.2,
    "presence_penalty": 1.0
    "temperature": 0.9
  },
  "image_api_params": {
    "size": "256x256"
  },
  "trace_function_calls": false,
  "quote_origin": true,
  "at_sender": false,
  "include_image_description": true,
  "process_message_timeout": 30,
  "show_prefix": false,
@@ -30,7 +30,7 @@ class ChatCompletionRequest(RequestBase):
        )
        self.pending_msg = ""

    def append_message(self, role: str, content: str, name: str=None):
    def append_message(self, role: str, content: str, name: str=None, function_call: dict=None):
        msg = {
            "role": role,
            "content": content
@@ -39,6 +39,9 @@ class ChatCompletionRequest(RequestBase):
        if name is not None:
            msg['name'] = name

        if function_call is not None:
            msg['function_call'] = function_call

        self.messages.append(msg)

    def __init__(
@@ -87,7 +90,7 @@ class ChatCompletionRequest(RequestBase):
        choice0 = resp["choices"][0]

        # If this is not a function call and finish_reason is stop, stop iterating
        if 'function_call' not in choice0['message'] and choice0["finish_reason"] == "stop":
        if choice0['finish_reason'] == 'stop':  # and choice0["finish_reason"] == "stop"
            self.stopped = True

        if 'function_call' in choice0['message']:
@@ -95,7 +98,8 @@ class ChatCompletionRequest(RequestBase):

            self.append_message(
                role="assistant",
                content="function call: "+json.dumps(self.pending_func_call, ensure_ascii=False)
                content=choice0['message']['content'],
                function_call=choice0['message']['function_call']
            )

            return {
@@ -106,7 +110,7 @@ class ChatCompletionRequest(RequestBase):
                "message": {
                    "role": "assistant",
                    "type": "function_call",
                    "content": None,
                    "content": choice0['message']['content'],
                    "function_call": choice0['message']['function_call']
                },
                "finish_reason": "function_call"
@@ -129,7 +133,7 @@ class ChatCompletionRequest(RequestBase):
                    "type": "text",
                    "content": choice0['message']['content']
                },
                "finish_reason": "stop"
                "finish_reason": choice0["finish_reason"]
            }
        ],
        "usage": resp["usage"]
@@ -159,9 +163,14 @@ class ChatCompletionRequest(RequestBase):
        logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))

        # execute the function call
        ret = execute_function(func_name, arguments)
        ret = ""
        try:
            ret = execute_function(func_name, arguments)

        logging.info("函数执行完成。")
            logging.info("函数执行完成。")
        except Exception as e:
            ret = "error: execute function failed: {}".format(str(e))
            logging.error("函数执行失败: {}".format(str(e)))

        self.append_message(
            role="function",
@@ -1,6 +1,7 @@
# Defines the models for the different API request types
import threading
import asyncio
import logging

import openai

@@ -12,8 +13,15 @@ class RequestBase:
    def __init__(self, *args, **kwargs):
        raise NotImplementedError

    def _next_key(self):
        import pkg.utils.context as context
        switched, name = context.get_openai_manager().key_mgr.auto_switch()
        logging.debug("切换api-key: switched={}, name={}".format(switched, name))
        openai.api_key = context.get_openai_manager().key_mgr.get_using_key()

    def _req(self, **kwargs):
        """Handle proxying"""
        import config

        ret: dict = {}
        exception: Exception = None
@@ -23,6 +31,11 @@ class RequestBase:

        try:
            ret = await self.req_func(**kwargs)
            logging.debug("接口请求返回:%s", str(ret))

            if config.switch_strategy == 'active':
                self._next_key()

            return ret
        except Exception as e:
            exception = e
@@ -54,7 +54,24 @@ class KeysManager:
            Whether the switch succeeded, and the alias of the api-key after switching
        """

        index = 0

        for key_name in self.api_key:
            if self.api_key[key_name] == self.using_key:
                break

            index += 1

        # poll forward starting from the current key
        start_index = index
        index += 1
        if index >= len(self.api_key):
            index = 0

        while index != start_index:

            key_name = list(self.api_key.keys())[index]

            if self.api_key[key_name] not in self.exceeded:
                self.using_key = self.api_key[key_name]

@@ -69,10 +86,14 @@ class KeysManager:

                return True, key_name

        self.using_key = list(self.api_key.values())[0]
        logging.info("使用api-key:" + list(self.api_key.keys())[0])
            index += 1
            if index >= len(self.api_key):
                index = 0

        return False, ""
        self.using_key = list(self.api_key.values())[start_index]
        logging.debug("使用api-key:" + list(self.api_key.keys())[start_index])

        return False, list(self.api_key.keys())[start_index]

    def add(self, key_name, key):
        self.api_key[key_name] = key
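A minimal sketch of driving `KeysManager` directly, to show the round-robin behavior above (the keys are hypothetical, and constructing the manager outside the usual context is for illustration only):

```python
from pkg.openai.keymgr import KeysManager

mgr = KeysManager({"default": "sk-aaaa", "backup": "sk-bbbb"})  # hypothetical keys
switched, alias = mgr.auto_switch()  # rotate to the next non-exceeded key
print(switched, alias, mgr.get_using_key())
```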
@@ -29,7 +29,7 @@ class OpenAIInteract:
        self.key_mgr = pkg.openai.keymgr.KeysManager(api_key)
        self.audit_mgr = pkg.audit.gatherer.DataGatherer()

        logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
        # logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())

        openai.api_key = self.key_mgr.get_using_key()

@@ -52,6 +52,13 @@ class OpenAIInteract:

        # request the API
        for resp in request:

            if resp['usage']['total_tokens'] > 0:
                self.audit_mgr.report_text_model_usage(
                    model,
                    resp['usage']['total_tokens']
                )

            yield resp

    def request_image(self, prompt) -> dict:
@@ -7,6 +7,7 @@ Completion - text-davinci-003 等模型
"""
import openai, logging, threading, asyncio
import openai.error as aiE
import tiktoken

from pkg.openai.api.model import RequestBase
from pkg.openai.api.completion import CompletionRequest
@@ -31,7 +32,16 @@ CHAT_COMPLETION_MODELS = {
    'gpt-4',
    'gpt-4-0613',
    'gpt-4-32k',
    'gpt-4-32k-0613'
    'gpt-4-32k-0613',
    # connected via One-API
    'SparkDesk',
    'chatglm_pro',
    'chatglm_std',
    'chatglm_lite',
    'qwen-v1',
    'qwen-plus-v1',
    'ERNIE-Bot',
    'ERNIE-Bot-turbo',
}

EDIT_MODELS = {
@@ -48,4 +58,80 @@ def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBa
        return ChatCompletionRequest(model_name, messages, **args)
    elif model_name in COMPLETION_MODELS:
        return CompletionRequest(model_name, messages, **args)
    raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
    raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))


def count_chat_completion_tokens(messages: list, model: str) -> int:
    """Return the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model in {
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4-0314",
        "gpt-4-32k-0314",
        "gpt-4-0613",
        "gpt-4-32k-0613",
        "SparkDesk",
        "chatglm_pro",
        "chatglm_std",
        "chatglm_lite",
        "qwen-v1",
        "qwen-plus-v1",
        "ERNIE-Bot",
        "ERNIE-Bot-turbo",
    }:
        tokens_per_message = 3
        tokens_per_name = 1
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif "gpt-3.5-turbo" in model:
        # print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
        return count_chat_completion_tokens(messages, model="gpt-3.5-turbo-0613")
    elif "gpt-4" in model:
        # print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
        return count_chat_completion_tokens(messages, model="gpt-4-0613")
    else:
        raise NotImplementedError(
            f"""count_chat_completion_tokens() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


def count_completion_tokens(messages: list, model: str) -> int:

    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")

    text = ""

    for message in messages:
        text += message['role'] + message['content'] + "\n"

    text += "assistant: "

    return len(encoding.encode(text))


def count_tokens(messages: list, model: str):

    if model in CHAT_COMPLETION_MODELS:
        return count_chat_completion_tokens(messages, model)
    elif model in COMPLETION_MODELS:
        return count_completion_tokens(messages, model)
    raise ValueError("不支持模型[{}],请检查配置文件".format(model))
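A quick usage sketch of the dispatcher above (the message contents are hypothetical, and this assumes the module is importable as `pkg.openai.modelmgr`):

```python
from pkg.openai.modelmgr import count_tokens

# hypothetical conversation
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# dispatches to count_chat_completion_tokens for chat models,
# or count_completion_tokens for completion models
print(count_tokens(messages, "gpt-3.5-turbo"))
```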
@@ -1,28 +0,0 @@
# Billing module
# Deprecated: https://github.com/RockChinQ/QChatGPT/issues/81

import logging

pricing = {
    "base": {  # text models are priced per 1000 characters
        "text-davinci-003": 0.02,
    },
    "image": {
        "256x256": 0.016,
        "512x512": 0.018,
        "1024x1024": 0.02,
    }
}


def language_base_price(model, text):
    salt_rate = 0.93
    length = ((len(text.encode('utf-8')) - len(text)) / 2 + len(text)) * salt_rate
    logging.debug("text length: %d" % length)

    return pricing["base"][model] * length / 1000


def image_price(size):
    logging.debug("image size: %s" % size)
    return pricing["image"][size]
@@ -1,79 +0,0 @@
import time
import threading
import logging


sessions = {}


class SessionOfflineStatus:
    ON_GOING = "on_going"
    EXPLICITLY_CLOSED = "explicitly_closed"


def reset_session_prompt(session_name, prompt):
    pass


def load_sessions():
    pass


def get_session(session_name: str) -> 'Session':
    pass


def dump_session(session_name: str):
    pass


class Session:
    name: str = ''

    default_prompt: list = []
    """The session's system prompt"""

    messages: list = []
    """Message history"""

    token_counts: list = []
    """Token count for each round"""

    create_ts: int = 0
    """Session creation timestamp"""

    last_active_ts: int = 0
    """Timestamp of the session's last activity"""

    just_switched_to_exist_session: bool = False

    response_lock = None

    def __init__(self, name: str):
        self.name = name
        self.default_prompt = self.get_runtime_default_prompt()
        logging.debug("prompt is: {}".format(self.default_prompt))
        self.messages = []
        self.token_counts = []
        self.create_ts = int(time.time())
        self.last_active_ts = int(time.time())

        self.response_lock = threading.Lock()

        self.schedule()

    def get_runtime_default_prompt(self, use_default: str = None) -> list:
        """Fetch the needed prompt from the prompt manager"""
        import pkg.openai.dprompt as dprompt

        if use_default is None:
            use_default = dprompt.mode_inst().get_using_name()

        current_default_prompt, _ = dprompt.mode_inst().get_prompt(use_default)
        return current_default_prompt

    def schedule(self):
        """Scheduled session-expiry check task"""

    def expire_check_timer_loop(self):
        """Session-expiry check loop"""
@@ -16,6 +16,8 @@ import pkg.utils.context
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models

from pkg.openai.modelmgr import count_tokens

# all sessions held at runtime
sessions = {}

@@ -64,7 +66,7 @@ def load_sessions():
    session_data = db_inst.load_valid_sessions()

    for session_name in session_data:
        logging.info('加载session: {}'.format(session_name))
        logging.debug('加载session: {}'.format(session_name))

        temp_session = Session(session_name)
        temp_session.name = session_name
@@ -107,9 +109,6 @@ class Session:
    prompt = []
    """The rounds of the conversation, kept as a list"""

    token_counts = []
    """Token count for each round"""

    default_prompt = []
    """This session's default prompt"""

@@ -195,8 +194,15 @@ class Session:

    # Request a reply
    # This function blocks
    def append(self, text: str) -> str:
        """Append a message to the session and return the API's reply"""
    def query(self, text: str=None) -> tuple[str, str, list[str]]:
        """Append a message to the session and return the API's reply

        Args:
            text (str): the user message

        Returns:
            tuple[str, str, list[str]]: (API reply, finish_reason, list of functions called)
        """

        self.last_interact_timestamp = int(time.time())

@@ -210,17 +216,34 @@ class Session:

        event = pkg.plugin.host.emit(plugin_models.SessionFirstMessageReceived, **args)
        if event.is_prevented_default():
            return None
            return None, None, None

        config = pkg.utils.context.get_config()
        max_length = config.prompt_submit_length

        prompts, counts = self.cut_out(text, max_length)
        local_default_prompt = self.default_prompt.copy()
        local_prompt = self.prompt.copy()

        # count prompt tokens before the request
        total_token_before_query = 0
        for token_count in counts:
            total_token_before_query += token_count
        # fire the PromptPreProcessing event
        args = {
            'session_name': self.name,
            'default_prompt': self.default_prompt,
            'prompt': self.prompt,
            'text_message': text,
        }

        event = pkg.plugin.host.emit(plugin_models.PromptPreProcessing, **args)

        if event.get_return_value('default_prompt') is not None:
            local_default_prompt = event.get_return_value('default_prompt')

        if event.get_return_value('prompt') is not None:
            local_prompt = event.get_return_value('prompt')

        if event.get_return_value('text_message') is not None:
            text = event.get_return_value('text_message')

        prompts, _ = self.cut_out(text, max_length, local_default_prompt, local_prompt)

        res_text = ""

@@ -228,26 +251,63 @@ class Session:

        total_tokens = 0

        finish_reason: str = ""

        funcs = []

        trace_func_calls = config.trace_function_calls
        botmgr = pkg.utils.context.get_qqbot_manager()

        session_name_spt: list[str] = self.name.split("_")

        pending_res_text = ""

        # TODO Sorry, I know this is a huge mess; I will refactor it later
        for resp in pkg.utils.context.get_openai_manager().request_completion(prompts):
            if resp['choices'][0]['message']['type'] == 'text':  # normal reply
                res_text += resp['choices'][0]['message']['content']

                if pending_res_text != "":
                    botmgr.adapter.send_message(
                        session_name_spt[0],
                        session_name_spt[1],
                        pending_res_text
                    )
                    pending_res_text = ""

                finish_reason = resp['choices'][0]['finish_reason']

                if resp['choices'][0]['message']['role'] == "assistant" and resp['choices'][0]['message']['content'] != None:  # contains a plain-text response

                    if not trace_func_calls:
                        res_text += resp['choices'][0]['message']['content'] + "\n"
                    else:
                        res_text = resp['choices'][0]['message']['content']
                        pending_res_text = resp['choices'][0]['message']['content']

                total_tokens += resp['usage']['total_tokens']

                pending_msgs.append(
                    {
                        "role": "assistant",
                        "content": resp['choices'][0]['message']['content']
                    }
                )
                msg = {
                    "role": "assistant",
                    "content": resp['choices'][0]['message']['content']
                }

                if 'function_call' in resp['choices'][0]['message']:
                    msg['function_call'] = json.dumps(resp['choices'][0]['message']['function_call'])

                pending_msgs.append(msg)

            if resp['choices'][0]['message']['type'] == 'function_call':
                # self.prompt.append(
                #     {
                #         "role": "assistant",
                #         "content": "function call: "+json.dumps(resp['choices'][0]['message']['function_call'])
                #     }
                # )
                if trace_func_calls:
                    botmgr.adapter.send_message(
                        session_name_spt[0],
                        session_name_spt[1],
                        "调用函数 "+resp['choices'][0]['message']['function_call']['name'] + "..."
                    )

                total_tokens += resp['usage']['total_tokens']
            elif resp['choices'][0]['message']['type'] == 'function_return':
@@ -260,10 +320,12 @@ class Session:
                # )

                # total_tokens += resp['usage']['total_tokens']
                funcs.append(
                    resp['choices'][0]['message']['function_name']
                )
                pass


        # request a completion from the API
        # message, total_token = pkg.utils.context.get_openai_manager().request_completion(
        #     prompts,
@@ -276,19 +338,20 @@ class Session:
        # add both sides of this exchange to the prompt
        # self.prompt.append({'role': 'user', 'content': text})
        # self.prompt.append({'role': 'assistant', 'content': res_ans})
        self.prompt.append({'role': 'user', 'content': text})
        if text:
            self.prompt.append({'role': 'user', 'content': text})
        # append pending_msgs
        self.prompt += pending_msgs

        # record this round's token count in token_counts
        self.token_counts.append(total_tokens-total_token_before_query)
        logging.debug("本回合使用token: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts))
        # self.token_counts.append(total_tokens-total_token_before_query)
        # logging.debug("本回合使用token: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts))

        if self.just_switched_to_exist_session:
            self.just_switched_to_exist_session = False
            self.set_ongoing()

        return res_ans if res_ans[0] != '\n' else res_ans[1:]
        return res_ans if res_ans[0] != '\n' else res_ans[1:], finish_reason, funcs

    # Delete the previous round and return its question
    def undo(self) -> str:
@@ -306,7 +369,7 @@ class Session:
        return question

    # Build the conversation body
    def cut_out(self, msg: str, max_tokens: int) -> tuple[list, list]:
    def cut_out(self, msg: str, max_tokens: int, default_prompt: list, prompt: list) -> tuple[list, list]:
        """Trim the existing prompt so that the new prompt does not exceed max_tokens

        :return: (new prompt, new token counts)
@@ -319,42 +382,35 @@ class Session:

        # pack the current conversation rounds
        changable_prompts = []
        changable_counts = []
        # walk backwards: step 2 over prompt, step 1 over token_counts
        changable_index = len(self.prompt) - 1
        token_count_index = len(self.token_counts) - 1

        packed_tokens = 0
        use_model = pkg.utils.context.get_config().completion_api_params['model']

        while changable_index >= 0 and token_count_index >= 0:
            if packed_tokens + self.token_counts[token_count_index] > max_tokens:
        ptr = len(prompt) - 1

        # scan and join from the back, regardless of round boundaries
        while ptr >= 0:
            if count_tokens(prompt[ptr:ptr+1]+changable_prompts, use_model) > max_tokens:
                break

            changable_prompts.insert(0, self.prompt[changable_index])
            changable_prompts.insert(0, self.prompt[changable_index - 1])
            changable_counts.insert(0, self.token_counts[token_count_index])
            packed_tokens += self.token_counts[token_count_index]
            changable_prompts.insert(0, prompt[ptr])

            changable_index -= 2
            token_count_index -= 1
            ptr -= 1

        # merge default_prompt and changable_prompts
        result_prompt = self.default_prompt + changable_prompts
        result_prompt = default_prompt + changable_prompts

        # append the current question
        result_prompt.append(
            {
                'role': 'user',
                'content': msg
            }
        )
        if msg:
            result_prompt.append(
                {
                    'role': 'user',
                    'content': msg
                }
            )

        logging.debug('cut_out: {}\nchangable section tokens: {}\npacked counts: {}\nsession counts: {}'.format(json.dumps(result_prompt, ensure_ascii=False, indent=4),
                                                                                                                 packed_tokens,
                                                                                                                 changable_counts,
                                                                                                                 self.token_counts))
        logging.debug("cut_out: {}".format(json.dumps(result_prompt, ensure_ascii=False, indent=4)))

        return result_prompt, changable_counts
        return result_prompt, count_tokens(changable_prompts, use_model)

    # Persist the session
    def persistence(self):
@@ -99,7 +99,7 @@ def walk_plugin_path(module, prefix='', path_prefix=''):

def load_plugins():
    """Load plugins"""
    logging.info("加载插件")
    logging.debug("加载插件")
    PluginHost()
    walk_plugin_path(__import__('plugins'))

@@ -122,15 +122,21 @@ def initialize_plugins():
    """Initialize plugins"""
    logging.info("初始化插件")
    import pkg.plugin.models as models

    successfully_initialized_plugins = []

    for plugin in iter_plugins():
        # if not plugin['enabled']:
        #     continue
        try:
            models.__current_registering_plugin__ = plugin['name']
            plugin['instance'] = plugin["class"](plugin_host=context.get_plugin_host())
            logging.info("插件 {} 已初始化".format(plugin['name']))
            # logging.info("插件 {} 已初始化".format(plugin['name']))
            successfully_initialized_plugins.append(plugin['name'])
        except:
            logging.error("插件{}初始化时发生错误: {}".format(plugin['name'], sys.exc_info()))

    logging.info("以下插件已初始化: {}".format(", ".join(successfully_initialized_plugins)))


def unload_plugins():
@@ -266,7 +272,7 @@ class EventContext:
            self.__return_value__[key] = []
        self.__return_value__[key].append(ret)

    def get_return(self, key: str):
    def get_return(self, key: str) -> list:
        """Get all return values for the given key"""
        if key in self.__return_value__:
            return self.__return_value__[key]
@@ -387,7 +393,7 @@ class PluginHost:
                    logging.debug("插件 {} 已要求阻止事件 {} 的默认行为".format(plugin['name'], event_name))

            except Exception as e:
                logging.error("插件{}触发事件{}时发生错误".format(plugin['name'], event_name))
                logging.error("插件{}响应事件{}时发生错误".format(plugin['name'], event_name))
                logging.error(traceback.format_exc())

            # print("done:{}".format(plugin['name']))
@@ -88,6 +88,8 @@ NormalMessageResponded = "normal_message_responded"
        session: pkg.openai.session.Session  the session object
        prefix: str  prefix of the reply text message
        response_text: str  the response text
        finish_reason: str  why the response finished
        funcs_called: list[str]  the functions called during this response

    returns (optional):
        prefix: str  the modified prefix of the reply text message
@@ -132,6 +134,20 @@ KeySwitched = "key_switched"
        key_list: list[str]  the list of api-keys
"""

PromptPreProcessing = "prompt_pre_processing"
"""Fired when the prompt is pre-processed before each round's API call; this event does not support preventing the default behavior
    kwargs:
        session_name: str  session name (<launcher_type>_<launcher_id>)
        default_prompt: list  the scenario preset used by this session
        prompt: list  this session's existing prompt content
        text_message: str  the text of the user's message

    returns (optional):
        default_prompt: list  the modified scenario preset
        prompt: list  the modified prompt content
        text_message: str  the modified message text
"""

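As an illustration, a plugin could hook this event roughly as follows — a minimal sketch, assuming the usual `register`/`on` decorators from this module; the plugin name and trimming policy are hypothetical:

```python
from pkg.plugin.models import Plugin, register, on, PromptPreProcessing
from pkg.plugin.host import EventContext

@register(name="PromptTrimmer", description="hypothetical example plugin", version="0.1", author="example")
class PromptTrimmer(Plugin):

    @on(PromptPreProcessing)
    def pre_process(self, event: EventContext, **kwargs):
        # keep only the last 6 prompt entries for this session (hypothetical policy)
        event.add_return("prompt", kwargs["prompt"][-6:])
```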
def on(*args, **kwargs):
    """Register an event listener
@@ -150,6 +166,32 @@ def func(*args, **kwargs):
    __current_registering_plugin__ = ""


def require_ver(ge: str, le: str="v999.9.9") -> bool:
    """Plugin version-requirement decorator

    Args:
        ge (str): minimum required version
        le (str, optional): maximum required version

    Returns:
        bool: whether the requirement is met; False if the version number cannot be determined, True if satisfied, raises if not satisfied
    """
    qchatgpt_version = ""

    from pkg.utils.updater import get_current_tag, compare_version_str

    try:
        qchatgpt_version = get_current_tag()  # get the version number from the updater module
    except:
        return False

    if compare_version_str(qchatgpt_version, ge) < 0 or \
        (compare_version_str(qchatgpt_version, le) > 0):
        raise Exception("QChatGPT 版本不满足要求,某些功能(可能是由插件提供的)无法正常使用。(要求版本:{}-{},但当前版本:{})".format(ge, le, qchatgpt_version))

    return True

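A plugin module might call it at import time, for example (the version floor here is hypothetical):

```python
from pkg.plugin.models import require_ver

# raise early if the running QChatGPT is older than v2.6.0 (hypothetical floor)
require_ver("v2.6.0")
```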
class Plugin:
    """Plugin base class"""

@@ -327,6 +327,10 @@ def apply_privileges():
|
||||
for path, priv in data.items():
|
||||
if path == 'comment':
|
||||
continue
|
||||
|
||||
if path not in __command_list__:
|
||||
continue
|
||||
|
||||
if __command_list__[path]['privilege'] != priv:
|
||||
logging.debug('应用权限: {} -> {}(default: {})'.format(path, priv, __command_list__[path]['privilege']))
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
from ..aamgr import AbstractCommandNode, Context
import logging

import json


@AbstractCommandNode.register(
    parent=None,
@@ -19,10 +21,13 @@ class FuncCommand(AbstractCommandNode):

        reply_str = "当前已加载的内容函数:\n\n"

        logging.debug("host.__callable_functions__: {}".format(json.dumps(host.__callable_functions__, indent=4)))

        index = 1
        for func in host.__callable_functions__:
            reply_str += "{}. {}{}:\n{}\n\n".format(index, ("(已禁用) " if not func['enabled'] else ""), func['name'], func['description'])

            index += 1

        reply = [reply_str]

        return True, reply
@@ -12,7 +12,7 @@ import pkg.utils.updater as updater
    description="插件管理",
    usage="!plugin\n!plugin get <插件仓库地址>\n!plugin update\n!plugin del <插件名>\n!plugin on <插件名>\n!plugin off <插件名>",
    aliases=[],
    privilege=2
    privilege=1
)
class PluginCommand(AbstractCommandNode):
    @classmethod
@@ -188,6 +188,11 @@ class PluginOnOffCommand(AbstractCommandNode):
        plugin_name = ctx.crt_params[0]
        if plugin_name in plugin_list:
            plugin_list[plugin_name]['enabled'] = new_status

            for func in plugin_host.__callable_functions__:
                if func['name'].startswith(plugin_name+"-"):
                    func['enabled'] = new_status

            plugin_switch.dump_switch()
            reply = ["[bot]已{}插件: {}".format("启用" if new_status else "禁用", plugin_name)]
        else:
@@ -118,7 +118,7 @@ class QQBotManager:
        # so the bot object is only created on first initialization and reused after reloads
        # as a consequence, the bot's configuration does not support hot reloading
        if first_time_init:
            logging.info("Use adapter:" + config.msg_source_adapter)
            logging.debug("Use adapter:" + config.msg_source_adapter)
            if config.msg_source_adapter == 'yirimirai':
                from pkg.qqbot.sources.yirimirai import YiriMiraiAdapter
@@ -264,8 +264,24 @@ class QQBotManager:
        else:
            self.reply_filter = pkg.qqbot.filter.ReplyFilter([])

    def send(self, event, msg, check_quote=True):
    def send(self, event, msg, check_quote=True, check_at_sender=True):
        config = pkg.utils.context.get_config()

        if check_at_sender and config.at_sender:
            msg.insert(
                0,
                Plain(" \n")
            )

            # when the reply body contains a line break, the quote may carry its own at;
            # in that case no separate at is added, only the line break
            if "\n" not in str(msg[1]) or config.msg_source_adapter == 'nakuru':
                msg.insert(
                    0,
                    At(
                        event.sender.id
                    )
                )

        self.adapter.reply_message(
            event,
            msg,
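To make the insertion order explicit: both inserts happen at index 0, so the At component ends up ahead of the Plain line break that was inserted first. A minimal sketch with illustrative values (Plain and At are the YiriMirai message components this module uses above):

```python
msg = [Plain("reply text")]   # msg as passed in
msg.insert(0, Plain(" \n"))   # at_sender path: [Plain(" \n"), Plain("reply text")]
msg.insert(0, At(12345678))   # final layout:   [At(12345678), Plain(" \n"), Plain("reply text")]
```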
@@ -313,7 +329,7 @@ class QQBotManager:
            reply = [tips_custom.reply_message]

        if reply:
            return self.send(event, reply, check_quote=False)
            return self.send(event, reply, check_quote=False, check_at_sender=False)

    # group message handling
    def on_group_message(self, event: GroupMessage):
@@ -40,7 +40,7 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
    try:
        prefix = "[GPT]" if config.show_prefix else ""

        text = session.append(text_message)
        text, finish_reason, funcs = session.query(text_message)

        # fire the plugin event
        args = {
@@ -49,7 +49,9 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
            "sender_id": sender_id,
            "session": session,
            "prefix": prefix,
            "response_text": text
            "response_text": text,
            "finish_reason": finish_reason,
            "funcs_called": funcs,
        }

        event = pkg.plugin.host.emit(plugin_models.NormalMessageResponded, **args)
@@ -62,10 +64,11 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,

        if not event.is_prevented_default():
            reply = [prefix + text]

    except openai.error.APIConnectionError as e:
        err_msg = str(e)
        if err_msg.__contains__('Error communicating with OpenAI'):
            reply = handle_exception("{}会话调用API失败:{}\n请尝试关闭网络代理来解决此问题。".format(session_name, e),
            reply = handle_exception("{}会话调用API失败:{}\n您的网络无法访问OpenAI接口或网络代理不正常".format(session_name, e),
                                     "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
        else:
            reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
@@ -1,6 +1,7 @@
from ..adapter import MessageSourceAdapter
import mirai
import mirai.models.bus
from mirai.bot import MiraiRunner

import asyncio
import typing
@@ -110,7 +111,12 @@ class YiriMiraiAdapter(MessageSourceAdapter):
        bus.unsubscribe(event_type, callback)

    def run_sync(self):
        self.bot.run()
        """Run YiriMirai"""

        # create a fresh event loop
        loop = asyncio.new_event_loop()

        loop.run_until_complete(MiraiRunner(self.bot)._run())

    def kill(self) -> bool:
        return False
File diff suppressed because one or more lines are too long
@@ -8,7 +8,8 @@ def install(package):
    log.reset_logging()


def install_upgrade(package):
    pipmain(['install', '--upgrade', package])
    pipmain(['install', '--upgrade', package, "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
             "--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
    log.reset_logging()
@@ -18,7 +19,8 @@ def run_pip(params: list):


def install_requirements(file):
    pipmain(['install', '-r', file, "--upgrade"])
    pipmain(['install', '-r', file, "--upgrade", "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
             "--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
    log.reset_logging()
@@ -78,6 +78,34 @@ def get_current_tag() -> str:
    return current_tag


def compare_version_str(v0: str, v1: str) -> int:
    """Compare two version strings"""

    # strip a leading "v" from the version strings
    if v0.startswith("v"):
        v0 = v0[1:]
    if v1.startswith("v"):
        v1 = v1[1:]

    v0: list = v0.split(".")
    v1: list = v1.split(".")

    # if the versions have a different number of segments, pad the shorter one with zeros
    if len(v0) < len(v1):
        v0.extend(["0"]*(len(v1)-len(v0)))
    elif len(v0) > len(v1):
        v1.extend(["0"]*(len(v0)-len(v1)))

    # compare from the most significant segment down
    for i in range(len(v0)):
        if int(v0[i]) > int(v1[i]):
            return 1
        elif int(v0[i]) < int(v1[i]):
            return -1

    return 0


def update_all(cli: bool = False) -> bool:
    """Check for updates and download the source"""
    current_tag = get_current_tag()
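The zero-padding step means trailing segments default to 0, so the comparison is purely numeric. A few checks derived from the function above:

```python
assert compare_version_str("v3.0", "v3.0.0") == 0     # shorter side padded with "0"
assert compare_version_str("v2.10.0", "v2.9.9") == 1  # numeric, not lexicographic
assert compare_version_str("2.5.1", "v2.6.0") == -1   # the leading "v" is optional
```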
@@ -1,11 +1,13 @@
requests~=2.31.0
openai~=0.27.8
dulwich~=0.21.5
requests
openai~=0.28.0
dulwich~=0.21.6
colorlog~=6.6.0
yiri-mirai~=0.2.7
yiri-mirai
websockets
urllib3~=1.26.10
urllib3
func_timeout~=4.3.5
Pillow
nakuru-project-idk
CallingGPT
CallingGPT
tiktoken
PyYaml
@@ -1,14 +1,8 @@
[
    {
        "id": 0,
        "time": "2023-04-24 16:05:20",
        "timestamp": 1682323520,
        "content": "现已支持使用go-cqhttp替换mirai作为QQ登录框架, 请更新并查看 https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE"
    },
    {
        "id": 1,
        "time": "2023-05-21 17:33:18",
        "timestamp": 1684661598,
        "content": "NewBing不再需要鉴权,若您正在使用revLibs逆向库插件,请立即使用!plugin update revLibs命令更新插件到最新版。"
        "id": 2,
        "time": "2023-08-01 10:49:26",
        "timestamp": 1690858166,
        "content": "现已支持GPT函数调用功能,欢迎了解:https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0"
    }
]
@@ -1,4 +1,6 @@

> **Warning**
> This document is outdated; see the [QChatGPT containerized deployment guide](docker_deployment.md) instead

## Steps
res/docs/docker_deployment.md (new file, 64 lines)
@@ -0,0 +1,64 @@
# QChatGPT Containerized Deployment Guide

> **Warning**
> Please make sure you **really** need a Docker deployment; you **must** be able to:
> - use `Docker` and `Docker Compose`
> - configure networking between containers
> - work with container file mounts
> - debug containers
> - work hands-on and research problems on your own
>
> If you do not fully meet the above, please do not deploy with Docker; for misconfiguration caused by operator error we will not answer your questions and accept no responsibility.
> Deploying with Docker on any system other than Linux is **strongly discouraged**.

## Overview

The QChatGPT main program needs to connect to a `QQ login framework` to communicate with QQ. You can choose [Mirai](https://github.com/mamoe/mirai) (which additionally requires mirai-api-http; see the manual deployment part of this repository's README) or [go-cqhttp](https://github.com/Mrs4s/go-cqhttp). We only publish an image for the QChatGPT main program; you have to set up the QQ login framework yourself (follow the tutorial in the [README.md](https://github.com/RockChinQ/QChatGPT#-%E9%85%8D%E7%BD%AEqq%E7%99%BB%E5%BD%95%E6%A1%86%E6%9E%B6), or find an image for it) and point QChatGPT's configuration file at its address.

> **Note**
> Make sure Docker and Docker Compose are installed first

## Preparing the files

> QChatGPT cannot yet generate its files automatically without the template files, so you need to create the files to be mounted by hand, as described below.
> Unless stated otherwise, the template files are in this repository.

> If you would rather not create them one by one, you can also clone this repository and run `python main.py`; the required files are then generated from the templates automatically.

Now create the following files or directories in an empty directory:

### 📄`config.py`

Copy the full contents of `config-template.py` from the repository root into a new `config.py` and adjust it following the comments inside.

### 📄`banlist.py`

Copy the full contents of `res/templates/banlist-template.py` into a new `banlist.py`; this is the blacklist configuration, adjust as needed.

### 📄`cmdpriv.json`

Copy the full contents of `res/templates/cmdpriv-template.json` into a new `cmdpriv.json`; this configures the privilege level of each command, adjust as needed.

### 📄`sensitive.json`

Copy the full contents of `res/templates/sensitive-template.json` into a new `sensitive.json`; this is the sensitive-word configuration, adjust as needed.

### 📄`tips.py`

Copy the full contents of `tips-custom-template.py` into a new `tips.py`; this configures some of the bot's prompts, adjust as needed.
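If you prefer to script the copies rather than create each file by hand, here is a minimal sketch (it assumes a local clone of this repository next to an empty `deploy/` directory; adjust the paths to your layout):

```python
import shutil
from pathlib import Path

repo = Path("QChatGPT")   # local clone of this repository
deploy = Path("deploy")   # the empty deployment directory
deploy.mkdir(exist_ok=True)

# (template in the repo, mounted file to create)
for src, dst in [
    ("config-template.py", "config.py"),
    ("res/templates/banlist-template.py", "banlist.py"),
    ("res/templates/cmdpriv-template.json", "cmdpriv.json"),
    ("res/templates/sensitive-template.json", "sensitive.json"),
    ("tips-custom-template.py", "tips.py"),
]:
    shutil.copy(repo / src, deploy / dst)  # then edit each copy as described above
```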
## Running

A `docker-compose.yaml` is provided; you need to adapt it to your network setup so that the QChatGPT program inside the container can reach Mirai or go-cqhttp.

Copy `docker-compose.yaml` into this directory, configure it for your network environment, and run:

```bash
docker compose up
```

If there are no errors, the setup is complete; you can stop it with Ctrl+C and then run `docker compose up -d` to keep it running in the background.

## Notes

- Installed plugins are kept in `plugins` (mapped to the local `plugins` directory). Installing a plugin may automatically install extra dependencies; if the container is then `recreated`, the installed plugins are loaded but those extra dependencies are missing, which causes import problems. You can delete the plugin directory, restart, and install the plugins again so the program can install their dependencies automatically.
BIN res/logo.png (binary file not shown; before: 203 KiB, after: 35 KiB)
@@ -2,7 +2,7 @@
    "comment": "以下为命令权限,请设置到cmdpriv.json中。关于此功能的说明,请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%91%BD%E4%BB%A4%E6%9D%83%E9%99%90%E6%8E%A7%E5%88%B6",
    "draw": 1,
    "func": 1,
    "plugin": 2,
    "plugin": 1,
    "plugin.get": 2,
    "plugin.update": 2,
    "plugin.del": 2,
BIN res/webwlkr-demo.gif (new binary file, 879 KiB; not shown)
@@ -4,6 +4,9 @@ QChatGPT Plugin Usage Wiki

All `.py` programs under the `plugins` directory are loaded; every module except `__init__.py` supports hot reloading

> Plugins come in two kinds, `behavior plugins` and `content plugins`: behavior plugins are driven by events while the main program runs, content plugins are driven by the content GPT generates; see the content plugin page
> List of existing plugins: [QChatGPT plugins](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6)

## Installation

### Repository clone (recommended)
res/wiki/6-插件使用-内容函数.md (new file, 31 lines)
@@ -0,0 +1,31 @@
> Put plainly, these are the same kind of thing as the official ChatGPT plugins

Content functions are built on [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); they are functions embedded in the conversation that GPT calls automatically.

For example, if we give GPT a function `access_the_web` together with a detailed description of it and of its parameters, then whenever a conversation with GPT touches on something like the following:

```
Q: Could you search GitHub for QQ bot projects?
Q: Please find me some good cloud hosting provider sites?
Q: Read and summarize this article: https://zhuanlan.zhihu.com/p/607570830
Q: Look up today's weather in Qingyuan
```

GPT will reply with a function-call request for `access_the_web`; QChatGPT automatically executes the call and feeds the result back to GPT so it can generate a new reply.
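Under the hood this uses OpenAI's function-calling request format: the function is described to the model as a JSON schema alongside the chat messages. A minimal sketch of how a hypothetical `access_the_web` could be declared with the `openai` library version pinned in this diff (the parameter set is illustrative, not the actual Webwlkr implementation):

```python
import openai

functions = [{
    "name": "access_the_web",
    "description": "Fetch a web page and return its readable text content.",
    "parameters": {
        "type": "object",
        "properties": {
            "url": {"type": "string", "description": "the page to fetch"}
        },
        "required": ["url"]
    }
}]

resp = openai.ChatCompletion.create(
    model="gpt-3.5-turbo-16k",
    messages=[{"role": "user", "content": "Read and summarize https://zhuanlan.zhihu.com/p/607570830"}],
    functions=functions,
)
# When the model decides to call the function, the choice carries a function_call
# with the function name and JSON-encoded arguments; the caller runs the function
# and appends its result to the messages before asking the model again.
```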
Of course, function calling is not limited to web access; it can also support image processing, scientific computing, trip planning, and other features that need to call functions. In principle, content functions let us build the same functionality as `ChatGPT Plugins`.

- You need version `v2.5.0` or later to load plugins that contain content functions
- You also need to set `model` in `completion_api_params` in `config.py` to a model that supports function calling; `gpt-3.5-turbo-16k` is recommended
- Using this feature can consume your account balance in hard-to-predict ways, so keep an eye on it
- The [reverse-engineered library plugin](https://github.com/RockChinQ/revLibs) now supports function calling too: you can use GPT-3.5 function calling entirely for free, and if you configure and enable content functions in the main program, the reverse ChatGPT uses them automatically

### What kinds of plugins does QChatGPT have? What is the difference?

QChatGPT has two extension mechanisms, `behavior plugins` and `content functions`. A behavior plugin is a complete plugin structure driven by events during runtime; a content function is contained inside a complete plugin body and driven by the GPT interface.

> Still unclear? Try writing a plugin yourself following the steps on the plugin development page

## Some good content-function plugins for QChatGPT

- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - gives the bot internet access!!
@@ -117,6 +117,8 @@ class HelloPlugin(Plugin):

`Content functions` are built on [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); they are functions embedded in the conversation that GPT calls automatically.

> Your plugin does not have to contain content functions; read the content function page first to understand the feature

<details>
<summary>Example: web-access plugin</summary>

@@ -289,6 +291,21 @@ class HelloPlugin(Plugin):

- This is only an example; for a plugin with more capable web access, see [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin)
## 🔒Version requirements

If your plugin requires a particular main-program version, you can assert it with the following function; if the version does not match, the function raises and aborts the flow it is called from:

```python
require_ver("v2.5.1")  # require at least v2.5.1
```

```python
require_ver("v2.5.1", "v2.6.0")  # require at least v2.5.1 and at most v2.6.0
```

- This function was added in main-program `v2.5.1`
- It is declared in the `pkg.plugin.models` module; the plugin example code imports everything from that module at the top, so it can be used directly

## 📄API reference

### Notes
@@ -390,10 +407,11 @@ NormalMessageResponded = "normal_message_responded"
        session: pkg.openai.session.Session  the session object
        prefix: str  prefix prepended to the text reply
        response_text: str  the response text
        finish_reason: str  why the response ended

    returns (optional):
        prefix: str  the modified reply prefix
        reply: list  replacement list of reply message components, elements are message components supported by YiriMirai
        reply: list  replacement list of reply message components
"""

SessionFirstMessageReceived = "session_first_message_received"
@@ -433,6 +451,20 @@ KeySwitched = "key_switched"
        key_name: str  name of the api-key switched to
        key_list: list[str]  list of api-keys
"""

PromptPreProcessing = "prompt_pre_processing"  # added in v2.5.1
"""Fired before each round's API call, while the prompt is pre-processed; this event does not support preventing the default behavior

    kwargs:
        session_name: str  session name (<launcher_type>_<launcher_id>)
        default_prompt: list  the scenario preset used by this session
        prompt: list  the session's current prompt content
        text_message: str  the text message sent by the user

    returns (optional):
        default_prompt: list  the modified scenario preset
        prompt: list  the modified prompt content
        text_message: str  the modified message text
"""
```
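For example, a plugin could use this event to append a system reminder to every round's prompt. A minimal sketch assuming the `@register`/`@on` plugin API documented in this wiki and `EventContext.add_return` as the write counterpart of `get_return` (prompt entries are OpenAI-style role/content dicts; the reminder text is illustrative):

```python
from pkg.plugin.models import Plugin, register, on, PromptPreProcessing


@register(name="Reminder", description="demo", version="0.1", author="example")
class ReminderPlugin(Plugin):

    @on(PromptPreProcessing)
    def inject(self, event, **kwargs):
        # kwargs: session_name, default_prompt, prompt, text_message
        prompt = kwargs["prompt"]
        prompt.append({"role": "system", "content": "Answer concisely."})
        event.add_return("prompt", prompt)  # hand the modified prompt back
```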
### host: PluginHost in detail
tests/bs_test/bs_test.py (new file, 42 lines)
@@ -0,0 +1,42 @@
import requests
from bs4 import BeautifulSoup
import os
import random
import sys


user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Version/14.1.2 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Version/14.1 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0'
]

# fetch the page given as the first CLI argument, using a random user agent
r = requests.get(
    sys.argv[1],
    headers={
        "User-Agent": random.choice(user_agents)
    }
)
soup = BeautifulSoup(r.text, 'html.parser')
# print(soup.get_text())

raw = soup.get_text()

import re

# strip each line
# raw = '\n'.join([line.strip() for line in raw.split('\n')])

# # drop all lines that are empty or whitespace-only
# raw = re.sub(r'\n\s*\n', '\n', raw)


print(raw)
tests/ssh_client_test/ssh_client.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import os
import sys
import paramiko
import time
import select


class sshClient:
    # create an ssh client connected to the server, ready to send commands
    def __init__(self, host, port, user, password):
        self.trans = paramiko.Transport((host, port))
        self.trans.start_client()
        self.trans.auth_password(username=user, password=password)
        self.channel = self.trans.open_session()
        self.channel.get_pty()
        self.channel.invoke_shell()

    # send a command to the server
    def sendCmd(self, cmd):
        self.channel.sendall(cmd)

    # when receiving, the server can be slow to respond, so wait with a timeout
    def recvResponse(self, timeout):
        data = b''
        while True:
            try:
                # use select to keep reading until no more data arrives, then return on timeout
                readable, w, e = select.select([self.channel], [], [], timeout)
                if self.channel in readable:
                    data += self.channel.recv(1024)
                else:
                    sys.stdout.write(data.decode())
                    sys.stdout.flush()
                    return data.decode()
            except TimeoutError:
                sys.stdout.write(data.decode())
                sys.stdout.flush()
                return data.decode()

    # close the client
    def close(self):
        self.channel.close()
        self.trans.close()


host = 'host'
port = 22  # your port
user = 'root'
pwd = 'pass'

ssh = sshClient(host, port, user, pwd)
response = ssh.recvResponse(1)
response = ssh.sendCmd("ls\n")
ssh.sendCmd("cd /home\n")
response = ssh.recvResponse(1)
ssh.sendCmd("ls\n")
response = ssh.recvResponse(1)

ssh.close()
tests/token_test/tiktoken_test.py (new file, 124 lines)
@@ -0,0 +1,124 @@
import tiktoken
import openai
import json
import os


openai.api_key = os.getenv("OPENAI_API_KEY")


def encode(text: str, model: str):
    enc = tiktoken.get_encoding("cl100k_base")
    assert enc.decode(enc.encode("hello world")) == "hello world"

    # To get the tokeniser corresponding to a specific model in the OpenAI API:
    enc = tiktoken.encoding_for_model(model)

    return enc.encode(text)


# def ask(prompt: str, model: str = "gpt-3.5-turbo"):
#     # To get the tokeniser corresponding to a specific model in the OpenAI API:
#     enc = tiktoken.encoding_for_model(model)

#     resp = openai.ChatCompletion.create(
#         model=model,
#         messages=[
#             {
#                 "role": "user",
#                 "content": prompt
#             }
#         ]
#     )

#     return enc.encode(prompt), enc.encode(resp['choices'][0]['message']['content']), resp


def ask(
    messages: list,
    model: str = "gpt-3.5-turbo"
):
    enc = tiktoken.encoding_for_model(model)

    resp = openai.ChatCompletion.create(
        model=model,
        messages=messages
    )

    txt = ""

    for r in messages:
        txt += r['role'] + r['content'] + "\n"

    txt += "assistant: "

    return enc.encode(txt), enc.encode(resp['choices'][0]['message']['content']), resp


def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model in {
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4-0314",
        "gpt-4-32k-0314",
        "gpt-4-0613",
        "gpt-4-32k-0613",
    }:
        tokens_per_message = 3
        tokens_per_name = 1
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif "gpt-3.5-turbo" in model:
        print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
    elif "gpt-4" in model:
        print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
        return num_tokens_from_messages(messages, model="gpt-4-0613")
    else:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


messages = [
    {
        "role": "user",
        "content": "你叫什么名字?"
    }, {
        "role": "assistant",
        "content": "我是AI助手,没有具体的名字。你可以叫我GPT-3。有什么可以帮到你的吗?"
    }, {
        "role": "user",
        "content": "你是由谁开发的?"
    }, {
        "role": "assistant",
        "content": "我是由OpenAI开发的,一家人工智能研究实验室。OpenAI的使命是促进人工智能的发展,使其为全人类带来积极影响。我是由OpenAI团队使用GPT-3模型训练而成的。"
    }, {
        "role": "user",
        "content": "很高兴见到你。"
    }
]


pro, rep, resp = ask(messages)

print(len(pro), len(rep))
print(resp)
print(resp['choices'][0]['message']['content'])

print(num_tokens_from_messages(messages, model="gpt-3.5-turbo"))