Compare commits

357 Commits (SHA1 only):

eebd6a6ba3  0407f3e4ac  5abca84437  d2776cc1e6  9fe0ee2b77  b68daac323  665de5dc43  e3b280758c
374ae25d9c  c86529ac99  6309f1fb78  c246fb6d8e  ec6c041bcf  2da5a9f3c7  4e0df52d7c  71b8bf13e4
a8b1e6ce91  1419d7611d  89c83ebf20  76d7db88ea  67a208bc90  acbd55ded2  11a240a6d1  97c85abbe7
06a0cd2a3d  572b215df8  2c542bf412  1576ba7a01  45e4096a12  8a1d4fe287  98f880ebc2  2b852853f3
c7a9988033  c475eebe1c  0fe7355ae0  57de96e3a2  70571cef50  0b6deb3340  dcda85a825  9d3bff018b
051376e0d2  a113785211  3f4ed4dc3c  ac80764fae  e43afd4891  f1aea1d495  0e2a5db104  3a4c9771fa
f4f8ef9523  b9ace69a72  aef0b2a26e  f7712d71ec  e94b44e3b8  524e863c78  bbc80ac901  f969ddd6ca
1cc9781333  a609801bae  d8b606d372  572a440e65  6e4eeae9b7  1a73669df8  91ebaf1122  46703eb906
b9dd9d5193  884481a4ec  9040b37a63  99d47b2fa2  6575359a94  a2fc726372  3bfce8ab51  ff9a9830f2
e2b59e8efe  04dad9757f  75ea1080ad  e25b064319  5d0dbc40ce  beae8de5eb  c4ff30c722  6f4ecb101b
9f9b0ef846  de6957062c  0a9b43e6fa  5b0edd9937  8a400d202a  5a1e9f7fb2  e03af75cf8  0da4919255
914e566d1f  6ec2b653fe  ba0a088b9c  478e83bcd9  386124a3b9  ff5e7c16d1  7ff7a66012  c99dfb8a86
10f9d4c6b3  d347813411  7a93898b3f  c057ea900f  512266e74f  e36aee11c7  97421299f5  bc41e5aa80
2fa30e7def  1c6a7d9ba5  47435c42a5  39a1b421e6  b5edf2295b  fb650a3d7a  521541f311  7020abadbf
d95fb3b5be  3e524dc790  a64940bff8  c739290f0b  af292fe050  634c7fb302  33efb94013  549e4dc02e
3d40909c02  1aef81e38f  1b0ae8da58  7979a8e97f  080e53d9a9  89bb364b16  3586cd941f  054d0839ac
dd75f98d85  ec23bb5268  bc99db4fc1  c8275fcfbf  a345043c30  382d37d479  32c144a75d  7ca2aa5e39
86cc4a23ac  08d1e138bd  a9fe86542f  4e29776fcd  ee3eae8f4d  a84575858a  ac472291c7  f304873c6a
18caf8face  d21115aaa8  a05ecd2e7f  32a725126d  0528690622  819339142e  1d0573e7ff  00623bc431
c872264456  1336d3cb9a  d1459578cd  8a67fcf40f  7930370aa9  0b854bdcf1  cba6aab48d  12a9ca7a77
a6cbd226e1  3577e62b41  f86e69fcd1  292e00b078  2a91497bcf  b0cca0a4c2  a2bda85a9c  20677cff86
c8af5d8445  2dbe984539  6b8fa664f1  2b9612e933  749d0219fb  a11a152bd7  fc803a3742  13a1e15f24
3f41b94da5  0fb5bfda20  dc1fd73ebb  161b694f71  45d1c89e45  e26664aa51  e29691efbd  6d45327882
fbd41eef49  0a30c88322  4f5af0e8c8  df3f0fd159  f2493c79dd  a86a035b6b  7995793bfd  a56b340646
7473cdfe16  24273ac158  fe6275000e  5fbf369f82  4400475ffa  796eb7c95d  89a01378e7  f4735e5e30
f1bb3045aa  96e474a555  833d29b101  dce6734ba2  0481167dc6  a002f93f7b  3c894fe70e  8c69b8a1d9
a9dae05303  ae6994e241  caa72fa40c  46cc9220c3  ddb56d7a8e  a0267416d7  56e1ef3602  b4fc1057d1
06037df607  dce134d08d  cca471d068  ddb211b74a  cef70751ff  2d2219fc6e  514a6b4192  7a552b3434
ecebd1b0e0  8dc34d2a88  d52644ceec  3052510591  777a5617db  e17c1087e9  633695175a  9e78bf3d21
43aa68a55d  b8308f8c57  466bfbddeb  b6da07b225  2f2159239a  67d1ca8a65  497a393e83  782c0e22ea
2932fc6dfd  0a9eab2113  50a673a8ec  9e25d0f9e4  23cd7be711  025b9e33f1  bab2f64913  b00e09aa9c
0b109fdc7a  018fea2ddb  f8a3cc4352  6ab853acc1  e825dea02f  cf8740d16e  9c4809e26f  0a232fd9ef
23016a0791  cdcc67ff23  92274bfc34  2fed6f61ba  59b2cd26d2  f7b87e99d2  70bc985145  070dbe9108
a63fa6d955  c7703809b0  37eb74338f  77d5585b7c  6cab3ef029  820a7b78fc  c51dffef3a  983bc3da3c
09be956a58  5eded50c53  6d8eebd314  19a0572b5f  6272e98474  45042fe7d4  d85e840126  804889f1de
919c996434  00823b3d62  af54efd24a  b1c9b121f6  7b5649d153  52bf716d84  c149dd7b66  65d5a1ed63
5516754bbb  08082f2ee3  8489266080  51c7e0b235  628b6b0bb4  7e024d860d  c2f6273f70  96e401ec7b
ae8ac65447  2d4f59f36e  0e85467e02  eb41cf5481  b970a42d07  8c9d123e1c  ab2a95e347  2184c558a4
83cb8588fd  007e82c533  499f8580a7  a7dc3c5dab  d01d3a3c53  580e062dbf  c8cee8410c  6bf331c2e3
4c4930737c  9de01e9525  c6a16f5974  253ef44d17  15a1f00b73  b5fa2ea8b8  449e024771  1bee7a146b
270a632789  418bb05b4c  052b834151  58ee204a75  0a02ee8c04  950ef4a181  7b7cdd8adb  471768e760
c7517d31a4  7d10d0398e  a2bc25c08b  3cb49fe2d8  5b96ac122f  612033f478  48ee940d8e  e74df0b37d
640afdc49c  6b39df5b9b  e7e698765e  43fea13dab  bc899e5bd0  160086feb9  016391c976  91746448a3
5cb0543237  fac29a24a8  4d3a2a21d0  6d4f88041c  18587d3690  423090dccd  78e88baab3  6a276767b3
2cb26c7c70  ff66c88060  611e82b8f9  59bdee7137  8c67d3c58f
@@ -1,34 +0,0 @@
-// For format details, see https://aka.ms/devcontainer.json. For config options, see the
-// README at: https://github.com/devcontainers/templates/tree/main/src/python
-{
-    "name": "QChatGPT 3.10",
-    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
-    "image": "mcr.microsoft.com/devcontainers/python:0-3.10",
-
-    // Features to add to the dev container. More info: https://containers.dev/features.
-    // "features": {},
-
-    // Use 'forwardPorts' to make a list of ports inside the container available locally.
-    // "forwardPorts": [],
-
-    // Use 'postCreateCommand' to run commands after the container is created.
-    // "postCreateCommand": "pip3 install --user -r requirements.txt",
-
-    // Configure tool-specific properties.
-    // "customizations": {},
-    "customizations": {
-        "codespaces": {
-            "repositories": {
-                "RockChinQ/QChatGPT": {
-                    "permissions": "write-all"
-                },
-                "RockChinQ/revLibs": {
-                    "permissions": "write-all"
-                }
-            }
-        }
-    }
-
-    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
-    // "remoteUser": "root"
-}
9  .github/ISSUE_TEMPLATE/bug-report.yml  (vendored)

@@ -14,6 +14,15 @@ body:
         - Docker部署
     validations:
       required: true
+  - type: dropdown
+    attributes:
+      label: 登录框架
+      description: "连接QQ使用的框架"
+      options:
+        - Mirai
+        - go-cqhttp
+    validations:
+      required: false
   - type: input
     attributes:
       label: 系统环境
2  .github/dependabot.yml  (vendored)

@@ -10,6 +10,6 @@ updates:
     schedule:
       interval: "weekly"
     allow:
-      - dependency-name: "yiri-mirai"
+      - dependency-name: "yiri-mirai-rc"
      - dependency-name: "dulwich"
      - dependency-name: "openai"
38  .github/workflows/build_docker_image.yml  (vendored, new file)

@@ -0,0 +1,38 @@
+name: Build Docker Image
+on:
+  #防止fork乱用action设置只能手动触发构建
+  workflow_dispatch:
+  ## 发布release的时候会自动构建
+  release:
+    types: [published]
+jobs:
+  publish-docker-image:
+    runs-on: ubuntu-latest
+    name: Build image
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: judge has env GITHUB_REF # 如果没有GITHUB_REF环境变量,则把github.ref变量赋值给GITHUB_REF
+        run: |
+          if [ -z "$GITHUB_REF" ]; then
+            export GITHUB_REF=${{ github.ref }}
+          fi
+      - name: Check GITHUB_REF env
+        run: echo $GITHUB_REF
+      - name: Get version
+        id: get_version
+        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
+        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
+      - name: Build # image name: rockchin/qchatgpt:<VERSION>
+        run: docker build --network=host -t rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }} -t rockchin/qchatgpt:latest .
+      - name: Login to Registry
+        run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Push image
+        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
+        run: docker push rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }}
+
+      - name: Push latest image
+        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
+        run: docker push rockchin/qchatgpt:latest
25  .github/workflows/sync-wiki.yml  (vendored)

@@ -1,7 +1,14 @@
 name: Update Wiki

 on:
+  pull_request:
+    branches:
+      - master
+    paths:
+      - 'res/wiki/**'
   push:
+    branches:
+      - master
     paths:
       - 'res/wiki/**'

@@ -20,14 +27,22 @@ jobs:
         with:
           repository: RockChinQ/QChatGPT.wiki
           path: wiki
+      - name: Delete old wiki content
+        run: |
+          rm -rf wiki/*
       - name: Copy res/wiki content to wiki
         run: |
          cp -r res/wiki/* wiki/
+      - name: Check for changes
+        run: |
+          cd wiki
+          if git diff --quiet; then
+            echo "No changes to commit."
+            exit 0
+          fi
      - name: Commit and Push Changes
        run: |
          cd wiki
-          if git diff --name-only; then
-            git add .
-            git commit -m "Update wiki"
-            git push
-          fi
+          git add .
+          git commit -m "Update wiki"
+          git push
@@ -21,12 +21,12 @@
       - name: Set up Python
         uses: actions/setup-python@v2
         with:
-          python-version: 3.x
+          python-version: 3.10.13

       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
-          python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow
+          python -m pip install --upgrade yiri-mirai-rc openai>=1.0.0 colorlog func_timeout dulwich Pillow CallingGPT tiktoken
+          python -m pip install -U openai>=1.0.0

       - name: Copy Scripts
         run: |
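The hunk above pins Python 3.10.13 and moves the workflow's dependencies to `yiri-mirai-rc` and `openai>=1.0.0`. For orientation on the "ChatCompletions 接口" / "Completions接口" distinction that the updated `config-template.py` later in this diff refers to, here is a minimal sketch of both calls under the `openai>=1.0.0` client; this is illustrative only, not code from this repository, and the API key is a placeholder.

```python
# Illustrative sketch only (not project code): the two OpenAI interfaces
# referenced in this diff, called through the openai>=1.0.0 client.
from openai import OpenAI

client = OpenAI(api_key="sk-placeholder")  # placeholder key, assumes direct OpenAI access

# ChatCompletions interface: used by chat models such as "gpt-3.5-turbo" and "gpt-4"
chat = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "你好"}],
    temperature=0.9,
)
print(chat.choices[0].message.content)

# Completions interface: used by instruct/legacy models such as "gpt-3.5-turbo-instruct"
completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="你好",
    temperature=0.9,
)
print(completion.choices[0].text)
```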
1  .github/workflows/update-override-all.yml  (vendored)

@@ -29,7 +29,6 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          # 在此处添加您的项目所需的其他依赖

       - name: Copy Scripts
         run: |
13  .gitignore  (vendored)

@@ -19,4 +19,15 @@ cookies.json
 res/announcement_saved
 res/announcement_saved.json
 cmdpriv.json
 tips.py
+.venv
+bin/
+.vscode
+test_*
+venv/
+hugchat.json
+qcapi
+claude.json
+bard.json
+/*yaml
+!/docker-compose.yaml
@@ -17,3 +17,10 @@
 - 解决本项目或衍生项目的issues中亟待解决的问题
 - 阅读并完善本项目文档
 - 在各个社交媒体撰写本项目教程等
+
+### 代码规范
+
+- 代码中的注解`务必`符合Google风格的规范
+- 模块顶部的引入代码请遵循`系统模块`、`第三方库模块`、`自定义模块`的顺序进行引入
+- `不要`直接引入模块的特定属性,而是引入这个模块,再通过`xxx.yyy`的形式使用属性
+- 任何作用域的字段`必须`先声明后使用,并在声明处注明类型提示
22  Dockerfile

@@ -1,17 +1,15 @@
-FROM python:3.9-slim
+FROM python:3.10.13-bullseye
 WORKDIR /QChatGPT

-RUN sed -i "s/deb.debian.org/mirrors.tencent.com/g" /etc/apt/sources.list \
-    && sed -i 's|security.debian.org/debian-security|mirrors.tencent.com/debian-security|g' /etc/apt/sources.list \
-    && apt-get clean \
-    && apt-get update \
-    && apt-get -y upgrade \
-    && apt-get install -y git \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
 COPY . /QChatGPT/

-RUN pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+RUN ls

-CMD [ "python", "main.py" ]
+RUN python -m pip install -r requirements.txt && \
+    python -m pip install -U websockets==10.0 && \
+    python -m pip install -U httpcore httpx openai
+
+# 生成配置文件
+RUN python main.py
+
+CMD [ "python", "main.py" ]
219  README.md

@@ -1,24 +1,59 @@
-# QChatGPT🤖
-
-[English](README_en.md) | 简体中文
-
-[](https://github.com/RockChinQ/QChatGPT/releases/latest)
-
-> 2023/3/18 现已支持GPT-4 API(内测),请查看`config-template.py`中的`completion_api_params`
-> 2023/3/15 逆向库已支持New Bing,使用方法查看[插件文档](https://github.com/RockChinQ/revLibs)
-
-- **客官,来都来了,不点个⭐吗?**
-- 到[项目Wiki](https://github.com/RockChinQ/QChatGPT/wiki)可了解项目详细信息
-- 官方交流、答疑群: 656285629
-- **进群提问前请您`确保`已经找遍文档和issue均无法解决**
-- 社区群(内有一键部署包、图形化界面等资源): 362515018
-- QQ频道机器人见[QQChannelChatGPT](https://github.com/Soulter/QQChannelChatGPT)
-- 欢迎各种形式的贡献,请查看[贡献指引](CONTRIBUTING.md)
+<p align="center">
+<img src="res/logo.png" alt="QChatGPT" width="120" />
+</p>
+
+<div align="center">
+
+# QChatGPT
+
+<!-- 高稳定性/持续迭代/架构清晰/支持插件/高可自定义的 ChatGPT QQ机器人框架 -->
+<!-- “当然!下面是一个使用Java编写的快速排序算法的示例代码” -->
+
+[](https://github.com/RockChinQ/QChatGPT/releases/latest)
+<a href="https://hub.docker.com/repository/docker/rockchin/qchatgpt">
+<img src="https://img.shields.io/docker/pulls/rockchin/qchatgpt?color=blue" alt="docker pull">
+</a>
+![Wakapi Count](https://wakapi.dev/api/badge/RockChinQ/interval:any/project:QChatGPT)
+<img src="https://img.shields.io/badge/python-3.9+-blue.svg" alt="python">
+<a href="https://github.com/RockChinQ/QChatGPT/wiki">
+<img alt="Static Badge" src="https://img.shields.io/badge/%E6%9F%A5%E7%9C%8B-%E9%A1%B9%E7%9B%AEWiki-blue">
+</a><br/>
+<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=66-aWvn8cbP4c1ut_1YYkvvGVeEtyTH8&authKey=pTaKBK5C%2B8dFzQ4XlENf6MHTCLaHnlKcCRx7c14EeVVlpX2nRSaS8lJm8YeM4mCU&noverify=0&group_code=195992197">
+<img alt="Static Badge" src="https://img.shields.io/badge/%E5%AE%98%E6%96%B9%E7%BE%A4-195992197-purple">
+</a>
+<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=nC80H57wmKPwRDLFeQrDDjVl81XuC21P&authKey=2wTUTfoQ5v%2BD4C5zfpuR%2BSPMDqdXgDXA%2FS2wHI1NxTfWIG%2B%2FqK08dgyjMMOzhXa9&noverify=0&group_code=738382634">
+<img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-738382634-purple">
+</a>
+<a href="https://qchatgpt.rockchin.top">
+<img alt="Static Badge" src="https://img.shields.io/badge/%E6%9F%A5%E7%9C%8B-%E7%A4%BE%E5%8C%BA%E7%BC%96%E5%86%99%E4%BD%BF%E7%94%A8%E6%89%8B%E5%86%8C-blue">
+</a>
+<a href="https://www.bilibili.com/video/BV14h4y1w7TC">
+<img alt="Static Badge" src="https://img.shields.io/badge/%E8%A7%86%E9%A2%91%E6%95%99%E7%A8%8B-208647">
+</a>
+<a href="https://www.bilibili.com/video/BV11h4y1y74H">
+<img alt="Static Badge" src="https://img.shields.io/badge/Linux%E9%83%A8%E7%BD%B2%E8%A7%86%E9%A2%91-208647">
+</a>

-## 🍺模型适配一览
-
 <details>
-<summary>点击此处展开</summary>
+<summary>回复效果演示(带有联网插件)</summary>
+<img alt="联网演示GIF" src="res/webwlkr-demo.gif" width="300px">
+</details>
+</div>
+
+> **NOTE**
+> 2023/9/13 现已支持通过[One API](https://github.com/songquanpeng/one-api)接入 Azure、Anthropic Claude、Google PaLM 2、智谱 ChatGLM、百度文心一言、讯飞星火认知、阿里通义千问以及 360 智脑等模型,欢迎测试并反馈。
+> 2023/8/29 [逆向库插件](https://github.com/RockChinQ/revLibs)已支持 gpt4free
+> 2023/8/14 [逆向库插件](https://github.com/RockChinQ/revLibs)已支持Claude和Bard
+> 2023/7/29 支持使用GPT的Function Calling功能实现类似ChatGPT Plugin的效果,请见[Wiki内容函数](https://github.com/RockChinQ/QChatGPT/wiki/6-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)
+
+<details>
+<summary>
+
+## 🍺模型一览和功能点
+
+</summary>

 ### 文字对话

@@ -28,6 +63,15 @@
 - ChatGPT网页版GPT-3.5模型, 由[插件](https://github.com/RockChinQ/revLibs)接入
 - ChatGPT网页版GPT-4模型, 目前需要ChatGPT Plus订阅, 由[插件](https://github.com/RockChinQ/revLibs)接入
 - New Bing逆向库, 由[插件](https://github.com/RockChinQ/revLibs)接入
+- HuggingChat, 由[插件](https://github.com/RockChinQ/revLibs)接入, 仅支持英文
+- Claude, 由[插件](https://github.com/RockChinQ/revLibs)接入
+- Google Bard, 由[插件](https://github.com/RockChinQ/revLibs)接入
+
+### 模型聚合平台
+
+- [One API](https://github.com/songquanpeng/one-api), Azure、Anthropic Claude、Google PaLM 2、智谱 ChatGLM、百度文心一言、讯飞星火认知、阿里通义千问以及 360 智脑等模型的官方接口转换成 OpenAI API 接入,QChatGPT 原生支持,您需要先配置 One API,之后在`config.py`中设置反向代理和`One API`的密钥后使用。
+- [gpt4free](https://github.com/xtekky/gpt4free), 破解以免费使用多个平台的各种文字模型, 由[插件](https://github.com/RockChinQ/revLibs)接入, 无需鉴权, 稳定性较差。
+- [Poe](https://poe.com), 破解免费使用Poe上多个平台的模型, 由[oliverkirk-sudo/ChatPoeBot](https://github.com/oliverkirk-sudo/ChatPoeBot)接入(由于 Poe 上可用的大部分模型现已通过[revLibs插件](https://github.com/RockChinQ/revLubs)或其他方式接入,此插件现已停止维护)。

 ### 故事续写

@@ -35,7 +79,7 @@

 ### 图片绘制

-- OpenAI DALL·E模型, 本项目原生支持, 使用方法查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
+- OpenAI DALL·E模型, 本项目原生支持, 使用方法查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
 - NovelAI API, 由[插件](https://github.com/dominoar/QCPNovelAi)接入

 ### 语音生成

@@ -44,14 +88,9 @@
 - Plachta/VITS-Umamusume-voice-synthesizer, 由[插件](https://github.com/oliverkirk-sudo/chat_voice)接入


-</details>
-
 安装[此插件](https://github.com/RockChinQ/Switcher),即可在使用中切换文字模型。

-## ✅功能
+### 功能点

-<details>
-<summary>点击此处展开概述</summary>
-
 <details>
 <summary>✅支持敏感词过滤,避免账号风险</summary>

@@ -78,30 +117,31 @@
 </details>

 <details>
-<summary>✅支持预设指令文字</summary>
+<summary>✅支持预设文字</summary>

 - 支持以自然语言预设文字,自定义机器人人格等信息
 - 详见`config.py`中的`default_prompt`部分
-- 支持设置多个预设情景,并通过!reset、!default等指令控制,详细请查看[wiki指令](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
+- 支持设置多个预设情景,并通过!reset、!default等命令控制,详细请查看[wiki命令](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
 </details>

 <details>
 <summary>✅支持对话、绘图等模型,可玩性更高</summary>

 - 现已支持OpenAI的对话`Completion API`和绘图`Image API`
-- 向机器人发送指令`!draw <prompt>`即可使用绘图模型
+- 向机器人发送命令`!draw <prompt>`即可使用绘图模型
 </details>
 <details>
-<summary>✅支持指令控制热重载、热更新</summary>
+<summary>✅支持命令控制热重载、热更新</summary>

-- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送指令`!reload`进行热重载,无需重启
-- 运行期间允许以管理员账号向机器人发送指令`!update`进行热更新,拉取远程最新代码并执行热重载
+- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送命令`!reload`进行热重载,无需重启
+- 运行期间允许以管理员账号向机器人发送命令`!update`进行热更新,拉取远程最新代码并执行热重载
 </details>
 <details>
 <summary>✅支持插件加载🧩</summary>

 - 自行实现插件加载器及相关支持
-- 详细查看[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
+- 支持GPT的Function Calling功能
+- 详细查看[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
 </details>
 <details>
 <summary>✅私聊、群聊黑名单机制</summary>

@@ -136,16 +176,37 @@
 - 允许用户自定义报错、帮助等提示信息
 - 请查看`tips.py`
 </details>

+### 🏞️截图
+
+<img alt="私聊GPT-3.5" src="res/screenshots/person_gpt3.5.png" width="400"/>
+<br/>
+<img alt="群聊GPT-3.5" src="res/screenshots/group_gpt3.5.png" width="400"/>
+<br/>
+<img alt="New Bing" src="res/screenshots/person_newbing.png" width="400"/>
+
+详情请查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8)
+
 </details>

-详情请查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
-
-## 🔩部署
-
-**部署过程中遇到任何问题,请先在[QChatGPT](https://github.com/RockChinQ/QChatGPT/issues)或[qcg-installer](https://github.com/RockChinQ/qcg-installer/issues)的issue里进行搜索**
+<details>
+
+<summary>
+
+## 🚀部署和使用
+
+</summary>
+
+> **NOTE**
+> - 部署过程中遇到任何问题,请先在[QChatGPT](https://github.com/RockChinQ/QChatGPT/issues)或[qcg-installer](https://github.com/RockChinQ/qcg-installer/issues)的issue里进行搜索
+> - QChatGPT需要Python版本>=3.9
+> - 官方群和社区群群号请见文档顶部

 ### - 注册OpenAI账号

+<details>
+<summary>点此查看步骤</summary>
+
 > 若您要直接使用非OpenAI的模型(如New Bing),可跳过此步骤,直接进行之后的部署,完成后按照相关插件的文档进行配置即可

 参考以下文章自行注册

@@ -156,15 +217,18 @@
 注册成功后请前往[个人中心查看](https://beta.openai.com/account/api-keys)api_key
 完成注册后,使用以下自动化或手动部署步骤

-### - 自动化部署
+</details>
+
+### - Docker或自动化部署

 <details>
 <summary>展开查看,以下方式二选一,Linux首选Docker,Windows首选安装器</summary>

 #### Docker方式

-请查看[此文档](res/docs/docker_deploy.md)
-由[@mikumifa](https://github.com/mikumifa)贡献
+> docker方式较为复杂,若您不**熟悉**docker的操作及相关知识,强烈建议您使用其他方式部署,我们**不会且难以**解决您主机上多个容器的连接问题。
+
+请查看[此文档](res/docs/docker_deployment.md)

 #### 安装器方式

@@ -180,12 +244,29 @@

 - 请使用Python 3.9.x以上版本

-#### 配置Mirai
+#### ① 配置QQ登录框架

-按照[此教程](https://yiri-mirai.wybxc.cc/tutorials/01/configuration)配置Mirai及YiriMirai
-启动mirai-console后,使用`login`命令登录QQ账号,保持mirai-console运行状态
+目前支持mirai和go-cqhttp,配置任意一个即可

-#### 配置主程序
+<details>
+<summary>mirai</summary>
+
+1. 按照[此教程](https://yiri-mirai.wybxc.cc/tutorials/01/configuration)配置Mirai及mirai-api-http
+2. 启动mirai-console后,使用`login`命令登录QQ账号,保持mirai-console运行状态
+3. 在下一步配置主程序时请在config.py中将`msg_source_adapter`设为`yirimirai`
+
+</details>
+
+<details>
+<summary>go-cqhttp</summary>
+
+1. 按照[此文档](https://github.com/RockChinQ/QChatGPT/wiki/9-go-cqhttp%E9%85%8D%E7%BD%AE)配置go-cqhttp
+2. 启动go-cqhttp,确保登录成功,保持运行
+3. 在下一步配置主程序时请在config.py中将`msg_source_adapter`设为`nakuru`
+
+</details>
+
+#### ② 配置主程序

 1. 克隆此项目

@@ -197,7 +278,7 @@ cd QChatGPT
 2. 安装依赖

 ```bash
-pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow
+pip3 install requests -r requirements.txt
 ```

 3. 运行一次主程序,生成配置文件

@@ -226,47 +307,57 @@ python3 main.py

 </details>

-## 🚀使用
-
-**部署完成后必看: [指令说明](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**
-所有功能查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E4%BD%BF%E7%94%A8%E6%96%B9%E5%BC%8F)
+**部署完成后必看: [命令说明](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**
+
+所有功能查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8)
+
+</details>
+
+<details>
+<summary>

 ## 🧩插件生态

-现已支持自行开发插件对功能进行扩展或自定义程序行为
-详见[Wiki插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
-开发教程见[Wiki插件开发页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)
+</summary>

-<details>
-<summary>查看插件列表</summary>
+⭐我们已经支持了[GPT的Function Calling能力](https://platform.openai.com/docs/guides/gpt/function-calling),请查看[Wiki内容函数](https://github.com/RockChinQ/QChatGPT/wiki/6-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)

-### 示例插件
+> 使用方法见:[Wiki插件使用](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
+> 开发教程见:[Wiki插件开发](https://github.com/RockChinQ/QChatGPT/wiki/7-%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)

-在`tests/plugin_examples`目录下,将其整个目录复制到`plugins`目录下即可使用
-
-- `cmdcn` - 主程序指令中文形式
-- `hello_plugin` - 在收到消息`hello`时回复相应消息
-- `urlikethisijustsix` - 收到冒犯性消息时回复相应消息
+[所有插件列表](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6),欢迎提出issue以提交新的插件

-### 更多
+### 部分插件

-欢迎提交新的插件
-
-- [revLibs](https://github.com/RockChinQ/revLibs) - 将ChatGPT网页版接入此项目,关于[官方接口和网页版有什么区别](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E4%B8%8EChatGPT%E7%BD%91%E9%A1%B5%E7%89%88)
-- [Switcher](https://github.com/RockChinQ/Switcher) - 支持通过指令切换使用的模型
+- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - 让机器人能联网!!
+- [revLibs](https://github.com/RockChinQ/revLibs) - 将ChatGPT网页版、Claude、Bard、Hugging Chat等破解版接入此项目,关于[官方接口和网页版有什么区别](https://github.com/RockChinQ/QChatGPT/wiki/8-%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
+- [Switcher](https://github.com/RockChinQ/Switcher) - 支持通过命令切换使用的模型
 - [hello_plugin](https://github.com/RockChinQ/hello_plugin) - `hello_plugin` 的储存库形式,插件开发模板
-- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - dominoar编写的诸多新功能插件(语音输出、Ranimg、屏蔽词规则等)
-- [dominoar/QCP-NovelAi](https://github.com/dominoar/QCP-NovelAi) - NovelAI 故事叙述与绘画
-- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - 文字转语音输出,使用HuggingFace上的[VITS-Umamusume-voice-synthesizer模型](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer)
+- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - 文字转语音输出,支持HuggingFace上的[VITS模型](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer),azure语音合成,vits本地语音合成,sovits语音合成
 - [RockChinQ/WaitYiYan](https://github.com/RockChinQ/WaitYiYan) - 实时获取百度`文心一言`等待列表人数
 - [chordfish-k/QChartGPT_Emoticon_Plugin](https://github.com/chordfish-k/QChartGPT_Emoticon_Plugin) - 使机器人根据回复内容发送表情包
 - [oliverkirk-sudo/ChatPoeBot](https://github.com/oliverkirk-sudo/ChatPoeBot) - 接入[Poe](https://poe.com/)上的机器人
 - [lieyanqzu/WeatherPlugin](https://github.com/lieyanqzu/WeatherPlugin) - 天气查询插件
+- [SysStatPlugin](https://github.com/RockChinQ/SysStatPlugin) - 查看系统状态
+- [oliverkirk-sudo/qchat_system_status](https://github.com/oliverkirk-sudo/qchat_system_status) - 以图片的形式输出系统状态
+- [oliverkirk-sudo/QChatAIPaint](https://github.com/oliverkirk-sudo/QChatAIPaint) - 基于[Holara](https://holara.ai/)的ai绘图插件
+- [oliverkirk-sudo/QChatCodeRunner](https://github.com/oliverkirk-sudo/QChatCodeRunner) - 基于[CodeRunner-Plugin](https://github.com/oliverkirk-sudo/CodeRunner-Plugin)的代码运行与图表生成插件
+- [oliverkirk-sudo/QChatWeather](https://github.com/oliverkirk-sudo/QChatWeather) - 生成好看的天气图片,基于和风天气
+- [oliverkirk-sudo/QChatMarkdown](https://github.com/oliverkirk-sudo/QChatMarkdown) - 将机器人输出的markdown转换为图片,基于[playwright](https://playwright.dev/python/docs/intro)
+- [ruuuux/WikipediaSearch](https://github.com/ruuuux/WikipediaSearch) - Wikipedia 搜索插件

 </details>

-## 😘致谢
-
-- [@the-lazy-me](https://github.com/the-lazy-me) 为本项目制作[视频教程](https://www.bilibili.com/video/BV15v4y1X7aP)
+<details>
+
+<summary>
+
+## 😘致谢和赞赏
+
+</summary>
+
+- [@the-lazy-me](https://github.com/the-lazy-me) 为本项目制作[视频教程](https://www.bilibili.com/video/BV1Y14y1Q7kQ)
 - [@mikumifa](https://github.com/mikumifa) 本项目Docker部署仓库开发者
 - [@dominoar](https://github.com/dominoar) 为本项目开发多种插件
 - [@万神的星空](https://github.com/qq255204159) 整合包发行

@@ -274,6 +365,6 @@ python3 main.py

 以及所有[贡献者](https://github.com/RockChinQ/QChatGPT/graphs/contributors)和其他为本项目提供支持的朋友们。

-<!-- ## 👍赞赏
-
-<img alt="赞赏码" src="res/mm_reward_qrcode_1672840549070.png" width="400" height="400"/> -->
+<img alt="赞赏码" src="res/mm_reward_qrcode_1672840549070.png" width="400" height="400"/>
+
+</details>
25  README_en.md

@@ -1,8 +1,13 @@
 # QChatGPT🤖

+<p align="center">
+<img src="res/social.png" alt="QChatGPT" width="640" />
+</p>
+
 English | [简体中文](README.md)

 [](https://github.com/RockChinQ/QChatGPT/releases/latest)
+![Wakapi Count](https://wakapi.dev/api/badge/RockChinQ/interval:any/project:QChatGPT)

 - Refer to [Wiki](https://github.com/RockChinQ/QChatGPT/wiki) to get further information.
 - Official QQ group: 656285629

@@ -23,6 +28,7 @@ English | [简体中文](README.md)
 - ChatGPT website edition (GPT-3.5), see [revLibs plugin](https://github.com/RockChinQ/revLibs)
 - ChatGPT website edition (GPT-4), ChatGPT plus subscription required, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
 - New Bing, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
+- HuggingChat, see [revLibs plugin](https://github.com/RockChinQ/revLibs), English only

 ### Story

@@ -103,11 +109,26 @@ Use [this installer](https://github.com/RockChinQ/qcg-installer) to deploy.

 - Python 3.9.x or higher

-#### Configure Mirai
+#### 配置QQ登录框架
+
+Currently supports mirai and go-cqhttp, configure either one
+
+<details>
+<summary>mirai</summary>

 Follow [this tutorial(cn)](https://yiri-mirai.wybxc.cc/tutorials/01/configuration) to configure Mirai and YiriMirai.
 After starting mirai-console, use the `login` command to log in to the QQ account, and keep the mirai-console running.

+</details>
+
+<details>
+<summary>go-cqhttp</summary>
+
+1. Follow [this tutorial(cn)](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE) to configure go-cqhttp.
+2. Start go-cqhttp, make sure it is logged in and running.
+
+</details>
+
 #### Configure QChatGPT

 1. Clone the repository

@@ -120,7 +141,7 @@ cd QChatGPT
 2. Install dependencies

 ```bash
-pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow
+pip3 install requests yiri-mirai-rc openai colorlog func_timeout dulwich Pillow nakuru-project-idk
 ```

 3. Generate `config.py`
@@ -1,7 +1,13 @@
 # 配置文件: 注释里标[必需]的参数必须修改, 其他参数根据需要修改, 但请勿删除
 import logging

-# [必需] Mirai的配置
+# 消息处理协议适配器
+# 目前支持以下适配器:
+# - "yirimirai": mirai的通信框架,YiriMirai框架适配器, 请同时填写下方mirai_http_api_config
+# - "nakuru": go-cqhttp通信框架,请同时填写下方nakuru_config
+msg_source_adapter = "yirimirai"
+
+# [必需(与nakuru二选一,取决于msg_source_adapter)] Mirai的配置
 # 请到配置mirai的步骤中的教程查看每个字段的信息
 # adapter: 选择适配器,目前支持HTTPAdapter和WebSocketAdapter
 # host: 运行mirai的主机地址

@@ -18,6 +24,15 @@ mirai_http_api_config = {
     "qq": 1234567890
 }

+# [必需(与mirai二选一,取决于msg_source_adapter)]
+# 使用nakuru-project框架连接go-cqhttp的配置
+nakuru_config = {
+    "host": "localhost",    # go-cqhttp的地址
+    "port": 6700,           # go-cqhttp的正向websocket端口
+    "http_port": 5700,      # go-cqhttp的正向http端口
+    "token": ""             # 若在go-cqhttp的config.yml设置了access_token, 则填写此处
+}
+
 # [必需] OpenAI的配置
 # api_key: OpenAI的API Key
 # http_proxy: 请求OpenAI时使用的代理,None为不使用,https和socks5暂不能使用

@@ -47,6 +62,9 @@ mirai_http_api_config = {
 # },
 # "reverse_proxy": "http://example.com:12345/v1"
 # }
+#
+# 作者开设公用反向代理地址: https://api.openai.rockchin.top/v1
+# 随时可能关闭,仅供测试使用,有条件建议使用正向代理或者自建反向代理
 openai_config = {
     "api_key": {
         "default": "openai_api_key"

@@ -55,6 +73,11 @@ openai_config = {
     "reverse_proxy": None
 }

+# api-key切换策略
+# active:每次请求时都会切换api-key
+# passive:仅当api-key超额时才会切换api-key
+switch_strategy = "active"
+
 # [必需] 管理员QQ号,用于接收报错等通知及执行管理员级别指令
 # 支持多个管理员,可以使用list形式设置,例如:
 # admin_qq = [12345678, 87654321]

@@ -91,7 +114,7 @@ admin_qq = 0
 #
 # 还可以加载文件中的预设文字,使用方法请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E9%A2%84%E8%AE%BE%E6%96%87%E5%AD%97
 default_prompt = {
-    "default": "如果我之后想获取帮助,请你说“输入!help获取帮助”",
+    "default": "如果用户之后想获取帮助,请你说“输入!help获取帮助”。",
 }

 # 情景预设格式

@@ -108,13 +131,35 @@ preset_mode = "normal"
 # 注意:由消息前缀(prefix)匹配的消息中将会删除此前缀,正则表达式(regexp)匹配的消息不会删除匹配的部分
 # 前缀匹配优先级高于正则表达式匹配
 # 正则表达式简明教程:https://www.runoob.com/regexp/regexp-tutorial.html
+#
+# 支持针对不同群设置不同的响应规则,例如:
+# response_rules = {
+#     "default": {
+#         "at": True,
+#         "prefix": ["/ai", "!ai", "!ai", "ai"],
+#         "regexp": [],
+#         "random_rate": 0.0,
+#     },
+#     "12345678": {
+#         "at": False,
+#         "prefix": ["/ai", "!ai", "!ai", "ai"],
+#         "regexp": [],
+#         "random_rate": 0.0,
+#     },
+# }
+#
+# 以上设置将会在群号为12345678的群中关闭at响应
+# 未单独设置的群将使用default规则
 response_rules = {
-    "at": True,  # 是否响应at机器人的消息
-    "prefix": ["/ai", "!ai", "!ai", "ai"],
-    "regexp": [],  # "为什么.*", "怎么?样.*", "怎么.*", "如何.*", "[Hh]ow to.*", "[Ww]hy not.*", "[Ww]hat is.*", ".*怎么办", ".*咋办"
-    "random_rate": 0.0,  # 随机响应概率,0.0-1.0,0.0为不随机响应,1.0为响应所有消息, 仅在前几项判断不通过时生效
+    "default": {
+        "at": True,  # 是否响应at机器人的消息
+        "prefix": ["/ai", "!ai", "!ai", "ai"],
+        "regexp": [],  # "为什么.*", "怎么?样.*", "怎么.*", "如何.*", "[Hh]ow to.*", "[Ww]hy not.*", "[Ww]hat is.*", ".*怎么办", ".*咋办"
+        "random_rate": 0.0,  # 随机响应概率,0.0-1.0,0.0为不随机响应,1.0为响应所有消息, 仅在前几项判断不通过时生效
+    },
 }


 # 消息忽略规则
 # 适用于私聊及群聊
 # 符合此规则的消息将不会被响应

@@ -157,32 +202,61 @@ encourage_sponsor_at_start = True
 # 注意:较大的prompt_submit_length会导致OpenAI账户额度消耗更快
 prompt_submit_length = 2048

+# 是否在token超限报错时自动重置会话
+# 可在tips.py中编辑提示语
+auto_reset = True
+
 # OpenAI补全API的参数
 # 请在下方填写模型,程序自动选择接口
+# 模型文档:https://platform.openai.com/docs/models
 # 现已支持的模型有:
 #
-# 'gpt-4'
-# 'gpt-4-0314'
-# 'gpt-4-32k'
-# 'gpt-4-32k-0314'
-# 'gpt-3.5-turbo'
-# 'gpt-3.5-turbo-0301'
-# 'text-davinci-003'
-# 'text-davinci-002'
-# 'code-davinci-002'
-# 'code-cushman-001'
-# 'text-curie-001'
-# 'text-babbage-001'
-# 'text-ada-001'
+# ChatCompletions 接口:
+# # GPT 4 系列
+# "gpt-4-1106-preview",
+# "gpt-4-vision-preview",
+# "gpt-4",
+# "gpt-4-32k",
+# "gpt-4-0613",
+# "gpt-4-32k-0613",
+# "gpt-4-0314", # legacy
+# "gpt-4-32k-0314", # legacy
+# # GPT 3.5 系列
+# "gpt-3.5-turbo-1106",
+# "gpt-3.5-turbo",
+# "gpt-3.5-turbo-16k",
+# "gpt-3.5-turbo-0613", # legacy
+# "gpt-3.5-turbo-16k-0613", # legacy
+# "gpt-3.5-turbo-0301", # legacy
+#
+# Completions接口:
+# "text-davinci-003", # legacy
+# "text-davinci-002", # legacy
+# "code-davinci-002", # legacy
+# "code-cushman-001", # legacy
+# "text-curie-001", # legacy
+# "text-babbage-001", # legacy
+# "text-ada-001", # legacy
+# "gpt-3.5-turbo-instruct",
 #
 # 具体请查看OpenAI的文档: https://beta.openai.com/docs/api-reference/completions/create
 # 请将内容修改到config.py中,请勿修改config-template.py
+#
+# 支持通过 One API 接入多种模型,请在上方的openai_config中设置One API的代理地址,
+# 并在此填写您要使用的模型名称,详细请参考:https://github.com/songquanpeng/one-api
+#
+# 支持的 One API 模型:
+# "SparkDesk",
+# "chatglm_pro",
+# "chatglm_std",
+# "chatglm_lite",
+# "qwen-v1",
+# "qwen-plus-v1",
+# "ERNIE-Bot",
+# "ERNIE-Bot-turbo",
 completion_api_params = {
     "model": "gpt-3.5-turbo",
     "temperature": 0.9,  # 数值越低得到的回答越理性,取值范围[0, 1]
-    "top_p": 1,  # 生成的文本的文本与要求的符合度, 取值范围[0, 1]
-    "frequency_penalty": 0.2,
-    "presence_penalty": 1.0,
 }

 # OpenAI的Image API的参数

@@ -191,18 +265,34 @@ image_api_params = {
     "size": "256x256",  # 图片尺寸,支持256x256, 512x512, 1024x1024
 }

+# 跟踪函数调用
+# 为True时,在每次GPT进行Function Calling时都会输出发送一条回复给用户
+# 同时,一次提问内所有的Function Calling和普通回复消息都会单独发送给用户
+trace_function_calls = False
+
 # 群内回复消息时是否引用原消息
-quote_origin = True
+quote_origin = False
+
+# 群内回复消息时是否at发送者
+at_sender = False

 # 回复绘图时是否包含图片描述
 include_image_description = True

 # 消息处理的超时时间,单位为秒
-process_message_timeout = 30
+process_message_timeout = 120

 # 回复消息时是否显示[GPT]前缀
 show_prefix = False

+# 回复前的强制延迟时间,降低机器人被腾讯风控概率
+# *此机制对命令和消息、私聊及群聊均生效
+# 每次处理时从以下的范围取一个随机秒数,
+# 当此次消息处理时间低于此秒数时,将会强制延迟至此秒数
+# 例如:[1.5, 3],则每次处理时会随机取一个1.5-3秒的随机数,若处理时间低于此随机数,则强制延迟至此随机秒数
+# 若您不需要此功能,请将force_delay_range设置为[0, 0]
+force_delay_range = [1.5, 3]
+
 # 应用长消息处理策略的阈值
 # 当回复消息长度超过此值时,将使用长消息处理策略
 blob_message_threshold = 256

@@ -284,7 +374,7 @@ rate_limitation = {
 rate_limit_strategy = "drop"

 # 是否在启动时进行依赖库更新
-upgrade_dependencies = True
+upgrade_dependencies = False

 # 是否上报统计信息
 # 用于统计机器人的使用情况,不会收集任何用户信息
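The `config-template.py` changes above introduce an adapter switch (`msg_source_adapter`), a go-cqhttp (`nakuru_config`) block, an api-key `switch_strategy`, per-group `response_rules`, and several new runtime switches. Below is a minimal sketch of what a user-side `config.py` could look like under this template when using go-cqhttp; every value is a placeholder and the field names simply mirror the template shown above, not a recommended setup.

```python
# config.py — minimal sketch based on the updated config-template.py above.
# All values are placeholders.

msg_source_adapter = "nakuru"      # or "yirimirai" when connecting through mirai + mirai-api-http

# go-cqhttp connection used by the nakuru adapter
nakuru_config = {
    "host": "localhost",           # go-cqhttp address
    "port": 6700,                  # forward websocket port
    "http_port": 5700,             # forward http port
    "token": ""                    # access_token from go-cqhttp's config.yml, if set
}

openai_config = {
    "api_key": {
        "default": "sk-placeholder"    # placeholder key
    },
    "reverse_proxy": None              # e.g. a One API endpoint, per the template comments
}
switch_strategy = "active"             # rotate api-keys on every request

admin_qq = 12345678                    # placeholder administrator QQ number

# per-group response rules: "default" applies to groups without their own entry
response_rules = {
    "default": {
        "at": True,
        "prefix": ["/ai", "!ai", "ai"],
        "regexp": [],
        "random_rate": 0.0,
    },
    "12345678": {                      # example group override: disable at-response here
        "at": False,
        "prefix": ["/ai", "!ai", "ai"],
        "regexp": [],
        "random_rate": 0.0,
    },
}

completion_api_params = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.9,
}

quote_origin = False                   # do not quote the original message in group replies
force_delay_range = [1.5, 3]           # random forced delay before replying, in seconds
```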
18  docker-compose.yaml  (new file)

@@ -0,0 +1,18 @@
+version: "3"
+
+services:
+  qchatgpt:
+    image: rockchin/qchatgpt:latest
+    volumes:
+      - ./config.py:/QChatGPT/config.py
+      - ./banlist.py:/QChatGPT/banlist.py
+      - ./cmdpriv.json:/QChatGPT/cmdpriv.json
+      - ./sensitive.json:/QChatGPT/sensitive.json
+      - ./tips.py:/QChatGPT/tips.py
+      # 目录映射
+      - ./plugins:/QChatGPT/plugins
+      - ./scenario:/QChatGPT/scenario
+      - ./temp:/QChatGPT/temp
+      - ./logs:/QChatGPT/logs
+    restart: always
+    # 根据具体环境配置网络
676  main.py

@@ -13,322 +13,6 @@ sys.path.append(".")

 from pkg.utils.log import init_runtime_log_file, reset_logging

-try:
-    import colorlog
-except ImportError:
-    # 尝试安装
-    import pkg.utils.pkgmgr as pkgmgr
-    try:
-        pkgmgr.install_requirements("requirements.txt")
-        pkgmgr.install_upgrade("websockets")
-        import colorlog
-    except ImportError:
-        print("依赖不满足,请查看 https://github.com/RockChinQ/qcg-installer/issues/15")
-        sys.exit(1)
-import colorlog
-
-import requests
-import websockets.exceptions
-from urllib3.exceptions import InsecureRequestWarning
-import pkg.utils.context
-
-
-# 是否使用override.json覆盖配置
-# 仅在启动时提供 --override 或 -r 参数时生效
-use_override = False
-
-
-def init_db():
-    import pkg.database.manager
-    database = pkg.database.manager.DatabaseManager()
-
-    database.initialize_database()
-
-
-def ensure_dependencies():
-    import pkg.utils.pkgmgr as pkgmgr
-    pkgmgr.run_pip(["install", "openai", "Pillow", "--upgrade",
-                    "-i", "https://pypi.douban.com/simple/",
-                    "--trusted-host", "pypi.douban.com"])
-
-
-known_exception_caught = False
-
-
-def override_config():
-    import config
-    # 检查override.json覆盖
-    if os.path.exists("override.json") and use_override:
-        override_json = json.load(open("override.json", "r", encoding="utf-8"))
-        for key in override_json:
-            if hasattr(config, key):
-                setattr(config, key, override_json[key])
-                logging.info("覆写配置[{}]为[{}]".format(key, override_json[key]))
-            else:
-                logging.error("无法覆写配置[{}]为[{}],该配置不存在,请检查override.json是否正确".format(key, override_json[key]))
-
-
-# 临时函数,用于加载config和上下文,未来统一放在config类
-def load_config():
-    logging.info("检查config模块完整性.")
-    # 完整性校验
-    is_integrity = True
-    config_template = importlib.import_module('config-template')
-    config = importlib.import_module('config')
-    for key in dir(config_template):
-        if not key.startswith("__") and not hasattr(config, key):
-            setattr(config, key, getattr(config_template, key))
-            logging.warning("[{}]不存在".format(key))
-            is_integrity = False
-
-    if not is_integrity:
-        logging.warning("配置文件不完整,您可以依据config-template.py检查config.py")
-
-    # 检查override.json覆盖
-    override_config()
-
-    if not is_integrity:
-        logging.warning("以上不存在的配置已被设为默认值,将在3秒后继续启动... ")
-        time.sleep(3)
-
-    # 存进上下文
-    pkg.utils.context.set_config(config)
-
-
-def complete_tips():
-    """根据tips-custom-template模块补全tips模块的属性"""
-    is_integrity = True
-    logging.info("检查tips模块完整性.")
-    tips_template = importlib.import_module('tips-custom-template')
-    tips = importlib.import_module('tips')
-    for key in dir(tips_template):
-        if not key.startswith("__") and not hasattr(tips, key):
-            setattr(tips, key, getattr(tips_template, key))
-            logging.warning("[{}]不存在".format(key))
-            is_integrity = False
-
-    if not is_integrity:
-        logging.warning("tips模块不完整,您可以依据tips-custom-template.py检查tips.py")
-        logging.warning("以上配置已被设为默认值,将在3秒后继续启动... ")
-        time.sleep(3)
-
-
-def start(first_time_init=False):
-    """启动流程,reload之后会被执行"""
-
-    global known_exception_caught
-    import pkg.utils.context
-
-    config = pkg.utils.context.get_config()
-    # 更新openai库到最新版本
-    if not hasattr(config, 'upgrade_dependencies') or config.upgrade_dependencies:
-        print("正在更新依赖库,请等待...")
-        if not hasattr(config, 'upgrade_dependencies'):
-            print("这个操作不是必须的,如果不想更新,请在config.py中添加upgrade_dependencies=False")
-        else:
-            print("这个操作不是必须的,如果不想更新,请在config.py中将upgrade_dependencies设置为False")
-        try:
-            ensure_dependencies()
-        except Exception as e:
-            print("更新openai库失败:{}, 请忽略或自行更新".format(e))
-
-    known_exception_caught = False
-    try:
-
-        sh = reset_logging()
-        pkg.utils.context.context['logger_handler'] = sh
-
-        # 检查是否设置了管理员
-        if not (hasattr(config, 'admin_qq') and config.admin_qq != 0):
-            # logging.warning("未设置管理员QQ,管理员权限指令及运行告警将无法使用,如需设置请修改config.py中的admin_qq字段")
-            while True:
-                try:
-                    config.admin_qq = int(input("未设置管理员QQ,管理员权限指令及运行告警将无法使用,请输入管理员QQ号: "))
-                    # 写入到文件
-
-                    # 读取文件
-                    config_file_str = ""
-                    with open("config.py", "r", encoding="utf-8") as f:
-                        config_file_str = f.read()
-                    # 替换
-                    config_file_str = config_file_str.replace("admin_qq = 0", "admin_qq = " + str(config.admin_qq))
-                    # 写入
-                    with open("config.py", "w", encoding="utf-8") as f:
-                        f.write(config_file_str)
-
-                    print("管理员QQ已设置,如需修改请修改config.py中的admin_qq字段")
-                    time.sleep(4)
-                    break
-                except ValueError:
-                    print("请输入数字")
-
-        import pkg.openai.manager
-        import pkg.database.manager
-        import pkg.openai.session
-        import pkg.qqbot.manager
-        import pkg.openai.dprompt
-        import pkg.qqbot.cmds.aamgr
-
-        try:
-            pkg.openai.dprompt.register_all()
-            pkg.qqbot.cmds.aamgr.register_all()
-            pkg.qqbot.cmds.aamgr.apply_privileges()
-        except Exception as e:
-            logging.error(e)
-            traceback.print_exc()
-
-        # 配置openai api_base
-        if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
-            import openai
-            openai.api_base = config.openai_config["reverse_proxy"]
-
-        # 主启动流程
-        database = pkg.database.manager.DatabaseManager()
-
-        database.initialize_database()
-
-        openai_interact = pkg.openai.manager.OpenAIInteract(config.openai_config['api_key'])
-
-        # 加载所有未超时的session
-        pkg.openai.session.load_sessions()
-
-        # 初始化qq机器人
-        qqbot = pkg.qqbot.manager.QQBotManager(mirai_http_api_config=config.mirai_http_api_config,
-                                               timeout=config.process_message_timeout, retry=config.retry_times,
-                                               first_time_init=first_time_init)
-
-        # 加载插件
-        import pkg.plugin.host
-        pkg.plugin.host.load_plugins()
-
-        pkg.plugin.host.initialize_plugins()
-
-        if first_time_init:  # 不是热重载之后的启动,则启动新的bot线程
-
-            import mirai.exceptions
-
-            def run_bot_wrapper():
-                global known_exception_caught
-                try:
-                    qqbot.bot.run()
-                except TypeError as e:
-                    if str(e).__contains__("argument 'debug'"):
-                        logging.error(
-                            "连接bot失败:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/82".format(e))
-                        known_exception_caught = True
-                    elif str(e).__contains__("As of 3.10, the *loop*"):
-                        logging.error(
-                            "Websockets版本过低:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/5".format(e))
-                        known_exception_caught = True
-
-                except websockets.exceptions.InvalidStatus as e:
-                    logging.error(
-                        "mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
-                            e))
-                    known_exception_caught = True
-                except mirai.exceptions.NetworkError as e:
-                    logging.error("连接mirai-api-http失败:{}, 请检查是否已按照文档启动mirai".format(e))
-                    known_exception_caught = True
-                except Exception as e:
-                    if str(e).__contains__("404"):
-                        logging.error(
-                            "mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
-                                e))
-                        known_exception_caught = True
-                    elif str(e).__contains__("signal only works in main thread"):
-                        logging.error(
-                            "hypercorn异常:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/86".format(
-                                e))
-                        known_exception_caught = True
-                    elif str(e).__contains__("did not receive a valid HTTP"):
-                        logging.error(
-                            "mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
-                                e))
-                    else:
-                        logging.error(
-                            "捕捉到未知异常:{}, 请前往 https://github.com/RockChinQ/QChatGPT/issues 查找或提issue".format(e))
-                        known_exception_caught = True
-                        raise e
-                finally:
-                    time.sleep(12)
-            threading.Thread(
-                target=run_bot_wrapper
-            ).start()
-    finally:
-        # 判断若是Windows,输出选择模式可能会暂停程序的警告
-        if os.name == 'nt':
-            time.sleep(2)
-            logging.info("您正在使用Windows系统,若命令行窗口处于“选择”模式,程序可能会被暂停,此时请右键点击窗口空白区域使其取消选择模式。")
-
-        time.sleep(12)
-
-        if first_time_init:
-            if not known_exception_caught:
-                logging.info("QQ: {}, MAH: {}".format(config.mirai_http_api_config['qq'], config.mirai_http_api_config['host']+":"+str(config.mirai_http_api_config['port'])))
-                logging.info('程序启动完成,如长时间未显示 ”成功登录到账号xxxxx“ ,并且不回复消息,请查看 '
-                             'https://github.com/RockChinQ/QChatGPT/issues/37')
-            else:
-                sys.exit(1)
-        else:
-            logging.info('热重载完成')
-
-        # 发送赞赏码
-        if config.encourage_sponsor_at_start \
-            and pkg.utils.context.get_openai_manager().audit_mgr.get_total_text_length() >= 2048:
-
-            logging.info("发送赞赏码")
-            from mirai import MessageChain, Plain, Image
-            import pkg.utils.constants
-            message_chain = MessageChain([
-                Plain("自2022年12月初以来,开发者已经花费了大量时间和精力来维护本项目,如果您觉得本项目对您有帮助,欢迎赞赏开发者,"
-                      "以支持项目稳定运行😘"),
-                Image(base64=pkg.utils.constants.alipay_qr_b64),
-                Image(base64=pkg.utils.constants.wechat_qr_b64),
-                Plain("BTC: 3N4Azee63vbBB9boGv9Rjf4N5SocMe5eCq\nXMR: 89LS21EKQuDGkyQoe2nDupiuWXk4TVD6FALvSKv5owfmeJEPFpHeMsZLYtLiJ6GxLrhsRe5gMs6MyMSDn4GNQAse2Mae4KE\n\n"),
-                Plain("(本消息仅在启动时发送至管理员,如果您不想再看到此消息,请在config.py中将encourage_sponsor_at_start设置为False)")
-            ])
-            pkg.utils.context.get_qqbot_manager().notify_admin_message_chain(message_chain)
-
-        time.sleep(5)
-        import pkg.utils.updater
-        try:
-            if pkg.utils.updater.is_new_version_available():
-                logging.info("新版本可用,请发送 !update 进行自动更新\n更新日志:\n{}".format("\n".join(pkg.utils.updater.get_rls_notes())))
-            else:
-                logging.info("当前已是最新版本")
-
-        except Exception as e:
-            logging.warning("检查更新失败:{}".format(e))
-
-        try:
-            import pkg.utils.announcement as announcement
-            new_announcement = announcement.fetch_new()
-            if len(new_announcement) > 0:
-                for announcement in new_announcement:
-                    logging.critical("[公告]<{}> {}".format(announcement['time'], announcement['content']))
-        except Exception as e:
-            logging.warning("获取公告失败:{}".format(e))
-
-    return qqbot
-
-
-def stop():
-    import pkg.qqbot.manager
-    import pkg.openai.session
-    try:
-        import pkg.plugin.host
-        pkg.plugin.host.unload_plugins()
|
|
||||||
|
|
||||||
qqbot_inst = pkg.utils.context.get_qqbot_manager()
|
|
||||||
assert isinstance(qqbot_inst, pkg.qqbot.manager.QQBotManager)
|
|
||||||
|
|
||||||
for session in pkg.openai.session.sessions:
|
|
||||||
logging.info('持久化session: %s', session)
|
|
||||||
pkg.openai.session.sessions[session].persistence()
|
|
||||||
pkg.utils.context.get_database_manager().close()
|
|
||||||
except Exception as e:
|
|
||||||
if not isinstance(e, KeyboardInterrupt):
|
|
||||||
raise e
|
|
||||||
def check_file():
    # 检查是否有banlist.py,如果没有就把banlist-template.py复制一份

@@ -368,15 +52,369 @@ def check_file():
        sys.exit(0)
|
# 初始化相关文件
|
||||||
|
check_file()
|
||||||
|
|
||||||
|
try:
|
||||||
|
import colorlog
|
||||||
|
except ImportError:
|
||||||
|
# 尝试安装
|
||||||
|
import pkg.utils.pkgmgr as pkgmgr
|
||||||
|
try:
|
||||||
|
pkgmgr.install_requirements("requirements.txt")
|
||||||
|
import colorlog
|
||||||
|
except ImportError:
|
||||||
|
print("依赖不满足,请查看 https://github.com/RockChinQ/qcg-installer/issues/15")
|
||||||
|
sys.exit(1)
|
||||||
|
import colorlog
|
||||||
|
|
||||||
|
import requests
|
||||||
|
import websockets.exceptions
|
||||||
|
from urllib3.exceptions import InsecureRequestWarning
|
||||||
|
import pkg.utils.context
|
||||||
|
|
||||||
|
|
||||||
|
# 是否使用override.json覆盖配置
|
||||||
|
# 仅在启动时提供 --override 或 -r 参数时生效
|
||||||
|
use_override = False
|
||||||
|
|
||||||
|
|
||||||
|
def init_db():
|
||||||
|
import pkg.database.manager
|
||||||
|
database = pkg.database.manager.DatabaseManager()
|
||||||
|
|
||||||
|
database.initialize_database()
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_dependencies():
|
||||||
|
import pkg.utils.pkgmgr as pkgmgr
|
||||||
|
pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "tiktoken", "--upgrade",
|
||||||
|
"-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
|
||||||
|
"--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
|
||||||
|
|
||||||
|
|
||||||
|
known_exception_caught = False
|
||||||
|
|
||||||
|
|
||||||
|
def override_config():
|
||||||
|
import config
|
||||||
|
# 检查override.json覆盖
|
||||||
|
if os.path.exists("override.json") and use_override:
|
||||||
|
override_json = json.load(open("override.json", "r", encoding="utf-8"))
|
||||||
|
overrided = []
|
||||||
|
for key in override_json:
|
||||||
|
if hasattr(config, key):
|
||||||
|
setattr(config, key, override_json[key])
|
||||||
|
# logging.info("覆写配置[{}]为[{}]".format(key, override_json[key]))
|
||||||
|
overrided.append(key)
|
||||||
|
else:
|
||||||
|
logging.error("无法覆写配置[{}]为[{}],该配置不存在,请检查override.json是否正确".format(key, override_json[key]))
|
||||||
|
if len(overrided) > 0:
|
||||||
|
logging.info("已根据override.json覆写配置项: {}".format(", ".join(overrided)))
|
||||||
|
|
||||||
|
|
||||||
|
# 临时函数,用于加载config和上下文,未来统一放在config类
|
||||||
|
def load_config():
|
||||||
|
logging.info("检查config模块完整性.")
|
||||||
|
# 完整性校验
|
||||||
|
non_exist_keys = []
|
||||||
|
|
||||||
|
is_integrity = True
|
||||||
|
config_template = importlib.import_module('config-template')
|
||||||
|
config = importlib.import_module('config')
|
||||||
|
for key in dir(config_template):
|
||||||
|
if not key.startswith("__") and not hasattr(config, key):
|
||||||
|
setattr(config, key, getattr(config_template, key))
|
||||||
|
# logging.warning("[{}]不存在".format(key))
|
||||||
|
non_exist_keys.append(key)
|
||||||
|
is_integrity = False
|
||||||
|
|
||||||
|
if not is_integrity:
|
||||||
|
logging.warning("以下配置字段不存在: {}".format(", ".join(non_exist_keys)))
|
||||||
|
|
||||||
|
# 检查override.json覆盖
|
||||||
|
override_config()
|
||||||
|
|
||||||
|
if not is_integrity:
|
||||||
|
logging.warning("以上不存在的配置已被设为默认值,您可以依据config-template.py检查config.py,将在3秒后继续启动... ")
|
||||||
|
time.sleep(3)
|
||||||
|
|
||||||
|
# 存进上下文
|
||||||
|
pkg.utils.context.set_config(config)
|
||||||
|
|
||||||
|
|
||||||
|
def complete_tips():
|
||||||
|
"""根据tips-custom-template模块补全tips模块的属性"""
|
||||||
|
non_exist_keys = []
|
||||||
|
|
||||||
|
is_integrity = True
|
||||||
|
logging.info("检查tips模块完整性.")
|
||||||
|
tips_template = importlib.import_module('tips-custom-template')
|
||||||
|
tips = importlib.import_module('tips')
|
||||||
|
for key in dir(tips_template):
|
||||||
|
if not key.startswith("__") and not hasattr(tips, key):
|
||||||
|
setattr(tips, key, getattr(tips_template, key))
|
||||||
|
# logging.warning("[{}]不存在".format(key))
|
||||||
|
non_exist_keys.append(key)
|
||||||
|
is_integrity = False
|
||||||
|
|
||||||
|
if not is_integrity:
|
||||||
|
logging.warning("以下提示语字段不存在: {}".format(", ".join(non_exist_keys)))
|
||||||
|
logging.warning("tips模块不完整,您可以依据tips-custom-template.py检查tips.py")
|
||||||
|
logging.warning("以上配置已被设为默认值,将在3秒后继续启动... ")
|
||||||
|
time.sleep(3)
|
||||||
|
|
||||||
|
|
||||||
|
def start(first_time_init=False):
|
||||||
|
"""启动流程,reload之后会被执行"""
|
||||||
|
|
||||||
|
global known_exception_caught
|
||||||
|
import pkg.utils.context
|
||||||
|
|
||||||
|
config = pkg.utils.context.get_config()
|
||||||
|
# 更新openai库到最新版本
|
||||||
|
if not hasattr(config, 'upgrade_dependencies') or config.upgrade_dependencies:
|
||||||
|
print("正在更新依赖库,请等待...")
|
||||||
|
if not hasattr(config, 'upgrade_dependencies'):
|
||||||
|
print("这个操作不是必须的,如果不想更新,请在config.py中添加upgrade_dependencies=False")
|
||||||
|
else:
|
||||||
|
print("这个操作不是必须的,如果不想更新,请在config.py中将upgrade_dependencies设置为False")
|
||||||
|
try:
|
||||||
|
ensure_dependencies()
|
||||||
|
except Exception as e:
|
||||||
|
print("更新openai库失败:{}, 请忽略或自行更新".format(e))
|
||||||
|
|
||||||
|
known_exception_caught = False
|
||||||
|
try:
|
||||||
|
try:
|
||||||
|
|
||||||
|
sh = reset_logging()
|
||||||
|
pkg.utils.context.context['logger_handler'] = sh
|
||||||
|
|
||||||
|
# 检查是否设置了管理员
|
||||||
|
if not (hasattr(config, 'admin_qq') and config.admin_qq != 0):
|
||||||
|
# logging.warning("未设置管理员QQ,管理员权限指令及运行告警将无法使用,如需设置请修改config.py中的admin_qq字段")
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
config.admin_qq = int(input("未设置管理员QQ,管理员权限指令及运行告警将无法使用,请输入管理员QQ号: "))
|
||||||
|
# 写入到文件
|
||||||
|
|
||||||
|
# 读取文件
|
||||||
|
config_file_str = ""
|
||||||
|
with open("config.py", "r", encoding="utf-8") as f:
|
||||||
|
config_file_str = f.read()
|
||||||
|
# 替换
|
||||||
|
config_file_str = config_file_str.replace("admin_qq = 0", "admin_qq = " + str(config.admin_qq))
|
||||||
|
# 写入
|
||||||
|
with open("config.py", "w", encoding="utf-8") as f:
|
||||||
|
f.write(config_file_str)
|
||||||
|
|
||||||
|
print("管理员QQ已设置,如需修改请修改config.py中的admin_qq字段")
|
||||||
|
time.sleep(4)
|
||||||
|
break
|
||||||
|
except ValueError:
|
||||||
|
print("请输入数字")
|
||||||
|
|
||||||
|
import pkg.openai.manager
|
||||||
|
import pkg.database.manager
|
||||||
|
import pkg.openai.session
|
||||||
|
import pkg.qqbot.manager
|
||||||
|
import pkg.openai.dprompt
|
||||||
|
import pkg.qqbot.cmds.aamgr
|
||||||
|
|
||||||
|
try:
|
||||||
|
pkg.openai.dprompt.register_all()
|
||||||
|
pkg.qqbot.cmds.aamgr.register_all()
|
||||||
|
pkg.qqbot.cmds.aamgr.apply_privileges()
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(e)
|
||||||
|
traceback.print_exc()
|
||||||
|
|
||||||
|
# 配置OpenAI proxy
|
||||||
|
import openai
|
||||||
|
openai.proxies = None # 先重置,因为重载后可能需要清除proxy
|
||||||
|
if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None:
|
||||||
|
openai.proxies = {
|
||||||
|
"http": config.openai_config["http_proxy"],
|
||||||
|
"https": config.openai_config["http_proxy"]
|
||||||
|
}
|
||||||
|
|
||||||
|
# 配置openai api_base
|
||||||
|
if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
|
||||||
|
openai.base_url = config.openai_config["reverse_proxy"]
|
||||||
|
|
||||||
|
# 主启动流程
|
||||||
|
database = pkg.database.manager.DatabaseManager()
|
||||||
|
|
||||||
|
database.initialize_database()
|
||||||
|
|
||||||
|
openai_interact = pkg.openai.manager.OpenAIInteract(config.openai_config['api_key'])
|
||||||
|
|
||||||
|
# 加载所有未超时的session
|
||||||
|
pkg.openai.session.load_sessions()
|
||||||
|
|
||||||
|
# 初始化qq机器人
|
||||||
|
qqbot = pkg.qqbot.manager.QQBotManager(first_time_init=first_time_init)
|
||||||
|
|
||||||
|
# 加载插件
|
||||||
|
import pkg.plugin.host
|
||||||
|
pkg.plugin.host.load_plugins()
|
||||||
|
|
||||||
|
pkg.plugin.host.initialize_plugins()
|
||||||
|
|
||||||
|
if first_time_init: # 不是热重载之后的启动,则启动新的bot线程
|
||||||
|
|
||||||
|
import mirai.exceptions
|
||||||
|
|
||||||
|
def run_bot_wrapper():
|
||||||
|
global known_exception_caught
|
||||||
|
try:
|
||||||
|
logging.debug("使用账号: {}".format(qqbot.bot_account_id))
|
||||||
|
qqbot.adapter.run_sync()
|
||||||
|
except TypeError as e:
|
||||||
|
if str(e).__contains__("argument 'debug'"):
|
||||||
|
logging.error(
|
||||||
|
"连接bot失败:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/82".format(e))
|
||||||
|
known_exception_caught = True
|
||||||
|
elif str(e).__contains__("As of 3.10, the *loop*"):
|
||||||
|
logging.error(
|
||||||
|
"Websockets版本过低:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/5".format(e))
|
||||||
|
known_exception_caught = True
|
||||||
|
|
||||||
|
except websockets.exceptions.InvalidStatus as e:
|
||||||
|
logging.error(
|
||||||
|
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||||
|
e))
|
||||||
|
known_exception_caught = True
|
||||||
|
except mirai.exceptions.NetworkError as e:
|
||||||
|
logging.error("连接mirai-api-http失败:{}, 请检查是否已按照文档启动mirai".format(e))
|
||||||
|
known_exception_caught = True
|
||||||
|
except Exception as e:
|
||||||
|
if str(e).__contains__("404"):
|
||||||
|
logging.error(
|
||||||
|
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||||
|
e))
|
||||||
|
known_exception_caught = True
|
||||||
|
elif str(e).__contains__("signal only works in main thread"):
|
||||||
|
logging.error(
|
||||||
|
"hypercorn异常:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/86".format(
|
||||||
|
e))
|
||||||
|
known_exception_caught = True
|
||||||
|
elif str(e).__contains__("did not receive a valid HTTP"):
|
||||||
|
logging.error(
|
||||||
|
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||||
|
e))
|
||||||
|
else:
|
||||||
|
import traceback
|
||||||
|
traceback.print_exc()
|
||||||
|
logging.error(
|
||||||
|
"捕捉到未知异常:{}, 请前往 https://github.com/RockChinQ/QChatGPT/issues 查找或提issue".format(e))
|
||||||
|
known_exception_caught = True
|
||||||
|
raise e
|
||||||
|
finally:
|
||||||
|
time.sleep(12)
|
||||||
|
threading.Thread(
|
||||||
|
target=run_bot_wrapper
|
||||||
|
).start()
|
||||||
|
except Exception as e:
|
||||||
|
traceback.print_exc()
|
||||||
|
if isinstance(e, KeyboardInterrupt):
|
||||||
|
logging.info("程序被用户中止")
|
||||||
|
sys.exit(0)
|
||||||
|
elif isinstance(e, SyntaxError):
|
||||||
|
logging.error("配置文件存在语法错误,请检查配置文件:\n1. 是否存在中文符号\n2. 是否已按照文件中的说明填写正确")
|
||||||
|
sys.exit(1)
|
||||||
|
else:
|
||||||
|
logging.error("初始化失败:{}".format(e))
|
||||||
|
sys.exit(1)
|
||||||
|
finally:
|
||||||
|
# 判断若是Windows,输出选择模式可能会暂停程序的警告
|
||||||
|
if os.name == 'nt':
|
||||||
|
time.sleep(2)
|
||||||
|
logging.info("您正在使用Windows系统,若命令行窗口处于“选择”模式,程序可能会被暂停,此时请右键点击窗口空白区域使其取消选择模式。")
|
||||||
|
|
||||||
|
time.sleep(12)
|
||||||
|
|
||||||
|
if first_time_init:
|
||||||
|
if not known_exception_caught:
|
||||||
|
import config
|
||||||
|
if config.msg_source_adapter == "yirimirai":
|
||||||
|
logging.info("QQ: {}, MAH: {}".format(config.mirai_http_api_config['qq'], config.mirai_http_api_config['host']+":"+str(config.mirai_http_api_config['port'])))
|
||||||
|
logging.critical('程序启动完成,如长时间未显示 "成功登录到账号xxxxx" ,并且不回复消息,解决办法(请勿到群里问): '
|
||||||
|
'https://github.com/RockChinQ/QChatGPT/issues/37')
|
||||||
|
elif config.msg_source_adapter == 'nakuru':
|
||||||
|
logging.info("host: {}, port: {}, http_port: {}".format(config.nakuru_config['host'], config.nakuru_config['port'], config.nakuru_config['http_port']))
|
||||||
|
logging.critical('程序启动完成,如长时间未显示 "Protocol: connected" ,并且不回复消息,请检查config.py中的nakuru_config是否正确')
|
||||||
|
else:
|
||||||
|
sys.exit(1)
|
||||||
|
else:
|
||||||
|
logging.info('热重载完成')
|
||||||
|
|
||||||
|
# 发送赞赏码
|
||||||
|
if config.encourage_sponsor_at_start \
|
||||||
|
and pkg.utils.context.get_openai_manager().audit_mgr.get_total_text_length() >= 2048:
|
||||||
|
|
||||||
|
logging.info("发送赞赏码")
|
||||||
|
from mirai import MessageChain, Plain, Image
|
||||||
|
import pkg.utils.constants
|
||||||
|
message_chain = MessageChain([
|
||||||
|
Plain("自2022年12月初以来,开发者已经花费了大量时间和精力来维护本项目,如果您觉得本项目对您有帮助,欢迎赞赏开发者,"
|
||||||
|
"以支持项目稳定运行😘"),
|
||||||
|
Image(base64=pkg.utils.constants.alipay_qr_b64),
|
||||||
|
Image(base64=pkg.utils.constants.wechat_qr_b64),
|
||||||
|
Plain("BTC: 3N4Azee63vbBB9boGv9Rjf4N5SocMe5eCq\nXMR: 89LS21EKQuDGkyQoe2nDupiuWXk4TVD6FALvSKv5owfmeJEPFpHeMsZLYtLiJ6GxLrhsRe5gMs6MyMSDn4GNQAse2Mae4KE\n\n"),
|
||||||
|
Plain("(本消息仅在启动时发送至管理员,如果您不想再看到此消息,请在config.py中将encourage_sponsor_at_start设置为False)")
|
||||||
|
])
|
||||||
|
pkg.utils.context.get_qqbot_manager().notify_admin_message_chain(message_chain)
|
||||||
|
|
||||||
|
time.sleep(5)
|
||||||
|
import pkg.utils.updater
|
||||||
|
try:
|
||||||
|
if pkg.utils.updater.is_new_version_available():
|
||||||
|
logging.info("新版本可用,请发送 !update 进行自动更新\n更新日志:\n{}".format("\n".join(pkg.utils.updater.get_rls_notes())))
|
||||||
|
else:
|
||||||
|
# logging.info("当前已是最新版本")
|
||||||
|
pass
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logging.warning("检查更新失败:{}".format(e))
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pkg.utils.announcement as announcement
|
||||||
|
new_announcement = announcement.fetch_new()
|
||||||
|
if len(new_announcement) > 0:
|
||||||
|
for announcement in new_announcement:
|
||||||
|
logging.critical("[公告]<{}> {}".format(announcement['time'], announcement['content']))
|
||||||
|
except Exception as e:
|
||||||
|
logging.warning("获取公告失败:{}".format(e))
|
||||||
|
|
||||||
|
return qqbot
|
||||||
|
|
||||||
|
def stop():
|
||||||
|
import pkg.qqbot.manager
|
||||||
|
import pkg.openai.session
|
||||||
|
try:
|
||||||
|
import pkg.plugin.host
|
||||||
|
pkg.plugin.host.unload_plugins()
|
||||||
|
|
||||||
|
qqbot_inst = pkg.utils.context.get_qqbot_manager()
|
||||||
|
assert isinstance(qqbot_inst, pkg.qqbot.manager.QQBotManager)
|
||||||
|
|
||||||
|
for session in pkg.openai.session.sessions:
|
||||||
|
logging.info('持久化session: %s', session)
|
||||||
|
pkg.openai.session.sessions[session].persistence()
|
||||||
|
pkg.utils.context.get_database_manager().close()
|
||||||
|
except Exception as e:
|
||||||
|
if not isinstance(e, KeyboardInterrupt):
|
||||||
|
raise e
|
||||||
|
|
||||||
|
|
||||||
def main():
    global use_override
    # 检查是否携带了 --override 或 -r 参数
    if '--override' in sys.argv or '-r' in sys.argv:
        use_override = True

-   # 初始化相关文件
-   check_file()

    # 初始化logging
    init_runtime_log_file()
    pkg.utils.context.context['logger_handler'] = reset_logging()
@@ -1,5 +1,6 @@
|
|||||||
{
|
{
|
||||||
"comment": "这是override.json支持的字段全集, 关于override.json机制, 请查看https://github.com/RockChinQ/QChatGPT/pull/271",
|
"comment": "这是override.json支持的字段全集, 关于override.json机制, 请查看https://github.com/RockChinQ/QChatGPT/pull/271",
|
||||||
|
"msg_source_adapter": "yirimirai",
|
||||||
"mirai_http_api_config": {
|
"mirai_http_api_config": {
|
||||||
"adapter": "WebSocketAdapter",
|
"adapter": "WebSocketAdapter",
|
||||||
"host": "localhost",
|
"host": "localhost",
|
||||||
@@ -7,6 +8,12 @@
|
|||||||
"verifyKey": "yirimirai",
|
"verifyKey": "yirimirai",
|
||||||
"qq": 1234567890
|
"qq": 1234567890
|
||||||
},
|
},
|
||||||
|
"nakuru_config": {
|
||||||
|
"host": "localhost",
|
||||||
|
"port": 6700,
|
||||||
|
"http_port": 5700,
|
||||||
|
"token": ""
|
||||||
|
},
|
||||||
"openai_config": {
|
"openai_config": {
|
||||||
"api_key": {
|
"api_key": {
|
||||||
"default": "openai_api_key"
|
"default": "openai_api_key"
|
||||||
@@ -14,21 +21,24 @@
|
|||||||
"http_proxy": null,
|
"http_proxy": null,
|
||||||
"reverse_proxy": null
|
"reverse_proxy": null
|
||||||
},
|
},
|
||||||
|
"switch_strategy": "active",
|
||||||
"admin_qq": 0,
|
"admin_qq": 0,
|
||||||
"default_prompt": {
|
"default_prompt": {
|
||||||
"default": "如果我之后想获取帮助,请你说“输入!help获取帮助”"
|
"default": "如果用户之后想获取帮助,请你说“输入!help获取帮助”。"
|
||||||
},
|
},
|
||||||
"preset_mode": "normal",
|
"preset_mode": "normal",
|
||||||
"response_rules": {
|
"response_rules": {
|
||||||
"at": true,
|
"default": {
|
||||||
"prefix": [
|
"at": true,
|
||||||
"/ai",
|
"prefix": [
|
||||||
"!ai",
|
"/ai",
|
||||||
"!ai",
|
"!ai",
|
||||||
"ai"
|
"!ai",
|
||||||
],
|
"ai"
|
||||||
"regexp": [],
|
],
|
||||||
"random_rate": 0.0
|
"regexp": [],
|
||||||
|
"random_rate": 0.0
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"ignore_rules": {
|
"ignore_rules": {
|
||||||
"prefix": [
|
"prefix": [
|
||||||
@@ -44,20 +54,24 @@
|
|||||||
"inappropriate_message_tips": "[百度云]请珍惜机器人,当前返回内容不合规",
|
"inappropriate_message_tips": "[百度云]请珍惜机器人,当前返回内容不合规",
|
||||||
"encourage_sponsor_at_start": true,
|
"encourage_sponsor_at_start": true,
|
||||||
"prompt_submit_length": 2048,
|
"prompt_submit_length": 2048,
|
||||||
|
"auto_reset": true,
|
||||||
"completion_api_params": {
|
"completion_api_params": {
|
||||||
"model": "gpt-3.5-turbo",
|
"model": "gpt-3.5-turbo",
|
||||||
"temperature": 0.9,
|
"temperature": 0.9
|
||||||
"top_p": 1,
|
|
||||||
"frequency_penalty": 0.2,
|
|
||||||
"presence_penalty": 1.0
|
|
||||||
},
|
},
|
||||||
"image_api_params": {
|
"image_api_params": {
|
||||||
"size": "256x256"
|
"size": "256x256"
|
||||||
},
|
},
|
||||||
"quote_origin": true,
|
"trace_function_calls": false,
|
||||||
|
"quote_origin": false,
|
||||||
|
"at_sender": false,
|
||||||
"include_image_description": true,
|
"include_image_description": true,
|
||||||
"process_message_timeout": 30,
|
"process_message_timeout": 120,
|
||||||
"show_prefix": false,
|
"show_prefix": false,
|
||||||
|
"force_delay_range": [
|
||||||
|
1.5,
|
||||||
|
3
|
||||||
|
],
|
||||||
"blob_message_threshold": 256,
|
"blob_message_threshold": 256,
|
||||||
"blob_message_strategy": "forward",
|
"blob_message_strategy": "forward",
|
||||||
"wait_last_done": true,
|
"wait_last_done": true,
|
||||||
@@ -72,7 +86,7 @@
|
|||||||
"default": 60
|
"default": 60
|
||||||
},
|
},
|
||||||
"rate_limit_strategy": "drop",
|
"rate_limit_strategy": "drop",
|
||||||
"upgrade_dependencies": true,
|
"upgrade_dependencies": false,
|
||||||
"report_usage": true,
|
"report_usage": true,
|
||||||
"logging_level": 20
|
"logging_level": 20
|
||||||
}
|
}
|
||||||
@@ -5,11 +5,12 @@
|
|||||||
import hashlib
|
import hashlib
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
|
import threading
|
||||||
|
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
import pkg.utils.context
|
from ..utils import context
|
||||||
import pkg.utils.updater
|
from ..utils import updater
|
||||||
|
|
||||||
|
|
||||||
class DataGatherer:
|
class DataGatherer:
|
||||||
@@ -32,7 +33,7 @@ class DataGatherer:
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.load_from_db()
|
self.load_from_db()
|
||||||
try:
|
try:
|
||||||
self.version_str = pkg.utils.updater.get_current_tag() # 从updater模块获取版本号
|
self.version_str = updater.get_current_tag() # 从updater模块获取版本号
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -42,15 +43,20 @@ class DataGatherer:
|
|||||||
只会报告此次请求的使用量,不会报告总量。
|
只会报告此次请求的使用量,不会报告总量。
|
||||||
不包含除版本号、使用类型、使用量以外的任何信息,仅供开发者分析使用情况。
|
不包含除版本号、使用类型、使用量以外的任何信息,仅供开发者分析使用情况。
|
||||||
"""
|
"""
|
||||||
try:
|
|
||||||
config = pkg.utils.context.get_config()
|
def thread_func():
|
||||||
if not config.report_usage:
|
|
||||||
|
try:
|
||||||
|
config = context.get_config()
|
||||||
|
if not config.report_usage:
|
||||||
|
return
|
||||||
|
res = requests.get("http://reports.rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}&msg_source={}".format(subservice_name, self.version_str, count, config.msg_source_adapter))
|
||||||
|
if res.status_code != 200 or res.text != "ok":
|
||||||
|
logging.warning("report to server failed, status_code: {}, text: {}".format(res.status_code, res.text))
|
||||||
|
except:
|
||||||
return
|
return
|
||||||
res = requests.get("http://reports.rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}".format(subservice_name, self.version_str, count))
|
|
||||||
if res.status_code != 200 or res.text != "ok":
|
threading.Thread(target=thread_func).start()
|
||||||
logging.warning("report to server failed, status_code: {}, text: {}".format(res.status_code, res.text))
|
|
||||||
except:
|
|
||||||
return
|
|
||||||
|
|
||||||
def get_usage(self, key_md5):
|
def get_usage(self, key_md5):
|
||||||
return self.usage[key_md5] if key_md5 in self.usage else {}
|
return self.usage[key_md5] if key_md5 in self.usage else {}
|
||||||
@@ -58,7 +64,7 @@ class DataGatherer:
|
|||||||
def report_text_model_usage(self, model, total_tokens):
|
def report_text_model_usage(self, model, total_tokens):
|
||||||
"""调用方报告文字模型请求文字使用量"""
|
"""调用方报告文字模型请求文字使用量"""
|
||||||
|
|
||||||
key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5() # 以key的md5进行储存
|
key_md5 = context.get_openai_manager().key_mgr.get_using_key_md5() # 以key的md5进行储存
|
||||||
|
|
||||||
if key_md5 not in self.usage:
|
if key_md5 not in self.usage:
|
||||||
self.usage[key_md5] = {}
|
self.usage[key_md5] = {}
|
||||||
@@ -78,7 +84,7 @@ class DataGatherer:
|
|||||||
def report_image_model_usage(self, size):
|
def report_image_model_usage(self, size):
|
||||||
"""调用方报告图片模型请求图片使用量"""
|
"""调用方报告图片模型请求图片使用量"""
|
||||||
|
|
||||||
key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()
|
key_md5 = context.get_openai_manager().key_mgr.get_using_key_md5()
|
||||||
|
|
||||||
if key_md5 not in self.usage:
|
if key_md5 not in self.usage:
|
||||||
self.usage[key_md5] = {}
|
self.usage[key_md5] = {}
|
||||||
@@ -125,9 +131,9 @@ class DataGatherer:
|
|||||||
return total
|
return total
|
||||||
|
|
||||||
def dump_to_db(self):
|
def dump_to_db(self):
|
||||||
pkg.utils.context.get_database_manager().dump_usage_json(self.usage)
|
context.get_database_manager().dump_usage_json(self.usage)
|
||||||
|
|
||||||
def load_from_db(self):
|
def load_from_db(self):
|
||||||
json_str = pkg.utils.context.get_database_manager().load_usage_json()
|
json_str = context.get_database_manager().load_usage_json()
|
||||||
if json_str is not None:
|
if json_str is not None:
|
||||||
self.usage = json.loads(json_str)
|
self.usage = json.loads(json_str)
|
||||||
|
|||||||
@@ -5,11 +5,10 @@ import hashlib
|
|||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import time
|
import time
|
||||||
from sqlite3 import Cursor
|
|
||||||
|
|
||||||
import sqlite3
|
import sqlite3
|
||||||
|
|
||||||
import pkg.utils.context
|
from ..utils import context
|
||||||
|
|
||||||
|
|
||||||
class DatabaseManager:
|
class DatabaseManager:
|
||||||
@@ -22,7 +21,7 @@ class DatabaseManager:
|
|||||||
|
|
||||||
self.reconnect()
|
self.reconnect()
|
||||||
|
|
||||||
pkg.utils.context.set_database_manager(self)
|
context.set_database_manager(self)
|
||||||
|
|
||||||
# 连接到数据库文件
|
# 连接到数据库文件
|
||||||
def reconnect(self):
|
def reconnect(self):
|
||||||
@@ -33,7 +32,7 @@ class DatabaseManager:
|
|||||||
def close(self):
|
def close(self):
|
||||||
self.conn.close()
|
self.conn.close()
|
||||||
|
|
||||||
def __execute__(self, *args, **kwargs) -> Cursor:
|
def __execute__(self, *args, **kwargs) -> sqlite3.Cursor:
|
||||||
# logging.debug('SQL: {}'.format(sql))
|
# logging.debug('SQL: {}'.format(sql))
|
||||||
logging.debug('SQL: {}'.format(args))
|
logging.debug('SQL: {}'.format(args))
|
||||||
c = self.cursor.execute(*args, **kwargs)
|
c = self.cursor.execute(*args, **kwargs)
|
||||||
@@ -145,7 +144,7 @@ class DatabaseManager:
|
|||||||
# 从数据库加载还没过期的session数据
|
# 从数据库加载还没过期的session数据
|
||||||
def load_valid_sessions(self) -> dict:
|
def load_valid_sessions(self) -> dict:
|
||||||
# 从数据库中加载所有还没过期的session
|
# 从数据库中加载所有还没过期的session
|
||||||
config = pkg.utils.context.get_config()
|
config = context.get_config()
|
||||||
self.__execute__("""
|
self.__execute__("""
|
||||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||||
from `sessions` where `last_interact_timestamp` > {}
|
from `sessions` where `last_interact_timestamp` > {}
|
pkg/openai/api/__init__.py — new file (0 lines)

pkg/openai/api/chat_completion.py — new file (220 lines)
@@ -0,0 +1,220 @@
import json
import logging

import openai
from openai.types.chat import chat_completion_message

from .model import RequestBase
from .. import funcmgr


class ChatCompletionRequest(RequestBase):
    """调用ChatCompletion接口的请求类。

    此类保证每一次返回的角色为assistant的信息的finish_reason一定为stop。
    若有函数调用响应,本类的返回瀑布是:函数调用请求->函数调用结果->...->assistant的信息->stop。
    """

    model: str
    messages: list[dict[str, str]]
    kwargs: dict

    stopped: bool = False

    pending_func_call: chat_completion_message.FunctionCall = None

    pending_msg: str

    def flush_pending_msg(self):
        self.append_message(
            role="assistant",
            content=self.pending_msg
        )
        self.pending_msg = ""

    def append_message(self, role: str, content: str, name: str=None, function_call: dict=None):
        msg = {
            "role": role,
            "content": content
        }

        if name is not None:
            msg['name'] = name

        if function_call is not None:
            msg['function_call'] = function_call

        self.messages.append(msg)

    def __init__(
        self,
        client: openai.Client,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ):
        self.client = client
        self.model = model
        self.messages = messages.copy()

        self.kwargs = kwargs

        self.req_func = self.client.chat.completions.create

        self.pending_func_call = None

        self.stopped = False

        self.pending_msg = ""

    def __iter__(self):
        return self

    def __next__(self) -> dict:
        if self.stopped:
            raise StopIteration()

        if self.pending_func_call is None:  # 没有待处理的函数调用请求

            args = {
                "model": self.model,
                "messages": self.messages,
            }

            funcs = funcmgr.get_func_schema_list()

            if len(funcs) > 0:
                args['functions'] = funcs

            # 拼接kwargs
            args = {**args, **self.kwargs}

            from openai.types.chat import chat_completion

            resp: chat_completion.ChatCompletion = self._req(**args)

            choice0 = resp.choices[0]

            # 如果不是函数调用,且finish_reason为stop,则停止迭代
            if choice0.finish_reason == 'stop':  # and choice0["finish_reason"] == "stop"
                self.stopped = True

            if hasattr(choice0.message, 'function_call') and choice0.message.function_call is not None:
                self.pending_func_call = choice0.message.function_call

                self.append_message(
                    role="assistant",
                    content=choice0.message.content,
                    function_call=choice0.message.function_call
                )

                return {
                    "id": resp.id,
                    "choices": [
                        {
                            "index": choice0.index,
                            "message": {
                                "role": "assistant",
                                "type": "function_call",
                                "content": choice0.message.content,
                                "function_call": {
                                    "name": choice0.message.function_call.name,
                                    "arguments": choice0.message.function_call.arguments
                                }
                            },
                            "finish_reason": "function_call"
                        }
                    ],
                    "usage": {
                        "prompt_tokens": resp.usage.prompt_tokens,
                        "completion_tokens": resp.usage.completion_tokens,
                        "total_tokens": resp.usage.total_tokens
                    }
                }
            else:

                # self.pending_msg += choice0['message']['content']
                # 普通回复一定处于最后方,故不用再追加进内部messages

                return {
                    "id": resp.id,
                    "choices": [
                        {
                            "index": choice0.index,
                            "message": {
                                "role": "assistant",
                                "type": "text",
                                "content": choice0.message.content
                            },
                            "finish_reason": choice0.finish_reason
                        }
                    ],
                    "usage": {
                        "prompt_tokens": resp.usage.prompt_tokens,
                        "completion_tokens": resp.usage.completion_tokens,
                        "total_tokens": resp.usage.total_tokens
                    }
                }
        else:  # 处理函数调用请求

            cp_pending_func_call = self.pending_func_call.copy()

            self.pending_func_call = None

            func_name = cp_pending_func_call.name
            arguments = {}

            try:

                try:
                    arguments = json.loads(cp_pending_func_call.arguments)
                # 若不是json格式的异常处理
                except json.decoder.JSONDecodeError:
                    # 获取函数的参数列表
                    func_schema = funcmgr.get_func_schema(func_name)

                    arguments = {
                        func_schema['parameters']['required'][0]: cp_pending_func_call.arguments
                    }

                logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))

                # 执行函数调用
                ret = ""
                try:
                    ret = funcmgr.execute_function(func_name, arguments)

                    logging.info("函数执行完成。")
                except Exception as e:
                    ret = "error: execute function failed: {}".format(str(e))
                    logging.error("函数执行失败: {}".format(str(e)))

                self.append_message(
                    role="function",
                    content=json.dumps(ret, ensure_ascii=False),
                    name=func_name
                )

                return {
                    "id": -1,
                    "choices": [
                        {
                            "index": -1,
                            "message": {
                                "role": "function",
                                "type": "function_return",
                                "function_name": func_name,
                                "content": json.dumps(ret, ensure_ascii=False)
                            },
                            "finish_reason": "function_return"
                        }
                    ],
                    "usage": {
                        "prompt_tokens": 0,
                        "completion_tokens": 0,
                        "total_tokens": 0
                    }
                }

            except funcmgr.ContentFunctionNotFoundError:
                raise Exception("没有找到函数: {}".format(func_name))
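A minimal consumption sketch for the iterator-style request class above. It assumes an already-configured openai.Client and that the plugin host has registered at least one content function; the API key, model name and messages are placeholder examples, not project configuration.

import openai
from pkg.openai.api.chat_completion import ChatCompletionRequest

client = openai.Client(api_key="sk-...")  # example key, supplied by config in the real project
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the weather like today?"},
]

req = ChatCompletionRequest(client, "gpt-3.5-turbo", messages, temperature=0.9)

for resp in req:  # keeps yielding until finish_reason is "stop"
    msg = resp['choices'][0]['message']
    if msg['type'] == 'function_call':      # the model asked for a content function
        print("function requested:", msg['function_call']['name'])
    elif msg['type'] == 'function_return':  # result of the previous function call
        print("function returned:", msg['content'])
    else:                                   # plain assistant text, iteration ends after this
        print("reply:", msg['content'])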
pkg/openai/api/completion.py — new file (100 lines)
@@ -0,0 +1,100 @@
import openai
from openai.types import completion, completion_choice

from . import model


class CompletionRequest(model.RequestBase):
    """调用Completion接口的请求类。

    调用方可以一直next completion直到finish_reason为stop。
    """

    model: str
    prompt: str
    kwargs: dict

    stopped: bool = False

    def __init__(
        self,
        client: openai.Client,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ):
        self.client = client
        self.model = model
        self.prompt = ""

        for message in messages:
            self.prompt += message["role"] + ": " + message["content"] + "\n"

        self.prompt += "assistant: "

        self.kwargs = kwargs

        self.req_func = self.client.completions.create

    def __iter__(self):
        return self

    def __next__(self) -> dict:
        """调用Completion接口,返回生成的文本

        {
            "id": "id",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "type": "text",
                        "content": "message"
                    },
                    "finish_reason": "reason"
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 20,
                "total_tokens": 30
            }
        }
        """

        if self.stopped:
            raise StopIteration()

        resp: completion.Completion = self._req(
            model=self.model,
            prompt=self.prompt,
            **self.kwargs
        )

        if resp.choices[0].finish_reason == "stop":
            self.stopped = True

        choice0: completion_choice.CompletionChoice = resp.choices[0]

        self.prompt += choice0.text

        return {
            "id": resp.id,
            "choices": [
                {
                    "index": choice0.index,
                    "message": {
                        "role": "assistant",
                        "type": "text",
                        "content": choice0.text
                    },
                    "finish_reason": choice0.finish_reason
                }
            ],
            "usage": {
                "prompt_tokens": resp.usage.prompt_tokens,
                "completion_tokens": resp.usage.completion_tokens,
                "total_tokens": resp.usage.total_tokens
            }
        }
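For Completion-style models, the request class above flattens the chat history into one prompt string. The snippet below only illustrates that transformation with made-up messages; it mirrors the loop in __init__ and does not call the API.

# Illustration of the prompt flattening done in CompletionRequest.__init__
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello"},
]

prompt = ""
for message in messages:
    prompt += message["role"] + ": " + message["content"] + "\n"
prompt += "assistant: "

# prompt is now:
# "system: You are a helpful assistant.\nuser: Hello\nassistant: "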
pkg/openai/api/model.py — new file (39 lines)
@@ -0,0 +1,39 @@
# 定义不同接口请求的模型
import logging

import openai


class RequestBase:

    client: openai.Client

    req_func: callable

    def __init__(self, *args, **kwargs):
        raise NotImplementedError

    def _next_key(self):
        import pkg.utils.context as context
        switched, name = context.get_openai_manager().key_mgr.auto_switch()
        logging.debug("切换api-key: switched={}, name={}".format(switched, name))
        self.client.api_key = context.get_openai_manager().key_mgr.get_using_key()

    def _req(self, **kwargs):
        """处理代理问题"""
        logging.debug("请求接口参数: %s", str(kwargs))
        import config

        ret = self.req_func(**kwargs)
        logging.debug("接口请求返回:%s", str(ret))

        if config.switch_strategy == 'active':
            self._next_key()

        return ret

    def __iter__(self):
        return self  # 迭代器协议要求返回自身

    def __next__(self):
        raise NotImplementedError
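RequestBase defines the contract that the two request classes above follow: a subclass sets self.client and self.req_func, routes every call through self._req() so the "active" key-switching strategy is applied, and exposes results through the iterator protocol. The subclass below is a hypothetical minimal example of that contract, written only to illustrate it; it is not part of the project and assumes the project's config module is importable.

import openai
from pkg.openai.api.model import RequestBase

class EchoRequest(RequestBase):
    """Hypothetical single-shot request that yields exactly one reply."""

    def __init__(self, client: openai.Client):
        self.client = client
        self.req_func = self.client.chat.completions.create  # which endpoint _req will hit
        self.done = False

    def __iter__(self):
        return self

    def __next__(self) -> dict:
        if self.done:
            raise StopIteration()
        self.done = True
        resp = self._req(  # _req also rotates the api-key when switch_strategy == 'active'
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "ping"}],
        )
        return {"content": resp.choices[0].message.content}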
@@ -16,10 +16,6 @@ import os
|
|||||||
# __scenario_from_files__ = {}
|
# __scenario_from_files__ = {}
|
||||||
|
|
||||||
|
|
||||||
__universal_first_reply__ = "ok, I'll follow your commands."
|
|
||||||
"""通用首次回复"""
|
|
||||||
|
|
||||||
|
|
||||||
class ScenarioMode:
|
class ScenarioMode:
|
||||||
"""情景预设模式抽象类"""
|
"""情景预设模式抽象类"""
|
||||||
|
|
||||||
@@ -66,17 +62,13 @@ class NormalScenarioMode(ScenarioMode):
|
|||||||
"""普通情景预设模式"""
|
"""普通情景预设模式"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
global __universal_first_reply__
|
|
||||||
# 加载config中的default_prompt值
|
# 加载config中的default_prompt值
|
||||||
if type(config.default_prompt) == str:
|
if type(config.default_prompt) == str:
|
||||||
self.using_prompt_name = "default"
|
self.using_prompt_name = "default"
|
||||||
self.prompts = {"default": [
|
self.prompts = {"default": [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "system",
|
||||||
"content": config.default_prompt
|
"content": config.default_prompt
|
||||||
},{
|
|
||||||
"role": "assistant",
|
|
||||||
"content": __universal_first_reply__
|
|
||||||
}
|
}
|
||||||
]}
|
]}
|
||||||
|
|
||||||
@@ -84,11 +76,8 @@ class NormalScenarioMode(ScenarioMode):
|
|||||||
for key in config.default_prompt:
|
for key in config.default_prompt:
|
||||||
self.prompts[key] = [
|
self.prompts[key] = [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "system",
|
||||||
"content": config.default_prompt[key]
|
"content": config.default_prompt[key]
|
||||||
},{
|
|
||||||
"role": "assistant",
|
|
||||||
"content": __universal_first_reply__
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -98,11 +87,8 @@ class NormalScenarioMode(ScenarioMode):
|
|||||||
with open(os.path.join("prompts", file), encoding="utf-8") as f:
|
with open(os.path.join("prompts", file), encoding="utf-8") as f:
|
||||||
self.prompts[file] = [
|
self.prompts[file] = [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "system",
|
||||||
"content": f.read()
|
"content": f.read()
|
||||||
},{
|
|
||||||
"role": "assistant",
|
|
||||||
"content": __universal_first_reply__
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|||||||
pkg/openai/funcmgr.py — new file (46 lines)
@@ -0,0 +1,46 @@
# 封装了function calling的一些支持函数
import logging

from ..plugin import host


class ContentFunctionNotFoundError(Exception):
    pass


def get_func_schema_list() -> list:
    """从plugin包中的函数结构中获取并处理成受GPT支持的格式"""
    if not host.__enable_content_functions__:
        return []

    schemas = []

    for func in host.__callable_functions__:
        if func['enabled']:
            fun_cp = func.copy()

            del fun_cp['enabled']

            schemas.append(fun_cp)

    return schemas


def get_func(name: str) -> callable:
    if name not in host.__function_inst_map__:
        raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))

    return host.__function_inst_map__[name]


def get_func_schema(name: str) -> dict:
    for func in host.__callable_functions__:
        if func['name'] == name:
            return func
    raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))


def execute_function(name: str, kwargs: dict) -> any:
    """执行函数调用"""

    logging.debug("executing function: name='{}', kwargs={}".format(name, kwargs))

    func = get_func(name)
    return func(**kwargs)
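funcmgr only reads structures maintained by the plugin host; the actual registration happens in pkg.plugin.host. The sketch below shows the shape those structures appear to have, inferred from the accesses above (host.__enable_content_functions__, entries in host.__callable_functions__ with name/parameters/enabled, and host.__function_inst_map__ mapping names to callables). The function name and schema are illustrative examples, not part of the project.

from pkg.plugin import host
from pkg.openai import funcmgr

def access_the_web(url: str) -> str:
    # example content function body
    return "page text of " + url

# Inferred registration shape (normally done by the plugin host itself)
host.__enable_content_functions__ = True
host.__callable_functions__.append({
    "name": "access_the_web",
    "description": "Fetch the text content of a web page.",
    "parameters": {
        "type": "object",
        "properties": {"url": {"type": "string"}},
        "required": ["url"],
    },
    "enabled": True,  # stripped out by get_func_schema_list()
})
host.__function_inst_map__["access_the_web"] = access_the_web

print(funcmgr.get_func_schema_list())  # schemas passed to the model as `functions`
print(funcmgr.execute_function("access_the_web", {"url": "https://example.com"}))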
@@ -2,8 +2,8 @@
|
|||||||
import hashlib
|
import hashlib
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
import pkg.plugin.host as plugin_host
|
from ..plugin import host as plugin_host
|
||||||
import pkg.plugin.models as plugin_models
|
from ..plugin import models as plugin_models
|
||||||
|
|
||||||
|
|
||||||
class KeysManager:
|
class KeysManager:
|
||||||
@@ -54,7 +54,24 @@ class KeysManager:
|
|||||||
是否切换成功, 切换后的api-key的别名
|
是否切换成功, 切换后的api-key的别名
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
index = 0
|
||||||
|
|
||||||
for key_name in self.api_key:
|
for key_name in self.api_key:
|
||||||
|
if self.api_key[key_name] == self.using_key:
|
||||||
|
break
|
||||||
|
|
||||||
|
index += 1
|
||||||
|
|
||||||
|
# 从当前key开始向后轮询
|
||||||
|
start_index = index
|
||||||
|
index += 1
|
||||||
|
if index >= len(self.api_key):
|
||||||
|
index = 0
|
||||||
|
|
||||||
|
while index != start_index:
|
||||||
|
|
||||||
|
key_name = list(self.api_key.keys())[index]
|
||||||
|
|
||||||
if self.api_key[key_name] not in self.exceeded:
|
if self.api_key[key_name] not in self.exceeded:
|
||||||
self.using_key = self.api_key[key_name]
|
self.using_key = self.api_key[key_name]
|
||||||
|
|
||||||
@@ -69,10 +86,14 @@ class KeysManager:
|
|||||||
|
|
||||||
return True, key_name
|
return True, key_name
|
||||||
|
|
||||||
self.using_key = list(self.api_key.values())[0]
|
index += 1
|
||||||
logging.info("使用api-key:" + list(self.api_key.keys())[0])
|
if index >= len(self.api_key):
|
||||||
|
index = 0
|
||||||
|
|
||||||
return False, ""
|
self.using_key = list(self.api_key.values())[start_index]
|
||||||
|
logging.debug("使用api-key:" + list(self.api_key.keys())[start_index])
|
||||||
|
|
||||||
|
return False, list(self.api_key.keys())[start_index]
|
||||||
|
|
||||||
def add(self, key_name, key):
|
def add(self, key_name, key):
|
||||||
self.api_key[key_name] = key
|
self.api_key[key_name] = key
|
||||||
|
|||||||
@@ -2,10 +2,11 @@ import logging
|
|||||||
|
|
||||||
import openai
|
import openai
|
||||||
|
|
||||||
import pkg.openai.keymgr
|
from ..openai import keymgr
|
||||||
import pkg.utils.context
|
from ..utils import context
|
||||||
import pkg.audit.gatherer
|
from ..audit import gatherer
|
||||||
from pkg.openai.modelmgr import ModelRequest, create_openai_model_request
|
from ..openai import modelmgr
|
||||||
|
from ..openai.api import model as api_model
|
||||||
|
|
||||||
|
|
||||||
class OpenAIInteract:
|
class OpenAIInteract:
|
||||||
@@ -14,64 +15,54 @@ class OpenAIInteract:
|
|||||||
将文字接口和图片接口封装供调用方使用
|
将文字接口和图片接口封装供调用方使用
|
||||||
"""
|
"""
|
||||||
|
|
||||||
key_mgr: pkg.openai.keymgr.KeysManager = None
|
key_mgr: keymgr.KeysManager = None
|
||||||
|
|
||||||
audit_mgr: pkg.audit.gatherer.DataGatherer = None
|
audit_mgr: gatherer.DataGatherer = None
|
||||||
|
|
||||||
default_image_api_params = {
|
default_image_api_params = {
|
||||||
"size": "256x256",
|
"size": "256x256",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
client: openai.Client = None
|
||||||
|
|
||||||
def __init__(self, api_key: str):
|
def __init__(self, api_key: str):
|
||||||
|
|
||||||
self.key_mgr = pkg.openai.keymgr.KeysManager(api_key)
|
self.key_mgr = keymgr.KeysManager(api_key)
|
||||||
self.audit_mgr = pkg.audit.gatherer.DataGatherer()
|
self.audit_mgr = gatherer.DataGatherer()
|
||||||
|
|
||||||
logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
|
# logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
|
||||||
|
|
||||||
openai.api_key = self.key_mgr.get_using_key()
|
self.client = openai.Client(
|
||||||
|
api_key=self.key_mgr.get_using_key()
|
||||||
|
)
|
||||||
|
|
||||||
pkg.utils.context.set_openai_manager(self)
|
context.set_openai_manager(self)
|
||||||
|
|
||||||
# 请求OpenAI Completion
|
def request_completion(self, messages: list):
|
||||||
def request_completion(self, prompts) -> tuple[str, int]:
|
"""请求补全接口回复=
|
||||||
"""请求补全接口回复
|
|
||||||
|
|
||||||
Parameters:
|
|
||||||
prompts (str): 提示语
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str: 回复
|
|
||||||
"""
|
"""
|
||||||
|
# 选择接口请求类
|
||||||
|
config = context.get_config()
|
||||||
|
|
||||||
config = pkg.utils.context.get_config()
|
request: api_model.RequestBase
|
||||||
|
|
||||||
# 根据模型选择使用的接口
|
model: str = config.completion_api_params['model']
|
||||||
ai: ModelRequest = create_openai_model_request(
|
|
||||||
config.completion_api_params['model'],
|
|
||||||
'user',
|
|
||||||
config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None
|
|
||||||
)
|
|
||||||
ai.request(
|
|
||||||
prompts,
|
|
||||||
**config.completion_api_params
|
|
||||||
)
|
|
||||||
response = ai.get_response()
|
|
||||||
|
|
||||||
logging.debug("OpenAI response: %s", response)
|
cp_parmas = config.completion_api_params.copy()
|
||||||
|
del cp_parmas['model']
|
||||||
|
|
||||||
# 记录使用量
|
request = modelmgr.select_request_cls(self.client, model, messages, cp_parmas)
|
||||||
current_round_token = 0
|
|
||||||
if 'model' in config.completion_api_params:
|
|
||||||
self.audit_mgr.report_text_model_usage(config.completion_api_params['model'],
|
|
||||||
ai.get_total_tokens())
|
|
||||||
current_round_token = ai.get_total_tokens()
|
|
||||||
elif 'engine' in config.completion_api_params:
|
|
||||||
self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'],
|
|
||||||
response['usage']['total_tokens'])
|
|
||||||
current_round_token = response['usage']['total_tokens']
|
|
||||||
|
|
||||||
return ai.get_message(), current_round_token
|
# 请求接口
|
||||||
|
for resp in request:
|
||||||
|
|
||||||
|
if resp['usage']['total_tokens'] > 0:
|
||||||
|
self.audit_mgr.report_text_model_usage(
|
||||||
|
model,
|
||||||
|
resp['usage']['total_tokens']
|
||||||
|
)
|
||||||
|
|
||||||
|
yield resp
|
||||||
|
|
||||||
def request_image(self, prompt) -> dict:
|
def request_image(self, prompt) -> dict:
|
||||||
"""请求图片接口回复
|
"""请求图片接口回复
|
||||||
@@ -82,7 +73,7 @@ class OpenAIInteract:
|
|||||||
Returns:
|
Returns:
|
||||||
dict: 响应
|
dict: 响应
|
||||||
"""
|
"""
|
||||||
config = pkg.utils.context.get_config()
|
config = context.get_config()
|
||||||
params = config.image_api_params
|
params = config.image_api_params
|
||||||
|
|
||||||
response = openai.Image.create(
|
response = openai.Image.create(
|
||||||
|
|||||||
@@ -5,26 +5,50 @@ ChatCompletion - gpt-3.5-turbo 等模型
|
|||||||
Completion - text-davinci-003 等模型
|
Completion - text-davinci-003 等模型
|
||||||
此模块封装此两个接口的请求实现,为上层提供统一的调用方式
|
此模块封装此两个接口的请求实现,为上层提供统一的调用方式
|
||||||
"""
|
"""
|
||||||
import openai, logging, threading, asyncio
|
import tiktoken
|
||||||
import openai.error as aiE
|
import openai
|
||||||
|
|
||||||
|
from ..openai.api import model as api_model
|
||||||
|
from ..openai.api import completion as api_completion
|
||||||
|
from ..openai.api import chat_completion as api_chat_completion
|
||||||
|
|
||||||
COMPLETION_MODELS = {
|
COMPLETION_MODELS = {
|
||||||
'text-davinci-003',
|
"text-davinci-003", # legacy
|
||||||
'text-davinci-002',
|
"text-davinci-002", # legacy
|
||||||
'code-davinci-002',
|
"code-davinci-002", # legacy
|
||||||
'code-cushman-001',
|
"code-cushman-001", # legacy
|
||||||
'text-curie-001',
|
"text-curie-001", # legacy
|
||||||
'text-babbage-001',
|
"text-babbage-001", # legacy
|
||||||
'text-ada-001',
|
"text-ada-001", # legacy
|
||||||
|
"gpt-3.5-turbo-instruct",
|
||||||
}
|
}
|
||||||
|
|
||||||
CHAT_COMPLETION_MODELS = {
|
CHAT_COMPLETION_MODELS = {
|
||||||
'gpt-3.5-turbo',
|
# GPT 4 系列
|
||||||
'gpt-3.5-turbo-0301',
|
"gpt-4-1106-preview",
|
||||||
'gpt-4',
|
"gpt-4-vision-preview",
|
||||||
'gpt-4-0314',
|
"gpt-4",
|
||||||
'gpt-4-32k',
|
"gpt-4-32k",
|
||||||
'gpt-4-32k-0314'
|
"gpt-4-0613",
|
||||||
|
"gpt-4-32k-0613",
|
||||||
|
"gpt-4-0314", # legacy
|
||||||
|
"gpt-4-32k-0314", # legacy
|
||||||
|
# GPT 3.5 系列
|
||||||
|
"gpt-3.5-turbo-1106",
|
||||||
|
"gpt-3.5-turbo",
|
||||||
|
"gpt-3.5-turbo-16k",
|
||||||
|
"gpt-3.5-turbo-0613", # legacy
|
||||||
|
"gpt-3.5-turbo-16k-0613", # legacy
|
||||||
|
"gpt-3.5-turbo-0301", # legacy
|
||||||
|
# One-API 接入
|
||||||
|
"SparkDesk",
|
||||||
|
"chatglm_pro",
|
||||||
|
"chatglm_std",
|
||||||
|
"chatglm_lite",
|
||||||
|
"qwen-v1",
|
||||||
|
"qwen-plus-v1",
|
||||||
|
"ERNIE-Bot",
|
||||||
|
"ERNIE-Bot-turbo",
|
||||||
}
|
}
|
||||||
|
|
||||||
EDIT_MODELS = {
|
EDIT_MODELS = {
|
||||||
@@ -36,153 +60,85 @@ IMAGE_MODELS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class ModelRequest:
|
def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> api_model.RequestBase:
|
||||||
"""模型接口请求父类"""
|
|
||||||
|
|
||||||
can_chat = False
|
|
||||||
runtime: threading.Thread = None
|
|
||||||
ret = {}
|
|
||||||
proxy: str = None
|
|
||||||
request_ready = True
|
|
||||||
error_info: str = "若在没有任何错误的情况下看到这句话,请带着配置文件上报Issues"
|
|
||||||
|
|
||||||
def __init__(self, model_name, user_name, request_fun, http_proxy:str = None, time_out = None):
|
|
||||||
self.model_name = model_name
|
|
||||||
self.user_name = user_name
|
|
||||||
self.request_fun = request_fun
|
|
||||||
self.time_out = time_out
|
|
||||||
if http_proxy != None:
|
|
||||||
self.proxy = http_proxy
|
|
||||||
openai.proxy = self.proxy
|
|
||||||
self.request_ready = False
|
|
||||||
|
|
||||||
async def __a_request__(self, **kwargs):
|
|
||||||
"""异步请求"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.ret:dict = await self.request_fun(**kwargs)
|
|
||||||
self.request_ready = True
|
|
||||||
except aiE.APIConnectionError as e:
|
|
||||||
self.error_info = "{}\n请检查网络连接或代理是否正常".format(e)
|
|
||||||
raise ConnectionError(self.error_info)
|
|
||||||
except ValueError as e:
|
|
||||||
self.error_info = "{}\n该错误可能是由于http_proxy格式设置错误引起的"
|
|
||||||
except Exception as e:
|
|
||||||
self.error_info = "{}\n由于请求异常产生的未知错误,请查看日志".format(e)
|
|
||||||
raise type(e)(self.error_info)
|
|
||||||
|
|
||||||
def request(self, **kwargs):
|
|
||||||
"""向接口发起请求"""
|
|
||||||
|
|
||||||
if self.proxy != None: #异步请求
|
|
||||||
self.request_ready = False
|
|
||||||
loop = asyncio.new_event_loop()
|
|
||||||
self.runtime = threading.Thread(
|
|
||||||
target=loop.run_until_complete,
|
|
||||||
args=(self.__a_request__(**kwargs),)
|
|
||||||
)
|
|
-            self.runtime.start()
-        else: #同步请求
-            self.ret = self.request_fun(**kwargs)
-
-    def __msg_handle__(self, msg):
-        """将prompt dict转换成接口需要的格式"""
-        return msg
-
-    def ret_handle(self):
-        '''
-        API消息返回处理函数
-        若重写该方法,应检查异步线程状态,或在需要检查处super该方法
-        '''
-        if self.runtime != None and isinstance(self.runtime, threading.Thread):
-            self.runtime.join(self.time_out)
-        if self.request_ready:
-            return
-        raise Exception(self.error_info)
-
-    def get_total_tokens(self):
-        try:
-            return self.ret['usage']['total_tokens']
-        except:
-            return 0
-
-    def get_message(self):
-        return self.message
-
-    def get_response(self):
-        return self.ret
-
-
-class ChatCompletionModel(ModelRequest):
-    """ChatCompletion接口的请求实现"""
-
-    Chat_role = ['system', 'user', 'assistant']
-    def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
-        if http_proxy == None:
-            request_fun = openai.ChatCompletion.create
-        else:
-            request_fun = openai.ChatCompletion.acreate
-        self.can_chat = True
-        super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
-
-    def request(self, prompts, **kwargs):
-        prompts = self.__msg_handle__(prompts)
-        kwargs['messages'] = prompts
-        super().request(**kwargs)
-        self.ret_handle()
-
-    def __msg_handle__(self, msgs):
-        temp_msgs = []
-        # 把msgs拷贝进temp_msgs
-        for msg in msgs:
-            temp_msgs.append(msg.copy())
-        return temp_msgs
-
-    def get_message(self):
-        return self.ret["choices"][0]["message"]['content'] #需要时直接加载加快请求速度,降低内存消耗
-
-
-class CompletionModel(ModelRequest):
-    """Completion接口的请求实现"""
-
-    def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
-        if http_proxy == None:
-            request_fun = openai.Completion.create
-        else:
-            request_fun = openai.Completion.acreate
-        super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
-
-    def request(self, prompts, **kwargs):
-        prompts = self.__msg_handle__(prompts)
-        kwargs['prompt'] = prompts
-        super().request(**kwargs)
-        self.ret_handle()
-
-    def __msg_handle__(self, msgs):
-        prompt = ''
-        for msg in msgs:
-            prompt = prompt + "{}: {}\n".format(msg['role'], msg['content'])
-        # for msg in msgs:
-        #     if msg['role'] == 'assistant':
-        #         prompt = prompt + "{}\n".format(msg['content'])
-        #     else:
-        #         prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content'])
-        prompt = prompt + "assistant: "
-        return prompt
-
-    def get_message(self):
-        return self.ret["choices"][0]["text"]
-
-
-def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest:
-    """使用给定的模型名称创建模型请求对象"""
     if model_name in CHAT_COMPLETION_MODELS:
-        model = ChatCompletionModel(model_name, user_name, http_proxy)
+        return api_chat_completion.ChatCompletionRequest(client, model_name, messages, **args)
     elif model_name in COMPLETION_MODELS:
-        model = CompletionModel(model_name, user_name, http_proxy)
-    else :
-        log = "找不到模型[{}],请检查配置文件".format(model_name)
-        logging.error(log)
-        raise IndexError(log)
-    logging.debug("使用接口[{}]创建模型请求[{}]".format(model.__class__.__name__, model_name))
-    return model
+        return api_completion.CompletionRequest(client, model_name, messages, **args)
+    raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
+
+
+def count_chat_completion_tokens(messages: list, model: str) -> int:
+    """Return the number of tokens used by a list of messages."""
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        print("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+    if model in {
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-4-0314",
+        "gpt-4-32k-0314",
+        "gpt-4-0613",
+        "gpt-4-32k-0613",
+        "SparkDesk",
+        "chatglm_pro",
+        "chatglm_std",
+        "chatglm_lite",
+        "qwen-v1",
+        "qwen-plus-v1",
+        "ERNIE-Bot",
+        "ERNIE-Bot-turbo",
+    }:
+        tokens_per_message = 3
+        tokens_per_name = 1
+    elif model == "gpt-3.5-turbo-0301":
+        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        tokens_per_name = -1  # if there's a name, the role is omitted
+    elif "gpt-3.5-turbo" in model:
+        # print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
+        return count_chat_completion_tokens(messages, model="gpt-3.5-turbo-0613")
+    elif "gpt-4" in model:
+        # print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
+        return count_chat_completion_tokens(messages, model="gpt-4-0613")
+    else:
+        raise NotImplementedError(
+            f"""count_chat_completion_tokens() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
+        )
+    num_tokens = 0
+    for message in messages:
+        num_tokens += tokens_per_message
+        for key, value in message.items():
+            num_tokens += len(encoding.encode(value))
+            if key == "name":
+                num_tokens += tokens_per_name
+    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
+    return num_tokens
+
+
+def count_completion_tokens(messages: list, model: str) -> int:
+
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        print("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+
+    text = ""
+
+    for message in messages:
+        text += message['role'] + message['content'] + "\n"
+
+    text += "assistant: "
+
+    return len(encoding.encode(text))
+
+
+def count_tokens(messages: list, model: str):
+
+    if model in CHAT_COMPLETION_MODELS:
+        return count_chat_completion_tokens(messages, model)
+    elif model in COMPLETION_MODELS:
+        return count_completion_tokens(messages, model)
+    raise ValueError("不支持模型[{}],请检查配置文件".format(model))
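For orientation, a minimal sketch of how the token-counting helpers added above are used; it assumes tiktoken is installed and that the model name appears in CHAT_COMPLETION_MODELS, and the message list is purely illustrative.

# Hedged usage sketch of the counters above; the messages and model name are examples only.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# count_chat_completion_tokens() walks every message, adds the per-message
# overhead (tokens_per_message / tokens_per_name) and the 3-token reply primer.
n = count_chat_completion_tokens(messages, model="gpt-3.5-turbo-0613")

# count_tokens() dispatches to the chat or completion counter depending on
# which model list the name belongs to.
n = count_tokens(messages, model="gpt-3.5-turbo-0613")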
@@ -1,28 +0,0 @@
-# 计费模块
-# 已弃用 https://github.com/RockChinQ/QChatGPT/issues/81
-
-import logging
-
-pricing = {
-    "base": {  # 文字模型单位是1000字符
-        "text-davinci-003": 0.02,
-    },
-    "image": {
-        "256x256": 0.016,
-        "512x512": 0.018,
-        "1024x1024": 0.02,
-    }
-}
-
-
-def language_base_price(model, text):
-    salt_rate = 0.93
-    length = ((len(text.encode('utf-8')) - len(text)) / 2 + len(text)) * salt_rate
-    logging.debug("text length: %d" % length)
-
-    return pricing["base"][model] * length / 1000
-
-
-def image_price(size):
-    logging.debug("image size: %s" % size)
-    return pricing["image"][size]
@@ -8,13 +8,13 @@ import threading
 import time
 import json
 
-import pkg.openai.manager
-import pkg.openai.modelmgr
-import pkg.database.manager
-import pkg.utils.context
+from ..openai import manager as openai_manager
+from ..openai import modelmgr as openai_modelmgr
+from ..database import manager as database_manager
+from ..utils import context as context
 
-import pkg.plugin.host as plugin_host
-import pkg.plugin.models as plugin_models
+from ..plugin import host as plugin_host
+from ..plugin import models as plugin_models
 
 # 运行时保存的所有session
 sessions = {}
@@ -36,7 +36,7 @@ def reset_session_prompt(session_name, prompt):
         f.write(prompt)
         f.close()
     # 生成新数据
-    config = pkg.utils.context.get_config()
+    config = context.get_config()
     prompt = [
         {
             'role': 'system',
@@ -59,12 +59,12 @@ def load_sessions():
 
     global sessions
 
-    db_inst = pkg.utils.context.get_database_manager()
+    db_inst = context.get_database_manager()
 
     session_data = db_inst.load_valid_sessions()
 
     for session_name in session_data:
-        logging.info('加载session: {}'.format(session_name))
+        logging.debug('加载session: {}'.format(session_name))
 
         temp_session = Session(session_name)
         temp_session.name = session_name
@@ -83,7 +83,7 @@ def load_sessions():
 
 
 # 获取指定名称的session,如果不存在则创建一个新的
-def get_session(session_name: str):
+def get_session(session_name: str) -> 'Session':
     global sessions
     if session_name not in sessions:
         sessions[session_name] = Session(session_name)
@@ -107,9 +107,6 @@ class Session:
     prompt = []
     """使用list来保存会话中的回合"""
 
-    token_counts = []
-    """每个回合的token数量"""
-
     default_prompt = []
     """本session的默认prompt"""
 
@@ -173,7 +170,7 @@ class Session:
         if self.create_timestamp != create_timestamp or self not in sessions.values():
             return
 
-        config = pkg.utils.context.get_config()
+        config = context.get_config()
         if int(time.time()) - self.last_interact_timestamp > config.session_expire_time:
             logging.info('session {} 已过期'.format(self.name))
 
@@ -183,7 +180,7 @@ class Session:
                 'session': self,
                 'session_expire_time': config.session_expire_time
             }
-            event = pkg.plugin.host.emit(plugin_models.SessionExpired, **args)
+            event = plugin_host.emit(plugin_models.SessionExpired, **args)
             if event.is_prevented_default():
                 return
 
@@ -195,8 +192,15 @@ class Session:
 
     # 请求回复
     # 这个函数是阻塞的
-    def append(self, text: str) -> str:
-        """向session中添加一条消息,返回接口回复"""
+    def query(self, text: str=None) -> tuple[str, str, list[str]]:
+        """向session中添加一条消息,返回接口回复
+
+        Args:
+            text (str): 用户消息
+
+        Returns:
+            tuple[str, str]: (接口回复, finish_reason, 已调用的函数列表)
+        """
 
         self.last_interact_timestamp = int(time.time())
 
@@ -208,42 +212,144 @@ class Session:
                 'default_prompt': self.default_prompt,
             }
 
-            event = pkg.plugin.host.emit(plugin_models.SessionFirstMessageReceived, **args)
+            event = plugin_host.emit(plugin_models.SessionFirstMessageReceived, **args)
             if event.is_prevented_default():
-                return None
+                return None, None, None
 
-        config = pkg.utils.context.get_config()
+        config = context.get_config()
         max_length = config.prompt_submit_length
 
-        prompts, counts = self.cut_out(text, max_length)
-
-        # 计算请求前的prompt数量
-        total_token_before_query = 0
-        for token_count in counts:
-            total_token_before_query += token_count
+        local_default_prompt = self.default_prompt.copy()
+        local_prompt = self.prompt.copy()
+
+        # 触发PromptPreProcessing事件
+        args = {
+            'session_name': self.name,
+            'default_prompt': self.default_prompt,
+            'prompt': self.prompt,
+            'text_message': text,
+        }
+
+        event = plugin_host.emit(plugin_models.PromptPreProcessing, **args)
+
+        if event.get_return_value('default_prompt') is not None:
+            local_default_prompt = event.get_return_value('default_prompt')
+
+        if event.get_return_value('prompt') is not None:
+            local_prompt = event.get_return_value('prompt')
+
+        if event.get_return_value('text_message') is not None:
+            text = event.get_return_value('text_message')
+
+        # 裁剪messages到合适长度
+        prompts, _ = self.cut_out(text, max_length, local_default_prompt, local_prompt)
+
+        res_text = ""
+
+        pending_msgs = []
+
+        total_tokens = 0
+
+        finish_reason: str = ""
+
+        funcs = []
+
+        trace_func_calls = config.trace_function_calls
+        botmgr = context.get_qqbot_manager()
+
+        session_name_spt: list[str] = self.name.split("_")
+
+        pending_res_text = ""
+
+        # TODO 对不起,我知道这样非常非常屎山,但我之后会重构的
+        for resp in context.get_openai_manager().request_completion(prompts):
+
+            if pending_res_text != "":
+                botmgr.adapter.send_message(
+                    session_name_spt[0],
+                    session_name_spt[1],
+                    pending_res_text
+                )
+                pending_res_text = ""
+
+            finish_reason = resp['choices'][0]['finish_reason']
+
+            if resp['choices'][0]['message']['role'] == "assistant" and resp['choices'][0]['message']['content'] != None: # 包含纯文本响应
+
+                if not trace_func_calls:
+                    res_text += resp['choices'][0]['message']['content']
+                else:
+                    res_text = resp['choices'][0]['message']['content']
+                    pending_res_text = resp['choices'][0]['message']['content']
+
+                total_tokens += resp['usage']['total_tokens']
+
+                msg = {
+                    "role": "assistant",
+                    "content": resp['choices'][0]['message']['content']
+                }
+
+                if 'function_call' in resp['choices'][0]['message']:
+                    msg['function_call'] = json.dumps(resp['choices'][0]['message']['function_call'])
+
+                pending_msgs.append(msg)
+
+            if resp['choices'][0]['message']['type'] == 'function_call':
+                # self.prompt.append(
+                #     {
+                #         "role": "assistant",
+                #         "content": "function call: "+json.dumps(resp['choices'][0]['message']['function_call'])
+                #     }
+                # )
+                if trace_func_calls:
+                    botmgr.adapter.send_message(
+                        session_name_spt[0],
+                        session_name_spt[1],
+                        "调用函数 "+resp['choices'][0]['message']['function_call']['name'] + "..."
+                    )
+
+                total_tokens += resp['usage']['total_tokens']
+            elif resp['choices'][0]['message']['type'] == 'function_return':
+                # self.prompt.append(
+                #     {
+                #         "role": "function",
+                #         "name": resp['choices'][0]['message']['function_name'],
+                #         "content": json.dumps(resp['choices'][0]['message']['content'])
+                #     }
+                # )
+
+                # total_tokens += resp['usage']['total_tokens']
+                funcs.append(
+                    resp['choices'][0]['message']['function_name']
+                )
+                pass
 
         # 向API请求补全
-        message, total_token = pkg.utils.context.get_openai_manager().request_completion(
-            prompts,
-        )
+        # message, total_token = pkg.utils.context.get_openai_manager().request_completion(
+        #     prompts,
+        # )
 
         # 成功获取,处理回复
-        res_test = message
-        res_ans = res_test.strip()
+        # res_test = message
+        res_ans = res_text.strip()
 
         # 将此次对话的双方内容加入到prompt中
-        self.prompt.append({'role': 'user', 'content': text})
-        self.prompt.append({'role': 'assistant', 'content': res_ans})
+        # self.prompt.append({'role': 'user', 'content': text})
+        # self.prompt.append({'role': 'assistant', 'content': res_ans})
+        if text:
+            self.prompt.append({'role': 'user', 'content': text})
+        # 添加pending_msgs
+        self.prompt += pending_msgs
 
         # 向token_counts中添加本回合的token数量
-        self.token_counts.append(total_token-total_token_before_query)
-        logging.debug("本回合使用token: {}, session counts: {}".format(total_token-total_token_before_query, self.token_counts))
+        # self.token_counts.append(total_tokens-total_token_before_query)
+        # logging.debug("本回合使用token: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts))
 
         if self.just_switched_to_exist_session:
             self.just_switched_to_exist_session = False
             self.set_ongoing()
 
-        return res_ans if res_ans[0] != '\n' else res_ans[1:]
+        return res_ans if res_ans[0] != '\n' else res_ans[1:], finish_reason, funcs
 
     # 删除上一回合并返回上一回合的问题
     def undo(self) -> str:
@@ -261,7 +367,7 @@ class Session:
         return question
 
     # 构建对话体
-    def cut_out(self, msg: str, max_tokens: int) -> tuple[list, list]:
+    def cut_out(self, msg: str, max_tokens: int, default_prompt: list, prompt: list) -> tuple[list, list]:
         """将现有prompt进行切割处理,使得新的prompt长度不超过max_tokens
 
         :return: (新的prompt, 新的token_counts)
@@ -274,49 +380,42 @@ class Session:
 
         # 包装目前的对话回合内容
         changable_prompts = []
-        changable_counts = []
-        # 倒着来, 遍历prompt的步长为2, 遍历tokens_counts的步长为1
-        changable_index = len(self.prompt) - 1
-        token_count_index = len(self.token_counts) - 1
 
-        packed_tokens = 0
+        use_model = context.get_config().completion_api_params['model']
 
-        while changable_index >= 0 and token_count_index >= 0:
-            if packed_tokens + self.token_counts[token_count_index] > max_tokens:
+        ptr = len(prompt) - 1
+
+        # 直接从后向前扫描拼接,不管是否是整回合
+        while ptr >= 0:
+            if openai_modelmgr.count_tokens(prompt[ptr:ptr+1]+changable_prompts, use_model) > max_tokens:
                 break
 
-            changable_prompts.insert(0, self.prompt[changable_index])
-            changable_prompts.insert(0, self.prompt[changable_index - 1])
-            changable_counts.insert(0, self.token_counts[token_count_index])
-            packed_tokens += self.token_counts[token_count_index]
+            changable_prompts.insert(0, prompt[ptr])
 
-            changable_index -= 2
-            token_count_index -= 1
+            ptr -= 1
 
         # 将default_prompt和changable_prompts合并
-        result_prompt = self.default_prompt + changable_prompts
+        result_prompt = default_prompt + changable_prompts
 
         # 添加当前问题
-        result_prompt.append(
-            {
-                'role': 'user',
-                'content': msg
-            }
-        )
+        if msg:
+            result_prompt.append(
+                {
+                    'role': 'user',
+                    'content': msg
+                }
+            )
 
-        logging.debug('cut_out: {}\nchangable section tokens: {}\npacked counts: {}\nsession counts: {}'.format(json.dumps(result_prompt, ensure_ascii=False, indent=4),
-                                                                    packed_tokens,
-                                                                    changable_counts,
-                                                                    self.token_counts))
+        logging.debug("cut_out: {}".format(json.dumps(result_prompt, ensure_ascii=False, indent=4)))
 
-        return result_prompt, changable_counts
+        return result_prompt, openai_modelmgr.count_tokens(changable_prompts, use_model)
 
     # 持久化session
     def persistence(self):
         if self.prompt == self.get_default_prompt():
             return
 
-        db_inst = pkg.utils.context.get_database_manager()
+        db_inst = context.get_database_manager()
 
         name_spt = self.name.split('_')
 
@@ -327,7 +426,7 @@ class Session:
                         json.dumps(self.prompt), json.dumps(self.default_prompt), json.dumps(self.token_counts))
 
    # 重置session
-    def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None):
+    def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None, persist: bool = False):
         if self.prompt:
             self.persistence()
         if explicit:
@@ -338,14 +437,15 @@ class Session:
             }
 
             # 此事件不支持阻止默认行为
-            _ = pkg.plugin.host.emit(plugin_models.SessionExplicitReset, **args)
+            _ = plugin_host.emit(plugin_models.SessionExplicitReset, **args)
 
-            pkg.utils.context.get_database_manager().explicit_close_session(self.name, self.create_timestamp)
+            context.get_database_manager().explicit_close_session(self.name, self.create_timestamp)
 
         if expired:
-            pkg.utils.context.get_database_manager().set_session_expired(self.name, self.create_timestamp)
+            context.get_database_manager().set_session_expired(self.name, self.create_timestamp)
 
-        self.default_prompt = self.get_default_prompt(use_prompt)
+        if not persist:  # 不要求保持default prompt
+            self.default_prompt = self.get_default_prompt(use_prompt)
         self.prompt = []
         self.token_counts = []
         self.create_timestamp = int(time.time())
@@ -359,11 +459,11 @@ class Session:
 
     # 将本session的数据库状态设置为on_going
     def set_ongoing(self):
-        pkg.utils.context.get_database_manager().set_session_ongoing(self.name, self.create_timestamp)
+        context.get_database_manager().set_session_ongoing(self.name, self.create_timestamp)
 
     # 切换到上一个session
     def last_session(self):
-        last_one = pkg.utils.context.get_database_manager().last_session(self.name, self.last_interact_timestamp)
+        last_one = context.get_database_manager().last_session(self.name, self.last_interact_timestamp)
         if last_one is None:
             return None
         else:
@@ -384,7 +484,7 @@ class Session:
 
     # 切换到下一个session
     def next_session(self):
-        next_one = pkg.utils.context.get_database_manager().next_session(self.name, self.last_interact_timestamp)
+        next_one = context.get_database_manager().next_session(self.name, self.last_interact_timestamp)
         if next_one is None:
             return None
         else:
@@ -404,13 +504,13 @@ class Session:
         return self
 
     def list_history(self, capacity: int = 10, page: int = 0):
-        return pkg.utils.context.get_database_manager().list_history(self.name, capacity, page)
+        return context.get_database_manager().list_history(self.name, capacity, page)
 
     def delete_history(self, index: int) -> bool:
-        return pkg.utils.context.get_database_manager().delete_history(self.name, index)
+        return context.get_database_manager().delete_history(self.name, index)
 
     def delete_all_history(self) -> bool:
-        return pkg.utils.context.get_database_manager().delete_all_history(self.name)
+        return context.get_database_manager().delete_all_history(self.name)
 
     def draw_image(self, prompt: str):
-        return pkg.utils.context.get_openai_manager().request_image(prompt)
+        return context.get_openai_manager().request_image(prompt)
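As a rough sketch of how a caller consumes the reworked query() above: it now returns a tuple instead of a bare string. The session name below is hypothetical, and this assumes the surrounding runtime (config, OpenAI manager, QQ bot manager and its adapter) is already initialized.

# Hedged usage sketch; query() blocks until the turn finishes.
session = get_session("person_12345678")            # hypothetical session name
text, finish_reason, funcs = session.query("你好")

# text          : assistant reply (a leading newline, if any, is stripped)
# finish_reason : taken from the last response chunk, e.g. "stop"
# funcs         : names of content functions invoked during this turn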
@@ -7,12 +7,21 @@ import pkgutil
 import sys
 import shutil
 import traceback
+import time
+import re
 
-import pkg.utils.context as context
-import pkg.plugin.switch as switch
-import pkg.plugin.settings as settings
+from ..utils import updater as updater
+from ..utils import network as network
+from ..utils import context as context
+from ..plugin import switch as switch
+from ..plugin import settings as settings
+from ..qqbot import adapter as msadapter
+from ..plugin import metadata as metadata
 
 from mirai import Mirai
+import requests
+
+from CallingGPT.session.session import Session
 
 __plugins__ = {}
 """插件列表
@@ -40,6 +49,15 @@ __plugins__ = {}
 __plugins_order__ = []
 """插件顺序"""
 
+__enable_content_functions__ = True
+"""是否启用内容函数"""
+
+__callable_functions__ = []
+"""供GPT调用的函数结构"""
+
+__function_inst_map__: dict[str, callable] = {}
+"""函数名:实例 映射"""
+
+
 def generate_plugin_order():
     """根据__plugin__生成插件初始顺序,无视是否启用"""
@@ -52,6 +70,8 @@ def generate_plugin_order():
 def iter_plugins():
     """按照顺序迭代插件"""
     for plugin_name in __plugins_order__:
+        if plugin_name not in __plugins__:
+            continue
         yield __plugins__[plugin_name]
 
 
@@ -78,7 +98,7 @@ def walk_plugin_path(module, prefix='', path_prefix=''):
                 __current_module_path__ = "plugins/"+path_prefix + item.name + '.py'
 
                 importlib.import_module(module.__name__ + '.' + item.name)
-                logging.info('加载模块: plugins/{} 成功'.format(path_prefix + item.name + '.py'))
+                logging.debug('加载模块: plugins/{} 成功'.format(path_prefix + item.name + '.py'))
             except:
                 logging.error('加载模块: plugins/{} 失败: {}'.format(path_prefix + item.name + '.py', sys.exc_info()))
                 traceback.print_exc()
@@ -86,7 +106,7 @@ def walk_plugin_path(module, prefix='', path_prefix=''):
 
 def load_plugins():
     """加载插件"""
-    logging.info("加载插件")
+    logging.debug("加载插件")
     PluginHost()
     walk_plugin_path(__import__('plugins'))
 
@@ -100,20 +120,36 @@ def load_plugins():
     # 加载插件顺序
     settings.load_settings()
 
+    logging.debug("registered plugins: {}".format(__plugins__))
+
+    # 输出已注册的内容函数列表
+    logging.debug("registered content functions: {}".format(__callable_functions__))
+    logging.debug("function instance map: {}".format(__function_inst_map__))
+
+    # 迁移插件源地址记录
+    metadata.do_plugin_git_repo_migrate()
+
 
 def initialize_plugins():
     """初始化插件"""
     logging.info("初始化插件")
     import pkg.plugin.models as models
 
+    successfully_initialized_plugins = []
+
     for plugin in iter_plugins():
         # if not plugin['enabled']:
         #     continue
         try:
             models.__current_registering_plugin__ = plugin['name']
             plugin['instance'] = plugin["class"](plugin_host=context.get_plugin_host())
-            logging.info("插件 {} 已初始化".format(plugin['name']))
+            # logging.info("插件 {} 已初始化".format(plugin['name']))
+            successfully_initialized_plugins.append(plugin['name'])
         except:
             logging.error("插件{}初始化时发生错误: {}".format(plugin['name'], sys.exc_info()))
+            logging.debug(traceback.format_exc())
+
+    logging.info("以下插件已初始化: {}".format(", ".join(successfully_initialized_plugins)))
 
 
 def unload_plugins():
@@ -132,34 +168,100 @@ def unload_plugins():
     #         logging.error("插件{}卸载时发生错误: {}".format(plugin['name'], sys.exc_info()))
 
 
-def install_plugin(repo_url: str):
-    """安装插件,从git储存库获取并解决依赖"""
-    try:
-        import pkg.utils.pkgmgr
-        pkg.utils.pkgmgr.ensure_dulwich()
-    except:
-        pass
-
-    try:
-        import dulwich
-    except ModuleNotFoundError:
-        raise Exception("dulwich模块未安装,请查看 https://github.com/RockChinQ/QChatGPT/issues/77")
-
-    from dulwich import porcelain
-
-    logging.info("克隆插件储存库: {}".format(repo_url))
-    repo = porcelain.clone(repo_url, "plugins/"+repo_url.split(".git")[0].split("/")[-1]+"/", checkout=True)
+def get_github_plugin_repo_label(repo_url: str) -> list[str]:
+    """获取username, repo"""
+
+    # 提取 username/repo , 正则表达式
+    repo = re.findall(r'(?:https?://github\.com/|git@github\.com:)([^/]+/[^/]+?)(?:\.git|/|$)', repo_url)
+
+    if len(repo) > 0:  # github
+        return repo[0].split("/")
+    else:
+        return None
+
+
+def download_plugin_source_code(repo_url: str, target_path: str) -> str:
+    """下载插件源码"""
+    # 检查源类型
+
+    # 提取 username/repo , 正则表达式
+    repo = get_github_plugin_repo_label(repo_url)
+
+    target_path += repo[1]
+
+    if repo is not None:  # github
+        logging.info("从 GitHub 下载插件源码...")
+
+        zipball_url = f"https://api.github.com/repos/{'/'.join(repo)}/zipball/HEAD"
+
+        zip_resp = requests.get(
+            url=zipball_url,
+            proxies=network.wrapper_proxies(),
+            stream=True
+        )
+
+        if zip_resp.status_code != 200:
+            raise Exception("下载源码失败: {}".format(zip_resp.text))
+
+        if os.path.exists("temp/"+target_path):
+            shutil.rmtree("temp/"+target_path)
+
+        if os.path.exists(target_path):
+            shutil.rmtree(target_path)
+
+        os.makedirs("temp/"+target_path)
+
+        with open("temp/"+target_path+"/source.zip", "wb") as f:
+            for chunk in zip_resp.iter_content(chunk_size=1024):
+                if chunk:
+                    f.write(chunk)
+
+        logging.info("下载完成, 解压...")
+        import zipfile
+        with zipfile.ZipFile("temp/"+target_path+"/source.zip", 'r') as zip_ref:
+            zip_ref.extractall("temp/"+target_path)
+        os.remove("temp/"+target_path+"/source.zip")
+
+        # 目标是 username-repo-hash , 用正则表达式提取完整的文件夹名,复制到 plugins/repo
+        import glob
+
+        # 获取解压后的文件夹名
+        unzip_dir = glob.glob("temp/"+target_path+"/*")[0]
+
+        # 复制到 plugins/repo
+        shutil.copytree(unzip_dir, target_path+"/")
+
+        # 删除解压后的文件夹
+        shutil.rmtree(unzip_dir)
+
+        logging.info("解压完成")
+    else:
+        raise Exception("暂不支持的源类型,请使用 GitHub 仓库发行插件。")
+
+    return repo[1]
+
+
+def check_requirements(path: str):
     # 检查此目录是否包含requirements.txt
-    if os.path.exists("plugins/"+repo_url.split(".git")[0].split("/")[-1]+"/requirements.txt"):
+    if os.path.exists(path+"/requirements.txt"):
         logging.info("检测到requirements.txt,正在安装依赖")
         import pkg.utils.pkgmgr
-        pkg.utils.pkgmgr.install_requirements("plugins/"+repo_url.split(".git")[0].split("/")[-1]+"/requirements.txt")
+        pkg.utils.pkgmgr.install_requirements(path+"/requirements.txt")
 
         import pkg.utils.log as log
         log.reset_logging()
 
+
+def install_plugin(repo_url: str):
+    """安装插件,从git储存库获取并解决依赖"""
+
+    repo_label = download_plugin_source_code(repo_url, "plugins/")
+
+    check_requirements("plugins/"+repo_label)
+
+    metadata.set_plugin_metadata(repo_label, repo_url, int(time.time()), "HEAD")
+
+
 def uninstall_plugin(plugin_name: str) -> str:
     """卸载插件"""
     if plugin_name not in __plugins__:
@@ -176,6 +278,47 @@ def uninstall_plugin(plugin_name: str) -> str:
     return "plugins/"+plugin_path
 
 
+def update_plugin(plugin_name: str):
+    """更新插件"""
+    # 检查是否有远程地址记录
+    plugin_path_name = get_plugin_path_name_by_plugin_name(plugin_name)
+
+    meta = metadata.get_plugin_metadata(plugin_path_name)
+
+    if meta == {}:
+        raise Exception("没有此插件元数据信息,无法更新")
+
+    remote_url = meta['source']
+    if remote_url == "https://github.com/RockChinQ/QChatGPT" or remote_url == "https://gitee.com/RockChin/QChatGPT" \
+        or remote_url == "" or remote_url is None or remote_url == "http://github.com/RockChinQ/QChatGPT" or remote_url == "http://gitee.com/RockChin/QChatGPT":
+        raise Exception("插件没有远程地址记录,无法更新")
+
+    # 重新安装插件
+    logging.info("正在重新安装插件以进行更新...")
+
+    install_plugin(remote_url)
+
+
+def get_plugin_name_by_path_name(plugin_path_name: str) -> str:
+    for k, v in __plugins__.items():
+        if v['path'] == "plugins/"+plugin_path_name+"/main.py":
+            return k
+    return None
+
+
+def get_plugin_path_name_by_plugin_name(plugin_name: str) -> str:
+    if plugin_name not in __plugins__:
+        return None
+
+    plugin_main_module_path = __plugins__[plugin_name]['path']
+
+    plugin_main_module_path = plugin_main_module_path.replace("\\", "/")
+
+    spt = plugin_main_module_path.split("/")
+
+    return spt[1]
+
+
 class EventContext:
     """事件上下文"""
     eid = 0
@@ -212,7 +355,7 @@ class EventContext:
             self.__return_value__[key] = []
         self.__return_value__[key].append(ret)
 
-    def get_return(self, key: str):
+    def get_return(self, key: str) -> list:
         """获取key的所有返回值"""
         if key in self.__return_value__:
             return self.__return_value__[key]
@@ -261,7 +404,9 @@ class PluginHost:
     """插件宿主"""
 
     def __init__(self):
+        """初始化插件宿主"""
         context.set_plugin_host(self)
+        self.calling_gpt_session = Session([])
 
     def get_runtime_context(self) -> context:
         """获取运行时上下文(pkg.utils.context模块的对象)
@@ -276,13 +421,17 @@ class PluginHost:
         """获取机器人对象"""
         return context.get_qqbot_manager().bot
 
+    def get_bot_adapter(self) -> msadapter.MessageSourceAdapter:
+        """获取消息源适配器"""
+        return context.get_qqbot_manager().adapter
+
     def send_person_message(self, person, message):
         """发送私聊消息"""
-        asyncio.run(self.get_bot().send_friend_message(person, message))
+        self.get_bot_adapter().send_message("person", person, message)
 
     def send_group_message(self, group, message):
         """发送群消息"""
-        asyncio.run(self.get_bot().send_group_message(group, message))
+        self.get_bot_adapter().send_message("group", group, message)
 
     def notify_admin(self, message):
         """通知管理员"""
@@ -327,7 +476,7 @@ class PluginHost:
                     logging.debug("插件 {} 已要求阻止事件 {} 的默认行为".format(plugin['name'], event_name))
 
             except Exception as e:
-                logging.error("插件{}触发事件{}时发生错误".format(plugin['name'], event_name))
+                logging.error("插件{}响应事件{}时发生错误".format(plugin['name'], event_name))
                 logging.error(traceback.format_exc())
 
             # print("done:{}".format(plugin['name']))
@@ -339,3 +488,6 @@ class PluginHost:
                                                             event_context.__return_value__))
 
         return event_context
+
+if __name__ == "__main__":
+    pass
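A small sketch of what the new GitHub-based install path does with a repository URL; the non-GitHub URL below is illustrative only.

# Hedged sketch of get_github_plugin_repo_label(): the regex accepts both
# HTTPS and SSH GitHub URLs and returns [username, repo], or None otherwise.
get_github_plugin_repo_label("https://github.com/RockChinQ/QChatGPT")    # -> ["RockChinQ", "QChatGPT"]
get_github_plugin_repo_label("git@github.com:RockChinQ/QChatGPT.git")    # -> ["RockChinQ", "QChatGPT"]
get_github_plugin_repo_label("https://example.com/some/repo")            # -> None (non-GitHub source)

# download_plugin_source_code() then fetches
# https://api.github.com/repos/<username>/<repo>/zipball/HEAD into temp/,
# unzips it, and copies the extracted folder into plugins/<repo>.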
pkg/plugin/metadata.py (new file, 87 lines)
@@ -0,0 +1,87 @@
+import os
+import shutil
+import json
+import time
+
+import dulwich.errors as dulwich_err
+
+from ..utils import updater
+
+
+def read_metadata_file() -> dict:
+    # 读取 plugins/metadata.json 文件
+    if not os.path.exists('plugins/metadata.json'):
+        return {}
+    with open('plugins/metadata.json', 'r') as f:
+        return json.load(f)
+
+
+def write_metadata_file(metadata: dict):
+    if not os.path.exists('plugins'):
+        os.mkdir('plugins')
+
+    with open('plugins/metadata.json', 'w') as f:
+        json.dump(metadata, f, indent=4, ensure_ascii=False)
+
+
+def do_plugin_git_repo_migrate():
+    # 仅在 plugins/metadata.json 不存在时执行
+    if os.path.exists('plugins/metadata.json'):
+        return
+
+    metadata = read_metadata_file()
+
+    # 遍历 plugins 下所有目录,获取目录的git远程地址
+    for plugin_name in os.listdir('plugins'):
+        plugin_path = os.path.join('plugins', plugin_name)
+        if not os.path.isdir(plugin_path):
+            continue
+
+        remote_url = None
+        try:
+            remote_url = updater.get_remote_url(plugin_path)
+        except dulwich_err.NotGitRepository:
+            continue
+        if remote_url == "https://github.com/RockChinQ/QChatGPT" or remote_url == "https://gitee.com/RockChin/QChatGPT" \
+            or remote_url == "" or remote_url is None or remote_url == "http://github.com/RockChinQ/QChatGPT" or remote_url == "http://gitee.com/RockChin/QChatGPT":
+            continue
+
+        from . import host
+
+        if plugin_name not in metadata:
+            metadata[plugin_name] = {
+                'source': remote_url,
+                'install_timestamp': int(time.time()),
+                'ref': 'HEAD',
+            }
+
+    write_metadata_file(metadata)
+
+
+def set_plugin_metadata(
+    plugin_name: str,
+    source: str,
+    install_timestamp: int,
+    ref: str,
+):
+    metadata = read_metadata_file()
+    metadata[plugin_name] = {
+        'source': source,
+        'install_timestamp': install_timestamp,
+        'ref': ref,
+    }
+    write_metadata_file(metadata)
+
+
+def remove_plugin_metadata(plugin_name: str):
+    metadata = read_metadata_file()
+    if plugin_name in metadata:
+        del metadata[plugin_name]
+    write_metadata_file(metadata)
+
+
+def get_plugin_metadata(plugin_name: str) -> dict:
+    metadata = read_metadata_file()
+    if plugin_name in metadata:
+        return metadata[plugin_name]
+    return {}
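For reference, the plugins/metadata.json written by set_plugin_metadata() ends up shaped roughly like this (the plugin name, source URL, and timestamp below are made-up examples):

{
    "MyPlugin": {
        "source": "https://github.com/someone/MyPlugin",
        "install_timestamp": 1690000000,
        "ref": "HEAD"
    }
}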
@@ -1,7 +1,7 @@
 import logging
 
-import pkg.plugin.host as host
-import pkg.utils.context
+from ..plugin import host
+from ..utils import context
 
 PersonMessageReceived = "person_message_received"
 """收到私聊消息时,在判断是否应该响应前触发
@@ -88,6 +88,8 @@ NormalMessageResponded = "normal_message_responded"
     session: pkg.openai.session.Session 会话对象
     prefix: str 回复文字消息的前缀
     response_text: str 响应文本
+    finish_reason: str 响应结束原因
+    funcs_called: list[str] 此次响应中调用的函数列表
 
     returns (optional):
         prefix: str 修改后的回复文字消息的前缀
@@ -132,18 +134,64 @@ KeySwitched = "key_switched"
     key_list: list[str] api-key列表
 """
 
+PromptPreProcessing = "prompt_pre_processing"
+"""每回合调用接口前对prompt进行预处理时触发,此事件不支持阻止默认行为
+    kwargs:
+        session_name: str 会话名称(<launcher_type>_<launcher_id>)
+        default_prompt: list 此session使用的情景预设内容
+        prompt: list 此session现有的prompt内容
+        text_message: str 用户发送的消息文本
+
+    returns (optional):
+        default_prompt: list 修改后的情景预设内容
+        prompt: list 修改后的prompt内容
+        text_message: str 修改后的消息文本
+"""
 
-def on(event: str):
+def on(*args, **kwargs):
     """注册事件监听器
-    :param
-        event: str 事件名称
     """
-    return Plugin.on(event)
+    return Plugin.on(*args, **kwargs)
+
+def func(*args, **kwargs):
+    """注册内容函数,声明此函数为一个内容函数,在对话中将发送此函数给GPT以供其调用
+    此函数可以具有任意的参数,但必须按照[此文档](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format)
+    所述的格式编写函数的docstring。
+    此功能仅支持在使用gpt-3.5或gpt-4系列模型时使用。
+    """
+    return Plugin.func(*args, **kwargs)
+
 
 __current_registering_plugin__ = ""
 
+
+def require_ver(ge: str, le: str="v999.9.9") -> bool:
+    """插件版本要求装饰器
+
+    Args:
+        ge (str): 最低版本要求
+        le (str, optional): 最高版本要求
+
+    Returns:
+        bool: 是否满足要求, False时为无法获取版本号,True时为满足要求,报错为不满足要求
+    """
+    qchatgpt_version = ""
+
+    from pkg.utils.updater import get_current_tag, compare_version_str
+
+    try:
+        qchatgpt_version = get_current_tag()  # 从updater模块获取版本号
+    except:
+        return False
+
+    if compare_version_str(qchatgpt_version, ge) < 0 or \
+        (compare_version_str(qchatgpt_version, le) > 0):
+        raise Exception("QChatGPT 版本不满足要求,某些功能(可能是由插件提供的)无法正常使用。(要求版本:{}-{},但当前版本:{})".format(ge, le, qchatgpt_version))
+
+    return True
+
+
 class Plugin:
     """插件基类"""
 
@@ -176,6 +224,34 @@ class Plugin:
 
         return wrapper
 
+    @classmethod
+    def func(cls, name: str=None):
+        """内容函数装饰器
+        """
+        global __current_registering_plugin__
+        from CallingGPT.entities.namespace import get_func_schema
+
+        def wrapper(func):
+
+            function_schema = get_func_schema(func)
+            function_schema['name'] = __current_registering_plugin__ + '-' + (func.__name__ if name is None else name)
+
+            function_schema['enabled'] = True
+
+            host.__function_inst_map__[function_schema['name']] = function_schema['function']
+
+            del function_schema['function']
+
+            # logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema))
+
+            host.__callable_functions__.append(
+                function_schema
+            )
+
+            return func
+
+        return wrapper
+
 
 def register(name: str, description: str, version: str, author: str):
     """注册插件, 此函数作为装饰器使用
@@ -209,7 +285,7 @@ def register(name: str, description: str, version: str, author: str):
         cls.description = description
         cls.version = version
         cls.author = author
-        cls.host = pkg.utils.context.get_plugin_host()
+        cls.host = context.get_plugin_host()
         cls.enabled = True
        cls.path = host.__current_module_path__
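To tie the new hooks together, a minimal plugin sketch using the decorators documented above. Everything here is illustrative: the plugin name, event handling, and the weather function are hypothetical; the return path assumes EventContext.add_return(key, value) as the way to hand values back (which is how session.py reads them via get_return_value()), and the content-function docstring only approximates the format described in the linked CallingGPT document.

# Hedged sketch of a plugin using the new content-function and prompt-preprocessing hooks.
from pkg.plugin.models import register, on, func, require_ver, Plugin, PromptPreProcessing
from pkg.plugin.host import EventContext, PluginHost


@register(name="DemoPlugin", description="示例插件", version="0.1", author="example")
class DemoPlugin(Plugin):
    def __init__(self, plugin_host: PluginHost):
        require_ver("v2.6.0")  # raises if the running QChatGPT version is outside the allowed range

    @on(PromptPreProcessing)
    def pre_process(self, event: EventContext, **kwargs):
        # Prepend a system message to this turn's scenario preset and return it.
        default_prompt = [{"role": "system", "content": "回答请保持简短。"}] + kwargs["default_prompt"]
        event.add_return("default_prompt", default_prompt)

    # Content function: registered into host.__callable_functions__ and offered to GPT.
    @func(name="get_weather")
    def get_weather(city: str) -> str:
        """查询指定城市的天气

        Args:
            city(str): 城市名

        Returns:
            str: 天气描述
        """
        return "晴"  # placeholder result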
@@ -1,14 +1,17 @@
 import json
 import os
 
-import pkg.plugin.host as host
 import logging
 
+from ..plugin import host
+
 def wrapper_dict_from_runtime_context() -> dict:
     """从变量中包装settings.json的数据字典"""
     settings = {
-        "order": []
+        "order": [],
+        "functions": {
+            "enabled": host.__enable_content_functions__
+        }
     }
 
     for plugin_name in host.__plugins_order__:
@@ -22,6 +25,11 @@ def apply_settings(settings: dict):
     if "order" in settings:
         host.__plugins_order__ = settings["order"]
 
+    if "functions" in settings:
+        if "enabled" in settings["functions"]:
+            host.__enable_content_functions__ = settings["functions"]["enabled"]
+            # logging.debug("set content function enabled: {}".format(host.__enable_content_functions__))
+
 
 def dump_settings():
     """保存settings.json数据"""
@@ -78,6 +86,17 @@ def load_settings():
             settings["order"].append(plugin_name)
             settings_modified = True
 
+    if "functions" not in settings:
+        settings["functions"] = {
+            "enabled": host.__enable_content_functions__
+        }
+        settings_modified = True
+    elif "enabled" not in settings["functions"]:
+        settings["functions"]["enabled"] = host.__enable_content_functions__
+        settings_modified = True
+
+    logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enabled"] else "禁用"))
+
     apply_settings(settings)
 
     if settings_modified:
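After this change, plugins/settings.json carries a global content-function switch alongside the existing plugin order; the file ends up roughly like this (the plugin names are placeholders):

{
    "order": ["PluginA", "PluginB"],
    "functions": {
        "enabled": true
    }
}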
@@ -3,7 +3,7 @@ import json
 import logging
 import os
 
-import pkg.plugin.host as host
+from ..plugin import host
 
 
 def wrapper_dict_from_plugin_list() -> dict:
@@ -28,6 +28,11 @@ def apply_switch(switch: dict):
     for plugin_name in switch:
         host.__plugins__[plugin_name]["enabled"] = switch[plugin_name]["enabled"]
 
+        # 查找此插件的所有内容函数
+        for func in host.__callable_functions__:
+            if func['name'].startswith(plugin_name + '-'):
+                func['enabled'] = switch[plugin_name]["enabled"]
+
 
 def dump_switch():
     """保存开关数据"""
pkg/qqbot/adapter.py (new file, 136 lines)
@@ -0,0 +1,136 @@
+# MessageSource的适配器
+import typing
+
+import mirai
+
+
+class MessageSourceAdapter:
+    def __init__(self, config: dict):
+        pass
+
+    def send_message(
+        self,
+        target_type: str,
+        target_id: str,
+        message: mirai.MessageChain
+    ):
+        """发送消息
+
+        Args:
+            target_type (str): 目标类型,`person`或`group`
+            target_id (str): 目标ID
+            message (mirai.MessageChain): YiriMirai库的消息链
+        """
+        raise NotImplementedError
+
+    def reply_message(
+        self,
+        message_source: mirai.MessageEvent,
+        message: mirai.MessageChain,
+        quote_origin: bool = False
+    ):
+        """回复消息
+
+        Args:
+            message_source (mirai.MessageEvent): YiriMirai消息源事件
+            message (mirai.MessageChain): YiriMirai库的消息链
+            quote_origin (bool, optional): 是否引用原消息. Defaults to False.
+        """
+        raise NotImplementedError
+
+    def is_muted(self, group_id: int) -> bool:
+        """获取账号是否在指定群被禁言"""
+        raise NotImplementedError
+
+    def register_listener(
+        self,
+        event_type: typing.Type[mirai.Event],
+        callback: typing.Callable[[mirai.Event], None]
+    ):
+        """注册事件监听器
+
+        Args:
+            event_type (typing.Type[mirai.Event]): YiriMirai事件类型
+            callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
+        """
+        raise NotImplementedError
+
+    def unregister_listener(
+        self,
+        event_type: typing.Type[mirai.Event],
+        callback: typing.Callable[[mirai.Event], None]
+    ):
+        """注销事件监听器
+
+        Args:
+            event_type (typing.Type[mirai.Event]): YiriMirai事件类型
+            callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
+        """
+        raise NotImplementedError
+
+    def run_sync(self):
+        """以阻塞的方式运行适配器"""
+        raise NotImplementedError
+
+    def kill(self) -> bool:
+        """关闭适配器
+
+        Returns:
+            bool: 是否成功关闭,热重载时若此函数返回False则不会重载MessageSource底层
+        """
+        raise NotImplementedError
+
+
+class MessageConverter:
+    """消息链转换器基类"""
+    @staticmethod
+    def yiri2target(message_chain: mirai.MessageChain):
+        """将YiriMirai消息链转换为目标消息链
+
+        Args:
+            message_chain (mirai.MessageChain): YiriMirai消息链
+
+        Returns:
+            typing.Any: 目标消息链
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def target2yiri(message_chain: typing.Any) -> mirai.MessageChain:
+        """将目标消息链转换为YiriMirai消息链
+
+        Args:
+            message_chain (typing.Any): 目标消息链
+
+        Returns:
+            mirai.MessageChain: YiriMirai消息链
+        """
+        raise NotImplementedError
+
+
+class EventConverter:
+    """事件转换器基类"""
+
+    @staticmethod
+    def yiri2target(event: typing.Type[mirai.Event]):
+        """将YiriMirai事件转换为目标事件
+
+        Args:
+            event (typing.Type[mirai.Event]): YiriMirai事件
+
+        Returns:
+            typing.Any: 目标事件
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def target2yiri(event: typing.Any) -> mirai.Event:
+        """将目标事件的调用参数转换为YiriMirai的事件参数对象
+
+        Args:
+            event (typing.Any): 目标事件
+
+        Returns:
+            typing.Type[mirai.Event]: YiriMirai事件
+        """
+        raise NotImplementedError
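MessageSourceAdapter is only an interface: a concrete backend is expected to subclass it and implement the messaging primitives. A rough, purely hypothetical stub to show the shape of such a subclass:

# Hedged stub of a concrete adapter; a real implementation would talk to an
# actual protocol backend instead of printing.
import typing
import mirai

class DummyAdapter(MessageSourceAdapter):
    def __init__(self, config: dict):
        self.config = config

    def send_message(self, target_type: str, target_id: str, message: mirai.MessageChain):
        print(f"[{target_type}:{target_id}] {message}")

    def reply_message(self, message_source: mirai.MessageEvent, message: mirai.MessageChain, quote_origin: bool = False):
        print(f"reply: {message}")

    def is_muted(self, group_id: int) -> bool:
        return False

    def register_listener(self, event_type: typing.Type[mirai.Event], callback: typing.Callable[[mirai.Event], None]):
        pass  # a real adapter would subscribe the callback to its event loop

    def unregister_listener(self, event_type: typing.Type[mirai.Event], callback: typing.Callable[[mirai.Event], None]):
        pass

    def run_sync(self):
        pass  # block and pump events in a real adapter

    def kill(self) -> bool:
        return True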
@@ -1,18 +1,18 @@
-import pkg.utils.context
+from ..utils import context
 
 
 def is_banned(launcher_type: str, launcher_id: int, sender_id: int) -> bool:
-    if not pkg.utils.context.get_qqbot_manager().enable_banlist:
+    if not context.get_qqbot_manager().enable_banlist:
         return False
 
     result = False
 
     if launcher_type == 'group':
         # 检查是否显式声明发起人QQ要被person忽略
-        if sender_id in pkg.utils.context.get_qqbot_manager().ban_person:
+        if sender_id in context.get_qqbot_manager().ban_person:
             result = True
         else:
-            for group_rule in pkg.utils.context.get_qqbot_manager().ban_group:
+            for group_rule in context.get_qqbot_manager().ban_group:
                 if type(group_rule) == int:
                     if group_rule == launcher_id:  # 此群群号被禁用
                         result = True
@@ -32,7 +32,7 @@ def is_banned(launcher_type: str, launcher_id: int, sender_id: int) -> bool:
 
     else:
         # ban_person, 与群规则相同
-        for person_rule in pkg.utils.context.get_qqbot_manager().ban_person:
+        for person_rule in context.get_qqbot_manager().ban_person:
             if type(person_rule) == int:
                 if person_rule == launcher_id:
                     result = True
@@ -2,21 +2,21 @@
 import os
 import time
 import base64
+import typing
 
-import config
 from mirai.models.message import MessageComponent, MessageChain, Image
 from mirai.models.message import ForwardMessageNode
 from mirai.models.base import MiraiBaseModel
-from typing import List
-import pkg.utils.context as context
-import pkg.utils.text2img as text2img
+from ..utils import text2img
+import config
 
 
 class ForwardMessageDiaplay(MiraiBaseModel):
     title: str = "群聊的聊天记录"
     brief: str = "[聊天记录]"
     source: str = "聊天记录"
-    preview: List[str] = []
+    preview: typing.List[str] = []
     summary: str = "查看x条转发消息"
 
 
@@ -26,7 +26,7 @@ class Forward(MessageComponent):
     """消息组件类型。"""
     display: ForwardMessageDiaplay
     """显示信息"""
-    node_list: List[ForwardMessageNode]
+    node_list: typing.List[ForwardMessageNode]
     """转发消息节点列表。"""
     def __init__(self, *args, **kwargs):
         if len(args) == 1:
@@ -1,10 +1,7 @@
|
|||||||
import importlib
|
|
||||||
import inspect
|
|
||||||
import logging
|
import logging
|
||||||
import copy
|
import copy
|
||||||
import pkgutil
|
import pkgutil
|
||||||
import traceback
|
import traceback
|
||||||
import types
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
|
|
||||||
@@ -260,8 +257,8 @@ def execute(context: Context) -> list:
|
|||||||
|
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
logging.debug('执行指令: {}'.format(path))
|
|
||||||
node = __command_list__[path]
|
node = __command_list__[path]
|
||||||
|
logging.debug('执行指令: {}'.format(path))
|
||||||
|
|
||||||
# 检查权限
|
# 检查权限
|
||||||
if ctx.privilege < node['privilege']:
|
if ctx.privilege < node['privilege']:
|
||||||
@@ -327,6 +324,10 @@ def apply_privileges():
|
|||||||
for path, priv in data.items():
|
for path, priv in data.items():
|
||||||
if path == 'comment':
|
if path == 'comment':
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
if path not in __command_list__:
|
||||||
|
continue
|
||||||
|
|
||||||
if __command_list__[path]['privilege'] != priv:
|
if __command_list__[path]['privilege'] != priv:
|
||||||
logging.debug('应用权限: {} -> {}(default: {})'.format(path, priv, __command_list__[path]['privilege']))
|
logging.debug('应用权限: {} -> {}(default: {})'.format(path, priv, __command_list__[path]['privilege']))
|
||||||
|
|
||||||
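The apply_privileges() change above makes the loader skip override entries whose command path no longer exists in __command_list__. A minimal sketch of the kind of mapping it iterates over, assuming a dict keyed by command name (the concrete file name and key format are assumptions, not shown in this diff; only "comment" and the privilege numbers mirror the code above):

    # hypothetical privilege-override data consumed by apply_privileges()
    data = {
        "comment": "higher value = higher privilege required",
        "draw": 2,    # e.g. restrict !draw to privilege 2
        "plugin": 1   # keep !plugin at privilege 1
    }
    for path, priv in data.items():
        if path == 'comment':
            continue
        # entries for commands that were removed are now silently skipped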
@@ -1,11 +1,12 @@
-from ..aamgr import AbstractCommandNode, Context
 import logging
 
-from mirai import Image
+import mirai
 
+from .. import aamgr
 import config
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="draw",
     description="使用DALL·E生成图片",
@@ -13,9 +14,9 @@ import config
     aliases=[],
     privilege=1
 )
-class DrawCommand(AbstractCommandNode):
+class DrawCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
 
         reply = []
@@ -28,7 +29,7 @@ class DrawCommand(AbstractCommandNode):
         res = session.draw_image(" ".join(ctx.params))
 
         logging.debug("draw_image result:{}".format(res))
-        reply = [Image(url=res['data'][0]['url'])]
+        reply = [mirai.Image(url=res['data'][0]['url'])]
         if not (hasattr(config, 'include_image_description')
                 and not config.include_image_description):
             reply.append(" ".join(ctx.params))
pkg/qqbot/cmds/funcs/func.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+import logging
+import json
+
+from .. import aamgr
+
+@aamgr.AbstractCommandNode.register(
+    parent=None,
+    name="func",
+    description="管理内容函数",
+    usage="!func",
+    aliases=[],
+    privilege=1
+)
+class FuncCommand(aamgr.AbstractCommandNode):
+    @classmethod
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
+        from pkg.plugin.models import host
+
+        reply = []
+
+        reply_str = "当前已加载的内容函数:\n\n"
+
+        logging.debug("host.__callable_functions__: {}".format(json.dumps(host.__callable_functions__, indent=4)))
+
+        index = 1
+        for func in host.__callable_functions__:
+            reply_str += "{}. {}{}:\n{}\n\n".format(index, ("(已禁用) " if not func['enabled'] else ""), func['name'], func['description'])
+            index += 1
+
+        reply = [reply_str]
+
+        return True, reply
@@ -1,22 +1,19 @@
-from ..aamgr import AbstractCommandNode, Context
-
-import os
-
-import pkg.plugin.host as plugin_host
-import pkg.utils.updater as updater
+from ....plugin import host as plugin_host
+from ....utils import updater
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="plugin",
     description="插件管理",
-    usage="!plugin\n!plugin get <插件仓库地址>\!plugin update\n!plugin del <插件名>\n!plugin on <插件名>\n!plugin off <插件名>",
+    usage="!plugin\n!plugin get <插件仓库地址>\n!plugin update\n!plugin del <插件名>\n!plugin on <插件名>\n!plugin off <插件名>",
     aliases=[],
-    privilege=2
+    privilege=1
 )
-class PluginCommand(AbstractCommandNode):
+class PluginCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         reply = []
         plugin_list = plugin_host.__plugins__
         if len(ctx.params) == 0:
@@ -48,7 +45,7 @@ class PluginCommand(AbstractCommandNode):
         return False, []
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=PluginCommand,
     name="get",
     description="安装插件",
@@ -56,9 +53,9 @@ class PluginCommand(AbstractCommandNode):
     aliases=[],
     privilege=2
 )
-class PluginGetCommand(AbstractCommandNode):
+class PluginGetCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import threading
         import logging
         import pkg.utils.context
@@ -81,57 +78,56 @@ class PluginGetCommand(AbstractCommandNode):
         return True, reply
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=PluginCommand,
     name="update",
-    description="更新所有插件",
+    description="更新指定插件或全部插件",
     usage="!plugin update",
     aliases=[],
     privilege=2
 )
-class PluginUpdateCommand(AbstractCommandNode):
+class PluginUpdateCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import threading
         import logging
         plugin_list = plugin_host.__plugins__
 
         reply = []
-        def closure():
-            try:
-                import pkg.utils.context
-                updated = []
-                for key in plugin_list:
-                    plugin = plugin_list[key]
-                    if updater.is_repo("/".join(plugin['path'].split('/')[:-1])):
-                        success = updater.pull_latest("/".join(plugin['path'].split('/')[:-1]))
-                        if success:
-                            updated.append(plugin['name'])
-
-                # 检查是否有requirements.txt
-                pkg.utils.context.get_qqbot_manager().notify_admin("正在安装依赖...")
-                for key in plugin_list:
-                    plugin = plugin_list[key]
-                    if os.path.exists("/".join(plugin['path'].split('/')[:-1])+"/requirements.txt"):
-                        logging.info("{}检测到requirements.txt,安装依赖".format(plugin['name']))
-                        import pkg.utils.pkgmgr
-                        pkg.utils.pkgmgr.install_requirements("/".join(plugin['path'].split('/')[:-1])+"/requirements.txt")
-
-                        import pkg.utils.log as log
-                        log.reset_logging()
-
-                pkg.utils.context.get_qqbot_manager().notify_admin("已更新插件: {}".format(", ".join(updated)))
-            except Exception as e:
-                logging.error("插件更新失败:{}".format(e))
-                pkg.utils.context.get_qqbot_manager().notify_admin("插件更新失败:{} 请尝试手动更新插件".format(e))
-
-        threading.Thread(target=closure).start()
-        reply = ["[bot]正在更新所有插件,请勿重复发起..."]
+        if len(ctx.crt_params) > 0:
+            def closure():
+                try:
+                    import pkg.utils.context
+
+                    updated = []
+
+                    if ctx.crt_params[0] == 'all':
+                        for key in plugin_list:
+                            plugin_host.update_plugin(key)
+                            updated.append(key)
+                    else:
+                        plugin_path_name = plugin_host.get_plugin_path_name_by_plugin_name(ctx.crt_params[0])
+
+                        if plugin_path_name is not None:
+                            plugin_host.update_plugin(ctx.crt_params[0])
+                            updated.append(ctx.crt_params[0])
+                        else:
+                            raise Exception("未找到插件: {}".format(ctx.crt_params[0]))
+
+                    pkg.utils.context.get_qqbot_manager().notify_admin("已更新插件: {}, 请发送 !reload 重载插件".format(", ".join(updated)))
+                except Exception as e:
+                    logging.error("插件更新失败:{}".format(e))
+                    pkg.utils.context.get_qqbot_manager().notify_admin("插件更新失败:{} 请使用 !plugin 命令确认插件名称或尝试手动更新插件".format(e))
+
+            reply = ["[bot]正在更新插件,请勿重复发起..."]
+            threading.Thread(target=closure).start()
+        else:
+            reply = ["[bot]请指定要更新的插件, 或使用 !plugin update all 更新所有插件"]
         return True, reply
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=PluginCommand,
     name="del",
     description="删除插件",
@@ -139,9 +135,9 @@ class PluginUpdateCommand(AbstractCommandNode):
     aliases=[],
     privilege=2
 )
-class PluginDelCommand(AbstractCommandNode):
+class PluginDelCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         plugin_list = plugin_host.__plugins__
         reply = []
 
@@ -158,7 +154,7 @@ class PluginDelCommand(AbstractCommandNode):
         return True, reply
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=PluginCommand,
     name="on",
     description="启用指定插件",
@@ -166,7 +162,7 @@ class PluginDelCommand(AbstractCommandNode):
     aliases=[],
     privilege=2
 )
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=PluginCommand,
     name="off",
     description="禁用指定插件",
@@ -174,9 +170,9 @@ class PluginDelCommand(AbstractCommandNode):
     aliases=[],
     privilege=2
 )
-class PluginOnOffCommand(AbstractCommandNode):
+class PluginOnOffCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.plugin.switch as plugin_switch
 
         plugin_list = plugin_host.__plugins__
@@ -191,6 +187,11 @@ class PluginOnOffCommand(AbstractCommandNode):
         plugin_name = ctx.crt_params[0]
         if plugin_name in plugin_list:
             plugin_list[plugin_name]['enabled'] = new_status
+
+            for func in plugin_host.__callable_functions__:
+                if func['name'].startswith(plugin_name+"-"):
+                    func['enabled'] = new_status
+
             plugin_switch.dump_switch()
             reply = ["[bot]已{}插件: {}".format("启用" if new_status else "禁用", plugin_name)]
         else:
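The reworked PluginUpdateCommand above replaces "update every repo unconditionally" with per-plugin updates; a short usage sketch of the resulting command surface, paraphrased from the reply strings in the diff (the plugin name is hypothetical):

    !plugin update                 -> "[bot]请指定要更新的插件, 或使用 !plugin update all 更新所有插件"
    !plugin update all             -> updates every loaded plugin via plugin_host.update_plugin(key)
    !plugin update SomePlugin      -> updates that one plugin, or fails with "未找到插件: SomePlugin"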
@@ -1,7 +1,6 @@
-from ..aamgr import AbstractCommandNode, Context
-
+from .. import aamgr
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="default",
     description="操作情景预设",
@@ -9,9 +8,9 @@
     aliases=[],
     privilege=1
 )
-class DefaultCommand(AbstractCommandNode):
+class DefaultCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         session_name = ctx.session_name
         params = ctx.params
@@ -45,7 +44,7 @@ class DefaultCommand(AbstractCommandNode):
         return True, reply
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=DefaultCommand,
     name="set",
     description="设置默认情景预设",
@@ -53,9 +52,9 @@ class DefaultCommand(AbstractCommandNode):
     aliases=[],
     privilege=2
 )
-class DefaultSetCommand(AbstractCommandNode):
+class DefaultSetCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         reply = []
 
         if len(ctx.crt_params) == 0:
@@ -1,8 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
-import datetime
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="del",
     description="删除当前会话的历史记录",
@@ -10,9 +9,9 @@ import datetime
     aliases=[],
     privilege=1
 )
-class DelCommand(AbstractCommandNode):
+class DelCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         session_name = ctx.session_name
         params = ctx.params
@@ -33,7 +32,7 @@ class DelCommand(AbstractCommandNode):
         return True, reply
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=DelCommand,
     name="all",
     description="删除当前会话的全部历史记录",
@@ -41,9 +40,9 @@ class DelCommand(AbstractCommandNode):
     aliases=[],
     privilege=1
 )
-class DelAllCommand(AbstractCommandNode):
+class DelAllCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         session_name = ctx.session_name
         reply = []
@@ -1,7 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="delhst",
     description="删除指定会话的所有历史记录",
@@ -9,9 +9,9 @@ from ..aamgr import AbstractCommandNode, Context
     aliases=[],
     privilege=2
 )
-class DelHistoryCommand(AbstractCommandNode):
+class DelHistoryCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         import pkg.utils.context
         params = ctx.params
@@ -31,7 +31,7 @@ class DelHistoryCommand(AbstractCommandNode):
         return True, reply
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=DelHistoryCommand,
     name="all",
     description="删除所有会话的全部历史记录",
@@ -39,9 +39,9 @@ class DelHistoryCommand(AbstractCommandNode):
     aliases=[],
     privilege=2
 )
-class DelAllHistoryCommand(AbstractCommandNode):
+class DelAllHistoryCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.utils.context
         reply = []
         pkg.utils.context.get_database_manager().delete_all_session_history()
@@ -1,8 +1,9 @@
-from ..aamgr import AbstractCommandNode, Context
 import datetime
 
+from .. import aamgr
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="last",
     description="切换前一次对话",
@@ -10,9 +11,9 @@ import datetime
     aliases=[],
     privilege=1
 )
-class LastCommand(AbstractCommandNode):
+class LastCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         session_name = ctx.session_name
 
@@ -1,9 +1,10 @@
-from ..aamgr import AbstractCommandNode, Context
 import datetime
 import json
 
+from .. import aamgr
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name='list',
     description='列出当前会话的所有历史记录',
@@ -11,9 +12,9 @@ import json
     aliases=[],
     privilege=1
 )
-class ListCommand(AbstractCommandNode):
+class ListCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         session_name = ctx.session_name
         params = ctx.params
@@ -1,8 +1,9 @@
-from ..aamgr import AbstractCommandNode, Context
 import datetime
 
+from .. import aamgr
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="next",
     description="切换后一次对话",
@@ -10,9 +11,9 @@ import datetime
     aliases=[],
     privilege=1
 )
-class NextCommand(AbstractCommandNode):
+class NextCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         session_name = ctx.session_name
         reply = []
@@ -1,8 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
-import datetime
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="prompt",
     description="获取当前会话的前文",
@@ -10,9 +9,9 @@ import datetime
     aliases=[],
     privilege=1
 )
-class PromptCommand(AbstractCommandNode):
+class PromptCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import pkg.openai.session
         session_name = ctx.session_name
         params = ctx.params
@@ -1,8 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
-import datetime
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="resend",
     description="重新获取上一次问题的回复",
@@ -10,20 +9,22 @@ import datetime
     aliases=[],
     privilege=1
 )
-class ResendCommand(AbstractCommandNode):
+class ResendCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
-        import pkg.openai.session
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
+        from ....openai import session as openai_session
+        from ....utils import context
+        from ....qqbot import message
         import config
         session_name = ctx.session_name
         reply = []
 
-        session = pkg.openai.session.get_session(session_name)
+        session = openai_session.get_session(session_name)
         to_send = session.undo()
 
-        mgr = pkg.utils.context.get_qqbot_manager()
+        mgr = context.get_qqbot_manager()
 
-        reply = pkg.qqbot.message.process_normal_message(to_send, mgr, config,
+        reply = message.process_normal_message(to_send, mgr, config,
                                                 ctx.launcher_type, ctx.launcher_id,
                                                 ctx.sender_id)
 
@@ -1,11 +1,11 @@
-from ..aamgr import AbstractCommandNode, Context
 import tips as tips_custom
 
-import pkg.openai.session
-import pkg.utils.context
+from .. import aamgr
+from ....openai import session
+from ....utils import context
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name='reset',
     description='重置当前会话',
@@ -13,21 +13,21 @@ import pkg.utils.context
     aliases=[],
     privilege=1
 )
-class ResetCommand(AbstractCommandNode):
+class ResetCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         params = ctx.params
         session_name = ctx.session_name
 
         reply = ""
 
         if len(params) == 0:
-            pkg.openai.session.get_session(session_name).reset(explicit=True)
+            session.get_session(session_name).reset(explicit=True)
             reply = [tips_custom.command_reset_message]
         else:
             try:
                 import pkg.openai.dprompt as dprompt
-                pkg.openai.session.get_session(session_name).reset(explicit=True, use_prompt=params[0])
+                session.get_session(session_name).reset(explicit=True, use_prompt=params[0])
                 reply = [tips_custom.command_reset_name_message+"{}".format(dprompt.mode_inst().get_full_name(params[0]))]
             except Exception as e:
                 reply = ["[bot]会话重置失败:{}".format(e)]
@@ -1,6 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
 import json
 
+from .. import aamgr
 
 
 def config_operation(cmd, params):
     reply = []
@@ -8,7 +9,7 @@ def config_operation(cmd, params):
     config = pkg.utils.context.get_config()
     reply_str = ""
     if len(params) == 0:
-        reply = ["[bot]err:请输入配置项"]
+        reply = ["[bot]err:请输入!cmd cfg查看使用方法"]
     else:
         cfg_name = params[0]
         if cfg_name == 'all':
@@ -26,50 +27,66 @@ def config_operation(cmd, params):
             else:
                 reply_str += "{}: {}\n".format(cfg, getattr(config, cfg))
             reply = [reply_str]
-        elif cfg_name in dir(config):
-            if len(params) == 1:
-                # 按照配置项类型进行格式化
-                if isinstance(getattr(config, cfg_name), str):
-                    reply_str = "[bot]配置项{}: \"{}\"\n".format(cfg_name, getattr(config, cfg_name))
-                elif isinstance(getattr(config, cfg_name), dict):
-                    reply_str = "[bot]配置项{}: {}\n".format(cfg_name,
-                                                          json.dumps(getattr(config, cfg_name),
-                                                                     ensure_ascii=False, indent=4))
-                else:
-                    reply_str = "[bot]配置项{}: {}\n".format(cfg_name, getattr(config, cfg_name))
-                reply = [reply_str]
-            else:
-                cfg_value = " ".join(params[1:])
-                # 类型转换,如果是json则转换为字典
-                if cfg_value == 'true':
-                    cfg_value = True
-                elif cfg_value == 'false':
-                    cfg_value = False
-                elif cfg_value.isdigit():
-                    cfg_value = int(cfg_value)
-                elif cfg_value.startswith('{') and cfg_value.endswith('}'):
-                    cfg_value = json.loads(cfg_value)
-                else:
-                    try:
-                        cfg_value = float(cfg_value)
-                    except ValueError:
-                        pass
-
-                # 检查类型是否匹配
-                if isinstance(getattr(config, cfg_name), type(cfg_value)):
-                    setattr(config, cfg_name, cfg_value)
-                    pkg.utils.context.set_config(config)
-                    reply = ["[bot]配置项{}修改成功".format(cfg_name)]
-                else:
-                    reply = ["[bot]err:配置项{}类型不匹配".format(cfg_name)]
-
         else:
-            reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
+            cfg_entry_path = cfg_name.split('.')
+
+            try:
+                if len(params) == 1:
+                    cfg_entry = getattr(config, cfg_entry_path[0])
+                    if len(cfg_entry_path) > 1:
+                        for i in range(1, len(cfg_entry_path)):
+                            cfg_entry = cfg_entry[cfg_entry_path[i]]
+
+                    if isinstance(cfg_entry, str):
+                        reply_str = "[bot]配置项{}: \"{}\"\n".format(cfg_name, cfg_entry)
+                    elif isinstance(cfg_entry, dict):
+                        reply_str = "[bot]配置项{}: {}\n".format(cfg_name,
+                                                              json.dumps(cfg_entry,
+                                                                         ensure_ascii=False, indent=4))
+                    else:
+                        reply_str = "[bot]配置项{}: {}\n".format(cfg_name, cfg_entry)
+                    reply = [reply_str]
+                else:
+                    cfg_value = " ".join(params[1:])
+                    # 类型转换,如果是json则转换为字典
+                    # if cfg_value == 'true':
+                    #     cfg_value = True
+                    # elif cfg_value == 'false':
+                    #     cfg_value = False
+                    # elif cfg_value.isdigit():
+                    #     cfg_value = int(cfg_value)
+                    # elif cfg_value.startswith('{') and cfg_value.endswith('}'):
+                    #     cfg_value = json.loads(cfg_value)
+                    # else:
+                    #     try:
+                    #         cfg_value = float(cfg_value)
+                    #     except ValueError:
+                    #         pass
+                    cfg_value = eval(cfg_value)
+
+                    cfg_entry = getattr(config, cfg_entry_path[0])
+                    if len(cfg_entry_path) > 1:
+                        for i in range(1, len(cfg_entry_path) - 1):
+                            cfg_entry = cfg_entry[cfg_entry_path[i]]
+                        if isinstance(cfg_entry[cfg_entry_path[-1]], type(cfg_value)):
+                            cfg_entry[cfg_entry_path[-1]] = cfg_value
+                            reply = ["[bot]配置项{}修改成功".format(cfg_name)]
+                        else:
+                            reply = ["[bot]err:配置项{}类型不匹配".format(cfg_name)]
+                    else:
+                        setattr(config, cfg_entry_path[0], cfg_value)
+                        reply = ["[bot]配置项{}修改成功".format(cfg_name)]
+            except AttributeError:
+                reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
+            except ValueError:
+                reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
+            # else:
+            #     reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
 
     return reply
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="cfg",
     description="配置项管理",
@@ -77,8 +94,8 @@ def config_operation(cmd, params):
     aliases=[],
     privilege=2
 )
-class CfgCommand(AbstractCommandNode):
+class CfgCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         return True, config_operation(ctx.command, ctx.params)
 
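The rewritten config_operation above resolves dotted configuration paths and parses the new value with eval(). A minimal standalone sketch of that traversal, using a hypothetical config object (the key names mirror the response_rules structure referenced elsewhere in this diff, but the values and the FakeConfig class are made up), roughly what "!cfg response_rules.default.at False" ends up doing:

    # standalone sketch of the dotted-path lookup/assignment used by the new !cfg handler
    class FakeConfig:
        response_rules = {"default": {"at": True, "random_rate": 0.0}}  # hypothetical entry

    config = FakeConfig()
    cfg_entry_path = "response_rules.default.at".split('.')
    cfg_value = eval("False")                     # the value is evaluated as a Python literal

    cfg_entry = getattr(config, cfg_entry_path[0])
    for key in cfg_entry_path[1:-1]:
        cfg_entry = cfg_entry[key]                # walk intermediate dict levels
    if isinstance(cfg_entry[cfg_entry_path[-1]], type(cfg_value)):
        cfg_entry[cfg_entry_path[-1]] = cfg_value # only assign when the type matches
    print(config.response_rules)                  # {'default': {'at': False, 'random_rate': 0.0}}

Note that eval() will execute any Python expression the sender supplies, so the privilege=2 restriction on the cfg command is what keeps this confined to administrators.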
@@ -1,7 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context, __command_list__
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="cmd",
     description="显示指令列表",
@@ -9,10 +9,10 @@ from ..aamgr import AbstractCommandNode, Context, __command_list__
     aliases=[],
     privilege=1
 )
-class CmdCommand(AbstractCommandNode):
+class CmdCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
-        command_list = __command_list__
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
+        command_list = aamgr.__command_list__
 
         reply = []
 
@@ -1,7 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="help",
     description="显示自定义的帮助信息",
@@ -9,9 +9,9 @@ from ..aamgr import AbstractCommandNode, Context
     aliases=[],
     privilege=1
 )
-class HelpCommand(AbstractCommandNode):
+class HelpCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import tips
         reply = ["[bot] "+tips.help_message + "\n请输入 !cmd 查看指令列表"]
 
@@ -1,7 +1,9 @@
-from ..aamgr import AbstractCommandNode, Context
 import threading
 
-@AbstractCommandNode.register(
+from .. import aamgr
+
+
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="reload",
     description="执行热重载",
@@ -9,9 +11,9 @@ import threading
     aliases=[],
     privilege=2
 )
-class ReloadCommand(AbstractCommandNode):
+class ReloadCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         reply = []
 
         import pkg.utils.reloader
@@ -1,9 +1,10 @@
-from ..aamgr import AbstractCommandNode, Context
 import threading
 import traceback
 
+from .. import aamgr
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="update",
     description="更新程序",
@@ -11,9 +12,9 @@ import traceback
     aliases=[],
     privilege=2
 )
-class UpdateCommand(AbstractCommandNode):
+class UpdateCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         reply = []
         import pkg.utils.updater
         import pkg.utils.reloader
@@ -1,8 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
-import logging
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="usage",
     description="获取使用情况",
@@ -10,9 +9,9 @@ import logging
     aliases=[],
     privilege=1
 )
-class UsageCommand(AbstractCommandNode):
+class UsageCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         import config
         import pkg.utils.credit as credit
         import pkg.utils.context
@@ -1,7 +1,7 @@
-from ..aamgr import AbstractCommandNode, Context
+from .. import aamgr
 
 
-@AbstractCommandNode.register(
+@aamgr.AbstractCommandNode.register(
     parent=None,
     name="version",
     description="查看版本信息",
@@ -9,9 +9,9 @@ from ..aamgr import AbstractCommandNode, Context
     aliases=[],
     privilege=1
 )
-class VersionCommand(AbstractCommandNode):
+class VersionCommand(aamgr.AbstractCommandNode):
     @classmethod
-    def process(cls, ctx: Context) -> tuple[bool, list]:
+    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
         reply = []
         import pkg.utils.updater
 
@@ -1,23 +1,7 @@
 # 指令处理模块
 import logging
-import json
-import datetime
-import os
-import threading
-import traceback
 
-import pkg.openai.session
-import pkg.openai.manager
-import pkg.utils.reloader
-import pkg.utils.updater
-import pkg.utils.context
-import pkg.qqbot.message
-import pkg.utils.credit as credit
-# import pkg.qqbot.cmds.model as cmdmodel
-import pkg.qqbot.cmds.aamgr as cmdmgr
-
-from mirai import Image
 
+from ..qqbot.cmds import aamgr as cmdmgr
 
 
 def process_command(session_name: str, text_message: str, mgr, config,
@@ -1,32 +1,35 @@
-import asyncio
 import json
 import os
-import threading
-
-import mirai.models.bus
-from mirai import At, GroupMessage, MessageEvent, Mirai, StrangerMessage, WebSocketAdapter, HTTPAdapter, \
-    FriendMessage, Image
-from func_timeout import func_set_timeout
-
-import pkg.openai.session
-import pkg.openai.manager
-from func_timeout import FunctionTimedOut
 import logging
 
-import pkg.qqbot.filter
-import pkg.qqbot.process as processor
-import pkg.utils.context
-
-import pkg.plugin.host as plugin_host
-import pkg.plugin.models as plugin_models
+from mirai import At, GroupMessage, MessageEvent, Mirai, StrangerMessage, WebSocketAdapter, HTTPAdapter, \
+    FriendMessage, Image, MessageChain, Plain
+import func_timeout
+
+from ..openai import session as openai_session
+
+from ..qqbot import filter as qqbot_filter
+from ..qqbot import process as processor
+from ..utils import context
+from ..plugin import host as plugin_host
+from ..plugin import models as plugin_models
 
 import tips as tips_custom
+from ..qqbot import adapter as msadapter
 
 
 # 检查消息是否符合泛响应匹配机制
-def check_response_rule(text: str):
-    config = pkg.utils.context.get_config()
+def check_response_rule(group_id: int, text: str):
+    config = context.get_config()
 
     rules = config.response_rules
 
+    # 检查是否有特定规则
+    if 'prefix' not in config.response_rules:
+        if str(group_id) in config.response_rules:
+            rules = config.response_rules[str(group_id)]
+        else:
+            rules = config.response_rules['default']
+
     # 检查前缀匹配
     if 'prefix' in rules:
         for rule in rules['prefix']:
@@ -44,19 +47,39 @@
     return False, ""
 
 
-def response_at():
-    config = pkg.utils.context.get_config()
-    if 'at' not in config.response_rules:
+def response_at(group_id: int):
+    config = context.get_config()
+
+    use_response_rule = config.response_rules
+
+    # 检查是否有特定规则
+    if 'prefix' not in config.response_rules:
+        if str(group_id) in config.response_rules:
+            use_response_rule = config.response_rules[str(group_id)]
+        else:
+            use_response_rule = config.response_rules['default']
+
+    if 'at' not in use_response_rule:
         return True
 
-    return config.response_rules['at']
+    return use_response_rule['at']
 
 
-def random_responding():
-    config = pkg.utils.context.get_config()
-    if 'random_rate' in config.response_rules:
+def random_responding(group_id):
+    config = context.get_config()
+
+    use_response_rule = config.response_rules
+
+    # 检查是否有特定规则
+    if 'prefix' not in config.response_rules:
+        if str(group_id) in config.response_rules:
+            use_response_rule = config.response_rules[str(group_id)]
+        else:
+            use_response_rule = config.response_rules['default']
+
+    if 'random_rate' in use_response_rule:
         import random
-        return random.random() < config.response_rules['random_rate']
+        return random.random() < use_response_rule['random_rate']
     return False
 
 
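The three helpers above now resolve rules per group: when config.response_rules has no top-level 'prefix' key, it is treated as a mapping from group id (or 'default') to a rule set. A sketch of what such a per-group response_rules value might look like; only the key names 'default', 'at', 'prefix' and 'random_rate' come from the diff, the group ids and values are invented:

    # hypothetical per-group response_rules
    response_rules = {
        "default": {
            "at": True,                # respond when the bot is @-mentioned
            "prefix": ["/ai", "!ai"],  # respond to messages starting with these prefixes
            "random_rate": 0.0         # probability of answering any other message
        },
        "12345678": {                  # overrides for one specific group
            "at": False,
            "random_rate": 0.05
        }
    }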
@@ -64,57 +87,52 @@
 class QQBotManager:
     retry = 3
 
-    bot: Mirai = None
+    adapter: msadapter.MessageSourceAdapter = None
+
+    bot_account_id: int = 0
 
     reply_filter = None
 
     enable_banlist = False
 
+    enable_private = True
+    enable_group = True
+
     ban_person = []
     ban_group = []
 
-    def __init__(self, mirai_http_api_config: dict, timeout: int = 60, retry: int = 3, first_time_init=True):
-        self.timeout = timeout
-        self.retry = retry
-
-        # 加载禁用列表
-        if os.path.exists("banlist.py"):
-            import banlist
-            self.enable_banlist = banlist.enable
-            self.ban_person = banlist.person
-            self.ban_group = banlist.group
-            logging.info("加载禁用列表: person: {}, group: {}".format(self.ban_person, self.ban_group))
-
-        config = pkg.utils.context.get_config()
-        if os.path.exists("sensitive.json") \
-                and config.sensitive_word_filter is not None \
-                and config.sensitive_word_filter:
-            with open("sensitive.json", "r", encoding="utf-8") as f:
-                sensitive_json = json.load(f)
-                self.reply_filter = pkg.qqbot.filter.ReplyFilter(
-                    sensitive_words=sensitive_json['words'],
-                    mask=sensitive_json['mask'] if 'mask' in sensitive_json else '*',
-                    mask_word=sensitive_json['mask_word'] if 'mask_word' in sensitive_json else ''
-                )
-        else:
-            self.reply_filter = pkg.qqbot.filter.ReplyFilter([])
+    def __init__(self, first_time_init=True):
+        import config
+
+        self.timeout = config.process_message_timeout
+        self.retry = config.retry_times
 
         # 由于YiriMirai的bot对象是单例的,且shutdown方法暂时无法使用
         # 故只在第一次初始化时创建bot对象,重载之后使用原bot对象
         # 因此,bot的配置不支持热重载
         if first_time_init:
-            self.first_time_init(mirai_http_api_config)
+            logging.debug("Use adapter:" + config.msg_source_adapter)
+            if config.msg_source_adapter == 'yirimirai':
+                from pkg.qqbot.sources.yirimirai import YiriMiraiAdapter
+
+                mirai_http_api_config = config.mirai_http_api_config
+                self.bot_account_id = config.mirai_http_api_config['qq']
+                self.adapter = YiriMiraiAdapter(mirai_http_api_config)
+            elif config.msg_source_adapter == 'nakuru':
+                from pkg.qqbot.sources.nakuru import NakuruProjectAdapter
+                self.adapter = NakuruProjectAdapter(config.nakuru_config)
+                self.bot_account_id = self.adapter.bot_account_id
         else:
-            self.bot = pkg.utils.context.get_qqbot_manager().bot
+            self.adapter = context.get_qqbot_manager().adapter
+            self.bot_account_id = context.get_qqbot_manager().bot_account_id
 
-        pkg.utils.context.set_qqbot_manager(self)
+        context.set_qqbot_manager(self)
 
+        # 注册诸事件
         # Caution: 注册新的事件处理器之后,请务必在unsubscribe_all中编写相应的取消订阅代码
-        @self.bot.on(FriendMessage)
-        def on_friend_message(event: FriendMessage):
+        async def on_friend_message(event: FriendMessage):
 
-            def friend_message_handler(event: FriendMessage):
+            def friend_message_handler():
                 # 触发事件
                 args = {
                     "launcher_type": "person",
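The __init__ rewrite above selects a message-source adapter from config instead of constructing a YiriMirai bot directly. A hedged sketch of the two config shapes it reads; the key names come from the diff (including the removed first_time_init), while the concrete values are purely illustrative:

    # illustrative values only
    msg_source_adapter = 'yirimirai'       # or 'nakuru'

    mirai_http_api_config = {              # used when msg_source_adapter == 'yirimirai'
        "adapter": "WebSocketAdapter",     # the removed first_time_init also accepted "HTTPAdapter"
        "host": "localhost",
        "port": 8080,
        "verifyKey": "yirimirai",
        "qq": 1234567890                   # becomes QQBotManager.bot_account_id
    }

    # nakuru_config = ...                  # passed to NakuruProjectAdapter when msg_source_adapter == 'nakuru'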
@@ -129,15 +147,17 @@ class QQBotManager:
 
                 self.on_person_message(event)
 
-            pkg.utils.context.get_thread_ctl().submit_user_task(
-                friend_message_handler,
-                event
+            context.get_thread_ctl().submit_user_task(
+                friend_message_handler,
             )
+        self.adapter.register_listener(
+            FriendMessage,
+            on_friend_message
+        )
 
-        @self.bot.on(StrangerMessage)
-        def on_stranger_message(event: StrangerMessage):
+        async def on_stranger_message(event: StrangerMessage):
 
-            def stranger_message_handler(event: StrangerMessage):
+            def stranger_message_handler():
                 # 触发事件
                 args = {
                     "launcher_type": "person",
@@ -152,13 +172,17 @@ class QQBotManager:
 
                 self.on_person_message(event)
 
-            pkg.utils.context.get_thread_ctl().submit_user_task(
+            context.get_thread_ctl().submit_user_task(
                 stranger_message_handler,
-                event
             )
+        # nakuru不区分好友和陌生人,故仅为yirimirai注册陌生人事件
+        if config.msg_source_adapter == 'yirimirai':
+            self.adapter.register_listener(
+                StrangerMessage,
+                on_stranger_message
+            )
 
-        @self.bot.on(GroupMessage)
-        def on_group_message(event: GroupMessage):
+        async def on_group_message(event: GroupMessage):
 
             def group_message_handler(event: GroupMessage):
                 # 触发事件
@@ -175,66 +199,96 @@ class QQBotManager:
 
                 self.on_group_message(event)
 
-            pkg.utils.context.get_thread_ctl().submit_user_task(
+            context.get_thread_ctl().submit_user_task(
                 group_message_handler,
                 event
             )
+        self.adapter.register_listener(
+            GroupMessage,
+            on_group_message
+        )
 
         def unsubscribe_all():
             """取消所有订阅
 
            用于在热重载流程中卸载所有事件处理器
             """
-            assert isinstance(self.bot, Mirai)
-            bus = self.bot.bus
-            assert isinstance(bus, mirai.models.bus.ModelEventBus)
-
-            bus.unsubscribe(FriendMessage, on_friend_message)
-            bus.unsubscribe(StrangerMessage, on_stranger_message)
-            bus.unsubscribe(GroupMessage, on_group_message)
+            import config
+            self.adapter.unregister_listener(
+                FriendMessage,
+                on_friend_message
+            )
+            if config.msg_source_adapter == 'yirimirai':
+                self.adapter.unregister_listener(
+                    StrangerMessage,
+                    on_stranger_message
+                )
+            self.adapter.unregister_listener(
+                GroupMessage,
+                on_group_message
+            )
 
         self.unsubscribe_all = unsubscribe_all
 
-        def go(self, func, *args, **kwargs):
-            self.pool.submit(func, *args, **kwargs)
-
-    def first_time_init(self, mirai_http_api_config: dict):
-        """热重载后不再运行此函数"""
-        if 'adapter' not in mirai_http_api_config or mirai_http_api_config['adapter'] == "WebSocketAdapter":
-            bot = Mirai(
-                qq=mirai_http_api_config['qq'],
-                adapter=WebSocketAdapter(
-                    verify_key=mirai_http_api_config['verifyKey'],
-                    host=mirai_http_api_config['host'],
-                    port=mirai_http_api_config['port']
-                )
-            )
-        elif mirai_http_api_config['adapter'] == "HTTPAdapter":
-            bot = Mirai(
-                qq=mirai_http_api_config['qq'],
-                adapter=HTTPAdapter(
-                    verify_key=mirai_http_api_config['verifyKey'],
-                    host=mirai_http_api_config['host'],
-                    port=mirai_http_api_config['port']
-                )
-            )
-        else:
-            raise Exception("未知的适配器类型")
-
-        self.bot = bot
+        # 加载禁用列表
+        if os.path.exists("banlist.py"):
+            import banlist
+            self.enable_banlist = banlist.enable
+            self.ban_person = banlist.person
+            self.ban_group = banlist.group
+            logging.info("加载禁用列表: person: {}, group: {}".format(self.ban_person, self.ban_group))
+
+            if hasattr(banlist, "enable_private"):
+                self.enable_private = banlist.enable_private
+            if hasattr(banlist, "enable_group"):
+                self.enable_group = banlist.enable_group
+
+        config = context.get_config()
+        if os.path.exists("sensitive.json") \
+                and config.sensitive_word_filter is not None \
+                and config.sensitive_word_filter:
+            with open("sensitive.json", "r", encoding="utf-8") as f:
+                sensitive_json = json.load(f)
+                self.reply_filter = qqbot_filter.ReplyFilter(
+                    sensitive_words=sensitive_json['words'],
+                    mask=sensitive_json['mask'] if 'mask' in sensitive_json else '*',
+                    mask_word=sensitive_json['mask_word'] if 'mask_word' in sensitive_json else ''
+                )
+        else:
+            self.reply_filter = qqbot_filter.ReplyFilter([])
 
-    def send(self, event, msg, check_quote=True):
-        config = pkg.utils.context.get_config()
-        asyncio.run(
-            self.bot.send(event, msg, quote=True if config.quote_origin and check_quote else False))
+    def send(self, event, msg, check_quote=True, check_at_sender=True):
+        config = context.get_config()
+
+        if check_at_sender and config.at_sender:
+            msg.insert(
+                0,
+                Plain(" \n")
+            )
+
+            # 当回复的正文中包含换行时,quote可能会自带at,此时就不再单独添加at,只添加换行
+            if "\n" not in str(msg[1]) or config.msg_source_adapter == 'nakuru':
+                msg.insert(
+                    0,
+                    At(
+                        event.sender.id
+                    )
+                )
+
+        self.adapter.reply_message(
+            event,
+            msg,
+            quote_origin=True if config.quote_origin and check_quote else False
+        )
 
     # 私聊消息处理
     def on_person_message(self, event: MessageEvent):
         import config
         reply = ''
 
-        if event.sender.id == self.bot.qq:
+        if not self.enable_private:
+            logging.debug("已在banlist.py中禁用所有私聊")
+        elif event.sender.id == self.bot_account_id:
             pass
         else:
            if Image in event.message_chain:
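The banlist loading moved into the hunk above now also honours optional enable_private / enable_group switches. A sketch of a banlist.py that exercises them; the attribute names are the ones the diff checks with hasattr(), the values are illustrative:

    # banlist.py - illustrative values
    enable = True            # master switch for the ban list
    enable_private = True    # set False to ignore all private chats
    enable_group = True      # set False to ignore all group chats
    person = [1234567890]    # QQ ids whose messages are ignored
    group = [98765432]       # group ids whose messages are ignored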
@@ -245,7 +299,7 @@ class QQBotManager:
             for i in range(self.retry):
                 try:
 
-                    @func_set_timeout(config.process_message_timeout)
+                    @func_timeout.func_set_timeout(config.process_message_timeout)
                     def time_ctrl_wrapper():
                         reply = processor.process_message('person', event.sender.id, str(event.message_chain),
                                                           event.message_chain,
@@ -254,37 +308,36 @@ class QQBotManager:
 
                     reply = time_ctrl_wrapper()
                     break
-                except FunctionTimedOut:
+                except func_timeout.FunctionTimedOut:
                     logging.warning("person_{}: 超时,重试中({})".format(event.sender.id, i))
-                    pkg.openai.session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
-                    if "person_{}".format(event.sender.id) in pkg.qqbot.process.processing:
-                        pkg.qqbot.process.processing.remove('person_{}'.format(event.sender.id))
+                    openai_session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
+                    if "person_{}".format(event.sender.id) in processor.processing:
+                        processor.processing.remove('person_{}'.format(event.sender.id))
                     failed += 1
                     continue
 
             if failed == self.retry:
-                pkg.openai.session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
+                openai_session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
                 self.notify_admin("{} 请求超时".format("person_{}".format(event.sender.id)))
                 reply = [tips_custom.reply_message]
 
         if reply:
-            return self.send(event, reply, check_quote=False)
+            return self.send(event, reply, check_quote=False, check_at_sender=False)
 
     # 群消息处理
     def on_group_message(self, event: GroupMessage):
         import config
         reply = ''
 
         def process(text=None) -> str:
             replys = ""
-            if At(self.bot.qq) in event.message_chain:
-                event.message_chain.remove(At(self.bot.qq))
+            if At(self.bot_account_id) in event.message_chain:
+                event.message_chain.remove(At(self.bot_account_id))
 
             # 超时则重试,重试超过次数则放弃
             failed = 0
             for i in range(self.retry):
                 try:
-                    @func_set_timeout(config.process_message_timeout)
+                    @func_timeout.func_set_timeout(config.process_message_timeout)
                     def time_ctrl_wrapper():
                         replys = processor.process_message('group', event.group.id,
                                                            str(event.message_chain).strip() if text is None else text,
@@ -294,34 +347,36 @@ class QQBotManager:
 
                     replys = time_ctrl_wrapper()
                     break
-                except FunctionTimedOut:
+                except func_timeout.FunctionTimedOut:
                     logging.warning("group_{}: 超时,重试中({})".format(event.group.id, i))
-                    pkg.openai.session.get_session('group_{}'.format(event.group.id)).release_response_lock()
-                    if "group_{}".format(event.group.id) in pkg.qqbot.process.processing:
-                        pkg.qqbot.process.processing.remove('group_{}'.format(event.group.id))
+                    openai_session.get_session('group_{}'.format(event.group.id)).release_response_lock()
+                    if "group_{}".format(event.group.id) in processor.processing:
+                        processor.processing.remove('group_{}'.format(event.group.id))
                     failed += 1
                     continue
 
             if failed == self.retry:
-                pkg.openai.session.get_session('group_{}'.format(event.group.id)).release_response_lock()
+                openai_session.get_session('group_{}'.format(event.group.id)).release_response_lock()
                 self.notify_admin("{} 请求超时".format("group_{}".format(event.group.id)))
                 replys = [tips_custom.replys_message]
 
             return replys
 
-        if Image in event.message_chain:
+        if not self.enable_group:
+            logging.debug("已在banlist.py中禁用所有群聊")
+        elif Image in event.message_chain:
             pass
         else:
-            if At(self.bot.qq) in event.message_chain and response_at():
+            if At(self.bot_account_id) in event.message_chain and response_at(event.group.id):
                 # 直接调用
                 reply = process()
             else:
-                check, result = check_response_rule(str(event.message_chain).strip())
+                check, result = check_response_rule(event.group.id, str(event.message_chain).strip())
 
                 if check:
                     reply = process(result.strip())
                 # 检查是否随机响应
-                elif random_responding():
+                elif random_responding(event.group.id):
                     logging.info("随机响应group_{}消息".format(event.group.id))
                     reply = process()
@@ -330,26 +385,37 @@ class QQBotManager:

     # 通知系统管理员
     def notify_admin(self, message: str):
-        config = pkg.utils.context.get_config()
+        config = context.get_config()
         if config.admin_qq != 0 and config.admin_qq != []:
             logging.info("通知管理员:{}".format(message))
             if type(config.admin_qq) == int:
-                send_task = self.bot.send_friend_message(config.admin_qq, "[bot]{}".format(message))
-                threading.Thread(target=asyncio.run, args=(send_task,)).start()
+                self.adapter.send_message(
+                    "person",
+                    config.admin_qq,
+                    MessageChain([Plain("[bot]{}".format(message))])
+                )
             else:
                 for adm in config.admin_qq:
-                    send_task = self.bot.send_friend_message(adm, "[bot]{}".format(message))
-                    threading.Thread(target=asyncio.run, args=(send_task,)).start()
+                    self.adapter.send_message(
+                        "person",
+                        adm,
+                        MessageChain([Plain("[bot]{}".format(message))])
+                    )

     def notify_admin_message_chain(self, message):
-        config = pkg.utils.context.get_config()
+        config = context.get_config()
         if config.admin_qq != 0 and config.admin_qq != []:
             logging.info("通知管理员:{}".format(message))
             if type(config.admin_qq) == int:
-                send_task = self.bot.send_friend_message(config.admin_qq, message)
-                threading.Thread(target=asyncio.run, args=(send_task,)).start()
+                self.adapter.send_message(
+                    "person",
+                    config.admin_qq,
+                    message
+                )
             else:
                 for adm in config.admin_qq:
-                    send_task = self.bot.send_friend_message(adm, message)
-                    threading.Thread(target=asyncio.run, args=(send_task,)).start()
+                    self.adapter.send_message(
+                        "person",
+                        adm,
+                        message
+                    )
@@ -1,19 +1,20 @@
 # 普通消息处理模块
 import logging

-import openai
-
-import pkg.utils.context
-import pkg.openai.session
-
-import pkg.plugin.host as plugin_host
-import pkg.plugin.models as plugin_models
-import pkg.qqbot.blob as blob
+import openai
+
+from ..utils import context
+from ..openai import session as openai_session
+
+from ..plugin import host as plugin_host
+from ..plugin import models as plugin_models
 import tips as tips_custom


 def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
     """处理异常,当notify_admin不为空时,会通知管理员,返回通知用户的消息"""
     import config
-    pkg.utils.context.get_qqbot_manager().notify_admin(notify_admin)
+    context.get_qqbot_manager().notify_admin(notify_admin)
     if config.hide_exce_info_to_user:
         return [tips_custom.alter_tip_message] if tips_custom.alter_tip_message else []
     else:
@@ -26,7 +27,7 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
|||||||
logging.info("[{}]发送消息:{}".format(session_name, text_message[:min(20, len(text_message))] + (
|
logging.info("[{}]发送消息:{}".format(session_name, text_message[:min(20, len(text_message))] + (
|
||||||
"..." if len(text_message) > 20 else "")))
|
"..." if len(text_message) > 20 else "")))
|
||||||
|
|
||||||
session = pkg.openai.session.get_session(session_name)
|
session = openai_session.get_session(session_name)
|
||||||
|
|
||||||
unexpected_exception_times = 0
|
unexpected_exception_times = 0
|
||||||
|
|
||||||
@@ -40,7 +41,7 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
|||||||
try:
|
try:
|
||||||
prefix = "[GPT]" if config.show_prefix else ""
|
prefix = "[GPT]" if config.show_prefix else ""
|
||||||
|
|
||||||
text = session.append(text_message)
|
text, finish_reason, funcs = session.query(text_message)
|
||||||
|
|
||||||
# 触发插件事件
|
# 触发插件事件
|
||||||
args = {
|
args = {
|
||||||
@@ -49,10 +50,12 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
|||||||
"sender_id": sender_id,
|
"sender_id": sender_id,
|
||||||
"session": session,
|
"session": session,
|
||||||
"prefix": prefix,
|
"prefix": prefix,
|
||||||
"response_text": text
|
"response_text": text,
|
||||||
|
"finish_reason": finish_reason,
|
||||||
|
"funcs_called": funcs,
|
||||||
}
|
}
|
||||||
|
|
||||||
event = pkg.plugin.host.emit(plugin_models.NormalMessageResponded, **args)
|
event = plugin_host.emit(plugin_models.NormalMessageResponded, **args)
|
||||||
|
|
||||||
if event.get_return_value("prefix") is not None:
|
if event.get_return_value("prefix") is not None:
|
||||||
prefix = event.get_return_value("prefix")
|
prefix = event.get_return_value("prefix")
|
||||||
@@ -62,42 +65,43 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
|||||||
|
|
||||||
if not event.is_prevented_default():
|
if not event.is_prevented_default():
|
||||||
reply = [prefix + text]
|
reply = [prefix + text]
|
||||||
except openai.error.APIConnectionError as e:
|
|
||||||
|
except openai.APIConnectionError as e:
|
||||||
err_msg = str(e)
|
err_msg = str(e)
|
||||||
if err_msg.__contains__('Error communicating with OpenAI'):
|
if err_msg.__contains__('Error communicating with OpenAI'):
|
||||||
reply = handle_exception("{}会话调用API失败:{}\n请尝试关闭网络代理来解决此问题。".format(session_name, e),
|
reply = handle_exception("{}会话调用API失败:{}\n您的网络无法访问OpenAI接口或网络代理不正常".format(session_name, e),
|
||||||
"[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
"[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
||||||
else:
|
else:
|
||||||
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
||||||
except openai.error.RateLimitError as e:
|
except openai.RateLimitError as e:
|
||||||
logging.debug(type(e))
|
logging.debug(type(e))
|
||||||
logging.debug(e.error['message'])
|
logging.debug(e.error['message'])
|
||||||
|
|
||||||
if 'message' in e.error and e.error['message'].__contains__('You exceeded your current quota'):
|
if 'message' in e.error and e.error['message'].__contains__('You exceeded your current quota'):
|
||||||
# 尝试切换api-key
|
# 尝试切换api-key
|
||||||
current_key_name = pkg.utils.context.get_openai_manager().key_mgr.get_key_name(
|
current_key_name = context.get_openai_manager().key_mgr.get_key_name(
|
||||||
pkg.utils.context.get_openai_manager().key_mgr.using_key
|
context.get_openai_manager().key_mgr.using_key
|
||||||
)
|
)
|
||||||
pkg.utils.context.get_openai_manager().key_mgr.set_current_exceeded()
|
context.get_openai_manager().key_mgr.set_current_exceeded()
|
||||||
|
|
||||||
# 触发插件事件
|
# 触发插件事件
|
||||||
args = {
|
args = {
|
||||||
'key_name': current_key_name,
|
'key_name': current_key_name,
|
||||||
'usage': pkg.utils.context.get_openai_manager().audit_mgr
|
'usage': context.get_openai_manager().audit_mgr
|
||||||
.get_usage(pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()),
|
.get_usage(context.get_openai_manager().key_mgr.get_using_key_md5()),
|
||||||
'exceeded_keys': pkg.utils.context.get_openai_manager().key_mgr.exceeded,
|
'exceeded_keys': context.get_openai_manager().key_mgr.exceeded,
|
||||||
}
|
}
|
||||||
event = plugin_host.emit(plugin_models.KeyExceeded, **args)
|
event = plugin_host.emit(plugin_models.KeyExceeded, **args)
|
||||||
|
|
||||||
if not event.is_prevented_default():
|
if not event.is_prevented_default():
|
||||||
switched, name = pkg.utils.context.get_openai_manager().key_mgr.auto_switch()
|
switched, name = context.get_openai_manager().key_mgr.auto_switch()
|
||||||
|
|
||||||
if not switched:
|
if not switched:
|
||||||
reply = handle_exception(
|
reply = handle_exception(
|
||||||
"api-key调用额度超限({}),无可用api_key,请向OpenAI账户充值或在config.py中更换api_key;如果你认为这是误判,请尝试重启程序。".format(
|
"api-key调用额度超限({}),无可用api_key,请向OpenAI账户充值或在config.py中更换api_key;如果你认为这是误判,请尝试重启程序。".format(
|
||||||
current_key_name), "[bot]err:API调用额度超额,请联系管理员,或等待修复")
|
current_key_name), "[bot]err:API调用额度超额,请联系管理员,或等待修复")
|
||||||
else:
|
else:
|
||||||
openai.api_key = pkg.utils.context.get_openai_manager().key_mgr.get_using_key()
|
openai.api_key = context.get_openai_manager().key_mgr.get_using_key()
|
||||||
mgr.notify_admin("api-key调用额度超限({}),接口报错,已切换到{}".format(current_key_name, name))
|
mgr.notify_admin("api-key调用额度超限({}),接口报错,已切换到{}".format(current_key_name, name))
|
||||||
reply = ["[bot]err:API调用额度超额,已自动切换,请重新发送消息"]
|
reply = ["[bot]err:API调用额度超额,已自动切换,请重新发送消息"]
|
||||||
continue
|
continue
|
||||||
@@ -113,10 +117,14 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
|||||||
else:
|
else:
|
||||||
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
|
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
|
||||||
"[bot]err:RateLimitError,请重试或联系作者,或等待修复")
|
"[bot]err:RateLimitError,请重试或联系作者,或等待修复")
|
||||||
except openai.error.InvalidRequestError as e:
|
except openai.BadRequestError as e:
|
||||||
reply = handle_exception("{}API调用参数错误:{}\n".format(
|
if config.auto_reset and "This model's maximum context length is" in str(e):
|
||||||
session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
|
session.reset(persist=True)
|
||||||
except openai.error.ServiceUnavailableError as e:
|
reply = [tips_custom.session_auto_reset_message]
|
||||||
|
else:
|
||||||
|
reply = handle_exception("{}API调用参数错误:{}\n".format(
|
||||||
|
session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
|
||||||
|
except openai.APIStatusError as e:
|
||||||
reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
|
reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.exception(e)
|
logging.exception(e)
|
||||||
|
|||||||
@@ -5,28 +5,22 @@ import time
|
|||||||
import mirai
|
import mirai
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
from mirai import MessageChain, Plain
|
|
||||||
|
|
||||||
# 这里不使用动态引入config
|
# 这里不使用动态引入config
|
||||||
# 因为在这里动态引入会卡死程序
|
# 因为在这里动态引入会卡死程序
|
||||||
# 而此模块静态引用config与动态引入的表现一致
|
# 而此模块静态引用config与动态引入的表现一致
|
||||||
# 已弃用,由于超时时间现已动态使用
|
# 已弃用,由于超时时间现已动态使用
|
||||||
# import config as config_init_import
|
# import config as config_init_import
|
||||||
|
|
||||||
import pkg.openai.session
|
from ..qqbot import ratelimit
|
||||||
import pkg.openai.manager
|
from ..qqbot import command, message
|
||||||
import pkg.utils.reloader
|
from ..openai import session as openai_session
|
||||||
import pkg.utils.updater
|
from ..utils import context
|
||||||
import pkg.utils.context
|
|
||||||
import pkg.qqbot.message
|
|
||||||
import pkg.qqbot.command
|
|
||||||
import pkg.qqbot.ratelimit as ratelimit
|
|
||||||
|
|
||||||
import pkg.plugin.host as plugin_host
|
from ..plugin import host as plugin_host
|
||||||
import pkg.plugin.models as plugin_models
|
from ..plugin import models as plugin_models
|
||||||
import pkg.qqbot.ignore as ignore
|
from ..qqbot import ignore
|
||||||
import pkg.qqbot.banlist as banlist
|
from ..qqbot import banlist
|
||||||
import pkg.qqbot.blob as blob
|
from ..qqbot import blob
|
||||||
import tips as tips_custom
|
import tips as tips_custom
|
||||||
|
|
||||||
processing = []
|
processing = []
|
||||||
@@ -41,11 +35,11 @@ def is_admin(qq: int) -> bool:
|
|||||||
return qq == config.admin_qq
|
return qq == config.admin_qq
|
||||||
|
|
||||||
|
|
||||||
def process_message(launcher_type: str, launcher_id: int, text_message: str, message_chain: MessageChain,
|
def process_message(launcher_type: str, launcher_id: int, text_message: str, message_chain: mirai.MessageChain,
|
||||||
sender_id: int) -> MessageChain:
|
sender_id: int) -> mirai.MessageChain:
|
||||||
global processing
|
global processing
|
||||||
|
|
||||||
mgr = pkg.utils.context.get_qqbot_manager()
|
mgr = context.get_qqbot_manager()
|
||||||
|
|
||||||
reply = []
|
reply = []
|
||||||
session_name = "{}_{}".format(launcher_type, launcher_id)
|
session_name = "{}_{}".format(launcher_type, launcher_id)
|
||||||
@@ -62,30 +56,32 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
|||||||
import config
|
import config
|
||||||
|
|
||||||
if not config.wait_last_done and session_name in processing:
|
if not config.wait_last_done and session_name in processing:
|
||||||
return MessageChain([Plain(tips_custom.message_drop_tip)])
|
return mirai.MessageChain([mirai.Plain(tips_custom.message_drop_tip)])
|
||||||
|
|
||||||
# 检查是否被禁言
|
# 检查是否被禁言
|
||||||
if launcher_type == 'group':
|
if launcher_type == 'group':
|
||||||
result = mgr.bot.member_info(target=launcher_id, member_id=mgr.bot.qq).get()
|
is_muted = mgr.adapter.is_muted(launcher_id)
|
||||||
result = asyncio.run(result)
|
if is_muted:
|
||||||
if result.mute_time_remaining > 0:
|
logging.info("机器人被禁言,跳过消息处理(group_{})".format(launcher_id))
|
||||||
logging.info("机器人被禁言,跳过消息处理(group_{},剩余{}s)".format(launcher_id,
|
|
||||||
result.mute_time_remaining))
|
|
||||||
return reply
|
return reply
|
||||||
|
|
||||||
import config
|
import config
|
||||||
if config.income_msg_check:
|
if config.income_msg_check:
|
||||||
if mgr.reply_filter.is_illegal(text_message):
|
if mgr.reply_filter.is_illegal(text_message):
|
||||||
return MessageChain(Plain("[bot] 你的提问中有不合适的内容, 请更换措辞~"))
|
return mirai.MessageChain(mirai.Plain("[bot] 消息中存在不合适的内容, 请更换措辞"))
|
||||||
|
|
||||||
pkg.openai.session.get_session(session_name).acquire_response_lock()
|
openai_session.get_session(session_name).acquire_response_lock()
|
||||||
|
|
||||||
text_message = text_message.strip()
|
text_message = text_message.strip()
|
||||||
|
|
||||||
|
|
||||||
|
# 为强制消息延迟计时
|
||||||
|
start_time = time.time()
|
||||||
|
|
||||||
# 处理消息
|
# 处理消息
|
||||||
try:
|
try:
|
||||||
|
|
||||||
config = pkg.utils.context.get_config()
|
config = context.get_config()
|
||||||
|
|
||||||
processing.append(session_name)
|
processing.append(session_name)
|
||||||
try:
|
try:
|
||||||
@@ -112,7 +108,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
|||||||
reply = event.get_return_value("reply")
|
reply = event.get_return_value("reply")
|
||||||
|
|
||||||
if not event.is_prevented_default():
|
if not event.is_prevented_default():
|
||||||
reply = pkg.qqbot.command.process_command(session_name, text_message,
|
reply = command.process_command(session_name, text_message,
|
||||||
mgr, config, launcher_type, launcher_id, sender_id, is_admin(sender_id))
|
mgr, config, launcher_type, launcher_id, sender_id, is_admin(sender_id))
|
||||||
|
|
||||||
else: # 消息
|
else: # 消息
|
||||||
@@ -122,7 +118,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
|||||||
if ratelimit.is_reach_limit(session_name):
|
if ratelimit.is_reach_limit(session_name):
|
||||||
logging.info("根据限速策略丢弃[{}]消息: {}".format(session_name, text_message))
|
logging.info("根据限速策略丢弃[{}]消息: {}".format(session_name, text_message))
|
||||||
|
|
||||||
return MessageChain(["[bot]"+tips_custom.rate_limit_drop_tip]) if tips_custom.rate_limit_drop_tip != "" else []
|
return mirai.MessageChain(["[bot]"+tips_custom.rate_limit_drop_tip]) if tips_custom.rate_limit_drop_tip != "" else []
|
||||||
|
|
||||||
before = time.time()
|
before = time.time()
|
||||||
# 触发插件事件
|
# 触发插件事件
|
||||||
@@ -144,7 +140,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
|||||||
reply = event.get_return_value("reply")
|
reply = event.get_return_value("reply")
|
||||||
|
|
||||||
if not event.is_prevented_default():
|
if not event.is_prevented_default():
|
||||||
reply = pkg.qqbot.message.process_normal_message(text_message,
|
reply = message.process_normal_message(text_message,
|
||||||
mgr, config, launcher_type, launcher_id, sender_id)
|
mgr, config, launcher_type, launcher_id, sender_id)
|
||||||
|
|
||||||
# 限速等待时间
|
# 限速等待时间
|
||||||
@@ -168,6 +164,25 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
|||||||
finally:
|
finally:
|
||||||
processing.remove(session_name)
|
processing.remove(session_name)
|
||||||
finally:
|
finally:
|
||||||
pkg.openai.session.get_session(session_name).release_response_lock()
|
openai_session.get_session(session_name).release_response_lock()
|
||||||
|
|
||||||
return MessageChain(reply)
|
# 检查延迟时间
|
||||||
|
if config.force_delay_range[1] == 0:
|
||||||
|
delay_time = 0
|
||||||
|
else:
|
||||||
|
import random
|
||||||
|
|
||||||
|
# 从延迟范围中随机取一个值(浮点)
|
||||||
|
rdm = random.uniform(config.force_delay_range[0], config.force_delay_range[1])
|
||||||
|
|
||||||
|
spent = time.time() - start_time
|
||||||
|
|
||||||
|
# 如果花费时间小于延迟时间,则延迟
|
||||||
|
delay_time = rdm - spent if rdm - spent > 0 else 0
|
||||||
|
|
||||||
|
# 延迟
|
||||||
|
if delay_time > 0:
|
||||||
|
logging.info("[风控] 强制延迟{:.2f}秒(如需关闭,请到config.py修改force_delay_range字段)".format(delay_time))
|
||||||
|
time.sleep(delay_time)
|
||||||
|
|
||||||
|
return mirai.MessageChain(reply)
|
||||||
|
|||||||
0
pkg/qqbot/sources/__init__.py
Normal file
324
pkg/qqbot/sources/nakuru.py
Normal file
@@ -0,0 +1,324 @@
|
|||||||
|
import asyncio
|
||||||
|
import typing
|
||||||
|
import traceback
|
||||||
|
import logging
|
||||||
|
|
||||||
|
import mirai
|
||||||
|
|
||||||
|
import nakuru
|
||||||
|
import nakuru.entities.components as nkc
|
||||||
|
|
||||||
|
from .. import adapter as adapter_model
|
||||||
|
from ...qqbot import blob
|
||||||
|
|
||||||
|
|
||||||
|
class NakuruProjectMessageConverter(adapter_model.MessageConverter):
|
||||||
|
"""消息转换器"""
|
||||||
|
@staticmethod
|
||||||
|
def yiri2target(message_chain: mirai.MessageChain) -> list:
|
||||||
|
msg_list = []
|
||||||
|
if type(message_chain) is mirai.MessageChain:
|
||||||
|
msg_list = message_chain.__root__
|
||||||
|
elif type(message_chain) is list:
|
||||||
|
msg_list = message_chain
|
||||||
|
else:
|
||||||
|
raise Exception("Unknown message type: " + str(message_chain) + str(type(message_chain)))
|
||||||
|
|
||||||
|
nakuru_msg_list = []
|
||||||
|
|
||||||
|
# 遍历并转换
|
||||||
|
for component in msg_list:
|
||||||
|
if type(component) is mirai.Plain:
|
||||||
|
nakuru_msg_list.append(nkc.Plain(component.text, False))
|
||||||
|
elif type(component) is mirai.Image:
|
||||||
|
if component.url is not None:
|
||||||
|
nakuru_msg_list.append(nkc.Image.fromURL(component.url))
|
||||||
|
elif component.base64 is not None:
|
||||||
|
nakuru_msg_list.append(nkc.Image.fromBase64(component.base64))
|
||||||
|
elif component.path is not None:
|
||||||
|
nakuru_msg_list.append(nkc.Image.fromFileSystem(component.path))
|
||||||
|
elif type(component) is mirai.Face:
|
||||||
|
nakuru_msg_list.append(nkc.Face(id=component.face_id))
|
||||||
|
elif type(component) is mirai.At:
|
||||||
|
nakuru_msg_list.append(nkc.At(qq=component.target))
|
||||||
|
elif type(component) is mirai.AtAll:
|
||||||
|
nakuru_msg_list.append(nkc.AtAll())
|
||||||
|
elif type(component) is mirai.Voice:
|
||||||
|
if component.url is not None:
|
||||||
|
nakuru_msg_list.append(nkc.Record.fromURL(component.url))
|
||||||
|
elif component.path is not None:
|
||||||
|
nakuru_msg_list.append(nkc.Record.fromFileSystem(component.path))
|
||||||
|
elif type(component) is blob.Forward:
|
||||||
|
# 转发消息
|
||||||
|
yiri_forward_node_list = component.node_list
|
||||||
|
nakuru_forward_node_list = []
|
||||||
|
|
||||||
|
# 遍历并转换
|
||||||
|
for yiri_forward_node in yiri_forward_node_list:
|
||||||
|
try:
|
||||||
|
content_list = NakuruProjectMessageConverter.yiri2target(yiri_forward_node.message_chain)
|
||||||
|
nakuru_forward_node = nkc.Node(
|
||||||
|
name=yiri_forward_node.sender_name,
|
||||||
|
uin=yiri_forward_node.sender_id,
|
||||||
|
time=int(yiri_forward_node.time.timestamp()) if yiri_forward_node.time is not None else None,
|
||||||
|
content=content_list
|
||||||
|
)
|
||||||
|
nakuru_forward_node_list.append(nakuru_forward_node)
|
||||||
|
except Exception as e:
|
||||||
|
import traceback
|
||||||
|
traceback.print_exc()
|
||||||
|
|
||||||
|
nakuru_msg_list.append(nakuru_forward_node_list)
|
||||||
|
else:
|
||||||
|
nakuru_msg_list.append(nkc.Plain(str(component)))
|
||||||
|
|
||||||
|
return nakuru_msg_list
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def target2yiri(message_chain: typing.Any, message_id: int = -1) -> mirai.MessageChain:
|
||||||
|
"""将Yiri的消息链转换为YiriMirai的消息链"""
|
||||||
|
assert type(message_chain) is list
|
||||||
|
|
||||||
|
yiri_msg_list = []
|
||||||
|
import datetime
|
||||||
|
# 添加Source组件以标记message_id等信息
|
||||||
|
yiri_msg_list.append(mirai.models.message.Source(id=message_id, time=datetime.datetime.now()))
|
||||||
|
for component in message_chain:
|
||||||
|
if type(component) is nkc.Plain:
|
||||||
|
yiri_msg_list.append(mirai.Plain(text=component.text))
|
||||||
|
elif type(component) is nkc.Image:
|
||||||
|
yiri_msg_list.append(mirai.Image(url=component.url))
|
||||||
|
elif type(component) is nkc.Face:
|
||||||
|
yiri_msg_list.append(mirai.Face(face_id=component.id))
|
||||||
|
elif type(component) is nkc.At:
|
||||||
|
yiri_msg_list.append(mirai.At(target=component.qq))
|
||||||
|
elif type(component) is nkc.AtAll:
|
||||||
|
yiri_msg_list.append(mirai.AtAll())
|
||||||
|
else:
|
||||||
|
pass
|
||||||
|
logging.debug("转换后的消息链: " + str(yiri_msg_list))
|
||||||
|
chain = mirai.MessageChain(yiri_msg_list)
|
||||||
|
return chain
|
||||||
|
|
||||||
|
|
||||||
|
class NakuruProjectEventConverter(adapter_model.EventConverter):
|
||||||
|
"""事件转换器"""
|
||||||
|
@staticmethod
|
||||||
|
def yiri2target(event: typing.Type[mirai.Event]):
|
||||||
|
if event is mirai.GroupMessage:
|
||||||
|
return nakuru.GroupMessage
|
||||||
|
elif event is mirai.FriendMessage:
|
||||||
|
return nakuru.FriendMessage
|
||||||
|
else:
|
||||||
|
raise Exception("未支持转换的事件类型: " + str(event))
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def target2yiri(event: typing.Any) -> mirai.Event:
|
||||||
|
yiri_chain = NakuruProjectMessageConverter.target2yiri(event.message, event.message_id)
|
||||||
|
if type(event) is nakuru.FriendMessage: # 私聊消息事件
|
||||||
|
return mirai.FriendMessage(
|
||||||
|
sender=mirai.models.entities.Friend(
|
||||||
|
id=event.sender.user_id,
|
||||||
|
nickname=event.sender.nickname,
|
||||||
|
remark=event.sender.nickname
|
||||||
|
),
|
||||||
|
message_chain=yiri_chain,
|
||||||
|
time=event.time
|
||||||
|
)
|
||||||
|
elif type(event) is nakuru.GroupMessage: # 群聊消息事件
|
||||||
|
permission = "MEMBER"
|
||||||
|
|
||||||
|
if event.sender.role == "admin":
|
||||||
|
permission = "ADMINISTRATOR"
|
||||||
|
elif event.sender.role == "owner":
|
||||||
|
permission = "OWNER"
|
||||||
|
|
||||||
|
import mirai.models.entities as entities
|
||||||
|
return mirai.GroupMessage(
|
||||||
|
sender=mirai.models.entities.GroupMember(
|
||||||
|
id=event.sender.user_id,
|
||||||
|
member_name=event.sender.nickname,
|
||||||
|
permission=permission,
|
||||||
|
group=mirai.models.entities.Group(
|
||||||
|
id=event.group_id,
|
||||||
|
name=event.sender.nickname,
|
||||||
|
permission=entities.Permission.Member
|
||||||
|
),
|
||||||
|
special_title=event.sender.title,
|
||||||
|
join_timestamp=0,
|
||||||
|
last_speak_timestamp=0,
|
||||||
|
mute_time_remaining=0,
|
||||||
|
),
|
||||||
|
message_chain=yiri_chain,
|
||||||
|
time=event.time
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise Exception("未支持转换的事件类型: " + str(event))
|
||||||
|
|
||||||
|
|
||||||
|
class NakuruProjectAdapter(adapter_model.MessageSourceAdapter):
|
||||||
|
"""nakuru-project适配器"""
|
||||||
|
bot: nakuru.CQHTTP
|
||||||
|
bot_account_id: int
|
||||||
|
|
||||||
|
message_converter: NakuruProjectMessageConverter = NakuruProjectMessageConverter()
|
||||||
|
event_converter: NakuruProjectEventConverter = NakuruProjectEventConverter()
|
||||||
|
|
||||||
|
listener_list: list[dict]
|
||||||
|
|
||||||
|
def __init__(self, cfg: dict):
|
||||||
|
"""初始化nakuru-project的对象"""
|
||||||
|
self.bot = nakuru.CQHTTP(**cfg)
|
||||||
|
self.listener_list = []
|
||||||
|
# nakuru库有bug,这个接口没法带access_token,会失败
|
||||||
|
# 所以目前自行发请求
|
||||||
|
import config
|
||||||
|
import requests
|
||||||
|
resp = requests.get(
|
||||||
|
url="http://{}:{}/get_login_info".format(config.nakuru_config['host'], config.nakuru_config['http_port']),
|
||||||
|
headers={
|
||||||
|
'Authorization': "Bearer " + config.nakuru_config['token'] if 'token' in config.nakuru_config else ""
|
||||||
|
},
|
||||||
|
timeout=5
|
||||||
|
)
|
||||||
|
if resp.status_code == 403:
|
||||||
|
logging.error("go-cqhttp拒绝访问,请检查config.py中nakuru_config的token是否与go-cqhttp设置的access-token匹配")
|
||||||
|
raise Exception("go-cqhttp拒绝访问,请检查config.py中nakuru_config的token是否与go-cqhttp设置的access-token匹配")
|
||||||
|
try:
|
||||||
|
self.bot_account_id = int(resp.json()['data']['user_id'])
|
||||||
|
except Exception as e:
|
||||||
|
logging.error("获取go-cqhttp账号信息失败: {}, 请检查是否已启动go-cqhttp并配置正确".format(e))
|
||||||
|
raise Exception("获取go-cqhttp账号信息失败: {}, 请检查是否已启动go-cqhttp并配置正确".format(e))
|
||||||
|
|
||||||
|
def send_message(
|
||||||
|
self,
|
||||||
|
target_type: str,
|
||||||
|
target_id: str,
|
||||||
|
message: typing.Union[mirai.MessageChain, list],
|
||||||
|
converted: bool = False
|
||||||
|
):
|
||||||
|
task = None
|
||||||
|
|
||||||
|
converted_msg = self.message_converter.yiri2target(message) if not converted else message
|
||||||
|
|
||||||
|
# 检查是否有转发消息
|
||||||
|
has_forward = False
|
||||||
|
for msg in converted_msg:
|
||||||
|
if type(msg) is list: # 转发消息,仅回复此消息组件
|
||||||
|
has_forward = True
|
||||||
|
converted_msg = msg
|
||||||
|
break
|
||||||
|
if has_forward:
|
||||||
|
if target_type == "group":
|
||||||
|
task = self.bot.sendGroupForwardMessage(int(target_id), converted_msg)
|
||||||
|
elif target_type == "person":
|
||||||
|
task = self.bot.sendPrivateForwardMessage(int(target_id), converted_msg)
|
||||||
|
else:
|
||||||
|
raise Exception("Unknown target type: " + target_type)
|
||||||
|
else:
|
||||||
|
if target_type == "group":
|
||||||
|
task = self.bot.sendGroupMessage(int(target_id), converted_msg)
|
||||||
|
elif target_type == "person":
|
||||||
|
task = self.bot.sendFriendMessage(int(target_id), converted_msg)
|
||||||
|
else:
|
||||||
|
raise Exception("Unknown target type: " + target_type)
|
||||||
|
|
||||||
|
asyncio.run(task)
|
||||||
|
|
||||||
|
def reply_message(
|
||||||
|
self,
|
||||||
|
message_source: mirai.MessageEvent,
|
||||||
|
message: mirai.MessageChain,
|
||||||
|
quote_origin: bool = False
|
||||||
|
):
|
||||||
|
message = self.message_converter.yiri2target(message)
|
||||||
|
if quote_origin:
|
||||||
|
# 在前方添加引用组件
|
||||||
|
message.insert(0, nkc.Reply(
|
||||||
|
id=message_source.message_chain.message_id,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if type(message_source) is mirai.GroupMessage:
|
||||||
|
self.send_message(
|
||||||
|
"group",
|
||||||
|
message_source.sender.group.id,
|
||||||
|
message,
|
||||||
|
converted=True
|
||||||
|
)
|
||||||
|
elif type(message_source) is mirai.FriendMessage:
|
||||||
|
self.send_message(
|
||||||
|
"person",
|
||||||
|
message_source.sender.id,
|
||||||
|
message,
|
||||||
|
converted=True
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise Exception("Unknown message source type: " + str(type(message_source)))
|
||||||
|
|
||||||
|
def is_muted(self, group_id: int) -> bool:
|
||||||
|
import time
|
||||||
|
# 检查是否被禁言
|
||||||
|
group_member_info = asyncio.run(self.bot.getGroupMemberInfo(group_id, self.bot_account_id))
|
||||||
|
return group_member_info.shut_up_timestamp > int(time.time())
|
||||||
|
|
||||||
|
def register_listener(
|
||||||
|
self,
|
||||||
|
event_type: typing.Type[mirai.Event],
|
||||||
|
callback: typing.Callable[[mirai.Event], None]
|
||||||
|
):
|
||||||
|
try:
|
||||||
|
logging.debug("注册监听器: " + str(event_type) + " -> " + str(callback))
|
||||||
|
|
||||||
|
# 包装函数
|
||||||
|
async def listener_wrapper(app: nakuru.CQHTTP, source: self.event_converter.yiri2target(event_type)):
|
||||||
|
callback(self.event_converter.target2yiri(source))
|
||||||
|
|
||||||
|
# 将包装函数和原函数的对应关系存入列表
|
||||||
|
self.listener_list.append(
|
||||||
|
{
|
||||||
|
"event_type": event_type,
|
||||||
|
"callable": callback,
|
||||||
|
"wrapper": listener_wrapper,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
# 注册监听器
|
||||||
|
self.bot.receiver(self.event_converter.yiri2target(event_type).__name__)(listener_wrapper)
|
||||||
|
logging.debug("注册完成")
|
||||||
|
except Exception as e:
|
||||||
|
traceback.print_exc()
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def unregister_listener(
|
||||||
|
self,
|
||||||
|
event_type: typing.Type[mirai.Event],
|
||||||
|
callback: typing.Callable[[mirai.Event], None]
|
||||||
|
):
|
||||||
|
nakuru_event_name = self.event_converter.yiri2target(event_type).__name__
|
||||||
|
|
||||||
|
new_event_list = []
|
||||||
|
|
||||||
|
# 从本对象的监听器列表中查找并删除
|
||||||
|
target_wrapper = None
|
||||||
|
for listener in self.listener_list:
|
||||||
|
if listener["event_type"] == event_type and listener["callable"] == callback:
|
||||||
|
target_wrapper = listener["wrapper"]
|
||||||
|
self.listener_list.remove(listener)
|
||||||
|
break
|
||||||
|
|
||||||
|
if target_wrapper is None:
|
||||||
|
raise Exception("未找到对应的监听器")
|
||||||
|
|
||||||
|
for func in self.bot.event[nakuru_event_name]:
|
||||||
|
if func.callable != target_wrapper:
|
||||||
|
new_event_list.append(func)
|
||||||
|
|
||||||
|
self.bot.event[nakuru_event_name] = new_event_list
|
||||||
|
|
||||||
|
def run_sync(self):
|
||||||
|
loop = asyncio.new_event_loop()
|
||||||
|
asyncio.set_event_loop(loop)
|
||||||
|
self.bot.run()
|
||||||
|
|
||||||
|
def kill(self) -> bool:
|
||||||
|
return False
|
||||||
123
pkg/qqbot/sources/yirimirai.py
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
import asyncio
|
||||||
|
import typing
|
||||||
|
|
||||||
|
import mirai
|
||||||
|
import mirai.models.bus
|
||||||
|
from mirai.bot import MiraiRunner
|
||||||
|
|
||||||
|
from .. import adapter as adapter_model
|
||||||
|
|
||||||
|
|
||||||
|
class YiriMiraiAdapter(adapter_model.MessageSourceAdapter):
|
||||||
|
"""YiriMirai适配器"""
|
||||||
|
bot: mirai.Mirai
|
||||||
|
|
||||||
|
def __init__(self, config: dict):
|
||||||
|
"""初始化YiriMirai的对象"""
|
||||||
|
if 'adapter' not in config or \
|
||||||
|
config['adapter'] == 'WebSocketAdapter':
|
||||||
|
self.bot = mirai.Mirai(
|
||||||
|
qq=config['qq'],
|
||||||
|
adapter=mirai.WebSocketAdapter(
|
||||||
|
host=config['host'],
|
||||||
|
port=config['port'],
|
||||||
|
verify_key=config['verifyKey']
|
||||||
|
)
|
||||||
|
)
|
||||||
|
elif config['adapter'] == 'HTTPAdapter':
|
||||||
|
self.bot = mirai.Mirai(
|
||||||
|
qq=config['qq'],
|
||||||
|
adapter=mirai.HTTPAdapter(
|
||||||
|
host=config['host'],
|
||||||
|
port=config['port'],
|
||||||
|
verify_key=config['verifyKey']
|
||||||
|
)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise Exception('Unknown adapter for YiriMirai: ' + config['adapter'])
|
||||||
|
|
||||||
|
def send_message(
|
||||||
|
self,
|
||||||
|
target_type: str,
|
||||||
|
target_id: str,
|
||||||
|
message: mirai.MessageChain
|
||||||
|
):
|
||||||
|
"""发送消息
|
||||||
|
|
||||||
|
Args:
|
||||||
|
target_type (str): 目标类型,`person`或`group`
|
||||||
|
target_id (str): 目标ID
|
||||||
|
message (mirai.MessageChain): YiriMirai库的消息链
|
||||||
|
"""
|
||||||
|
task = None
|
||||||
|
if target_type == 'person':
|
||||||
|
task = self.bot.send_friend_message(int(target_id), message)
|
||||||
|
elif target_type == 'group':
|
||||||
|
task = self.bot.send_group_message(int(target_id), message)
|
||||||
|
else:
|
||||||
|
raise Exception('Unknown target type: ' + target_type)
|
||||||
|
|
||||||
|
asyncio.run(task)
|
||||||
|
|
||||||
|
def reply_message(
|
||||||
|
self,
|
||||||
|
message_source: mirai.MessageEvent,
|
||||||
|
message: mirai.MessageChain,
|
||||||
|
quote_origin: bool = False
|
||||||
|
):
|
||||||
|
"""回复消息
|
||||||
|
|
||||||
|
Args:
|
||||||
|
message_source (mirai.MessageEvent): YiriMirai消息源事件
|
||||||
|
message (mirai.MessageChain): YiriMirai库的消息链
|
||||||
|
quote_origin (bool, optional): 是否引用原消息. Defaults to False.
|
||||||
|
"""
|
||||||
|
asyncio.run(self.bot.send(message_source, message, quote_origin))
|
||||||
|
|
||||||
|
def is_muted(self, group_id: int) -> bool:
|
||||||
|
result = self.bot.member_info(target=group_id, member_id=self.bot.qq).get()
|
||||||
|
result = asyncio.run(result)
|
||||||
|
if result.mute_time_remaining > 0:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def register_listener(
|
||||||
|
self,
|
||||||
|
event_type: typing.Type[mirai.Event],
|
||||||
|
callback: typing.Callable[[mirai.Event], None]
|
||||||
|
):
|
||||||
|
"""注册事件监听器
|
||||||
|
|
||||||
|
Args:
|
||||||
|
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||||
|
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||||
|
"""
|
||||||
|
self.bot.on(event_type)(callback)
|
||||||
|
|
||||||
|
def unregister_listener(
|
||||||
|
self,
|
||||||
|
event_type: typing.Type[mirai.Event],
|
||||||
|
callback: typing.Callable[[mirai.Event], None]
|
||||||
|
):
|
||||||
|
"""注销事件监听器
|
||||||
|
|
||||||
|
Args:
|
||||||
|
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||||
|
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||||
|
"""
|
||||||
|
assert isinstance(self.bot, mirai.Mirai)
|
||||||
|
bus = self.bot.bus
|
||||||
|
assert isinstance(bus, mirai.models.bus.ModelEventBus)
|
||||||
|
|
||||||
|
bus.unsubscribe(event_type, callback)
|
||||||
|
|
||||||
|
def run_sync(self):
|
||||||
|
"""运行YiriMirai"""
|
||||||
|
|
||||||
|
# 创建新的
|
||||||
|
loop = asyncio.new_event_loop()
|
||||||
|
|
||||||
|
loop.run_until_complete(MiraiRunner(self.bot)._run())
|
||||||
|
|
||||||
|
def kill(self) -> bool:
|
||||||
|
return False
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
import threading
|
import threading
|
||||||
from pkg.utils import ThreadCtl
|
from . import threadctl
|
||||||
|
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
@@ -87,8 +87,8 @@ def set_thread_ctl(inst):
|
|||||||
context_lock.release()
|
context_lock.release()
|
||||||
|
|
||||||
|
|
||||||
def get_thread_ctl() -> ThreadCtl:
|
def get_thread_ctl() -> threadctl.ThreadCtl:
|
||||||
context_lock.acquire()
|
context_lock.acquire()
|
||||||
t: ThreadCtl = context['pool_ctl']
|
t: threadctl.ThreadCtl = context['pool_ctl']
|
||||||
context_lock.release()
|
context_lock.release()
|
||||||
return t
|
return t
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ def reset_logging():
|
|||||||
|
|
||||||
logging.basicConfig(level=config.logging_level, # 设置日志输出格式
|
logging.basicConfig(level=config.logging_level, # 设置日志输出格式
|
||||||
filename=log_file_name, # log日志输出的文件位置和文件名
|
filename=log_file_name, # log日志输出的文件位置和文件名
|
||||||
format="[%(asctime)s.%(msecs)03d] %(filename)s (%(lineno)d) - [%(levelname)s] : %(message)s",
|
format="[%(asctime)s.%(msecs)03d] %(pathname)s (%(lineno)d) - [%(levelname)s] :\n%(message)s",
|
||||||
# 日志输出的格式
|
# 日志输出的格式
|
||||||
# -8表示占位符,让输出左对齐,输出长度都为8位
|
# -8表示占位符,让输出左对齐,输出长度都为8位
|
||||||
datefmt="%Y-%m-%d %H:%M:%S" # 时间输出的格式
|
datefmt="%Y-%m-%d %H:%M:%S" # 时间输出的格式
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
from pip._internal import main as pipmain
|
from pip._internal import main as pipmain
|
||||||
|
|
||||||
import pkg.utils.log as log
|
from . import log
|
||||||
|
|
||||||
|
|
||||||
def install(package):
|
def install(package):
|
||||||
@@ -8,7 +8,8 @@ def install(package):
|
|||||||
log.reset_logging()
|
log.reset_logging()
|
||||||
|
|
||||||
def install_upgrade(package):
|
def install_upgrade(package):
|
||||||
pipmain(['install', '--upgrade', package])
|
pipmain(['install', '--upgrade', package, "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
|
||||||
|
"--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
|
||||||
log.reset_logging()
|
log.reset_logging()
|
||||||
|
|
||||||
|
|
||||||
@@ -18,7 +19,8 @@ def run_pip(params: list):
|
|||||||
|
|
||||||
|
|
||||||
def install_requirements(file):
|
def install_requirements(file):
|
||||||
pipmain(['install', '-r', file, "--upgrade"])
|
pipmain(['install', '-r', file, "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
|
||||||
|
"--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
|
||||||
log.reset_logging()
|
log.reset_logging()
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,10 +1,9 @@
|
|||||||
import logging
|
import logging
|
||||||
import threading
|
|
||||||
|
|
||||||
import importlib
|
import importlib
|
||||||
import pkgutil
|
import pkgutil
|
||||||
import pkg.utils.context as context
|
|
||||||
import pkg.plugin.host
|
from . import context
|
||||||
|
from ..plugin import host as plugin_host
|
||||||
|
|
||||||
|
|
||||||
def walk(module, prefix='', path_prefix=''):
|
def walk(module, prefix='', path_prefix=''):
|
||||||
@@ -15,7 +14,7 @@ def walk(module, prefix='', path_prefix=''):
|
|||||||
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.', path_prefix + item.name + '/')
|
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.', path_prefix + item.name + '/')
|
||||||
else:
|
else:
|
||||||
logging.info('reload module: {}, path: {}'.format(prefix + item.name, path_prefix + item.name + '.py'))
|
logging.info('reload module: {}, path: {}'.format(prefix + item.name, path_prefix + item.name + '.py'))
|
||||||
pkg.plugin.host.__current_module_path__ = "plugins/" + path_prefix + item.name + '.py'
|
plugin_host.__current_module_path__ = "plugins/" + path_prefix + item.name + '.py'
|
||||||
importlib.reload(__import__(module.__name__ + '.' + item.name, fromlist=['']))
|
importlib.reload(__import__(module.__name__ + '.' + item.name, fromlist=['']))
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
import logging
|
import logging
|
||||||
|
|
||||||
from PIL import Image, ImageDraw, ImageFont
|
|
||||||
import re
|
import re
|
||||||
import os
|
import os
|
||||||
import config
|
import config
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
|
from PIL import Image, ImageDraw, ImageFont
|
||||||
|
|
||||||
text_render_font: ImageFont = None
|
text_render_font: ImageFont = None
|
||||||
|
|
||||||
if config.blob_message_strategy == "image": # 仅在启用了image时才加载字体
|
if config.blob_message_strategy == "image": # 仅在启用了image时才加载字体
|
||||||
|
|||||||
@@ -3,10 +3,9 @@ import logging
|
|||||||
import os.path
|
import os.path
|
||||||
|
|
||||||
import requests
|
import requests
|
||||||
import json
|
|
||||||
|
|
||||||
import pkg.utils.constants
|
from . import constants
|
||||||
import pkg.utils.network as network
|
from . import network
|
||||||
|
|
||||||
|
|
||||||
def check_dulwich_closure():
|
def check_dulwich_closure():
|
||||||
@@ -34,13 +33,18 @@ def pull_latest(repo_path: str) -> bool:
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def is_newer_ignored_bugfix_ver(new_tag: str, old_tag: str):
|
def is_newer(new_tag: str, old_tag: str):
|
||||||
"""判断版本是否更新,忽略第四位版本"""
|
"""判断版本是否更新,忽略第四位版本和第一位版本"""
|
||||||
if new_tag == old_tag:
|
if new_tag == old_tag:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
new_tag = new_tag.split(".")
|
new_tag = new_tag.split(".")
|
||||||
old_tag = old_tag.split(".")
|
old_tag = old_tag.split(".")
|
||||||
|
|
||||||
|
# 判断主版本是否相同
|
||||||
|
if new_tag[0] != old_tag[0]:
|
||||||
|
return False
|
||||||
|
|
||||||
if len(new_tag) < 4:
|
if len(new_tag) < 4:
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@@ -65,7 +69,7 @@ def get_release_list() -> list:
|
|||||||
|
|
||||||
def get_current_tag() -> str:
|
def get_current_tag() -> str:
|
||||||
"""获取当前tag"""
|
"""获取当前tag"""
|
||||||
current_tag = pkg.utils.constants.semantic_version
|
current_tag = constants.semantic_version
|
||||||
if os.path.exists("current_tag"):
|
if os.path.exists("current_tag"):
|
||||||
with open("current_tag", "r") as f:
|
with open("current_tag", "r") as f:
|
||||||
current_tag = f.read()
|
current_tag = f.read()
|
||||||
@@ -73,6 +77,34 @@ def get_current_tag() -> str:
     return current_tag


+def compare_version_str(v0: str, v1: str) -> int:
+    """比较两个版本号"""
+
+    # 删除版本号前的v
+    if v0.startswith("v"):
+        v0 = v0[1:]
+    if v1.startswith("v"):
+        v1 = v1[1:]
+
+    v0:list = v0.split(".")
+    v1:list = v1.split(".")
+
+    # 如果两个版本号节数不同,把短的后面用0补齐
+    if len(v0) < len(v1):
+        v0.extend(["0"]*(len(v1)-len(v0)))
+    elif len(v0) > len(v1):
+        v1.extend(["0"]*(len(v0)-len(v1)))
+
+    # 从高位向低位比较
+    for i in range(len(v0)):
+        if int(v0[i]) > int(v1[i]):
+            return 1
+        elif int(v0[i]) < int(v1[i]):
+            return -1
+
+    return 0
+
+
 def update_all(cli: bool = False) -> bool:
     """检查更新并下载源码"""
     current_tag = get_current_tag()
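For a quick sanity check of the two helpers in the hunk above, the following expectations follow directly from the code shown; the import path is an assumption based on the updater module touched in this PR.

```python
# Illustrative only; assumes the helpers live in pkg/utils/updater.py as in this PR.
from pkg.utils.updater import compare_version_str, is_newer

# compare_version_str strips a leading "v", pads the shorter version with zeros,
# and compares segment by segment from the most significant field.
assert compare_version_str("v3.0.0", "2.9.9") == 1
assert compare_version_str("1.2", "1.2.0") == 0
assert compare_version_str("0.1.9", "0.2") == -1

# is_newer now also treats a different major version as "not newer",
# in addition to ignoring the fourth (bugfix) segment.
assert is_newer("v2.6.1", "v2.6.0") is True
assert is_newer("v3.0.0", "v2.6.0") is False   # major version differs
assert is_newer("v2.6.0", "v2.6.0") is False   # identical tags
```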
@@ -97,7 +129,7 @@ def update_all(cli: bool = False) -> bool:
|
|||||||
else:
|
else:
|
||||||
print("更新日志: {}".format(rls_notes))
|
print("更新日志: {}".format(rls_notes))
|
||||||
|
|
||||||
if latest_rls == {} and not is_newer_ignored_bugfix_ver(latest_tag_name, current_tag): # 没有新版本
|
if latest_rls == {} and not is_newer(latest_tag_name, current_tag): # 没有新版本
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# 下载最新版本的zip到temp目录
|
# 下载最新版本的zip到temp目录
|
||||||
@@ -254,7 +286,7 @@ def is_new_version_available() -> bool:
|
|||||||
latest_tag_name = rls['tag_name']
|
latest_tag_name = rls['tag_name']
|
||||||
break
|
break
|
||||||
|
|
||||||
return is_newer_ignored_bugfix_ver(latest_tag_name, current_tag)
|
return is_newer(latest_tag_name, current_tag)
|
||||||
|
|
||||||
|
|
||||||
def get_rls_notes() -> list:
|
def get_rls_notes() -> list:
|
||||||
|
|||||||
@@ -1,9 +1,13 @@
-requests~=2.28.1
-openai~=0.27.4
-dulwich~=0.21.3
+requests
+openai
+dulwich~=0.21.6
 colorlog~=6.6.0
-yiri-mirai~=0.2.6.1
+yiri-mirai-rc
 websockets
-urllib3~=1.26.10
+urllib3
 func_timeout~=4.3.5
 Pillow
+nakuru-project-idk
+CallingGPT
+tiktoken
+PyYaml
@@ -1 +1,20 @@
-[]
+[
+    {
+        "id": 2,
+        "time": "2023-08-01 10:49:26",
+        "timestamp": 1690858166,
+        "content": "现已支持GPT函数调用功能,欢迎了解:https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0"
+    },
+    {
+        "id": 3,
+        "time": "2023-11-10 12:20:09",
+        "timestamp": 1699590009,
+        "content": "OpenAI 库1.0版本已发行,若出现 OpenAI 调用问题,请更新 QChatGPT 版本。详见项目主页:https://github.com/RockChinQ/QChatGPT"
+    },
+    {
+        "id": 4,
+        "time": "2023-11-13 18:02:39",
+        "timestamp": 1699869759,
+        "content": "近期 OpenAI 接口改动频繁,正在积极适配并添加新功能,请尽快更新到最新版本,更新方式:https://github.com/RockChinQ/QChatGPT/discussions/595"
+    }
+]
@@ -1,4 +1,6 @@
+> **Warning**
+> 此文档已过时,请查看[QChatGPT 容器化部署指南](docker_deployment.md)
+
 ## 操作步骤
res/docs/docker_deployment.md (new file, 64 lines)
@@ -0,0 +1,64 @@
# QChatGPT 容器化部署指南

> **Warning**
> 请您确保您**确实**需要 Docker 部署,您**必须**具有以下能力:
> - 了解 `Docker` 和 `Docker Compose` 的使用
> - 了解容器间网络通信配置方式
> - 了解容器文件挂载机制
> - 了解容器调试操作
> - 动手能力强、资料查找能力强
>
> 若您不完全具有以上能力,请勿使用 Docker 部署,由于误操作导致的配置不正确,我们将不会解答您的问题并不负任何责任。
> **非常不建议**您在除 Linux 之外的系统上使用 Docker 进行部署。

## 概览

QChatGPT 主程序需要连接`QQ登录框架`以与QQ通信,您可以选择 [Mirai](https://github.com/mamoe/mirai)(还需要配置mirai-api-http,请查看此仓库README中手动部署部分) 或 [go-cqhttp](https://github.com/Mrs4s/go-cqhttp),我们仅发布 QChatGPT主程序 的镜像,您需要自行配置QQ登录框架(可以参考[README.md](https://github.com/RockChinQ/QChatGPT#-%E9%85%8D%E7%BD%AEqq%E7%99%BB%E5%BD%95%E6%A1%86%E6%9E%B6)中的教程,或自行寻找其镜像)并在 QChatGPT 的配置文件中设置连接地址。

> **Note**
> 请先确保 Docker 和 Docker Compose 已安装

## 准备文件

> QChatGPT 目前暂不可以在没有配置模板文件的情况下自动生成文件,您需要按照以下步骤手动创建需要挂载的文件。
> 如无特殊说明,模板文件均在此仓库中。

> 如果您不想挨个创建,也可以直接clone本仓库到本地,执行`python main.py`后即可自动根据模板生成所需文件。

现在请在一个空目录创建以下文件或目录:

### 📄`config.py`

复制根目录的`config-template.py`所有内容,创建`config.py`并根据其中注释进行修改。

### 📄`banlist.py`

复制`res/templates/banlist-template.py`所有内容,创建`banlist.py`,这是黑名单配置文件,根据需要修改。

### 📄`cmdpriv.json`

复制`res/templates/cmdpriv-template.json`所有内容,创建`cmdpriv.json`,这是各命令的权限配置文件,根据需要修改。

### 📄`sensitive.json`

复制`res/templates/sensitive-template.json`所有内容,创建`sensitive.json`,这是敏感词配置,根据需要修改。

### 📄`tips.py`

复制`tips-custom-template.py`所有内容,创建`tips.py`,这是部分提示语的配置,根据需要修改。

## 运行

已预先准备好`docker-compose.yaml`,您需要根据您的网络配置进行适当修改,使容器内的 QChatGPT 程序可以正常与 Mirai 或 go-cqhttp 通信。

将`docker-compose.yaml`复制到本目录,根据网络环境进行配置,并执行:

```bash
docker compose up
```

若无报错即配置完成,您可以Ctrl+C关闭后使用`docker compose up -d`将其置于后台运行

## 注意

- 安装的插件都会保存在`plugins`(映射到本目录`plugins`),安装插件时可能会自动安装相应的依赖,此时若`重新创建`容器,已安装的插件将被加载,但所需的增量依赖并未安装,会导致引入问题。您可以删除插件目录后重启,再次安装插件,以便程序可以自动安装插件所需依赖。
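If you prefer not to create each mounted file by hand, the copy step described in the guide above can be scripted. A minimal sketch using the template paths named in the guide; run it from a clone of the repository while writing into your deployment directory (the script itself is illustrative and not part of the project):

```python
import shutil

# Source templates (paths as named in the guide) -> files to mount into the container.
COPIES = {
    "config-template.py": "config.py",
    "res/templates/banlist-template.py": "banlist.py",
    "res/templates/cmdpriv-template.json": "cmdpriv.json",
    "res/templates/sensitive-template.json": "sensitive.json",
    "tips-custom-template.py": "tips.py",
}

for src, dst in COPIES.items():
    shutil.copyfile(src, dst)
    print(f"created {dst} from {src}")
```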
Binary files added: res/logo.png (35 KiB), res/screenshots/group_gpt3.5.png (129 KiB), res/screenshots/person_gpt3.5.png (101 KiB), res/screenshots/person_newbing.png (98 KiB), res/screenshots/webwlkr_plugin.png (22 KiB), res/social.png (70 KiB).
@@ -1,3 +1,13 @@
+# 是否处理群聊消息
+# 为False时忽略所有群聊消息
+# 优先级高于下方禁用列表
+enable_group = True
+
+# 是否处理私聊消息
+# 为False时忽略所有私聊消息
+# 优先级高于下方禁用列表
+enable_private = True
+
 # 是否启用禁用列表
 enable = True
@@ -1,7 +1,8 @@
 {
     "comment": "以下为命令权限,请设置到cmdpriv.json中。关于此功能的说明,请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%91%BD%E4%BB%A4%E6%9D%83%E9%99%90%E6%8E%A7%E5%88%B6",
     "draw": 1,
-    "plugin": 2,
+    "func": 1,
+    "plugin": 1,
     "plugin.get": 2,
     "plugin.update": 2,
     "plugin.del": 2,
Binary file added: res/webwlkr-demo.gif (879 KiB).
@@ -48,12 +48,12 @@
|
|||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>✅支持预设指令文字</summary>
|
<summary>✅支持预设文字</summary>
|
||||||
|
|
||||||
- 支持以自然语言预设文字,自定义机器人人格等信息
|
- 支持以自然语言预设文字,自定义机器人人格等信息
|
||||||
- 详见`config.py`中的`default_prompt`部分
|
- 详见`config.py`中的`default_prompt`部分
|
||||||
- 支持设置多个预设情景,并通过!reset、!default等指令控制,详细请查看[wiki指令](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)
|
- 支持设置多个预设情景,并通过!reset、!default等命令控制,详细请查看[wiki命令](https://github.com/RockChinQ/QChatGPT/wiki/1-%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E5%91%BD%E4%BB%A4)
|
||||||
- 支持使用文件存储情景预设文字,并加载: 在`prompts/`目录新建文件写入预设文字,即可通过`!reset <文件名>`指令加载
|
- 支持使用文件存储情景预设文字,并加载: 在`prompts/`目录新建文件写入预设文字,即可通过`!reset <文件名>`命令加载
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
@@ -61,25 +61,25 @@
|
|||||||
|
|
||||||
- 使用SQLite进行会话内容持久化
|
- 使用SQLite进行会话内容持久化
|
||||||
- 最后一次对话一定时间后自动保存,请到`config.py`中修改`session_expire_time`的值以自定义时间
|
- 最后一次对话一定时间后自动保存,请到`config.py`中修改`session_expire_time`的值以自定义时间
|
||||||
- 运行期间可使用`!reset` `!list` `!last` `!next` `!prompt`等指令管理会话
|
- 运行期间可使用`!reset` `!list` `!last` `!next` `!prompt`等命令管理会话
|
||||||
</details>
|
</details>
|
||||||
<details>
|
<details>
|
||||||
<summary>✅支持对话、绘图等模型,可玩性更高</summary>
|
<summary>✅支持对话、绘图等模型,可玩性更高</summary>
|
||||||
|
|
||||||
- 现已支持OpenAI的对话`Completion API`和绘图`Image API`
|
- 现已支持OpenAI的对话`Completion API`和绘图`Image API`
|
||||||
- 向机器人发送指令`!draw <prompt>`即可使用绘图模型
|
- 向机器人发送命令`!draw <prompt>`即可使用绘图模型
|
||||||
</details>
|
</details>
|
||||||
<details>
|
<details>
|
||||||
<summary>✅支持指令控制热重载、热更新</summary>
|
<summary>✅支持命令控制热重载、热更新</summary>
|
||||||
|
|
||||||
- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送指令`!reload`进行热重载,无需重启
|
- 允许在运行期间修改`config.py`或其他代码后,以管理员账号向机器人发送命令`!reload`进行热重载,无需重启
|
||||||
- 运行期间允许以管理员账号向机器人发送指令`!update`进行热更新,拉取远程最新代码并执行热重载
|
- 运行期间允许以管理员账号向机器人发送命令`!update`进行热更新,拉取远程最新代码并执行热重载
|
||||||
</details>
|
</details>
|
||||||
<details>
|
<details>
|
||||||
<summary>✅支持插件加载🧩</summary>
|
<summary>✅支持插件加载🧩</summary>
|
||||||
|
|
||||||
- 自行实现插件加载器及相关支持
|
- 自行实现插件加载器及相关支持
|
||||||
- 详细查看[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
|
- 详细查看[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
|
||||||
</details>
|
</details>
|
||||||
<details>
|
<details>
|
||||||
<summary>✅私聊、群聊黑名单机制</summary>
|
<summary>✅私聊、群聊黑名单机制</summary>
|
||||||
@@ -153,14 +153,14 @@
|
|||||||
|
|
||||||
<img alt="绘图功能" src="https://github.com/RockChinQ/QChatGPT/blob/master/res/屏幕截图%202022-12-29%20194948.png" width="550" height="348"/>
|
<img alt="绘图功能" src="https://github.com/RockChinQ/QChatGPT/blob/master/res/屏幕截图%202022-12-29%20194948.png" width="550" height="348"/>
|
||||||
|
|
||||||
### 机器人指令
|
### 机器人命令
|
||||||
|
|
||||||
目前支持的指令
|
目前支持的命令
|
||||||
|
|
||||||
> `<>` 中的为必填参数,使用时请不要包含`<>`
|
> `<>` 中的为必填参数,使用时请不要包含`<>`
|
||||||
> `[]` 中的为可选参数,使用时请不要包含`[]`
|
> `[]` 中的为可选参数,使用时请不要包含`[]`
|
||||||
|
|
||||||
#### 用户级别指令
|
#### 用户级别命令
|
||||||
|
|
||||||
> 可以使用`!help`命令来查看命令说明
|
> 可以使用`!help`命令来查看命令说明
|
||||||
|
|
||||||
@@ -174,7 +174,7 @@
|
|||||||
!del all 删除本会话对象的所有历史记录
|
!del all 删除本会话对象的所有历史记录
|
||||||
!last 切换到前一次会话
|
!last 切换到前一次会话
|
||||||
!next 切换到后一次会话
|
!next 切换到后一次会话
|
||||||
!reset [使用预设] 重置对象的当前会话,可指定使用的情景预设值(通过!default指令查看可用的)
|
!reset [使用预设] 重置对象的当前会话,可指定使用的情景预设值(通过!default命令查看可用的)
|
||||||
!prompt 查看对象当前会话的所有记录
|
!prompt 查看对象当前会话的所有记录
|
||||||
!usage 查看api-key的使用量
|
!usage 查看api-key的使用量
|
||||||
!draw <提示语> 进行绘图
|
!draw <提示语> 进行绘图
|
||||||
@@ -184,7 +184,7 @@
|
|||||||
!default 查看可用的情景预设值
|
!default 查看可用的情景预设值
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 管理员指令
|
#### 管理员命令
|
||||||
|
|
||||||
仅管理员私聊机器人时可使用,必须先在`config.py`中的`admin_qq`设置管理员QQ
|
仅管理员私聊机器人时可使用,必须先在`config.py`中的`admin_qq`设置管理员QQ
|
||||||
|
|
||||||
@@ -197,9 +197,9 @@
|
|||||||
!delhst all 删除所有会话的所有历史记录
|
!delhst all 删除所有会话的所有历史记录
|
||||||
```
|
```
|
||||||
<details>
|
<details>
|
||||||
<summary>⚙ !cfg 指令及其简化形式详解</summary>
|
<summary>⚙ !cfg 命令及其简化形式详解</summary>
|
||||||
|
|
||||||
此指令可以在运行期间由管理员通过QQ私聊窗口修改配置信息,**重启之后会失效**。
|
此命令可以在运行期间由管理员通过QQ私聊窗口修改配置信息,**重启之后会失效**。
|
||||||
|
|
||||||
用法:
|
用法:
|
||||||
1. 查看所有配置项及其值
|
1. 查看所有配置项及其值
|
||||||
@@ -225,7 +225,7 @@
|
|||||||
格式: `!cfg <配置项名称> <配置项新值>`
|
格式: `!cfg <配置项名称> <配置项新值>`
|
||||||
以修改`default_prompt`示例
|
以修改`default_prompt`示例
|
||||||
```
|
```
|
||||||
!cfg default_prompt 我是Rock Chin
|
!cfg default_prompt "我是Rock Chin"
|
||||||
```
|
```
|
||||||
|
|
||||||
输出示例
|
输出示例
|
||||||
@@ -239,11 +239,19 @@

 格式:`!~<配置项名称>`
 其中`!~`等价于`!cfg `
-则前述三个指令分别可以简化为:
+则前述三个命令分别可以简化为:
 ```
 !~all
 !~default_prompt
-!~default_prompt 我是Rock Chin
+!~default_prompt "我是Rock Chin"
+```
+
+5. 配置项名称支持使用点号(.)拼接以索引子配置项
+
+例如: `openai_config.api_key`将索引`config`字典中的`openai_config`字典中的`api_key`字段,可以通过这个方式查看或修改此子配置项
+
+```
+!~openai_config.api_key
 ```

 </details>
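The dotted-path indexing described in item 5 above can be pictured with a small sketch; this is illustrative only and not the project's actual implementation:

```python
# Illustrative sketch of dotted-path indexing into a nested config dict.
def resolve_dotted(cfg: dict, path: str):
    node = cfg
    for key in path.split("."):
        node = node[key]  # each segment descends one level
    return node

config = {"openai_config": {"api_key": {"default": "sk-..."}, "http_proxy": None}}
print(resolve_dotted(config, "openai_config.api_key"))  # -> {'default': 'sk-...'}
```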
@@ -282,11 +290,11 @@ sensitive_word_filter = True
|
|||||||
### 预设文字(default模式)
|
### 预设文字(default模式)
|
||||||
|
|
||||||
编辑`config.py`中的`default_prompt`字段,预设文字不宜过长(建议1000字以内),目前所有会话都会射到预设文字的影响。
|
编辑`config.py`中的`default_prompt`字段,预设文字不宜过长(建议1000字以内),目前所有会话都会射到预设文字的影响。
|
||||||
或将情景预设文字写入到`prompts/`目录下,运行期间即可使用`!reset <文件名>`指令加载,或使用`!default <文件名>`指令将其设为默认
|
或将情景预设文字写入到`prompts/`目录下,运行期间即可使用`!reset <文件名>`命令加载,或使用`!default <文件名>`命令将其设为默认
|
||||||
|
|
||||||
### 预设文字(full_scenario模式)
|
### 预设文字(full_scenario模式)
|
||||||
|
|
||||||
将JSON情景写入到`scenario/`目录下,运行期间即可使用`!reset <文件名>`指令加载,或使用`!default <文件名>`指令将其设为默认.
|
将JSON情景写入到`scenario/`目录下,运行期间即可使用`!reset <文件名>`命令加载,或使用`!default <文件名>`命令将其设为默认.
|
||||||
|
|
||||||
JSON情景模板参考`scenario/default_template.json`。
|
JSON情景模板参考`scenario/default_template.json`。
|
||||||
|
|
||||||
@@ -359,7 +367,7 @@ prompt_submit_length = <模型单次请求token数上限> - 情景预设中token
|
|||||||
|
|
||||||
在运行期间,使用管理员QQ账号私聊机器人,发送`!reload`加载修改后的`config.py`的值或编辑后的代码,无需重启
|
在运行期间,使用管理员QQ账号私聊机器人,发送`!reload`加载修改后的`config.py`的值或编辑后的代码,无需重启
|
||||||
使用管理员账号私聊机器人,发送`!update`拉取最新代码并进行热更新,无需重启
|
使用管理员账号私聊机器人,发送`!update`拉取最新代码并进行热更新,无需重启
|
||||||
详见前述`管理员指令`段落
|
详见前述`管理员命令`段落
|
||||||
|
|
||||||
### 群内无需@响应规则
|
### 群内无需@响应规则
|
||||||
|
|
||||||
@@ -367,4 +375,5 @@ prompt_submit_length = <模型单次请求token数上限> - 情景预设中token
|
|||||||
|
|
||||||
### 加入黑名单
|
### 加入黑名单
|
||||||
|
|
||||||
编辑`banlist.py`,设置`enable = True`,并在其中的`person`或`group`列表中加入要封禁的人或群聊,修改完成后重启程序或进行热重载
|
- 支持禁用所有`私聊`或`群聊`,请查看`banlist.py`中的`enable_private`和`enable_group`字段
|
||||||
|
- 编辑`banlist.py`,设置`enable = True`,并在其中的`person`或`group`列表中加入要封禁的人或群聊,修改完成后重启程序或进行热重载
|
||||||
@@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
#### 自动更新
|
#### 自动更新
|
||||||
|
|
||||||
由管理员QQ私聊机器人QQ发送`!update`指令
|
由管理员QQ私聊机器人QQ发送`!update`命令
|
||||||
|
|
||||||
#### 手动更新
|
#### 手动更新
|
||||||
|
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
以下是QChatGPT实现原理等技术信息,贡献之前请仔细阅读
|
以下是QChatGPT实现原理等技术信息,贡献之前请仔细阅读
|
||||||
|
|
||||||
|
> 太久没更了,过时了,建议读源码,~~注释还挺全的~~
|
||||||
> 请先阅读OpenAI API的相关文档 https://beta.openai.com/docs/ ,以下信息假定您已了解OpenAI模型的相关特性及其接口的调用方法。
|
> 请先阅读OpenAI API的相关文档 https://beta.openai.com/docs/ ,以下信息假定您已了解OpenAI模型的相关特性及其接口的调用方法。
|
||||||
|
|
||||||
## 术语
|
## 术语
|
||||||
res/wiki/5-插件使用.md (new file, 55 lines)
@@ -0,0 +1,55 @@

QChatGPT Plugin Usage Wiki

## Introduction

Every `.py` program under the `plugins` directory is loaded; all modules except `__init__.py` support hot loading.

> Plugins come in two kinds, `behavior plugins` and `content plugins`: behavior plugins are driven by events raised while the main program runs, content plugins are driven by content generated by GPT. See the content plugin page.
> List of existing plugins: [QChatGPT Plugins](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6)

## Installation

### Clone from a repository (recommended)

While the bot is running, private-message it from the administrator account with `!plugin get <Git repository URL>` to fetch the source and install the plugin automatically; the program installs dependencies according to the repository's `requirements.txt`.

For example, to install the `hello_plugin` plugin:

```
!plugin get https://github.com/RockChinQ/hello_plugin
```

After installation, restart the program, or private-message the bot from the administrator account with `!reload` to hot-reload and pick up the plugin.

### Manual installation

Place the obtained plugin program under the `plugins` directory; see each plugin's documentation, or ask its developer, for usage details.

## Management

### The !plugin command

```
!plugin                        list all installed plugins
!plugin get <repository URL>   install a plugin from a Git repository (administrator only)
!plugin update all             update all plugins (administrator only; only plugins installed from a repository)
!plugin update <plugin name>   update the specified plugin
!plugin del <plugin name>      delete a plugin (administrator only)
!plugin on <plugin name>       enable a plugin (administrator only)
!plugin off <plugin name>      disable a plugin (administrator only)

!func                          list all content functions
```

### Controlling plugin execution order

Reorder the plugin names in the `order` field of `plugins/settings.json` to change the plugins' **initialization** and **event execution** order (see the sketch at the end of this page).

### Enabling or disabling plugins

Plugins can be switched on and off without uninstalling them.

Edit the `switch.json` file under the `plugins` directory and set the relevant plugin's `enabled` field to `true`/`false` (on/off); then restart the program or hot-reload for the switch to take effect.

### Controlling the global content-function switch

Content functions are built on [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); they are functions embedded in the conversation and called automatically by GPT.

Each plugin can register its own content functions; set `enabled` under `functions` in `settings.json` under the `plugins` directory to `true` or `false` to enable or disable these content functions globally.
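A minimal sketch of how the two `plugins/settings.json` switches described on this page fit together; only the `order` list and the `functions`/`enabled` keys come from the text above, while the rest of the file layout and the plugin names are assumptions:

```python
# Sketch: adjust plugins/settings.json from Python. Only "order" and
# functions["enabled"] are named in the text above; everything else is assumed.
import json

path = "plugins/settings.json"
with open(path, "r", encoding="utf-8") as f:
    settings = json.load(f)

settings["order"] = ["Webwlkr", "Hello"]    # initialization / event order: Webwlkr first (names are placeholders)
settings["functions"]["enabled"] = True     # global content-function switch

with open(path, "w", encoding="utf-8") as f:
    json.dump(settings, f, ensure_ascii=False, indent=4)
```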
res/wiki/6-插件使用-内容函数.md (new file, 31 lines)
@@ -0,0 +1,31 @@

> Simply put, these are the same kind of thing as the official ChatGPT plugins

Content functions are built on [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); they are functions embedded in the conversation and called automatically by GPT.

For example, if we give GPT a function `access_the_web` together with a detailed description of it and of its parameters, then whenever our conversation with GPT involves something like the following:

```
Q: Search GitHub for QQ bot projects, please.
Q: Find me some good cloud hosting provider websites.
Q: Read and summarize this article: https://zhuanlan.zhihu.com/p/607570830
Q: Look up today's weather in Qingyuan.
```

GPT will reply with a function-call request for `access_the_web`; QChatGPT automatically executes the call and returns the result to GPT so that it can generate a new reply.
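For readers curious about what happens underneath, here is a rough sketch of that round trip at the OpenAI API level, written against the pre-1.0 `openai` Python interface used elsewhere in this repository's tests. It is not QChatGPT's code: the `access_the_web` schema and the local stand-in function are invented for illustration.

```python
# Sketch of the function-calling round trip (not QChatGPT's implementation).
import json
import openai

def access_the_web_stub(url: str) -> str:
    # hypothetical stand-in for the plugin-provided content function
    import requests
    return requests.get(url, timeout=10).text[:512]

functions = [{
    "name": "access_the_web",
    "description": "Fetch a web page and return its plain-text content.",
    "parameters": {
        "type": "object",
        "properties": {"url": {"type": "string", "description": "url to visit"}},
        "required": ["url"],
    },
}]

messages = [{"role": "user", "content": "Read and summarize this article: https://zhuanlan.zhihu.com/p/607570830"}]

resp = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=messages, functions=functions)
msg = resp["choices"][0]["message"]

if msg.get("function_call"):                                  # GPT asked for the function
    args = json.loads(msg["function_call"]["arguments"])
    result = access_the_web_stub(args["url"])                 # run the function locally
    messages += [msg, {"role": "function", "name": "access_the_web", "content": result}]
    resp = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", messages=messages, functions=functions)

print(resp["choices"][0]["message"]["content"])               # final natural-language reply
```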
Of course, function calling is not limited to web access; it can also support image processing, scientific computing, itinerary planning and other features that need to call functions. In principle, content functions let us implement the same capabilities as `ChatGPT Plugins`.

- You need version `v2.5.0` or later to load plugins that contain content functions
- You also need to set `model` in `completion_api_params` in `config.py` to a model that supports function calling; `gpt-3.5-turbo-16k` is recommended
- Using this feature may consume your account balance in hard-to-predict ways, so keep an eye on it
- The [reverse-engineered library plugin](https://github.com/RockChinQ/revLibs) now also supports function calling. You can use GPT-3.5 function calling completely free of charge; if content functions are configured and enabled in the main program, the reverse-engineered ChatGPT will use them automatically

### What types of plugins does QChatGPT have, and what is the difference?

QChatGPT has two extension mechanisms, `behavior plugins` and `content functions`. A behavior plugin is a complete plugin structure driven by events during runtime; a content function is contained within a complete plugin body and driven by the GPT interface.

> Still not clear? Try writing a plugin yourself by following the steps on the plugin development page

## Some good content-function plugins for QChatGPT

- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - lets the bot access the internet!!
@@ -1,7 +1,7 @@

QChatGPT Plugin Development Wiki

> Please read the [plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/5-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) first
> Please read the [technical information page](https://github.com/RockChinQ/QChatGPT/wiki/4-%E6%8A%80%E6%9C%AF%E4%BF%A1%E6%81%AF) first
> Reading this project's source code first to understand its architecture is recommended

> Please raise questions and feature requests as issues in the repository
@@ -113,6 +113,199 @@

- Multiple Python program files can be placed in one directory to separate a plugin's functions and make them easier for the developer to manage, but registering multiple plugins in one directory is not recommended
- Declare the dependencies a plugin needs in a `requirements.txt` in the plugin directory; the program installs them automatically when it fetches the plugin from a repository

## 🪝Content functions

`Content functions` are implemented through [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); they are functions embedded in the conversation and called automatically by GPT.

> Your plugin does not have to contain content functions; read the content function page first to understand this feature

<details>
<summary>Example: web access plugin</summary>

Load [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin), a plugin whose content function provides web access, then ask the bot about online content

```
# console output
[2023-07-29 17:37:18.698] message.py (26) - [INFO] : [person_1010553892]发送消息:介绍一下这个项目:https://git...
[2023-07-29 17:37:21.292] util.py (67) - [INFO] : message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=1902 request_id=941afc13b2e1bba1e7877b92a970cdea response_code=200
[2023-07-29 17:37:21.293] chat_completion.py (159) - [INFO] : 执行函数调用: name=Webwlkr-access_the_web, arguments={'url': 'https://github.com/RockChinQ/QChatGPT', 'brief_len': 512}
[2023-07-29 17:37:21.848] chat_completion.py (164) - [INFO] : 函数执行完成。
```



</details>

### Steps to write a content function

1️⃣ First write your plugin's basic structure following the steps above, then delete the `@on`-decorated class methods from the plugin above (you can also keep them; deleting is only for brevity)

<details>
<summary>Structure after deletion</summary>

```python
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost

"""
Reply "hello, <sender id>!" or "hello, everyone!" when the private-chat or group-chat message "hello" is received
"""


# register the plugin
@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
class HelloPlugin(Plugin):

    # triggered when the plugin is loaded
    # plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the main program; see its source for details
    def __init__(self, plugin_host: PluginHost):
        pass

    # triggered when the plugin is unloaded
    def __del__(self):
        pass
```

</details>

2️⃣ Now add the following function in the place of the methods we just deleted

```Python

    # the function to add

    @func(name="access_the_web")  # set the function name
    def _(url: str):
        """Call this function to search about the question before you answer any questions.
        - Do not search through baidu.com at any time.
        - If you need to search something, visit https://www.google.com/search?q=xxx.
        - If user ask you to open a url (start with http:// or https://), visit it directly.
        - Summarize the plain content result by yourself, DO NOT directly output anything in the result you got.

        Args:
            url(str): url to visit

        Returns:
            str: plain text content of the web page
        """
        import re
        import requests
        from bs4 import BeautifulSoup
        # install the dependency first with
        # pip install beautifulsoup4

        r = requests.get(
            url,
            timeout=10,
            headers={
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
            }
        )
        soup = BeautifulSoup(r.text, 'html.parser')

        s = soup.get_text()

        # remove redundant blank lines and lines containing only \t and spaces
        s = re.sub(r'\n\s*\n', '\n', s)

        if len(s) >= 512:  # keep only the first 512 characters of the page's plain text
            return s[:512]

        return s

```

<details>
<summary>The file should now look like this</summary>

```python
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost

"""
Reply "hello, <sender id>!" or "hello, everyone!" when the private-chat or group-chat message "hello" is received
"""


# register the plugin
@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
class HelloPlugin(Plugin):

    # triggered when the plugin is loaded
    # plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the main program; see its source for details
    def __init__(self, plugin_host: PluginHost):
        pass

    @func(name="access_the_web")
    def _(url: str):
        """Call this function to search about the question before you answer any questions.
        - Do not search through baidu.com at any time.
        - If you need to search something, visit https://www.google.com/search?q=xxx.
        - If user ask you to open a url (start with http:// or https://), visit it directly.
        - Summarize the plain content result by yourself, DO NOT directly output anything in the result you got.

        Args:
            url(str): url to visit

        Returns:
            str: plain text content of the web page
        """
        import re
        import requests
        from bs4 import BeautifulSoup
        # install the dependency first with
        # pip install beautifulsoup4

        r = requests.get(
            url,
            timeout=10,
            headers={
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
            }
        )
        soup = BeautifulSoup(r.text, 'html.parser')

        s = soup.get_text()

        # remove redundant blank lines and lines containing only \t and spaces
        s = re.sub(r'\n\s*\n', '\n', s)

        if len(s) >= 512:  # keep only the first 512 characters of the page's plain text
            return s[:512]

        return s

    # triggered when the plugin is unloaded
    def __del__(self):
        pass
```

</details>

#### Please note:

- The function's docstring must strictly follow the required format; see [this document](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) for the exact format
- Content functions and `@on`-decorated behavior functions can coexist in the same plugin, and both are controlled by the plugin switch in `switch.json`
- Make sure the model you use supports function calling; you can change the model in `completion_api_params` in `config.py`, and `gpt-3.5-turbo-16k` is recommended

3️⃣ Your program now has web access. Restart it, then ask the bot about online content or send an article link and ask it to summarize it.

- This is only an example; for a plugin with more capable web access, see [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin)

## 🔒Version requirements

If your plugin requires a particular version of the main program, you can assert it with the following function; if the version does not match, it raises an error and interrupts the flow it is called from:

```python
require_ver("v2.5.1")  # requires at least v2.5.1
```

```python
require_ver("v2.5.1", "v2.6.0")  # requires at least v2.5.1 and at most v2.6.0
```

- This function was added in main program `v2.5.1`
- It is declared in the `pkg.plugin.models` module; the plugin example code already imports everything from that module at the top, so it can be used directly

## 📄API reference

### Notes
@@ -161,18 +354,18 @@ PersonNormalMessageReceived = "person_normal_message_received"

"""

PersonCommandSent = "person_command_sent"
"""Triggered when a private-chat command that should be handled is detected
    kwargs:
        launcher_type: str type of the initiating object (group/person)
        launcher_id: int ID of the initiating object (group number / QQ number)
        sender_id: int sender ID (QQ number)
        command: str the command
        params: list[str] parameter list
        text_message: str full command text
        is_admin: bool whether the sender is an administrator

    returns (optional):
        alter: str modified full command text
        reply: list list of reply message components; elements are message components supported by YiriMirai
"""
@@ -190,18 +383,18 @@ GroupNormalMessageReceived = "group_normal_message_received"

"""

GroupCommandSent = "group_command_sent"
"""Triggered when a group-chat command that should be handled is detected
    kwargs:
        launcher_type: str type of the initiating object (group/person)
        launcher_id: int ID of the initiating object (group number / QQ number)
        sender_id: int sender ID (QQ number)
        command: str the command
        params: list[str] parameter list
        text_message: str full command text
        is_admin: bool whether the sender is an administrator

    returns (optional):
        alter: str modified full command text
        reply: list list of reply message components; elements are message components supported by YiriMirai
"""
@@ -214,10 +407,11 @@ NormalMessageResponded = "normal_message_responded"

        session: pkg.openai.session.Session the session object
        prefix: str prefix of the reply text message
        response_text: str the response text
        finish_reason: str the reason the response finished

    returns (optional):
        prefix: str modified prefix of the reply text message
        reply: list list of replacement reply message components
"""

SessionFirstMessageReceived = "session_first_message_received"
@@ -257,6 +451,20 @@ KeySwitched = "key_switched"

        key_name: str name of the api-key that was switched to
        key_list: list[str] list of api-keys
"""

PromptPreProcessing = "prompt_pre_processing"  # added in v2.5.1
"""Triggered each round when the prompt is pre-processed before the API is called; this event does not support blocking the default behavior
    kwargs:
        session_name: str session name (<launcher_type>_<launcher_id>)
        default_prompt: list the scenario preset used by this session
        prompt: list the session's existing prompt content
        text_message: str the message text sent by the user

    returns (optional):
        default_prompt: list modified scenario preset content
        prompt: list modified prompt content
        text_message: str modified message text
"""
```

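As a rough illustration of consuming one of the events listed above, here is a sketch of a behavior plugin that hooks `PromptPreProcessing`. It assumes the `@on`-decorated handler pattern referenced earlier on this page (the deleted "hello" handlers) and the `EventContext.add_return` convention for the optional return values; check the current plugin host source before relying on the exact signature.

```python
# Sketch only: a behavior plugin reacting to PromptPreProcessing.
# The @on handler pattern and event.add_return are assumptions based on the
# "hello" example referenced above; verify against pkg.plugin.host.
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost


@register(name="PromptTagger", description="prepend a marker to each user message", version="0.1", author="example")
class PromptTaggerPlugin(Plugin):

    def __init__(self, plugin_host: PluginHost):
        pass

    @on(PromptPreProcessing)
    def _(self, event: EventContext, **kwargs):
        # kwargs carries session_name, default_prompt, prompt and text_message (see above)
        tagged = "[tagged] " + kwargs["text_message"]
        event.add_return("text_message", tagged)   # hand back the modified message text

    def __del__(self):
        pass
```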
### host: PluginHost in detail
res/wiki/9-go-cqhttp配置.md (new file, 70 lines)
@@ -0,0 +1,70 @@

# Configuring go-cqhttp for QQ login

> If you are upgrading from an older version in order to use go-cqhttp, modify `config.py` according to `config-template.py`: add the `msg_source_adapter` item and set it to `nakuru`, and add the `nakuru_config` field, configured as described there.

## Steps

1. Download the latest go-cqhttp executable from [go-cqhttp's releases](https://github.com/Mrs4s/go-cqhttp/releases/latest) (downloading the executable archive directly is recommended rather than the installer)
2. Extract and run it. On first run it asks which network protocols to enable; **enter `02` and press Enter. It must be `02`❗❗❗❗❗❗❗**

<h1> You must enter `02` here, understand? The `0` has to be entered. Look carefully at what is entered below. Do not just type `2`, fail to connect on startup, and then come asking in the group; anyone who asks will be kicked. </h1>

```
C:\Softwares\go-cqhttp.old> .\go-cqhttp.exe
未找到配置文件,正在为您生成配置文件中!
请选择你需要的通信方式:
> 0: HTTP通信
> 1: 云函数服务
> 2: 正向 Websocket 通信
> 3: 反向 Websocket 通信
请输入你需要的编号(0-9),可输入多个,同一编号也可输入多个(如: 233)
您的选择是:02
```

Once it reports that the `config.yml` file has been generated, close go-cqhttp.

3. Open the `config.yml` in the same directory as go-cqhttp

   1. Edit the account login information

      Only `uin` and `password` below need to be changed, to the QQ number and password of the bot account you want to log in.
      **If you leave them blank, a QR-code login will be requested at startup.**

```yaml
account: # account settings
  uin: 1233456 # QQ number
  password: '' # leave the password empty to log in by scanning a QR code
  encrypt: false # whether to enable password encryption
  status: 0 # online status, see https://docs.go-cqhttp.org/guide/config.html#在线状态
  relogin: # reconnection settings
    delay: 3 # delay before the first reconnect, in seconds
    interval: 3 # reconnect interval
    max-times: 0 # maximum reconnect attempts, 0 means unlimited
```

   2. Change the websocket port

      Further down in `config.yml`, find the following:

```yaml
  - ws:
      # listen address of the forward WS server
      address: 0.0.0.0:8080
      middlewares:
        <<: *default # use the default middlewares
```

      **Change `0.0.0.0:8080` to `0.0.0.0:6700`**, then save and close `config.yml`.

   3. If your server is on the public internet, setting an `access-token` is strongly recommended (optional)

```yaml
# default middleware anchor
default-middlewares: &default
  # access token, strongly recommended for servers on the public internet
  access-token: ''
```

   4. Configuration is complete; start go-cqhttp again

> If login fails after startup, try changing the protocol number in `device.json` according to [this document](https://docs.go-cqhttp.org/guide/config.html#%E8%AE%BE%E5%A4%87%E4%BF%A1%E6%81%AF).
@@ -8,6 +8,9 @@

- [Mirai](https://github.com/mamoe/mirai) high-performance QQ bot support library
- [YiriMirai](https://github.com/YiriMiraiProject/YiriMirai) a lightweight, loosely coupled Python SDK based on mirai-api-http
- [go-cqhttp](https://github.com/Mrs4s/go-cqhttp) a Golang implementation of cqhttp; lightweight and natively cross-platform
- [nakuru-project](https://github.com/Lxns-Network/nakuru-project) - a Python SDK designed for go-cqhttp's forward WebSocket, supporting conversion between plain CQ codes and message chains
- [nakuru-project-idk](https://github.com/idoknow/nakuru-project-idk) - a fork of nakuru-project maintained by idoknow
- [dulwich](https://github.com/jelmer/dulwich) Pure-Python Git implementation
- [OpenAI API](https://openai.com/api/) OpenAI API
@@ -1,44 +0,0 @@

QChatGPT Plugin Usage Wiki

## Introduction

Every `.py` program under the `plugins` directory is loaded; all modules except `__init__.py` support hot loading.

## Installation

### Clone from a repository (recommended)

While the bot is running, private-message it from the administrator account with `!plugin get <Git repository URL>` to fetch the source and install the plugin automatically; the program installs dependencies according to the repository's `requirements.txt`.

For example, to install the `hello_plugin` plugin:

```
!plugin get https://github.com/RockChinQ/hello_plugin
```

After installation, restart the program, or private-message the bot from the administrator account with `!reload` to hot-reload and pick up the plugin.

### Manual installation

Place the obtained plugin program under the `plugins` directory; see each plugin's documentation, or ask its developer, for usage details.

## Management

### The !plugin command

```
!plugin                        list all installed plugins
!plugin get <repository URL>   install a plugin from a Git repository (administrator only)
!plugin update                 update all plugins (administrator only; only plugins installed from a repository)
!plugin del <plugin name>      delete a plugin (administrator only)
!plugin on <plugin name>       enable a plugin (administrator only)
!plugin off <plugin name>      disable a plugin (administrator only)
```

### Controlling plugin execution order

Reorder the plugin names in the `order` field of `plugins/settings.json` to change the plugins' **initialization** and **event execution** order

### Enabling or disabling plugins

Plugins can be switched on and off without uninstalling them.

Edit the `switch.json` file under the `plugins` directory and set the relevant plugin's `enabled` field to `true`/`false` (on/off); then restart the program or hot-reload for the switch to take effect.
tests/bs_test/bs_test.py (new file, 42 lines)
@@ -0,0 +1,42 @@

# fetch the page given as argv[1] with a random User-Agent and print its plain text

import requests
from bs4 import BeautifulSoup
import os
import random
import sys


user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Version/14.1.2 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Version/14.1 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0'
]

r = requests.get(
    sys.argv[1],
    headers={
        "User-Agent": random.choice(user_agents)
    }
)
soup = BeautifulSoup(r.text, 'html.parser')
# print(soup.get_text())

raw = soup.get_text()

import re

# strip each line
# raw = '\n'.join([line.strip() for line in raw.split('\n')])

# # remove all blank lines or lines containing only whitespace
# raw = re.sub(r'\n\s*\n', '\n', raw)


print(raw)
tests/proxy_test/forward_proxy_test.py (new file, 24 lines)
@@ -0,0 +1,24 @@

# send a single chat completion through a local forward proxy (127.0.0.1:7890)

import os

import openai

client = openai.Client(
    api_key=os.environ["OPENAI_API_KEY"],
)

openai.proxies = {
    'http': 'http://127.0.0.1:7890',
    'https': 'http://127.0.0.1:7890',
}

resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {
            "role": "user",
            "content": "Hello, how are you?",
        }
    ]
)

print(resp)
tests/repo_regexp_test.py (new file, 7 lines)
@@ -0,0 +1,7 @@

import re

repo_url = "git@github.com:RockChinQ/WebwlkrPlugin.git"

# extract the "owner/repo" part from an HTTPS or SSH GitHub URL
repo = re.findall(r'(?:https?://github\.com/|git@github\.com:)([^/]+/[^/]+?)(?:\.git|/|$)', repo_url)

print(repo)  # ['RockChinQ/WebwlkrPlugin']
tests/ssh_client_test/ssh_client.py (new file, 57 lines)
@@ -0,0 +1,57 @@

import os
import sys
import paramiko
import time
import select


class sshClient:
    # create an ssh client connected to the server, ready to send commands
    def __init__(self, host, port, user, password):
        self.trans = paramiko.Transport((host, port))
        self.trans.start_client()
        self.trans.auth_password(username=user, password=password)
        self.channel = self.trans.open_session()
        self.channel.get_pty()
        self.channel.invoke_shell()

    # send a command to the server
    def sendCmd(self, cmd):
        self.channel.sendall(cmd)

    # the server may respond slowly, so wait with a timeout when receiving
    def recvResponse(self, timeout):
        data = b''
        while True:
            try:
                # keep reading with select until no more data arrives, then return on timeout
                readable, w, e = select.select([self.channel], [], [], timeout)
                if self.channel in readable:
                    data = self.channel.recv(1024)
                else:
                    sys.stdout.write(data.decode())
                    sys.stdout.flush()
                    return data.decode()
            except TimeoutError:
                sys.stdout.write(data.decode())
                sys.stdout.flush()
                return data.decode()

    # close the client
    def close(self):
        self.channel.close()
        self.trans.close()


host = 'host'
port = 22  # your port
user = 'root'
pwd = 'pass'

ssh = sshClient(host, port, user, pwd)
response = ssh.recvResponse(1)
response = ssh.sendCmd("ls\n")
ssh.sendCmd("cd /home\n")
response = ssh.recvResponse(1)
ssh.sendCmd("ls\n")
response = ssh.recvResponse(1)

ssh.close()
tests/token_test/tiktoken_test.py (new file, 124 lines)
@@ -0,0 +1,124 @@

import tiktoken
import openai
import json
import os


openai.api_key = os.getenv("OPENAI_API_KEY")


def encode(text: str, model: str):
    import tiktoken
    enc = tiktoken.get_encoding("cl100k_base")
    assert enc.decode(enc.encode("hello world")) == "hello world"

    # To get the tokeniser corresponding to a specific model in the OpenAI API:
    enc = tiktoken.encoding_for_model(model)

    return enc.encode(text)


# def ask(prompt: str, model: str = "gpt-3.5-turbo"):
#     # To get the tokeniser corresponding to a specific model in the OpenAI API:
#     enc = tiktoken.encoding_for_model(model)
#
#     resp = openai.ChatCompletion.create(
#         model=model,
#         messages=[
#             {
#                 "role": "user",
#                 "content": prompt
#             }
#         ]
#     )
#
#     return enc.encode(prompt), enc.encode(resp['choices'][0]['message']['content']), resp

def ask(
    messages: list,
    model: str = "gpt-3.5-turbo"
):
    enc = tiktoken.encoding_for_model(model)

    resp = openai.ChatCompletion.create(
        model=model,
        messages=messages
    )

    txt = ""

    for r in messages:
        txt += r['role'] + r['content'] + "\n"

    txt += "assistant: "

    return enc.encode(txt), enc.encode(resp['choices'][0]['message']['content']), resp


def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model in {
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4-0314",
        "gpt-4-32k-0314",
        "gpt-4-0613",
        "gpt-4-32k-0613",
    }:
        tokens_per_message = 3
        tokens_per_name = 1
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif "gpt-3.5-turbo" in model:
        print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
    elif "gpt-4" in model:
        print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
        return num_tokens_from_messages(messages, model="gpt-4-0613")
    else:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


messages = [
    {
        "role": "user",
        "content": "你叫什么名字?"
    }, {
        "role": "assistant",
        "content": "我是AI助手,没有具体的名字。你可以叫我GPT-3。有什么可以帮到你的吗?"
    }, {
        "role": "user",
        "content": "你是由谁开发的?"
    }, {
        "role": "assistant",
        "content": "我是由OpenAI开发的,一家人工智能研究实验室。OpenAI的使命是促进人工智能的发展,使其为全人类带来积极影响。我是由OpenAI团队使用GPT-3模型训练而成的。"
    }, {
        "role": "user",
        "content": "很高兴见到你。"
    }
]


pro, rep, resp = ask(messages)

print(len(pro), len(rep))
print(resp)
print(resp['choices'][0]['message']['content'])

print(num_tokens_from_messages(messages, model="gpt-3.5-turbo"))