Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-25 11:29:39 +08:00)
Compare commits
637 Commits
(commit list: af292fe050 … 204a778db2)

.github/ISSUE_TEMPLATE/bug-report.yml (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
name: 漏洞反馈
description: 报错或漏洞请使用这个模板创建,不使用此模板创建的异常、漏洞相关issue将被直接关闭
title: "[Bug]: "
labels: ["bug?"]
body:
  - type: dropdown
    attributes:
      label: 部署方式
      description: "主程序使用的部署方式"
      options:
        - 手动部署
        - 安装器部署
        - 一键安装包部署
        - Docker部署
    validations:
      required: true
  - type: dropdown
    attributes:
      label: 登录框架
      description: "连接QQ使用的框架"
      options:
        - Mirai
        - go-cqhttp
    validations:
      required: false
  - type: input
    attributes:
      label: 系统环境
      description: 操作系统、系统架构。
      placeholder: 例如: CentOS x64、Windows11
    validations:
      required: true
  - type: input
    attributes:
      label: Python环境
      description: 运行程序的Python版本
      placeholder: 例如: Python 3.10
    validations:
      required: true
  - type: textarea
    attributes:
      label: 异常情况
      description: 完整描述异常情况,什么时候发生的、发生了什么
    validations:
      required: true
  - type: textarea
    attributes:
      label: 报错信息
      description: 请提供完整的**控制台**报错信息(若有)
    validations:
      required: false

.github/ISSUE_TEMPLATE/feature-request.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
name: 需求建议
title: "[Feature]: "
labels: ["enhancement"]
description: "新功能或现有功能优化请使用这个模板;不符合类别的issue将被直接关闭"
body:
  - type: dropdown
    attributes:
      label: 这是一个?
      description: 新功能建议还是现有功能优化
      options:
        - 新功能
        - 现有功能优化
    validations:
      required: true
  - type: textarea
    attributes:
      label: 详细描述
      description: 详细描述,越详细越好
    validations:
      required: true

.github/ISSUE_TEMPLATE/漏洞反馈.md (vendored, 24 lines deleted)
@@ -1,24 +0,0 @@
---
name: 漏洞反馈
about: 报错或漏洞请使用这个模板创建
title: "[BUG]"
labels: 'bug'
assignees: ''

---

请认真按照实际情况填写以下信息!!!!

**运行环境**
- 部署方式:
手动部署/自动部署/Docker部署
- 系统环境:
例如: Centos x64
- Python环境(仅手动部署填写):
例如: Python 3.10.9

**描述漏洞**
什么时候发生的,mirai还是主程序,越详细越好

**完整报错信息**
完整的报错信息

.github/ISSUE_TEMPLATE/需求建议.md (vendored, 10 lines deleted)
@@ -1,10 +0,0 @@
---
name: 需求建议
about: 软件优化建议请使用这个模板创建
title: "[ENHANCE]"
labels: 'enhancement'
assignees: ''

---

不是需求建议请勿填写此模板!!!!

.github/pull_request_template.md (vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
## 概述

实现/解决/优化的内容:

### 事务

- [ ] 已阅读仓库[贡献指引](https://github.com/RockChinQ/QChatGPT/blob/master/CONTRIBUTING.md)
- [ ] 已与维护者在issues或其他平台沟通此PR大致内容

## 以下内容可在起草PR后、合并PR前逐步完成

### 功能

- [ ] 已编写完善的配置文件字段说明(若有新增)
- [ ] 已编写面向用户的新功能说明(若有必要)
- [ ] 已测试新功能或更改

### 兼容性

- [ ] 已处理版本兼容性
- [ ] 已处理插件兼容问题

### 风险

可能导致或已知的问题:

.github/workflows/sync-wiki.yml (vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
name: Update Wiki

on:
  pull_request:
    branches:
      - master
    paths:
      - 'res/wiki/**'
  push:
    branches:
      - master
    paths:
      - 'res/wiki/**'

jobs:
  update-wiki:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Setup Git
        run: |
          git config --global user.name "GitHub Actions"
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
      - name: Clone Wiki Repository
        uses: actions/checkout@v2
        with:
          repository: RockChinQ/QChatGPT.wiki
          path: wiki
      - name: Copy res/wiki content to wiki
        run: |
          cp -r res/wiki/* wiki/
      - name: Check for changes
        run: |
          cd wiki
          if git diff --quiet; then
            echo "No changes to commit."
            exit 0
          fi
      - name: Commit and Push Changes
        run: |
          cd wiki
          git add .
          git commit -m "Update wiki"
          git push

.github/workflows/update-cmdpriv-template.yml (vendored, new file, 58 lines)
@@ -0,0 +1,58 @@
name: Update cmdpriv-template

on:
  push:
    paths:
      - 'pkg/qqbot/cmds/**'
  pull_request:
    types: [closed]
    paths:
      - 'pkg/qqbot/cmds/**'

jobs:
  update-cmdpriv-template:
    if: github.event.pull_request.merged == true || github.event_name == 'push'
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.x

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow CallingGPT tiktoken

      - name: Copy Scripts
        run: |
          cp res/scripts/generate_cmdpriv_template.py .

      - name: Generate Files
        run: |
          python main.py

      - name: Run generate_cmdpriv_template.py
        run: python3 generate_cmdpriv_template.py

      - name: Check for changes in cmdpriv-template.json
        id: check_changes
        run: |
          if git diff --name-only | grep -q "res/templates/cmdpriv-template.json"; then
            echo "::set-output name=changes_detected::true"
          else
            echo "::set-output name=changes_detected::false"
          fi

      - name: Commit changes to cmdpriv-template.json
        if: steps.check_changes.outputs.changes_detected == 'true'
        run: |
          git config --global user.name "GitHub Actions Bot"
          git config --global user.email "<github-actions@github.com>"
          git add res/templates/cmdpriv-template.json
          git commit -m "Update cmdpriv-template.json"
          git push

.github/workflows/update-override-all.yml (vendored, new file, 53 lines)
@@ -0,0 +1,53 @@
name: Check and Update override_all

on:
  push:
    paths:
      - 'config-template.py'
  pull_request:
    types:
      - closed
    branches:
      - master
    paths:
      - 'config-template.py'

jobs:
  update-override-all:
    name: check and update
    if: github.event.pull_request.merged == true || github.event_name == 'push'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.x

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          # 在此处添加您的项目所需的其他依赖

      - name: Copy Scripts
        run: |
          cp res/scripts/generate_override_all.py .

      - name: Run generate_override_all.py
        run: python3 generate_override_all.py

      - name: Check for changes in override-all.json
        id: check_changes
        run: |
          git diff --exit-code override-all.json || echo "::set-output name=changes_detected::true"

      - name: Commit and push changes
        if: steps.check_changes.outputs.changes_detected == 'true'
        run: |
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git config --global user.name "GitHub Actions"
          git add override-all.json
          git commit -m "Update override-all.json"
          git push
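
The workflow above regenerates override-all.json whenever config-template.py changes, by running res/scripts/generate_override_all.py — a script that is not part of this diff. Purely as an illustration of what such a generator could do (an assumption, not the repository's actual script), one way to dump every public configuration field of config-template.py to a JSON template is:

```python
# Hypothetical sketch of an override-template generator; the real
# res/scripts/generate_override_all.py may differ from this.
import importlib
import json


def generate_override_template(module_name: str = "config-template",
                               out_path: str = "override-all.json") -> None:
    # Load the template module the same way main.py does
    template = importlib.import_module(module_name)
    fields = {
        key: getattr(template, key)
        for key in dir(template)
        if not key.startswith("__")
    }
    # default=str keeps non-JSON values (e.g. the imported logging module) serializable
    with open(out_path, "w", encoding="utf-8") as f:
        json.dump(fields, f, ensure_ascii=False, indent=4, default=str)


if __name__ == "__main__":
    generate_override_template()
```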

.gitignore (vendored, 22 changed lines)
@@ -3,10 +3,28 @@ config.py
__pycache__/
database.db
qchatgpt.log
config.py
/banlist.py
plugins/
!plugins/__init__.py
/revcfg.py
prompts/
logs/
logs/
sensitive.json
temp/
current_tag
scenario/
!scenario/default-template.json
override.json
cookies.json
res/announcement_saved
res/announcement_saved.json
cmdpriv.json
tips.py
.venv
bin/
.vscode
test_*
venv/
hugchat.json
qcapi
/*.yaml

CONTRIBUTING.md (new file, 19 lines)
@@ -0,0 +1,19 @@
## 参与项目

欢迎为此项目贡献代码或其他支持,以使您的点子或众人期待的功能成为现实,助力社区成长。

### 贡献形式

- 提交PR,解决issues中提到的bug或期待的功能
- 提交PR,实现您设想的功能(请先提出issue与作者沟通)
- 优化代码架构,使各个模块的组织更加整洁优雅
- 在issues中提出发现的bug或者期待的功能
- 为本项目在其他社交平台撰写文章、制作视频等
- 为本项目的衍生项目作出贡献,或开发插件增加功能

### 如何开始

- 加入本项目交流群,一同探讨项目相关事务
- 解决本项目或衍生项目的issues中亟待解决的问题
- 阅读并完善本项目文档
- 在各个社交媒体撰写本项目教程等

Dockerfile (new file, 17 lines)
@@ -0,0 +1,17 @@
FROM python:3.9-slim
WORKDIR /QChatGPT

RUN sed -i "s/deb.debian.org/mirrors.tencent.com/g" /etc/apt/sources.list \
    && sed -i 's|security.debian.org/debian-security|mirrors.tencent.com/debian-security|g' /etc/apt/sources.list \
    && apt-get clean \
    && apt-get update \
    && apt-get -y upgrade \
    && apt-get install -y git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

COPY . /QChatGPT/

RUN pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple

CMD [ "python", "main.py" ]
247
README.md
247
README.md
@@ -1,25 +1,81 @@
|
||||
# QChatGPT🤖
|
||||
> 2023/3/3 官方接口疑似被墙,请自行测试,或等待近期敏感时期结束。
|
||||
> 2023/3/3 现已在主线支持官方ChatGPT接口,使用方法查看[#195](https://github.com/RockChinQ/QChatGPT/issues/195)
|
||||
> 2023/3/2 OpenAI已发布ChatGPT官方接口,我们正在全力接入,预计明日前完成,请查看[此PR](https://github.com/RockChinQ/QChatGPT/pull/194)
|
||||
> 2023/2/16 现已支持接入ChatGPT网页版,详情请完成部署并查看底部**插件**小节或[此仓库](https://github.com/RockChinQ/revLibs)
|
||||
|
||||
- 到[项目Wiki](https://github.com/RockChinQ/QChatGPT/wiki)可了解项目详细信息
|
||||
- 由bilibili TheLazy制作的[视频教程](https://www.bilibili.com/video/BV15v4y1X7aP)
|
||||
- 交流、答疑群: ~~204785790~~(已满)、691226829、656285629
|
||||
- **进群提问前请您`确保`已经找遍文档和issue均无法解决**
|
||||
- **进群提问前请您`确保`已经找遍文档和issue均无法解决**
|
||||
- **进群提问前请您`确保`已经找遍文档和issue均无法解决**
|
||||
- QQ频道机器人见[QQChannelChatGPT](https://github.com/Soulter/QQChannelChatGPT)
|
||||
<p align="center">
|
||||
<img src="res/logo.png" alt="QChatGPT" width="120" />
|
||||
</p>
|
||||
|
||||
通过调用OpenAI的ChatGPT等语言模型来实现一个更加智能的QQ机器人
|
||||
<div align="center">
|
||||
|
||||
# QChatGPT
|
||||
|
||||
高稳定性/持续迭代/架构清晰/支持插件/高可自定义的 ChatGPT QQ机器人框架
|
||||
|
||||
[English](README_en.md) | 简体中文
|
||||
|
||||
[](https://github.com/RockChinQ/QChatGPT/releases/latest)
|
||||

|
||||
<img src="https://img.shields.io/badge/python-3.9+-blue.svg" alt="python">
|
||||
<a href="https://github.com/RockChinQ/QChatGPT/wiki">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/%E6%9F%A5%E7%9C%8B-%E9%A1%B9%E7%9B%AEWiki-blue">
|
||||
</a>
|
||||
<a href="https://www.bilibili.com/video/BV1Y14y1Q7kQ">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/%E8%A7%86%E9%A2%91%E6%95%99%E7%A8%8B-208647">
|
||||
</a>
|
||||
<a href="https://www.bilibili.com/video/BV11h4y1y74H">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Linux%E9%83%A8%E7%BD%B2%E8%A7%86%E9%A2%91-208647">
|
||||
</a>
|
||||
|
||||
</div>
|
||||
|
||||
> 2023/7/29 支持使用GPT的Function Calling功能实现类似ChatGPT Plugin的效果,请见[Wiki内容函数](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)
|
||||
|
||||
<details>
|
||||
<summary>
|
||||
|
||||
## 🍺模型适配一览
|
||||
|
||||
</summary>
|
||||
|
||||
### 文字对话
|
||||
|
||||
- OpenAI GPT-3.5模型(ChatGPT API), 本项目原生支持, 默认使用
|
||||
- OpenAI GPT-3模型, 本项目原生支持, 部署完成后前往`config.py`切换
|
||||
- OpenAI GPT-4模型, 本项目原生支持, 目前需要您的账户通过OpenAI的内测申请, 请前往`config.py`切换
|
||||
- ChatGPT网页版GPT-3.5模型, 由[插件](https://github.com/RockChinQ/revLibs)接入
|
||||
- ChatGPT网页版GPT-4模型, 目前需要ChatGPT Plus订阅, 由[插件](https://github.com/RockChinQ/revLibs)接入
|
||||
- New Bing逆向库, 由[插件](https://github.com/RockChinQ/revLibs)接入
|
||||
- HuggingChat, 由[插件](https://github.com/RockChinQ/revLibs)接入, 仅支持英文
|
||||
|
||||
### 故事续写
|
||||
|
||||
- NovelAI API, 由[插件](https://github.com/dominoar/QCPNovelAi)接入
|
||||
|
||||
### 图片绘制
|
||||
|
||||
- OpenAI DALL·E模型, 本项目原生支持, 使用方法查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
|
||||
- NovelAI API, 由[插件](https://github.com/dominoar/QCPNovelAi)接入
|
||||
|
||||
### 语音生成
|
||||
|
||||
- TTS+VITS, 由[插件](https://github.com/dominoar/QChatPlugins)接入
|
||||
- Plachta/VITS-Umamusume-voice-synthesizer, 由[插件](https://github.com/oliverkirk-sudo/chat_voice)接入
|
||||
|
||||
|
||||
安装[此插件](https://github.com/RockChinQ/Switcher),即可在使用中切换文字模型。
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>
|
||||
|
||||
## ✅功能
|
||||
|
||||
</summary>
|
||||
|
||||
<details>
|
||||
<summary>✅支持敏感词过滤,避免账号风险</summary>
|
||||
|
||||
- 难以监测机器人与用户对话时的内容,故引入此功能以减少机器人风险
|
||||
- 加入了百度云内容审核,在`config.py`中修改`baidu_check`的值,并填写`baidu_api_key`和`baidu_secret_key`以开启此功能
|
||||
- 编辑`sensitive.json`,并在`config.py`中修改`sensitive_word_filter`的值以开启此功能
|
||||
</details>
|
||||
|
||||
@@ -63,6 +119,7 @@
|
||||
<summary>✅支持插件加载🧩</summary>
|
||||
|
||||
- 自行实现插件加载器及相关支持
|
||||
- 支持GPT的Function Calling功能
|
||||
- 详细查看[插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
|
||||
</details>
|
||||
<details>
|
||||
@@ -72,6 +129,12 @@
|
||||
- 详见Wiki`加入黑名单`节
|
||||
</details>
|
||||
<details>
|
||||
<summary>✅长消息处理策略</summary>
|
||||
|
||||
- 支持将长消息转换成图片或消息记录组件,避免消息刷屏
|
||||
- 请查看`config.py`中`blob_message_strategy`等字段
|
||||
</details>
|
||||
<details>
|
||||
<summary>✅回复速度限制</summary>
|
||||
|
||||
- 支持限制单会话内每分钟可进行的对话次数
|
||||
@@ -80,24 +143,63 @@
|
||||
- “丢弃”策略:此分钟内对话次数达到限制时,丢弃之后的对话
|
||||
- 详细请查看config.py中的相关配置
|
||||
</details>
|
||||
<details>
|
||||
<summary>✅支持使用网络代理</summary>
|
||||
|
||||
- 目前已支持正向代理访问接口
|
||||
- 详细请查看config.py中的`openai_config`的说明
|
||||
</details>
|
||||
<details>
|
||||
<summary>✅支持自定义提示内容</summary>
|
||||
|
||||
- 允许用户自定义报错、帮助等提示信息
|
||||
- 请查看`tips.py`
|
||||
</details>
|
||||
|
||||
### 🏞️截图
|
||||
|
||||
<img alt="私聊GPT-3.5" src="res/screenshots/person_gpt3.5.png" width="400"/>
|
||||
<br/>
|
||||
<img alt="群聊GPT-3.5" src="res/screenshots/group_gpt3.5.png" width="400"/>
|
||||
<br/>
|
||||
<img alt="New Bing" src="res/screenshots/person_newbing.png" width="400"/>
|
||||
|
||||
详情请查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>
|
||||
|
||||
## 🔩部署
|
||||
|
||||
**部署过程中遇到任何问题,请先在[QChatGPT](https://github.com/RockChinQ/QChatGPT/issues)或[qcg-installer](https://github.com/RockChinQ/qcg-installer/issues)的issue里进行搜索**
|
||||
</summary>
|
||||
|
||||
**部署过程中遇到任何问题,请先在[QChatGPT](https://github.com/RockChinQ/QChatGPT/issues)或[qcg-installer](https://github.com/RockChinQ/qcg-installer/issues)的issue里进行搜索**
|
||||
**QChatGPT需要Python版本>=3.9**
|
||||
|
||||
- 官方交流、答疑群: 656285629
|
||||
- **进群提问前请您`确保`已经找遍文档和issue均无法解决**
|
||||
- 社区群(内有一键部署包、图形化界面等资源): 891448839
|
||||
|
||||
### - 注册OpenAI账号
|
||||
|
||||
**可以直接进群找群主购买**
|
||||
或参考以下文章自行注册
|
||||
<details>
|
||||
<summary>点此查看步骤</summary>
|
||||
|
||||
> ~~[只需 1 元搞定 ChatGPT 注册](https://zhuanlan.zhihu.com/p/589470082)~~(已失效)
|
||||
> 若您要直接使用非OpenAI的模型(如New Bing),可跳过此步骤,直接进行之后的部署,完成后按照相关插件的文档进行配置即可
|
||||
|
||||
参考以下文章自行注册
|
||||
|
||||
> [国内注册ChatGPT的方法(100%可用)](https://www.pythonthree.com/register-openai-chatgpt/)
|
||||
> [手把手教你如何注册ChatGPT,超级详细](https://guxiaobei.com/51461)
|
||||
|
||||
注册成功后请前往[个人中心查看](https://beta.openai.com/account/api-keys)api_key
|
||||
完成注册后,使用以下自动化或手动部署步骤
|
||||
|
||||
</details>
|
||||
|
||||
### - 自动化部署
|
||||
|
||||
<details>
|
||||
@@ -105,9 +207,13 @@
|
||||
|
||||
#### Docker方式
|
||||
|
||||
请查看此仓库[mikumifa/QChatGPT-Docker-Installer](https://github.com/mikumifa/QChatGPT-Docker-Installer)
|
||||
> docker方式目前仅支持使用mirai登录,若您不**熟悉**docker的操作及相关知识,强烈建议您使用其他方式部署,我们**不会且难以**解决您主机上多个容器的连接问题。
|
||||
|
||||
请查看[此文档](res/docs/docker_deploy.md)
|
||||
由[@mikumifa](https://github.com/mikumifa)贡献
|
||||
|
||||
#### 安装器方式
|
||||
|
||||
使用[此安装器](https://github.com/RockChinQ/qcg-installer)(若无法访问请到[Gitee](https://gitee.com/RockChin/qcg-installer))进行部署
|
||||
|
||||
- 安装器目前仅支持部分平台,请到仓库文档查看,其他平台请手动部署
|
||||
@@ -118,17 +224,31 @@
|
||||
<details>
|
||||
<summary>手动部署适用于所有平台</summary>
|
||||
|
||||
- 请使用Python 3.9.x以上版本
|
||||
- 请注意OpenAI账号额度消耗
|
||||
- 每个账户仅有18美元免费额度,如未绑定银行卡,则会在超出时报错
|
||||
- OpenAI收费标准:默认使用的`text-davinci-003`模型 0.02美元/千字
|
||||
- 请使用Python 3.9.x以上版本
|
||||
|
||||
#### 配置Mirai
|
||||
#### ① 配置QQ登录框架
|
||||
|
||||
按照[此教程](https://yiri-mirai.wybxc.cc/tutorials/01/configuration)配置Mirai及YiriMirai
|
||||
启动mirai-console后,使用`login`命令登录QQ账号,保持mirai-console运行状态
|
||||
目前支持mirai和go-cqhttp,配置任意一个即可
|
||||
|
||||
#### 配置主程序
|
||||
<details>
|
||||
<summary>mirai</summary>
|
||||
|
||||
1. 按照[此教程](https://yiri-mirai.wybxc.cc/tutorials/01/configuration)配置Mirai及mirai-api-http
|
||||
2. 启动mirai-console后,使用`login`命令登录QQ账号,保持mirai-console运行状态
|
||||
3. 在下一步配置主程序时请在config.py中将`msg_source_adapter`设为`yirimirai`
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>go-cqhttp</summary>
|
||||
|
||||
1. 按照[此文档](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE)配置go-cqhttp
|
||||
2. 启动go-cqhttp,确保登录成功,保持运行
|
||||
3. 在下一步配置主程序时请在config.py中将`msg_source_adapter`设为`nakuru`
|
||||
|
||||
</details>
|
||||
|
||||
#### ② 配置主程序
|
||||
|
||||
1. 克隆此项目
|
||||
|
||||
@@ -140,8 +260,7 @@ cd QChatGPT
|
||||
2. 安装依赖
|
||||
|
||||
```bash
|
||||
pip3 install yiri-mirai openai colorlog func_timeout
|
||||
pip3 install dulwich
|
||||
pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk CallingGPT tiktoken
|
||||
```
|
||||
|
||||
3. 运行一次主程序,生成配置文件
|
||||
@@ -164,49 +283,85 @@ python3 main.py
|
||||
|
||||
**常见问题**
|
||||
|
||||
- mirai登录提示`QQ版本过低`,见[此issue](https://github.com/RockChinQ/QChatGPT/issues/38)
|
||||
- mirai登录提示`QQ版本过低`,见[此issue](https://github.com/RockChinQ/QChatGPT/issues/137)
|
||||
- 如提示安装`uvicorn`或`hypercorn`请*不要*安装,这两个不是必需的,目前存在未知原因bug
|
||||
- 如报错`TypeError: As of 3.10, the *loop* parameter was removed from Lock() since it is no longer necessary`, 请参考 [此处](https://github.com/RockChinQ/QChatGPT/issues/5)
|
||||
|
||||
</details>
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>
|
||||
|
||||
## 🚀使用
|
||||
|
||||
查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E4%BD%BF%E7%94%A8%E6%96%B9%E5%BC%8F)
|
||||
</summary>
|
||||
|
||||
**部署完成后必看: [指令说明](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**
|
||||
|
||||
所有功能查看[Wiki功能使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E4%BD%BF%E7%94%A8%E6%96%B9%E5%BC%8F)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>
|
||||
|
||||
## 🧩插件生态
|
||||
|
||||
现已支持自行开发插件对功能进行扩展或自定义程序行为
|
||||
详见[Wiki插件使用页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
|
||||
开发教程见[Wiki插件开发页](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)
|
||||
</summary>
|
||||
|
||||
### 示例插件
|
||||
⭐我们已经支持了[GPT的Function Calling能力](https://platform.openai.com/docs/guides/gpt/function-calling),请查看[Wiki内容函数](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)
|
||||
|
||||
在`tests/plugin_examples`目录下,将其整个目录复制到`plugins`目录下即可使用
|
||||
> 使用方法见:[Wiki插件使用](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
|
||||
> 开发教程见:[Wiki插件开发](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)
|
||||
|
||||
- `cmdcn` - 主程序指令中文形式
|
||||
- `hello_plugin` - 在收到消息`hello`时回复相应消息
|
||||
- `urlikethisijustsix` - 收到冒犯性消息时回复相应消息
|
||||
|
||||
### 更多
|
||||
[所有插件列表](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6),欢迎提出issue以提交新的插件
|
||||
|
||||
欢迎提交新的插件
|
||||
### 部分插件
|
||||
|
||||
- [revLibs](https://github.com/RockChinQ/revLibs) - 将ChatGPT网页版接入此项目,关于[官方接口和网页版有什么区别](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E4%B8%8EChatGPT%E7%BD%91%E9%A1%B5%E7%89%88)
|
||||
- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - 让机器人能联网!!
|
||||
- [revLibs](https://github.com/RockChinQ/revLibs) - 将ChatGPT网页版接入此项目,关于[官方接口和网页版有什么区别](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
|
||||
- [Switcher](https://github.com/RockChinQ/Switcher) - 支持通过指令切换使用的模型
|
||||
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - `hello_plugin` 的储存库形式,插件开发模板
|
||||
- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - dominoar编写的诸多新功能插件(语言输出、Ranimg、屏蔽词规则等)
|
||||
- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - dominoar编写的诸多新功能插件(语音输出、Ranimg、屏蔽词规则等)
|
||||
- [dominoar/QCP-NovelAi](https://github.com/dominoar/QCP-NovelAi) - NovelAI 故事叙述与绘画
|
||||
- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - 文字转语音输出,使用HuggingFace上的[VITS-Umamusume-voice-synthesizer模型](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer)
|
||||
- [RockChinQ/WaitYiYan](https://github.com/RockChinQ/WaitYiYan) - 实时获取百度`文心一言`等待列表人数
|
||||
- [chordfish-k/QChartGPT_Emoticon_Plugin](https://github.com/chordfish-k/QChartGPT_Emoticon_Plugin) - 使机器人根据回复内容发送表情包
|
||||
- [oliverkirk-sudo/ChatPoeBot](https://github.com/oliverkirk-sudo/ChatPoeBot) - 接入[Poe](https://poe.com/)上的机器人
|
||||
- [lieyanqzu/WeatherPlugin](https://github.com/lieyanqzu/WeatherPlugin) - 天气查询插件
|
||||
- [SysStatPlugin](https://github.com/RockChinQ/SysStatPlugin) - 查看系统状态
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>
|
||||
|
||||
## 😘致谢
|
||||
|
||||
- [@the-lazy-me](https://github.com/the-lazy-me) 为本项目制作[视频教程](https://www.bilibili.com/video/BV15v4y1X7aP)
|
||||
</summary>
|
||||
|
||||
- [@the-lazy-me](https://github.com/the-lazy-me) 为本项目制作[视频教程](https://www.bilibili.com/video/BV1Y14y1Q7kQ)
|
||||
- [@mikumifa](https://github.com/mikumifa) 本项目Docker部署仓库开发者
|
||||
- [@dominoar](https://github.com/dominoar) 为本项目开发多种插件
|
||||
- [@hissincn](https://github.com/hissincn) 本项目贡献者
|
||||
- [@LINSTCL](https://github.com/LINSTCL) GPT-3.5官方模型适配贡献者
|
||||
- [@万神的星空](https://github.com/qq255204159) 整合包发行
|
||||
- [@ljcduo](https://github.com/ljcduo) GPT-4 API内测账号提供
|
||||
|
||||
以及其他所有为本项目提供支持的朋友们。
|
||||
以及所有[贡献者](https://github.com/RockChinQ/QChatGPT/graphs/contributors)和其他为本项目提供支持的朋友们。
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
|
||||
<summary>
|
||||
|
||||
## 👍赞赏
|
||||
|
||||
</summary>
|
||||
|
||||
<img alt="赞赏码" src="res/mm_reward_qrcode_1672840549070.png" width="400" height="400"/>
|
||||
|
||||
</details>
|
||||
|
||||
215
README_en.md
Normal file
215
README_en.md
Normal file
@@ -0,0 +1,215 @@
|
||||
# QChatGPT🤖
|
||||
|
||||
<p align="center">
|
||||
<img src="res/social.png" alt="QChatGPT" width="640" />
|
||||
</p>
|
||||
|
||||
English | [简体中文](README.md)
|
||||
|
||||
[](https://github.com/RockChinQ/QChatGPT/releases/latest)
|
||||

|
||||
|
||||
- Refer to [Wiki](https://github.com/RockChinQ/QChatGPT/wiki) to get further information.
|
||||
- Official QQ group: 656285629
|
||||
- Community QQ group: 362515018
|
||||
- QQ channel robot: [QQChannelChatGPT](https://github.com/Soulter/QQChannelChatGPT)
|
||||
- Any contribution is welcome, please refer to [CONTRIBUTING.md](CONTRIBUTING.md)
|
||||
|
||||
## 🍺List of supported models
|
||||
|
||||
<details>
|
||||
<summary>Details</summary>
|
||||
|
||||
### Chat
|
||||
|
||||
- OpenAI GPT-3.5 (ChatGPT API), default model
|
||||
- OpenAI GPT-3, supported natively, switch to it in `config.py`
|
||||
- OpenAI GPT-4, supported natively, qualification for internal testing required, switch to it in `config.py`
|
||||
- ChatGPT website edition (GPT-3.5), see [revLibs plugin](https://github.com/RockChinQ/revLibs)
|
||||
- ChatGPT website edition (GPT-4), ChatGPT plus subscription required, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
|
||||
- New Bing, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
|
||||
- HuggingChat, see [revLibs plugin](https://github.com/RockChinQ/revLibs), English only
|
||||
|
||||
### Story
|
||||
|
||||
- NovelAI API, see [QCPNovelAi plugin](https://github.com/dominoar/QCPNovelAi)
|
||||
|
||||
### Image
|
||||
|
||||
- OpenAI DALL·E, supported natively, see [Wiki(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
|
||||
- NovelAI API, see [QCPNovelAi plugin](https://github.com/dominoar/QCPNovelAi)
|
||||
|
||||
### Voice
|
||||
|
||||
- TTS+VITS, see [QChatPlugins](https://github.com/dominoar/QChatPlugins)
|
||||
- Plachta/VITS-Umamusume-voice-synthesizer, see [chat_voice plugin](https://github.com/oliverkirk-sudo/chat_voice)
|
||||
|
||||
|
||||
</details>
|
||||
|
||||
Install this [plugin](https://github.com/RockChinQ/Switcher) to switch between different models.
|
||||
|
||||
## ✅Function Points
|
||||
|
||||
<details>
|
||||
<summary>Details</summary>
|
||||
|
||||
- ✅Sensitive word filtering, avoid being banned
|
||||
- ✅Multiple responding rules, including regular expression matching
|
||||
- ✅Multiple api-key management, automatic switching when exceeding
|
||||
- ✅Support for customizing the preset prompt text
|
||||
- ✅Chat, story, image, voice, etc. models are supported
|
||||
- ✅Support for hot reloading and hot updating
|
||||
- ✅Support for plugin loading
|
||||
- ✅Blacklist mechanism for private chat and group chat
|
||||
- ✅Excellent long message processing strategy
|
||||
- ✅Reply rate limitation
|
||||
- ✅Support for network proxy
|
||||
- ✅Support for customizing the output format
|
||||
</details>
|
||||
|
||||
More details, see [Wiki(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
|
||||
|
||||
## 🔩Deployment
|
||||
|
||||
**If you encounter any problems during deployment, please search in the issue of [QChatGPT](https://github.com/RockChinQ/QChatGPT/issues) or [qcg-installer](https://github.com/RockChinQ/qcg-installer/issues) first.**
|
||||
|
||||
### - Register OpenAI account
|
||||
|
||||
> If you want to use a model other than OpenAI (such as New Bing), you can skip this step and directly refer to following steps, and then configure it according to the relevant plugin documentation.
|
||||
|
||||
To register OpenAI account, please refer to the following articles(in Chinese):
|
||||
|
||||
> [国内注册ChatGPT的方法(100%可用)](https://www.pythonthree.com/register-openai-chatgpt/)
|
||||
> [手把手教你如何注册ChatGPT,超级详细](https://guxiaobei.com/51461)
|
||||
|
||||
Check your api-key in [personal center](https://beta.openai.com/account/api-keys) after registration, and then follow the following steps to deploy.
|
||||
|
||||
### - Deploy Automatically
|
||||
|
||||
<details>
|
||||
<summary>Details</summary>
|
||||
|
||||
#### Docker
|
||||
|
||||
See [this document(cn)](res/docs/docker_deploy.md)
|
||||
Contributed by [@mikumifa](https://github.com/mikumifa)
|
||||
|
||||
#### Installer
|
||||
|
||||
Use [this installer](https://github.com/RockChinQ/qcg-installer) to deploy.
|
||||
|
||||
- The installer currently only supports some platforms, please refer to the repository document for details, and manually deploy for other platforms
|
||||
|
||||
</details>
|
||||
|
||||
### - Deploy Manually
|
||||
<details>
|
||||
<summary>Manually deployment supports any platforms</summary>
|
||||
|
||||
- Python 3.9.x or higher
|
||||
|
||||
#### 配置QQ登录框架
|
||||
|
||||
Currently supports mirai and go-cqhttp, configure either one
|
||||
|
||||
<details>
|
||||
<summary>mirai</summary>
|
||||
|
||||
Follow [this tutorial(cn)](https://yiri-mirai.wybxc.cc/tutorials/01/configuration) to configure Mirai and YiriMirai.
|
||||
After starting mirai-console, use the `login` command to log in to the QQ account, and keep the mirai-console running.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>go-cqhttp</summary>
|
||||
|
||||
1. Follow [this tutorial(cn)](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE) to configure go-cqhttp.
|
||||
2. Start go-cqhttp, make sure it is logged in and running.
|
||||
|
||||
</details>
|
||||
|
||||
#### Configure QChatGPT
|
||||
|
||||
1. Clone the repository
|
||||
|
||||
```bash
|
||||
git clone https://github.com/RockChinQ/QChatGPT
|
||||
cd QChatGPT
|
||||
```
|
||||
|
||||
2. Install dependencies
|
||||
|
||||
```bash
|
||||
pip3 install requests yiri-mirai openai colorlog func_timeout dulwich Pillow nakuru-project-idk
|
||||
```
|
||||
|
||||
3. Generate `config.py`
|
||||
|
||||
```bash
|
||||
python3 main.py
|
||||
```
|
||||
|
||||
4. Edit `config.py`
|
||||
|
||||
5. Run
|
||||
|
||||
```bash
|
||||
python3 main.py
|
||||
```
|
||||
|
||||
Any problems, please refer to the issues page.
|
||||
|
||||
</details>
|
||||
|
||||
## 🚀Usage
|
||||
|
||||
**After deployment, please read: [Commands(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**
|
||||
|
||||
**For more details, please refer to the [Wiki(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E4%BD%BF%E7%94%A8%E6%96%B9%E5%BC%8F)**
|
||||
|
||||
|
||||
## 🧩Plugin Ecosystem
|
||||
|
||||
Plugin [usage](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) and [development](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91) are supported.
|
||||
|
||||
<details>
|
||||
<summary>List of plugins (cn)</summary>
|
||||
|
||||
### Examples
|
||||
|
||||
在`tests/plugin_examples`目录下,将其整个目录复制到`plugins`目录下即可使用
|
||||
|
||||
- `cmdcn` - 主程序指令中文形式
|
||||
- `hello_plugin` - 在收到消息`hello`时回复相应消息
|
||||
- `urlikethisijustsix` - 收到冒犯性消息时回复相应消息
|
||||
|
||||
### More Plugins
|
||||
|
||||
欢迎提交新的插件
|
||||
|
||||
- [revLibs](https://github.com/RockChinQ/revLibs) - 将ChatGPT网页版接入此项目,关于[官方接口和网页版有什么区别](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E4%B8%8EChatGPT%E7%BD%91%E9%A1%B5%E7%89%88)
|
||||
- [Switcher](https://github.com/RockChinQ/Switcher) - 支持通过指令切换使用的模型
|
||||
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - `hello_plugin` 的储存库形式,插件开发模板
|
||||
- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - dominoar编写的诸多新功能插件(语音输出、Ranimg、屏蔽词规则等)
|
||||
- [dominoar/QCP-NovelAi](https://github.com/dominoar/QCP-NovelAi) - NovelAI 故事叙述与绘画
|
||||
- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - 文字转语音输出,使用HuggingFace上的[VITS-Umamusume-voice-synthesizer模型](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer)
|
||||
- [RockChinQ/WaitYiYan](https://github.com/RockChinQ/WaitYiYan) - 实时获取百度`文心一言`等待列表人数
|
||||
- [chordfish-k/QChartGPT_Emoticon_Plugin](https://github.com/chordfish-k/QChartGPT_Emoticon_Plugin) - 使机器人根据回复内容发送表情包
|
||||
- [oliverkirk-sudo/ChatPoeBot](https://github.com/oliverkirk-sudo/ChatPoeBot) - 接入[Poe](https://poe.com/)上的机器人
|
||||
- [lieyanqzu/WeatherPlugin](https://github.com/lieyanqzu/WeatherPlugin) - 天气查询插件
|
||||
</details>
|
||||
|
||||
## 😘Thanks
|
||||
|
||||
- [@the-lazy-me](https://github.com/the-lazy-me) video tutorial creator
|
||||
- [@mikumifa](https://github.com/mikumifa) Docker deployment
|
||||
- [@dominoar](https://github.com/dominoar) Plugin development
|
||||
- [@万神的星空](https://github.com/qq255204159) Packages publisher
|
||||
- [@ljcduo](https://github.com/ljcduo) GPT-4 API internal test account
|
||||
|
||||
And all [contributors](https://github.com/RockChinQ/QChatGPT/graphs/contributors) and other friends who support this project.
|
||||
|
||||
<!-- ## 👍赞赏
|
||||
|
||||
<img alt="赞赏码" src="res/mm_reward_qrcode_1672840549070.png" width="400" height="400"/> -->
|
||||

config-template.py
@@ -1,7 +1,13 @@
# 配置文件: 注释里标[必需]的参数必须修改, 其他参数根据需要修改, 但请勿删除
import logging

# [必需] Mirai的配置
# 消息处理协议适配器
# 目前支持以下适配器:
# - "yirimirai": mirai的通信框架,YiriMirai框架适配器, 请同时填写下方mirai_http_api_config
# - "nakuru": go-cqhttp通信框架,请同时填写下方nakuru_config
msg_source_adapter = "yirimirai"

# [必需(与nakuru二选一,取决于msg_source_adapter)] Mirai的配置
# 请到配置mirai的步骤中的教程查看每个字段的信息
# adapter: 选择适配器,目前支持HTTPAdapter和WebSocketAdapter
# host: 运行mirai的主机地址
@@ -18,6 +24,15 @@ mirai_http_api_config = {
    "qq": 1234567890
}

# [必需(与mirai二选一,取决于msg_source_adapter)]
# 使用nakuru-project框架连接go-cqhttp的配置
nakuru_config = {
    "host": "localhost", # go-cqhttp的地址
    "port": 6700, # go-cqhttp的正向websocket端口
    "http_port": 5700, # go-cqhttp的正向http端口
    "token": "" # 若在go-cqhttp的config.yml设置了access_token, 则填写此处
}

# [必需] OpenAI的配置
# api_key: OpenAI的API Key
# http_proxy: 请求OpenAI时使用的代理,None为不使用,https和socks5暂不能使用
@@ -33,13 +48,33 @@ mirai_http_api_config = {
# },
# "http_proxy": "http://127.0.0.1:12345"
# }
#
# 现已支持反向代理,可以添加reverse_proxy字段以使用反向代理
# 使用反向代理可以在国内使用OpenAI的API,反向代理的配置请参考
# https://github.com/Ice-Hazymoon/openai-scf-proxy
#
# 反向代理填写示例:
# openai_config = {
#   "api_key": {
#       "default": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#       "key1": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#       "key2": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#   },
#   "reverse_proxy": "http://example.com:12345/v1"
# }
openai_config = {
    "api_key": {
        "default": "openai_api_key"
    },
    "http_proxy": None
    "http_proxy": None,
    "reverse_proxy": None
}

# api-key切换策略
# active:每次请求时都会切换api-key
# passive:仅当api-key超额时才会切换api-key
switch_strategy = "active"

# [必需] 管理员QQ号,用于接收报错等通知及执行管理员级别指令
# 支持多个管理员,可以使用list形式设置,例如:
# admin_qq = [12345678, 87654321]
@@ -79,17 +114,49 @@ default_prompt = {
    "default": "如果我之后想获取帮助,请你说“输入!help获取帮助”",
}

# 情景预设格式
# 参考值:默认方式:normal | 完整情景:full_scenario
# 默认方式 的格式为上述default_prompt中的内容,或prompts目录下的文件名
# 完整情景方式 的格式为JSON,在scenario目录下的JSON文件中列出对话的每个回合,编写方法见scenario/default-template.json
# 编写方法请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E9%A2%84%E8%AE%BE%E6%96%87%E5%AD%97full_scenario%E6%A8%A1%E5%BC%8F
preset_mode = "normal"

# 群内响应规则
# 符合此消息的群内消息即使不包含at机器人也会响应
# 支持消息前缀匹配及正则表达式匹配
# 支持设置是否响应at消息、随机响应概率
# 注意:由消息前缀(prefix)匹配的消息中将会删除此前缀,正则表达式(regexp)匹配的消息不会删除匹配的部分
# 前缀匹配优先级高于正则表达式匹配
# 正则表达式简明教程:https://www.runoob.com/regexp/regexp-tutorial.html
#
# 支持针对不同群设置不同的响应规则,例如:
# response_rules = {
#   "default": {
#       "at": True,
#       "prefix": ["/ai", "!ai", "!ai", "ai"],
#       "regexp": [],
#       "random_rate": 0.0,
#   },
#   "12345678": {
#       "at": False,
#       "prefix": ["/ai", "!ai", "!ai", "ai"],
#       "regexp": [],
#       "random_rate": 0.0,
#   },
# }
#
# 以上设置将会在群号为12345678的群中关闭at响应
# 未单独设置的群将使用default规则
response_rules = {
    "prefix": ["/ai", "!ai", "!ai", "ai"],
    "regexp": [] # "为什么.*", "怎么?样.*", "怎么.*", "如何.*", "[Hh]ow to.*", "[Ww]hy not.*", "[Ww]hat is.*", ".*怎么办", ".*咋办"
    "default": {
        "at": True, # 是否响应at机器人的消息
        "prefix": ["/ai", "!ai", "!ai", "ai"],
        "regexp": [], # "为什么.*", "怎么?样.*", "怎么.*", "如何.*", "[Hh]ow to.*", "[Ww]hy not.*", "[Ww]hat is.*", ".*怎么办", ".*咋办"
        "random_rate": 0.0, # 随机响应概率,0.0-1.0,0.0为不随机响应,1.0为响应所有消息, 仅在前几项判断不通过时生效
    },
}


# 消息忽略规则
# 适用于私聊及群聊
# 符合此规则的消息将不会被响应
@@ -102,10 +169,27 @@ ignore_rules = {
    "regexp": []
}

# 是否检查收到的消息中是否包含敏感词
# 若收到的消息无法通过下方指定的敏感词检查策略,则发送提示信息
income_msg_check = False

# 敏感词过滤开关,以同样数量的*代替敏感词回复
# 请在sensitive.json中添加敏感词
sensitive_word_filter = True

# 是否启用百度云内容安全审核
# 注册方式查看 https://cloud.baidu.com/doc/ANTIPORN/s/Wkhu9d5iy
baidu_check = False

# 百度云API_KEY 24位英文数字字符串
baidu_api_key = ""

# 百度云SECRET_KEY 32位的英文数字字符串
baidu_secret_key = ""

# 不合规消息自定义返回
inappropriate_message_tips = "[百度云]请珍惜机器人,当前返回内容不合规"

# 启动时是否发送赞赏码
# 仅当使用量已经超过2048字时发送
encourage_sponsor_at_start = True
@@ -113,14 +197,24 @@ encourage_sponsor_at_start = True
# 每次向OpenAI接口发送对话记录上下文的字符数
# 最大不超过(4096 - max_tokens)个字符,max_tokens为下方completion_api_params中的max_tokens
# 注意:较大的prompt_submit_length会导致OpenAI账户额度消耗更快
prompt_submit_length = 1024
prompt_submit_length = 2048

# 是否在token超限报错时自动重置会话
# 可在tips.py中编辑提示语
auto_reset = True

# OpenAI补全API的参数
# 请在下方填写模型,程序自动选择接口
# 现已支持的模型有:
#
# 'gpt-4'
# 'gpt-4-0613'
# 'gpt-4-32k'
# 'gpt-4-32k-0613'
# 'gpt-3.5-turbo'
# 'gpt-3.5-turbo-0301'
# 'gpt-3.5-turbo-16k'
# 'gpt-3.5-turbo-0613'
# 'gpt-3.5-turbo-16k-0613'
# 'text-davinci-003'
# 'text-davinci-002'
# 'code-davinci-002'
@@ -130,10 +224,10 @@ prompt_submit_length = 1024
# 'text-ada-001'
#
# 具体请查看OpenAI的文档: https://beta.openai.com/docs/api-reference/completions/create
# 请将内容修改到config.py中,请勿修改config-template.py
completion_api_params = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.9, # 数值越低得到的回答越理性,取值范围[0, 1]
    "max_tokens": 512, # 每次获取OpenAI接口响应的文字量上限, 不高于4096
    "top_p": 1, # 生成的文本的文本与要求的符合度, 取值范围[0, 1]
    "frequency_penalty": 0.2,
    "presence_penalty": 1.0,
@@ -145,22 +239,55 @@ image_api_params = {
    "size": "256x256", # 图片尺寸,支持256x256, 512x512, 1024x1024
}

# 跟踪函数调用
# 为True时,在每次GPT进行Function Calling时都会输出发送一条回复给用户
# 同时,一次提问内所有的Function Calling和普通回复消息都会单独发送给用户
trace_function_calls = False

# 群内回复消息时是否引用原消息
quote_origin = True

# 群内回复消息时是否at发送者
at_sender = False

# 回复绘图时是否包含图片描述
include_image_description = True

# 消息处理的超时时间,单位为秒
process_message_timeout = 30

# [暂未实现] 群内会话是否启用多对象名称
# 若不启用,群内会话的prompt只使用user_name和bot_name
multi_subject = False

# 回复消息时是否显示[GPT]前缀
show_prefix = False

# 回复前的强制延迟时间,降低机器人被腾讯风控概率
# *此机制对命令和消息、私聊及群聊均生效
# 每次处理时从以下的范围取一个随机秒数,
# 当此次消息处理时间低于此秒数时,将会强制延迟至此秒数
# 例如:[1.5, 3],则每次处理时会随机取一个1.5-3秒的随机数,若处理时间低于此随机数,则强制延迟至此随机秒数
# 若您不需要此功能,请将force_delay_range设置为[0, 0]
force_delay_range = [1.5, 3]

# 应用长消息处理策略的阈值
# 当回复消息长度超过此值时,将使用长消息处理策略
blob_message_threshold = 256

# 长消息处理策略
# - "image": 将长消息转换为图片发送
# - "forward": 将长消息转换为转发消息组件发送
blob_message_strategy = "forward"

# 允许等待
# 同一会话内,是否等待上一条消息处理完成后再处理下一条消息
# 若设置为False,若上一条未处理完时收到了新消息,将会丢弃新消息
# 丢弃消息时的提示信息可以在tips.py中修改
wait_last_done = True

# 文字转图片时使用的字体文件路径
# 当策略为"image"时生效
# 若在Windows系统下,程序会自动使用Windows自带的微软雅黑字体
# 若未填写或不存在且不是Windows,将禁用文字转图片功能,改为使用转发消息组件
font_path = ""

# 消息处理超时重试次数
retry_times = 3

@@ -169,30 +296,59 @@ retry_times = 3
# 设置为False时,向用户及管理员发送错误详细信息
hide_exce_info_to_user = False

# 消息处理出错时向用户发送的提示信息
# 仅当hide_exce_info_to_user为True时生效
# 设置为空字符串时,不发送提示信息
alter_tip_message = '出错了,请稍后再试'
# 线程池相关配置
# 该参数决定机器人可以同时处理几个人的消息,超出线程池数量的请求会被阻塞,不会被丢弃
# 如果你不清楚该参数的意义,请不要更改
# 程序运行本身线程池,无代码层面修改请勿更改
sys_pool_num = 8

# 执行管理员请求和指令的线程池并行线程数量,一般和管理员数量相等
admin_pool_num = 4

# 执行用户请求和指令的线程池并行线程数量
# 如需要更高的并发,可以增大该值
user_pool_num = 8

# 每个会话的过期时间,单位为秒
# 默认值20分钟
session_expire_time = 60 * 20
session_expire_time = 1200

# 会话限速
# 单会话内每分钟可进行的对话次数
# 若不需要限速,可以设置为一个很大的值
# 默认值60次,基本上不会触发限速
rate_limitation = 60
#
# 若要设置针对某特定群的限速,请使用如下格式:
# {
#   "group_<群号>": 60,
#   "default": 60,
# }
# 若要设置针对某特定用户私聊的限速,请使用如下格式:
# {
#   "person_<用户QQ>": 60,
#   "default": 60,
# }
# 同时设置多个群和私聊的限速,示例:
# {
#   "group_12345678": 60,
#   "group_87654321": 60,
#   "person_234567890": 60,
#   "person_345678901": 60,
#   "default": 60,
# }
#
# 注意: 未指定的都使用default的限速值,default不可删除
rate_limitation = {
    "default": 60,
}

# 会话限速策略
# - "wait": 每次对话获取到回复时,等待一定时间再发送回复,保证其不会超过限速均值
# - "drop": 此分钟内,若对话次数超过限速次数,则丢弃之后的对话,每自然分钟重置
rate_limit_strategy = "wait"
rate_limit_strategy = "drop"

# drop策略时,超过限速均值时,丢弃的对话的提示信息
# 仅当rate_limitation_strategy为"drop"时生效
# 若设置为空字符串,则不发送提示信息
rate_limit_drop_tip = "本分钟对话次数超过限速次数,此对话被丢弃"
# 是否在启动时进行依赖库更新
upgrade_dependencies = True

# 是否上报统计信息
# 用于统计机器人的使用情况,不会收集任何用户信息
@@ -201,20 +357,3 @@ report_usage = True

# 日志级别
logging_level = logging.INFO

# 定制帮助消息
help_message = """此机器人通过调用OpenAI的GPT-3大型语言模型生成回复,不具有情感。
你可以用自然语言与其交流,回复的消息中[GPT]开头的为模型生成的语言,[bot]开头的为程序提示。
了解此项目请找QQ 1010553892 联系作者
请不要用其生成整篇文章或大段代码,因为每次只会向模型提交少部分文字,生成大部分文字会产生偏题、前后矛盾等问题
每次会话最后一次交互后{}分钟后会自动结束,结束后将开启新会话,如需继续前一次会话请发送 !last 重新开启
欢迎到github.com/RockChinQ/QChatGPT 给个star

帮助信息:
!help - 显示帮助
!reset - 重置会话
!last - 切换到前一次的对话
!next - 切换到后一次的对话
!prompt - 显示当前对话所有内容
!list - 列出所有历史会话
!usage - 列出各个api-key的使用量""".format(session_expire_time // 60)
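
The openai_config and switch_strategy fields above describe a pool of API keys with either per-request ("active") or on-quota-exhaustion ("passive") rotation, plus optional forward and reverse proxies. As a rough illustration only — this is not the project's actual key-management code, and the helper name is made up — key selection could be sketched like this:

```python
import itertools

# Illustrative values mirroring config-template.py; keys are placeholders.
openai_config = {
    "api_key": {"default": "sk-aaa", "key1": "sk-bbb"},
    "http_proxy": None,
    "reverse_proxy": None,  # would typically be applied as the API base URL
}
switch_strategy = "active"  # or "passive"

_key_cycle = itertools.cycle(openai_config["api_key"].items())
_current = next(_key_cycle)


def pick_api_key(quota_exceeded: bool = False) -> str:
    """Return the key to use for the next request (hypothetical helper)."""
    global _current
    if switch_strategy == "active" or quota_exceeded:
        _current = next(_key_cycle)  # rotate to the next configured key
    return _current[1]
```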
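
The response_rules section above decides whether a group message is handled: a prefix match wins and strips the prefix, a regexp match keeps the text, the at flag and random_rate act as fallbacks, and per-group keys override "default". A minimal, illustrative matcher — not the project's real implementation — might look like:

```python
import random
import re

response_rules = {
    "default": {
        "at": True,
        "prefix": ["/ai", "!ai", "!ai", "ai"],
        "regexp": [],
        "random_rate": 0.0,
    },
}


def should_respond(group_id: int, text: str, at_bot: bool) -> tuple[bool, str]:
    """Return (respond?, text with any matched prefix removed)."""
    rule = response_rules.get(str(group_id), response_rules["default"])
    for prefix in rule["prefix"]:      # prefix match has priority and strips the prefix
        if text.startswith(prefix):
            return True, text[len(prefix):]
    for pattern in rule["regexp"]:     # regexp match keeps the original text
        if re.search(pattern, text):
            return True, text
    if rule["at"] and at_bot:          # at-mention fallback
        return True, text
    return random.random() < rule["random_rate"], text  # random-response fallback
```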
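
force_delay_range, blob_message_threshold and blob_message_strategy above describe two post-processing steps: pad the handling time up to a random minimum, then route long replies to an image or a forward component. A compressed sketch of that flow (illustrative only; the sender helpers are stand-ins, not real project functions):

```python
import random
import time

force_delay_range = [1.5, 3]
blob_message_threshold = 256
blob_message_strategy = "forward"  # or "image"


def send_plain(reply: str) -> None:       # stand-in for the real message sender
    print("plain:", reply)


def send_as_image(reply: str) -> None:    # stand-in
    print("image:", reply[:20], "...")


def send_as_forward(reply: str) -> None:  # stand-in
    print("forward:", reply[:20], "...")


def finish_reply(reply: str, started_at: float) -> None:
    # Pad processing time up to a random value in force_delay_range
    # to lower the chance of triggering risk control.
    target = random.uniform(*force_delay_range)
    elapsed = time.time() - started_at
    if elapsed < target:
        time.sleep(target - elapsed)

    # Long replies use the configured long-message strategy.
    if len(reply) > blob_message_threshold:
        send_as_image(reply) if blob_message_strategy == "image" else send_as_forward(reply)
    else:
        send_plain(reply)
```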
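
rate_limitation and rate_limit_strategy above define per-minute quotas keyed by "group_<群号>" / "person_<QQ>" with a mandatory "default" entry, enforced either by waiting or by dropping. The per-session lookup and the drop check could be illustrated like this (again a sketch under those assumptions, not the project's code):

```python
import time
from collections import defaultdict

rate_limitation = {"group_12345678": 30, "default": 60}
rate_limit_strategy = "drop"  # or "wait"

_counters: dict[str, list[float]] = defaultdict(list)


def limit_for(session_name: str) -> int:
    # session_name looks like "group_12345678" or "person_234567890";
    # sessions without an explicit entry fall back to "default".
    return rate_limitation.get(session_name, rate_limitation["default"])


def allow_message(session_name: str) -> bool:
    """Record one message and report whether it survives the drop strategy."""
    now = time.time()
    window = [t for t in _counters[session_name] if now - t < 60.0]
    _counters[session_name] = window
    if rate_limit_strategy == "drop" and len(window) >= limit_for(session_name):
        return False  # over quota within this minute: drop the message
    window.append(now)
    return True
```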

main.py (530 changed lines)
@@ -1,4 +1,5 @@
import importlib
import json
import os
import shutil
import threading
@@ -6,14 +7,20 @@ import time

import logging
import sys
import traceback

sys.path.append(".")

from pkg.utils.log import init_runtime_log_file, reset_logging

try:
    import colorlog
except ImportError:
    # 尝试安装
    import pkg.utils.pkgmgr as pkgmgr
    pkgmgr.install_requirements("requirements.txt")
    try:
        pkgmgr.install_requirements("requirements.txt")
        pkgmgr.install_upgrade("websockets")
        import colorlog
    except ImportError:
        print("依赖不满足,请查看 https://github.com/RockChinQ/qcg-installer/issues/15")
@@ -23,17 +30,12 @@ import colorlog
import requests
import websockets.exceptions
from urllib3.exceptions import InsecureRequestWarning
import pkg.utils.context


sys.path.append(".")

log_colors_config = {
    'DEBUG': 'green', # cyan white
    'INFO': 'white',
    'WARNING': 'yellow',
    'ERROR': 'red',
    'CRITICAL': 'bold_red',
}
# 是否使用override.json覆盖配置
# 仅在启动时提供 --override 或 -r 参数时生效
use_override = False


def init_db():
@@ -43,202 +45,269 @@ def init_db():
    database.initialize_database()


def ensure_dependencies():
    import pkg.utils.pkgmgr as pkgmgr
    pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "tiktoken", "--upgrade",
                    "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
                    "--trusted-host", "pypi.tuna.tsinghua.edu.cn"])


known_exception_caught = False

log_file_name = "qchatgpt.log"

def override_config():
    import config
    # 检查override.json覆盖
    if os.path.exists("override.json") and use_override:
        override_json = json.load(open("override.json", "r", encoding="utf-8"))
        overrided = []
        for key in override_json:
            if hasattr(config, key):
                setattr(config, key, override_json[key])
                # logging.info("覆写配置[{}]为[{}]".format(key, override_json[key]))
                overrided.append(key)
            else:
                logging.error("无法覆写配置[{}]为[{}],该配置不存在,请检查override.json是否正确".format(key, override_json[key]))
        if len(overrided) > 0:
            logging.info("已根据override.json覆写配置项: {}".format(", ".join(overrided)))


def init_runtime_log_file():
    """为此次运行生成日志文件
    格式: qchatgpt-yyyy-MM-dd-HH-mm-ss.log
    """
    global log_file_name

    # 检查logs目录是否存在
    if not os.path.exists("logs"):
        os.mkdir("logs")

    # 检查本目录是否有qchatgpt.log,若有,移动到logs目录
    if os.path.exists("qchatgpt.log"):
        shutil.move("qchatgpt.log", "logs/qchatgpt.legacy.log")

    log_file_name = "logs/qchatgpt-%s.log" % time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())


def reset_logging():
    global log_file_name
assert os.path.exists('config.py')
# 临时函数,用于加载config和上下文,未来统一放在config类
def load_config():
    logging.info("检查config模块完整性.")
    # 完整性校验
    non_exist_keys = []

    is_integrity = True
    config_template = importlib.import_module('config-template')
    config = importlib.import_module('config')
    for key in dir(config_template):
        if not key.startswith("__") and not hasattr(config, key):
            setattr(config, key, getattr(config_template, key))
            # logging.warning("[{}]不存在".format(key))
            non_exist_keys.append(key)
            is_integrity = False

    if not is_integrity:
        logging.warning("以下配置字段不存在: {}".format(", ".join(non_exist_keys)))

    # 检查override.json覆盖
    override_config()

    if not is_integrity:
        logging.warning("以上不存在的配置已被设为默认值,您可以依据config-template.py检查config.py,将在3秒后继续启动... ")
        time.sleep(3)

    # 存进上下文
    pkg.utils.context.set_config(config)


def complete_tips():
    """根据tips-custom-template模块补全tips模块的属性"""
    non_exist_keys = []

    is_integrity = True
    logging.info("检查tips模块完整性.")
    tips_template = importlib.import_module('tips-custom-template')
    tips = importlib.import_module('tips')
|
||||
for key in dir(tips_template):
|
||||
if not key.startswith("__") and not hasattr(tips, key):
|
||||
setattr(tips, key, getattr(tips_template, key))
|
||||
# logging.warning("[{}]不存在".format(key))
|
||||
non_exist_keys.append(key)
|
||||
is_integrity = False
|
||||
|
||||
if not is_integrity:
|
||||
logging.warning("以下提示语字段不存在: {}".format(", ".join(non_exist_keys)))
|
||||
logging.warning("tips模块不完整,您可以依据tips-custom-template.py检查tips.py")
|
||||
logging.warning("以上配置已被设为默认值,将在3秒后继续启动... ")
|
||||
time.sleep(3)
|
||||
|
||||
|
||||
def start(first_time_init=False):
|
||||
"""启动流程,reload之后会被执行"""
|
||||
|
||||
global known_exception_caught
|
||||
import pkg.utils.context
|
||||
|
||||
if pkg.utils.context.context['logger_handler'] is not None:
|
||||
logging.getLogger().removeHandler(pkg.utils.context.context['logger_handler'])
|
||||
|
||||
for handler in logging.getLogger().handlers:
|
||||
logging.getLogger().removeHandler(handler)
|
||||
|
||||
logging.basicConfig(level=config.logging_level, # 设置日志输出格式
|
||||
filename=log_file_name, # log日志输出的文件位置和文件名
|
||||
format="[%(asctime)s.%(msecs)03d] %(filename)s (%(lineno)d) - [%(levelname)s] : %(message)s",
|
||||
# 日志输出的格式
|
||||
# -8表示占位符,让输出左对齐,输出长度都为8位
|
||||
datefmt="%Y-%m-%d %H:%M:%S" # 时间输出的格式
|
||||
)
|
||||
sh = logging.StreamHandler()
|
||||
sh.setLevel(config.logging_level)
|
||||
sh.setFormatter(colorlog.ColoredFormatter(
|
||||
fmt="%(log_color)s[%(asctime)s.%(msecs)03d] %(filename)s (%(lineno)d) - [%(levelname)s] : "
|
||||
"%(message)s",
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
log_colors=log_colors_config
|
||||
))
|
||||
logging.getLogger().addHandler(sh)
|
||||
pkg.utils.context.context['logger_handler'] = sh
|
||||
return sh
|
||||
|
||||
|
||||
def main(first_time_init=False):
|
||||
global known_exception_caught
|
||||
|
||||
# 检查并创建plugins、prompts目录
|
||||
check_path = ["plugins", "prompts"]
|
||||
for path in check_path:
|
||||
if not os.path.exists(path):
|
||||
os.mkdir(path)
|
||||
config = pkg.utils.context.get_config()
|
||||
# 更新openai库到最新版本
|
||||
if not hasattr(config, 'upgrade_dependencies') or config.upgrade_dependencies:
|
||||
print("正在更新依赖库,请等待...")
|
||||
if not hasattr(config, 'upgrade_dependencies'):
|
||||
print("这个操作不是必须的,如果不想更新,请在config.py中添加upgrade_dependencies=False")
|
||||
else:
|
||||
print("这个操作不是必须的,如果不想更新,请在config.py中将upgrade_dependencies设置为False")
|
||||
try:
|
||||
ensure_dependencies()
|
||||
except Exception as e:
|
||||
print("更新openai库失败:{}, 请忽略或自行更新".format(e))
|
||||
|
||||
known_exception_caught = False
|
||||
try:
|
||||
# 导入config.py
|
||||
assert os.path.exists('config.py')
|
||||
try:
|
||||
|
||||
config = importlib.import_module('config')
|
||||
sh = reset_logging()
|
||||
pkg.utils.context.context['logger_handler'] = sh
|
||||
|
||||
import pkg.utils.context
|
||||
pkg.utils.context.set_config(config)
|
||||
# 检查是否设置了管理员
|
||||
if not (hasattr(config, 'admin_qq') and config.admin_qq != 0):
|
||||
# logging.warning("未设置管理员QQ,管理员权限指令及运行告警将无法使用,如需设置请修改config.py中的admin_qq字段")
|
||||
while True:
|
||||
try:
|
||||
config.admin_qq = int(input("未设置管理员QQ,管理员权限指令及运行告警将无法使用,请输入管理员QQ号: "))
|
||||
# 写入到文件
|
||||
|
||||
init_runtime_log_file()
|
||||
# 读取文件
|
||||
config_file_str = ""
|
||||
with open("config.py", "r", encoding="utf-8") as f:
|
||||
config_file_str = f.read()
|
||||
# 替换
|
||||
config_file_str = config_file_str.replace("admin_qq = 0", "admin_qq = " + str(config.admin_qq))
|
||||
# 写入
|
||||
with open("config.py", "w", encoding="utf-8") as f:
|
||||
f.write(config_file_str)
|
||||
|
||||
sh = reset_logging()
|
||||
print("管理员QQ已设置,如需修改请修改config.py中的admin_qq字段")
|
||||
time.sleep(4)
|
||||
break
|
||||
except ValueError:
|
||||
print("请输入数字")
|
||||
|
||||
# 检查是否设置了管理员
|
||||
if not (hasattr(config, 'admin_qq') and config.admin_qq != 0):
|
||||
# logging.warning("未设置管理员QQ,管理员权限指令及运行告警将无法使用,如需设置请修改config.py中的admin_qq字段")
|
||||
while True:
|
||||
try:
|
||||
config.admin_qq = int(input("未设置管理员QQ,管理员权限指令及运行告警将无法使用,请输入管理员QQ号: "))
|
||||
# 写入到文件
|
||||
import pkg.openai.manager
|
||||
import pkg.database.manager
|
||||
import pkg.openai.session
|
||||
import pkg.qqbot.manager
|
||||
import pkg.openai.dprompt
|
||||
import pkg.qqbot.cmds.aamgr
|
||||
|
||||
try:
|
||||
pkg.openai.dprompt.register_all()
|
||||
pkg.qqbot.cmds.aamgr.register_all()
|
||||
pkg.qqbot.cmds.aamgr.apply_privileges()
|
||||
except Exception as e:
|
||||
logging.error(e)
|
||||
traceback.print_exc()
|
||||
|
||||
# 读取文件
|
||||
config_file_str = ""
|
||||
with open("config.py", "r", encoding="utf-8") as f:
|
||||
config_file_str = f.read()
|
||||
# 替换
|
||||
config_file_str = config_file_str.replace("admin_qq = 0", "admin_qq = " + str(config.admin_qq))
|
||||
# 写入
|
||||
with open("config.py", "w", encoding="utf-8") as f:
|
||||
f.write(config_file_str)
|
||||
# 配置OpenAI proxy
|
||||
import openai
|
||||
openai.proxy = None # 先重置,因为重载后可能需要清除proxy
|
||||
if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None:
|
||||
openai.proxy = config.openai_config["http_proxy"]
|
||||
|
||||
print("管理员QQ已设置,如需修改请修改config.py中的admin_qq字段")
|
||||
time.sleep(4)
|
||||
break
|
||||
except ValueError:
|
||||
print("请输入数字")
|
||||
# 配置openai api_base
|
||||
if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
|
||||
openai.api_base = config.openai_config["reverse_proxy"]
|
||||
|
||||
import pkg.openai.manager
|
||||
import pkg.database.manager
|
||||
import pkg.openai.session
|
||||
import pkg.qqbot.manager
|
||||
import pkg.openai.dprompt
|
||||
# 主启动流程
|
||||
database = pkg.database.manager.DatabaseManager()
|
||||
|
||||
pkg.openai.dprompt.read_prompt_from_file()
|
||||
database.initialize_database()
|
||||
|
||||
pkg.utils.context.context['logger_handler'] = sh
|
||||
# 主启动流程
|
||||
database = pkg.database.manager.DatabaseManager()
|
||||
openai_interact = pkg.openai.manager.OpenAIInteract(config.openai_config['api_key'])
|
||||
|
||||
database.initialize_database()
|
||||
# 加载所有未超时的session
|
||||
pkg.openai.session.load_sessions()
|
||||
|
||||
openai_interact = pkg.openai.manager.OpenAIInteract(config.openai_config['api_key'])
|
||||
# 初始化qq机器人
|
||||
qqbot = pkg.qqbot.manager.QQBotManager(first_time_init=first_time_init)
|
||||
|
||||
# 加载所有未超时的session
|
||||
pkg.openai.session.load_sessions()
|
||||
# 加载插件
|
||||
import pkg.plugin.host
|
||||
pkg.plugin.host.load_plugins()
|
||||
|
||||
# 初始化qq机器人
|
||||
qqbot = pkg.qqbot.manager.QQBotManager(mirai_http_api_config=config.mirai_http_api_config,
|
||||
timeout=config.process_message_timeout, retry=config.retry_times,
|
||||
first_time_init=first_time_init)
|
||||
pkg.plugin.host.initialize_plugins()
|
||||
|
||||
# 加载插件
|
||||
import pkg.plugin.host
|
||||
pkg.plugin.host.load_plugins()
|
||||
if first_time_init: # 不是热重载之后的启动,则启动新的bot线程
|
||||
|
||||
pkg.plugin.host.initialize_plugins()
|
||||
import mirai.exceptions
|
||||
|
||||
if first_time_init: # 不是热重载之后的启动,则不启动新的bot线程
|
||||
def run_bot_wrapper():
|
||||
global known_exception_caught
|
||||
try:
|
||||
logging.debug("使用账号: {}".format(qqbot.bot_account_id))
|
||||
qqbot.adapter.run_sync()
|
||||
except TypeError as e:
|
||||
if str(e).__contains__("argument 'debug'"):
|
||||
logging.error(
|
||||
"连接bot失败:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/82".format(e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("As of 3.10, the *loop*"):
|
||||
logging.error(
|
||||
"Websockets版本过低:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/5".format(e))
|
||||
known_exception_caught = True
|
||||
|
||||
import mirai.exceptions
|
||||
|
||||
def run_bot_wrapper():
|
||||
global known_exception_caught
|
||||
try:
|
||||
qqbot.bot.run()
|
||||
except TypeError as e:
|
||||
if str(e).__contains__("argument 'debug'"):
|
||||
logging.error(
|
||||
"连接bot失败:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/82".format(e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("As of 3.10, the *loop*"):
|
||||
logging.error(
|
||||
"Websockets版本过低:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/5".format(e))
|
||||
known_exception_caught = True
|
||||
|
||||
except websockets.exceptions.InvalidStatus as e:
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
known_exception_caught = True
|
||||
except mirai.exceptions.NetworkError as e:
|
||||
logging.error("连接mirai-api-http失败:{}, 请检查是否已按照文档启动mirai".format(e))
|
||||
known_exception_caught = True
|
||||
except Exception as e:
|
||||
if str(e).__contains__("404"):
|
||||
except websockets.exceptions.InvalidStatus as e:
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("signal only works in main thread"):
|
||||
logging.error(
|
||||
"hypercorn异常:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/86".format(
|
||||
e))
|
||||
except mirai.exceptions.NetworkError as e:
|
||||
logging.error("连接mirai-api-http失败:{}, 请检查是否已按照文档启动mirai".format(e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("did not receive a valid HTTP"):
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
else:
|
||||
logging.error(
|
||||
"捕捉到未知异常:{}, 请前往 https://github.com/RockChinQ/QChatGPT/issues 查找或提issue".format(e))
|
||||
known_exception_caught = True
|
||||
raise e
|
||||
|
||||
qq_bot_thread = threading.Thread(target=run_bot_wrapper, args=(), daemon=True)
|
||||
qq_bot_thread.start()
|
||||
except Exception as e:
|
||||
if str(e).__contains__("404"):
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("signal only works in main thread"):
|
||||
logging.error(
|
||||
"hypercorn异常:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/86".format(
|
||||
e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("did not receive a valid HTTP"):
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
else:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
logging.error(
|
||||
"捕捉到未知异常:{}, 请前往 https://github.com/RockChinQ/QChatGPT/issues 查找或提issue".format(e))
|
||||
known_exception_caught = True
|
||||
raise e
|
||||
finally:
|
||||
time.sleep(12)
|
||||
threading.Thread(
|
||||
target=run_bot_wrapper
|
||||
).start()
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
if isinstance(e, KeyboardInterrupt):
|
||||
logging.info("程序被用户中止")
|
||||
sys.exit(0)
|
||||
elif isinstance(e, SyntaxError):
|
||||
logging.error("配置文件存在语法错误,请检查配置文件:\n1. 是否存在中文符号\n2. 是否已按照文件中的说明填写正确")
|
||||
sys.exit(1)
|
||||
else:
|
||||
logging.error("初始化失败:{}".format(e))
|
||||
sys.exit(1)
|
||||
finally:
|
||||
# 判断若是Windows,输出选择模式可能会暂停程序的警告
|
||||
if os.name == 'nt':
|
||||
time.sleep(2)
|
||||
logging.info("您正在使用Windows系统,若命令行窗口处于“选择”模式,程序可能会被暂停,此时请右键点击窗口空白区域使其取消选择模式。")
|
||||
|
||||
time.sleep(12)
|
||||
|
||||
if first_time_init:
|
||||
if not known_exception_caught:
|
||||
logging.info('程序启动完成,如长时间未显示 ”成功登录到账号xxxxx“ ,并且不回复消息,请查看 '
|
||||
'https://github.com/RockChinQ/QChatGPT/issues/37')
|
||||
import config
|
||||
if config.msg_source_adapter == "yirimirai":
|
||||
logging.info("QQ: {}, MAH: {}".format(config.mirai_http_api_config['qq'], config.mirai_http_api_config['host']+":"+str(config.mirai_http_api_config['port'])))
|
||||
logging.critical('程序启动完成,如长时间未显示 "成功登录到账号xxxxx" ,并且不回复消息,解决办法(请勿到群里问): '
|
||||
'https://github.com/RockChinQ/QChatGPT/issues/37')
|
||||
elif config.msg_source_adapter == 'nakuru':
|
||||
logging.info("host: {}, port: {}, http_port: {}".format(config.nakuru_config['host'], config.nakuru_config['port'], config.nakuru_config['http_port']))
|
||||
logging.critical('程序启动完成,如长时间未显示 "Protocol: connected" ,并且不回复消息,请检查config.py中的nakuru_config是否正确')
|
||||
else:
|
||||
sys.exit(1)
|
||||
else:
|
||||
logging.info('热重载完成')
|
||||
|
||||
# 发送赞赏码
|
||||
if hasattr(config, 'encourage_sponsor_at_start') \
|
||||
and config.encourage_sponsor_at_start \
|
||||
if config.encourage_sponsor_at_start \
|
||||
and pkg.utils.context.get_openai_manager().audit_mgr.get_total_text_length() >= 2048:
|
||||
|
||||
logging.info("发送赞赏码")
|
||||
@@ -258,28 +327,26 @@ def main(first_time_init=False):
|
||||
import pkg.utils.updater
|
||||
try:
|
||||
if pkg.utils.updater.is_new_version_available():
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("新版本可用,请发送 !update 进行自动更新")
|
||||
logging.info("新版本可用,请发送 !update 进行自动更新\n更新日志:\n{}".format("\n".join(pkg.utils.updater.get_rls_notes())))
|
||||
else:
|
||||
logging.info("当前已是最新版本")
|
||||
# logging.info("当前已是最新版本")
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
logging.warning("检查更新失败:{}".format(e))
|
||||
|
||||
while True:
|
||||
try:
|
||||
time.sleep(10)
|
||||
if qqbot != pkg.utils.context.get_qqbot_manager(): # 已经reload了
|
||||
logging.info("以前的main流程由于reload退出")
|
||||
break
|
||||
except KeyboardInterrupt:
|
||||
stop()
|
||||
|
||||
print("程序退出")
|
||||
sys.exit(0)
|
||||
try:
|
||||
import pkg.utils.announcement as announcement
|
||||
new_announcement = announcement.fetch_new()
|
||||
if len(new_announcement) > 0:
|
||||
for announcement in new_announcement:
|
||||
logging.critical("[公告]<{}> {}".format(announcement['time'], announcement['content']))
|
||||
except Exception as e:
|
||||
logging.warning("获取公告失败:{}".format(e))
|
||||
|
||||
return qqbot
|
||||
|
||||
def stop():
|
||||
import pkg.utils.context
|
||||
import pkg.qqbot.manager
|
||||
import pkg.openai.session
|
||||
try:
|
||||
@@ -298,39 +365,108 @@ def stop():
|
||||
raise e
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# 检查是否有config.py,如果没有就把config-template.py复制一份,并退出程序
|
||||
def check_file():
|
||||
# 检查是否有banlist.py,如果没有就把banlist-template.py复制一份
|
||||
if not os.path.exists('banlist.py'):
|
||||
shutil.copy('res/templates/banlist-template.py', 'banlist.py')
|
||||
|
||||
# 检查是否有sensitive.json
|
||||
if not os.path.exists("sensitive.json"):
|
||||
shutil.copy("res/templates/sensitive-template.json", "sensitive.json")
|
||||
|
||||
# 检查是否有scenario/default.json
|
||||
if not os.path.exists("scenario/default.json"):
|
||||
shutil.copy("scenario/default-template.json", "scenario/default.json")
|
||||
|
||||
# 检查cmdpriv.json
|
||||
if not os.path.exists("cmdpriv.json"):
|
||||
shutil.copy("res/templates/cmdpriv-template.json", "cmdpriv.json")
|
||||
|
||||
# 检查tips_custom
|
||||
if not os.path.exists("tips.py"):
|
||||
shutil.copy("tips-custom-template.py", "tips.py")
|
||||
|
||||
# 检查temp目录
|
||||
if not os.path.exists("temp/"):
|
||||
os.mkdir("temp/")
|
||||
|
||||
# 检查并创建plugins、prompts目录
|
||||
check_path = ["plugins", "prompts"]
|
||||
for path in check_path:
|
||||
if not os.path.exists(path):
|
||||
os.mkdir(path)
|
||||
|
||||
# 配置文件存在性校验
|
||||
if not os.path.exists('config.py'):
|
||||
shutil.copy('config-template.py', 'config.py')
|
||||
print('请先在config.py中填写配置')
|
||||
sys.exit(0)
|
||||
|
||||
# 检查是否有banlist.py,如果没有就把banlist-template.py复制一份
|
||||
if not os.path.exists('banlist.py'):
|
||||
shutil.copy('banlist-template.py', 'banlist.py')
|
||||
|
||||
def main():
|
||||
global use_override
|
||||
# 检查是否携带了 --override 或 -r 参数
|
||||
if '--override' in sys.argv or '-r' in sys.argv:
|
||||
use_override = True
|
||||
|
||||
# 初始化相关文件
|
||||
check_file()
|
||||
|
||||
# 初始化logging
|
||||
init_runtime_log_file()
|
||||
pkg.utils.context.context['logger_handler'] = reset_logging()
|
||||
|
||||
# 加载配置
|
||||
load_config()
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
# 检查tips模块
|
||||
complete_tips()
|
||||
|
||||
# 配置线程池
|
||||
from pkg.utils import ThreadCtl
|
||||
thread_ctl = ThreadCtl(
|
||||
sys_pool_num=config.sys_pool_num,
|
||||
admin_pool_num=config.admin_pool_num,
|
||||
user_pool_num=config.user_pool_num
|
||||
)
|
||||
# 存进上下文
|
||||
pkg.utils.context.set_thread_ctl(thread_ctl)
|
||||
|
||||
# 启动指令处理
|
||||
if len(sys.argv) > 1 and sys.argv[1] == 'init_db':
|
||||
init_db()
|
||||
sys.exit(0)
|
||||
|
||||
elif len(sys.argv) > 1 and sys.argv[1] == 'update':
|
||||
try:
|
||||
try:
|
||||
import pkg.utils.pkgmgr
|
||||
pkg.utils.pkgmgr.ensure_dulwich()
|
||||
except:
|
||||
pass
|
||||
|
||||
from dulwich import porcelain
|
||||
|
||||
repo = porcelain.open_repo('.')
|
||||
porcelain.pull(repo)
|
||||
except ModuleNotFoundError:
|
||||
print("dulwich模块未安装,请查看 https://github.com/RockChinQ/QChatGPT/issues/77")
|
||||
print("正在进行程序更新...")
|
||||
import pkg.utils.updater as updater
|
||||
updater.update_all(cli=True)
|
||||
sys.exit(0)
|
||||
|
||||
# import pkg.utils.configmgr
|
||||
#
|
||||
# pkg.utils.configmgr.set_config_and_reload("quote_origin", False)
|
||||
# 关闭urllib的http警告
|
||||
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
|
||||
main(True)
|
||||
|
||||
pkg.utils.context.get_thread_ctl().submit_sys_task(
|
||||
start,
|
||||
True
|
||||
)
|
||||
|
||||
# 主线程循环
|
||||
while True:
|
||||
try:
|
||||
time.sleep(0xFF)
|
||||
except:
|
||||
stop()
|
||||
pkg.utils.context.get_thread_ctl().shutdown()
|
||||
import platform
|
||||
if platform.system() == 'Windows':
|
||||
cmd = "taskkill /F /PID {}".format(os.getpid())
|
||||
elif platform.system() in ['Linux', 'Darwin']:
|
||||
cmd = "kill -9 {}".format(os.getpid())
|
||||
os.system(cmd)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
||||
95 override-all.json Normal file
@@ -0,0 +1,95 @@
{
    "comment": "这是override.json支持的字段全集, 关于override.json机制, 请查看https://github.com/RockChinQ/QChatGPT/pull/271",
    "msg_source_adapter": "yirimirai",
    "mirai_http_api_config": {
        "adapter": "WebSocketAdapter",
        "host": "localhost",
        "port": 8080,
        "verifyKey": "yirimirai",
        "qq": 1234567890
    },
    "nakuru_config": {
        "host": "localhost",
        "port": 6700,
        "http_port": 5700,
        "token": ""
    },
    "openai_config": {
        "api_key": {
            "default": "openai_api_key"
        },
        "http_proxy": null,
        "reverse_proxy": null
    },
    "switch_strategy": "active",
    "admin_qq": 0,
    "default_prompt": {
        "default": "如果我之后想获取帮助,请你说“输入!help获取帮助”"
    },
    "preset_mode": "normal",
    "response_rules": {
        "default": {
            "at": true,
            "prefix": [
                "/ai",
                "!ai",
                "!ai",
                "ai"
            ],
            "regexp": [],
            "random_rate": 0.0
        }
    },
    "ignore_rules": {
        "prefix": [
            "/"
        ],
        "regexp": []
    },
    "income_msg_check": false,
    "sensitive_word_filter": true,
    "baidu_check": false,
    "baidu_api_key": "",
    "baidu_secret_key": "",
    "inappropriate_message_tips": "[百度云]请珍惜机器人,当前返回内容不合规",
    "encourage_sponsor_at_start": true,
    "prompt_submit_length": 2048,
    "auto_reset": true,
    "completion_api_params": {
        "model": "gpt-3.5-turbo",
        "temperature": 0.9,
        "top_p": 1,
        "frequency_penalty": 0.2,
        "presence_penalty": 1.0
    },
    "image_api_params": {
        "size": "256x256"
    },
    "trace_function_calls": false,
    "quote_origin": true,
    "at_sender": false,
    "include_image_description": true,
    "process_message_timeout": 30,
    "show_prefix": false,
    "force_delay_range": [
        1.5,
        3
    ],
    "blob_message_threshold": 256,
    "blob_message_strategy": "forward",
    "wait_last_done": true,
    "font_path": "",
    "retry_times": 3,
    "hide_exce_info_to_user": false,
    "sys_pool_num": 8,
    "admin_pool_num": 4,
    "user_pool_num": 8,
    "session_expire_time": 1200,
    "rate_limitation": {
        "default": 60
    },
    "rate_limit_strategy": "drop",
    "upgrade_dependencies": true,
    "report_usage": true,
    "logging_level": 20
}
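
实际使用时的 override.json 只需包含要覆盖的字段(以下为示意性示例,取值均为假设);override.json 中出现但 config.py 中不存在的键,会在启动时被拒绝并记录错误日志:

{
    "admin_qq": 123456789,
    "session_expire_time": 600,
    "rate_limit_strategy": "wait"
}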
@@ -0,0 +1,3 @@
"""
审计相关操作
"""
@@ -1,3 +1,7 @@
"""
使用量统计以及数据上报功能实现
"""

import hashlib
import json
import logging
@@ -10,8 +14,11 @@ import pkg.utils.updater

class DataGatherer:
    """数据收集器"""

    usage = {}
    """以key值md5为key,{
    """各api-key的使用量

    以key值md5为key,{
        "text": {
            "text-davinci-003": 文字量:int,
        },
@@ -20,21 +27,26 @@ class DataGatherer:
        }
    }为值的字典"""

    version_str = "0.1.0"
    version_str = "undetermined"

    def __init__(self):
        self.load_from_db()
        try:
            self.version_str = pkg.utils.updater.get_commit_id_and_time_and_msg()[:40 if len(pkg.utils.updater.get_commit_id_and_time_and_msg()) > 40 else len(pkg.utils.updater.get_commit_id_and_time_and_msg())]
            self.version_str = pkg.utils.updater.get_current_tag()  # 从updater模块获取版本号
        except:
            pass

    def report_to_server(self, subservice_name: str, count: int):
        """向中央服务器报告使用量

        只会报告此次请求的使用量,不会报告总量。
        不包含除版本号、使用类型、使用量以外的任何信息,仅供开发者分析使用情况。
        """
        try:
            config = pkg.utils.context.get_config()
            if hasattr(config, "report_usage") and not config.report_usage:
            if not config.report_usage:
                return
            res = requests.get("http://rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}".format(subservice_name, self.version_str, count))
            res = requests.get("http://reports.rockchin.top:18989/usage?service_name=qchatgpt.{}&version={}&count={}&msg_source={}".format(subservice_name, self.version_str, count, config.msg_source_adapter))
            if res.status_code != 200 or res.text != "ok":
                logging.warning("report to server failed, status_code: {}, text: {}".format(res.status_code, res.text))
        except:
@@ -44,7 +56,9 @@ class DataGatherer:
        return self.usage[key_md5] if key_md5 in self.usage else {}

    def report_text_model_usage(self, model, total_tokens):
        key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()
        """调用方报告文字模型请求文字使用量"""

        key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()  # 以key的md5进行储存

        if key_md5 not in self.usage:
            self.usage[key_md5] = {}
@@ -62,6 +76,8 @@ class DataGatherer:
        self.report_to_server("text", length)

    def report_image_model_usage(self, size):
        """调用方报告图片模型请求图片使用量"""

        key_md5 = pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()

        if key_md5 not in self.usage:
@@ -79,6 +95,7 @@ class DataGatherer:
        self.report_to_server("image", 1)

    def get_text_length_of_key(self, key):
        """获取指定api-key (明文) 的文字总使用量(本地记录)"""
        key_md5 = hashlib.md5(key.encode('utf-8')).hexdigest()
        if key_md5 not in self.usage:
            return 0
@@ -88,6 +105,8 @@ class DataGatherer:
        return sum(self.usage[key_md5]["text"].values())

    def get_image_count_of_key(self, key):
        """获取指定api-key (明文) 的图片总使用量(本地记录)"""

        key_md5 = hashlib.md5(key.encode('utf-8')).hexdigest()
        if key_md5 not in self.usage:
            return 0
@@ -97,6 +116,7 @@ class DataGatherer:
        return sum(self.usage[key_md5]["image"].values())

    def get_total_text_length(self):
        """获取所有api-key的文字总使用量(本地记录)"""
        total = 0
        for key in self.usage:
            if "text" not in self.usage[key]:
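
结合上面的注释,usage 字段的形状大致如下(示意性示例,键与数值均为假设,以 api-key 的 md5 为键):

{
    "<api-key的md5>": {
        "text": {
            "gpt-3.5-turbo": 12034
        },
        "image": {
            "256x256": 3
        }
    }
}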
@@ -0,0 +1,3 @@
"""
数据库操作封装
"""
@@ -1,3 +1,6 @@
|
||||
"""
|
||||
数据库管理模块
|
||||
"""
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
@@ -9,9 +12,9 @@ import sqlite3
|
||||
import pkg.utils.context
|
||||
|
||||
|
||||
# 数据库管理
|
||||
# 为其他模块提供数据库操作接口
|
||||
class DatabaseManager:
|
||||
"""封装数据库底层操作,并提供方法给上层使用"""
|
||||
|
||||
conn = None
|
||||
cursor = None
|
||||
|
||||
@@ -23,21 +26,25 @@ class DatabaseManager:
|
||||
|
||||
# 连接到数据库文件
|
||||
def reconnect(self):
|
||||
"""连接到数据库"""
|
||||
self.conn = sqlite3.connect('database.db', check_same_thread=False)
|
||||
self.cursor = self.conn.cursor()
|
||||
|
||||
def close(self):
|
||||
self.conn.close()
|
||||
|
||||
def execute(self, *args, **kwargs) -> Cursor:
|
||||
def __execute__(self, *args, **kwargs) -> Cursor:
|
||||
# logging.debug('SQL: {}'.format(sql))
|
||||
logging.debug('SQL: {}'.format(args))
|
||||
c = self.cursor.execute(*args, **kwargs)
|
||||
self.conn.commit()
|
||||
return c
|
||||
|
||||
# 初始化数据库的函数
|
||||
def initialize_database(self):
|
||||
self.execute("""
|
||||
"""创建数据表"""
|
||||
|
||||
self.__execute__("""
|
||||
create table if not exists `sessions` (
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`name` varchar(255) not null,
|
||||
@@ -46,11 +53,31 @@ class DatabaseManager:
|
||||
`create_timestamp` bigint not null,
|
||||
`last_interact_timestamp` bigint not null,
|
||||
`status` varchar(255) not null default 'on_going',
|
||||
`prompt` text not null
|
||||
`default_prompt` text not null default '',
|
||||
`prompt` text not null,
|
||||
`token_counts` text not null default '[]'
|
||||
)
|
||||
""")
|
||||
|
||||
self.execute("""
|
||||
# 检查sessions表是否存在`default_prompt`字段, 检查是否存在`token_counts`字段
|
||||
self.__execute__("PRAGMA table_info('sessions')")
|
||||
columns = self.cursor.fetchall()
|
||||
has_default_prompt = False
|
||||
has_token_counts = False
|
||||
for field in columns:
|
||||
if field[1] == 'default_prompt':
|
||||
has_default_prompt = True
|
||||
if field[1] == 'token_counts':
|
||||
has_token_counts = True
|
||||
if has_default_prompt and has_token_counts:
|
||||
break
|
||||
if not has_default_prompt:
|
||||
self.__execute__("alter table `sessions` add column `default_prompt` text not null default ''")
|
||||
if not has_token_counts:
|
||||
self.__execute__("alter table `sessions` add column `token_counts` text not null default '[]'")
|
||||
|
||||
|
||||
self.__execute__("""
|
||||
create table if not exists `account_fee`(
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`key_md5` varchar(255) not null,
|
||||
@@ -59,7 +86,7 @@ class DatabaseManager:
|
||||
)
|
||||
""")
|
||||
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
create table if not exists `account_usage`(
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`json` text not null
|
||||
@@ -69,47 +96,49 @@ class DatabaseManager:
|
||||
|
||||
# session持久化
|
||||
def persistence_session(self, subject_type: str, subject_number: int, create_timestamp: int,
|
||||
last_interact_timestamp: int, prompt: str):
|
||||
last_interact_timestamp: int, prompt: str, default_prompt: str = '', token_counts: str = ''):
|
||||
"""持久化指定session"""
|
||||
|
||||
# 检查是否已经有了此name和create_timestamp的session
|
||||
# 如果有,就更新prompt和last_interact_timestamp
|
||||
# 如果没有,就插入一条新的记录
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select count(*) from `sessions` where `type` = '{}' and `number` = {} and `create_timestamp` = {}
|
||||
""".format(subject_type, subject_number, create_timestamp))
|
||||
count = self.cursor.fetchone()[0]
|
||||
if count == 0:
|
||||
|
||||
sql = """
|
||||
insert into `sessions` (`name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`)
|
||||
values (?, ?, ?, ?, ?, ?)
|
||||
insert into `sessions` (`name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `default_prompt`, `token_counts`)
|
||||
values (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
"""
|
||||
|
||||
self.execute(sql,
|
||||
("{}_{}".format(subject_type, subject_number), subject_type, subject_number, create_timestamp,
|
||||
last_interact_timestamp, prompt))
|
||||
self.__execute__(sql,
|
||||
("{}_{}".format(subject_type, subject_number), subject_type, subject_number, create_timestamp,
|
||||
last_interact_timestamp, prompt, default_prompt, token_counts))
|
||||
else:
|
||||
sql = """
|
||||
update `sessions` set `last_interact_timestamp` = ?, `prompt` = ?
|
||||
update `sessions` set `last_interact_timestamp` = ?, `prompt` = ?, `token_counts` = ?
|
||||
where `type` = ? and `number` = ? and `create_timestamp` = ?
|
||||
"""
|
||||
|
||||
self.execute(sql, (last_interact_timestamp, prompt, subject_type,
|
||||
subject_number, create_timestamp))
|
||||
self.__execute__(sql, (last_interact_timestamp, prompt, token_counts, subject_type,
|
||||
subject_number, create_timestamp))
|
||||
|
||||
# 显式关闭一个session
|
||||
def explicit_close_session(self, session_name: str, create_timestamp: int):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'explicitly_closed' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
def set_session_ongoing(self, session_name: str, create_timestamp: int):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'on_going' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
# 设置session为过期
|
||||
def set_session_expired(self, session_name: str, create_timestamp: int):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'expired' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
@@ -117,8 +146,8 @@ class DatabaseManager:
|
||||
def load_valid_sessions(self) -> dict:
|
||||
# 从数据库中加载所有还没过期的session
|
||||
config = pkg.utils.context.get_config()
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `last_interact_timestamp` > {}
|
||||
""".format(int(time.time()) - config.session_expire_time))
|
||||
results = self.cursor.fetchall()
|
||||
@@ -131,6 +160,8 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
# 当且仅当最后一个该对象的会话是on_going状态时,才会被加载
|
||||
if status == 'on_going':
|
||||
@@ -139,7 +170,9 @@ class DatabaseManager:
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
}
|
||||
else:
|
||||
if session_name in sessions:
|
||||
@@ -150,8 +183,8 @@ class DatabaseManager:
|
||||
# 获取此session_name前一个session的数据
|
||||
def last_session(self, session_name: str, cursor_timestamp: int):
|
||||
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `name` = '{}' and `last_interact_timestamp` < {} order by `last_interact_timestamp` desc
|
||||
limit 1
|
||||
""".format(session_name, cursor_timestamp))
|
||||
@@ -167,20 +200,24 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
return {
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
}
|
||||
|
||||
# 获取此session_name后一个session的数据
|
||||
def next_session(self, session_name: str, cursor_timestamp: int):
|
||||
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `name` = '{}' and `last_interact_timestamp` > {} order by `last_interact_timestamp` asc
|
||||
limit 1
|
||||
""".format(session_name, cursor_timestamp))
|
||||
@@ -196,19 +233,23 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
return {
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
}
|
||||
|
||||
# 列出与某个对象的所有对话session
|
||||
def list_history(self, session_name: str, capacity: int, page: int):
|
||||
self.execute("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `name` = '{}' order by `last_interact_timestamp` desc limit {} offset {}
|
||||
""".format(session_name, capacity, capacity * page))
|
||||
results = self.cursor.fetchall()
|
||||
@@ -221,17 +262,42 @@ class DatabaseManager:
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
sessions.append({
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
})
|
||||
|
||||
return sessions
|
||||
|
||||
def delete_history(self, session_name: str, index: int) -> bool:
|
||||
# 删除倒序第index个session
|
||||
# 查找其id再删除
|
||||
self.__execute__("""
|
||||
delete from `sessions` where `id` in (select `id` from `sessions` where `name` = '{}' order by `last_interact_timestamp` desc limit 1 offset {})
|
||||
""".format(session_name, index))
|
||||
|
||||
return self.cursor.rowcount == 1
|
||||
|
||||
def delete_all_history(self, session_name: str) -> bool:
|
||||
self.__execute__("""
|
||||
delete from `sessions` where `name` = '{}'
|
||||
""".format(session_name))
|
||||
return self.cursor.rowcount > 0
|
||||
|
||||
def delete_all_session_history(self) -> bool:
|
||||
self.__execute__("""
|
||||
delete from `sessions`
|
||||
""")
|
||||
return self.cursor.rowcount > 0
|
||||
|
||||
# 将apikey的使用量存进数据库
|
||||
def dump_api_key_usage(self, api_keys: dict, usage: dict):
|
||||
logging.debug('dumping api key usage...')
|
||||
@@ -246,22 +312,22 @@ class DatabaseManager:
|
||||
usage_count = usage[key_md5]
|
||||
# 将使用量存进数据库
|
||||
# 先检查是否已存在
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select count(*) from `api_key_usage` where `key_md5` = '{}'""".format(key_md5))
|
||||
result = self.cursor.fetchone()
|
||||
if result[0] == 0:
|
||||
# 不存在则插入
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
insert into `api_key_usage` (`key_md5`, `usage`,`timestamp`) values ('{}', {}, {})
|
||||
""".format(key_md5, usage_count, int(time.time())))
|
||||
else:
|
||||
# 存在则更新,timestamp设置为当前
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `api_key_usage` set `usage` = {}, `timestamp` = {} where `key_md5` = '{}'
|
||||
""".format(usage_count, int(time.time()), key_md5))
|
||||
|
||||
def load_api_key_usage(self):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select `key_md5`, `usage` from `api_key_usage`
|
||||
""")
|
||||
results = self.cursor.fetchall()
|
||||
@@ -273,23 +339,24 @@ class DatabaseManager:
|
||||
return usage
|
||||
|
||||
def dump_usage_json(self, usage: dict):
|
||||
|
||||
json_str = json.dumps(usage)
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select count(*) from `account_usage`""")
|
||||
result = self.cursor.fetchone()
|
||||
if result[0] == 0:
|
||||
# 不存在则插入
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
insert into `account_usage` (`json`) values ('{}')
|
||||
""".format(json_str))
|
||||
else:
|
||||
# 存在则更新
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
update `account_usage` set `json` = '{}' where `id` = 1
|
||||
""".format(json_str))
|
||||
|
||||
def load_usage_json(self):
|
||||
self.execute("""
|
||||
self.__execute__("""
|
||||
select `json` from `account_usage` order by id desc limit 1
|
||||
""")
|
||||
result = self.cursor.fetchone()
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
"""OpenAI 接口处理及会话管理相关"""
|
||||
|
||||
0 pkg/openai/api/__init__.py Normal file
204 pkg/openai/api/chat_completion.py Normal file
@@ -0,0 +1,204 @@
import openai
import json
import logging

from .model import RequestBase

from ..funcmgr import get_func_schema_list, execute_function, get_func, get_func_schema, ContentFunctionNotFoundError


class ChatCompletionRequest(RequestBase):
    """调用ChatCompletion接口的请求类。

    此类保证每一次返回的角色为assistant的信息的finish_reason一定为stop。
    若有函数调用响应,本类的返回瀑布是:函数调用请求->函数调用结果->...->assistant的信息->stop。
    """
    model: str
    messages: list[dict[str, str]]
    kwargs: dict

    stopped: bool = False

    pending_func_call: dict = None

    pending_msg: str

    def flush_pending_msg(self):
        self.append_message(
            role="assistant",
            content=self.pending_msg
        )
        self.pending_msg = ""

    def append_message(self, role: str, content: str, name: str=None, function_call: dict=None):
        msg = {
            "role": role,
            "content": content
        }

        if name is not None:
            msg['name'] = name

        if function_call is not None:
            msg['function_call'] = function_call

        self.messages.append(msg)

    def __init__(
        self,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ):
        self.model = model
        self.messages = messages.copy()

        self.kwargs = kwargs

        self.req_func = openai.ChatCompletion.acreate

        self.pending_func_call = None

        self.stopped = False

        self.pending_msg = ""

    def __iter__(self):
        return self

    def __next__(self) -> dict:
        if self.stopped:
            raise StopIteration()

        if self.pending_func_call is None:  # 没有待处理的函数调用请求

            args = {
                "model": self.model,
                "messages": self.messages,
            }

            funcs = get_func_schema_list()

            if len(funcs) > 0:
                args['functions'] = funcs

            # 拼接kwargs
            args = {**args, **self.kwargs}

            resp = self._req(**args)

            choice0 = resp["choices"][0]

            # 如果不是函数调用,且finish_reason为stop,则停止迭代
            if choice0['finish_reason'] == 'stop':  # and choice0["finish_reason"] == "stop"
                self.stopped = True

            if 'function_call' in choice0['message']:
                self.pending_func_call = choice0['message']['function_call']

                self.append_message(
                    role="assistant",
                    content=choice0['message']['content'],
                    function_call=choice0['message']['function_call']
                )

                return {
                    "id": resp["id"],
                    "choices": [
                        {
                            "index": choice0["index"],
                            "message": {
                                "role": "assistant",
                                "type": "function_call",
                                "content": choice0['message']['content'],
                                "function_call": choice0['message']['function_call']
                            },
                            "finish_reason": "function_call"
                        }
                    ],
                    "usage": resp["usage"]
                }
            else:

                # self.pending_msg += choice0['message']['content']
                # 普通回复一定处于最后方,故不用再追加进内部messages

                return {
                    "id": resp["id"],
                    "choices": [
                        {
                            "index": choice0["index"],
                            "message": {
                                "role": "assistant",
                                "type": "text",
                                "content": choice0['message']['content']
                            },
                            "finish_reason": choice0["finish_reason"]
                        }
                    ],
                    "usage": resp["usage"]
                }
        else:  # 处理函数调用请求

            cp_pending_func_call = self.pending_func_call.copy()

            self.pending_func_call = None

            func_name = cp_pending_func_call['name']
            arguments = {}

            try:

                try:
                    arguments = json.loads(cp_pending_func_call['arguments'])
                # 若不是json格式的异常处理
                except json.decoder.JSONDecodeError:
                    # 获取函数的参数列表
                    func_schema = get_func_schema(func_name)

                    arguments = {
                        func_schema['parameters']['required'][0]: cp_pending_func_call['arguments']
                    }

                logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))

                # 执行函数调用
                ret = ""
                try:
                    ret = execute_function(func_name, arguments)

                    logging.info("函数执行完成。")
                except Exception as e:
                    ret = "error: execute function failed: {}".format(str(e))
                    logging.error("函数执行失败: {}".format(str(e)))

                self.append_message(
                    role="function",
                    content=json.dumps(ret, ensure_ascii=False),
                    name=func_name
                )

                return {
                    "id": -1,
                    "choices": [
                        {
                            "index": -1,
                            "message": {
                                "role": "function",
                                "type": "function_return",
                                "function_name": func_name,
                                "content": json.dumps(ret, ensure_ascii=False)
                            },
                            "finish_reason": "function_return"
                        }
                    ],
                    "usage": {
                        "prompt_tokens": 0,
                        "completion_tokens": 0,
                        "total_tokens": 0
                    }
                }

            except ContentFunctionNotFoundError:
                raise Exception("没有找到函数: {}".format(func_name))
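
# 以下为一个示意性的调用草图(假设性示例,省略了 api-key 与上下文的初始化),
# 展示如何迭代 ChatCompletionRequest 并区分文本回复、函数调用请求与函数返回结果:

for resp in ChatCompletionRequest(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "今天天气怎么样?"}]
):
    msg = resp["choices"][0]["message"]
    if msg["type"] == "text":
        print("assistant:", msg["content"])          # 普通文本回复,finish_reason为stop时迭代结束
    elif msg["type"] == "function_call":
        print("请求调用函数:", msg["function_call"])  # 模型发起的函数调用请求
    elif msg["type"] == "function_return":
        print("函数返回:", msg["content"])            # 内容函数的执行结果,之后会继续迭代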
111 pkg/openai/api/completion.py Normal file
@@ -0,0 +1,111 @@
|
||||
import openai
|
||||
|
||||
from .model import RequestBase
|
||||
|
||||
|
||||
class CompletionRequest(RequestBase):
|
||||
"""调用Completion接口的请求类。
|
||||
|
||||
调用方可以一直next completion直到finish_reason为stop。
|
||||
"""
|
||||
|
||||
model: str
|
||||
prompt: str
|
||||
kwargs: dict
|
||||
|
||||
stopped: bool = False
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
):
|
||||
self.model = model
|
||||
self.prompt = ""
|
||||
|
||||
for message in messages:
|
||||
self.prompt += message["role"] + ": " + message["content"] + "\n"
|
||||
|
||||
self.prompt += "assistant: "
|
||||
|
||||
self.kwargs = kwargs
|
||||
|
||||
self.req_func = openai.Completion.acreate
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self) -> dict:
|
||||
"""调用Completion接口,返回生成的文本
|
||||
|
||||
{
|
||||
"id": "id",
|
||||
"choices": [
|
||||
{
|
||||
"index": 0,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"type": "text",
|
||||
"content": "message"
|
||||
},
|
||||
"finish_reason": "reason"
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": 10,
|
||||
"completion_tokens": 20,
|
||||
"total_tokens": 30
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
if self.stopped:
|
||||
raise StopIteration()
|
||||
|
||||
resp = self._req(
|
||||
model=self.model,
|
||||
prompt=self.prompt,
|
||||
**self.kwargs
|
||||
)
|
||||
|
||||
if resp["choices"][0]["finish_reason"] == "stop":
|
||||
self.stopped = True
|
||||
|
||||
choice0 = resp["choices"][0]
|
||||
|
||||
self.prompt += choice0["text"]
|
||||
|
||||
return {
|
||||
"id": resp["id"],
|
||||
"choices": [
|
||||
{
|
||||
"index": choice0["index"],
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"type": "text",
|
||||
"content": choice0["text"]
|
||||
},
|
||||
"finish_reason": choice0["finish_reason"]
|
||||
}
|
||||
],
|
||||
"usage": resp["usage"]
|
||||
}
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
openai.api_key = os.environ["OPENAI_API_KEY"]
|
||||
|
||||
for resp in CompletionRequest(
|
||||
model="text-davinci-003",
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Hello, who are you?"
|
||||
}
|
||||
]
|
||||
):
|
||||
print(resp)
|
||||
if resp["choices"][0]["finish_reason"] == "stop":
|
||||
break
|
||||
62 pkg/openai/api/model.py Normal file
@@ -0,0 +1,62 @@
# 定义不同接口请求的模型
import threading
import asyncio
import logging

import openai


class RequestBase:

    req_func: callable

    def __init__(self, *args, **kwargs):
        raise NotImplementedError

    def _next_key(self):
        import pkg.utils.context as context
        switched, name = context.get_openai_manager().key_mgr.auto_switch()
        logging.debug("切换api-key: switched={}, name={}".format(switched, name))
        openai.api_key = context.get_openai_manager().key_mgr.get_using_key()

    def _req(self, **kwargs):
        """处理代理问题"""
        import config

        ret: dict = {}
        exception: Exception = None

        async def awrapper(**kwargs):
            nonlocal ret, exception

            try:
                ret = await self.req_func(**kwargs)
                logging.debug("接口请求返回:%s", str(ret))

                if config.switch_strategy == 'active':
                    self._next_key()

                return ret
            except Exception as e:
                exception = e

        loop = asyncio.new_event_loop()

        thr = threading.Thread(
            target=loop.run_until_complete,
            args=(awrapper(**kwargs),)
        )

        thr.start()
        thr.join()

        if exception is not None:
            raise exception

        return ret

    def __iter__(self):
        return self

    def __next__(self):
        raise NotImplementedError
@@ -1,74 +1,145 @@
|
||||
# 多情景预设值管理
|
||||
import json
|
||||
import logging
|
||||
import config
|
||||
import os
|
||||
|
||||
__current__ = "default"
|
||||
# __current__ = "default"
|
||||
# """当前默认使用的情景预设的名称
|
||||
|
||||
__prompts_from_files__ = {}
|
||||
# 由管理员使用`!default <名称>`指令切换
|
||||
# """
|
||||
|
||||
# __prompts_from_files__ = {}
|
||||
# """从文件中读取的情景预设值"""
|
||||
|
||||
# __scenario_from_files__ = {}
|
||||
|
||||
|
||||
def read_prompt_from_file() -> str:
|
||||
"""从文件读取预设值"""
|
||||
# 读取prompts/目录下的所有文件,以文件名为键,文件内容为值
|
||||
# 保存在__prompts_from_files__中
|
||||
global __prompts_from_files__
|
||||
import os
|
||||
|
||||
__prompts_from_files__ = {}
|
||||
for file in os.listdir("prompts"):
|
||||
with open(os.path.join("prompts", file), encoding="utf-8") as f:
|
||||
__prompts_from_files__[file] = f.read()
|
||||
__universal_first_reply__ = "ok, I'll follow your commands."
|
||||
"""通用首次回复"""
|
||||
|
||||
|
||||
def get_prompt_dict() -> dict:
|
||||
"""获取预设值字典"""
|
||||
class ScenarioMode:
|
||||
"""情景预设模式抽象类"""
|
||||
|
||||
using_prompt_name = "default"
|
||||
"""新session创建时使用的prompt名称"""
|
||||
|
||||
prompts: dict[str, list] = {}
|
||||
|
||||
def __init__(self):
|
||||
logging.debug("prompts: {}".format(self.prompts))
|
||||
|
||||
def list(self) -> dict[str, list]:
|
||||
"""获取所有情景预设的名称及内容"""
|
||||
return self.prompts
|
||||
|
||||
def get_prompt(self, name: str) -> tuple[list, str]:
|
||||
"""获取指定情景预设的名称及内容"""
|
||||
for key in self.prompts:
|
||||
if key.startswith(name):
|
||||
return self.prompts[key], key
|
||||
raise Exception("没有找到情景预设: {}".format(name))
|
||||
|
||||
def set_using_name(self, name: str) -> str:
|
||||
"""设置默认情景预设"""
|
||||
for key in self.prompts:
|
||||
if key.startswith(name):
|
||||
self.using_prompt_name = key
|
||||
return key
|
||||
raise Exception("没有找到情景预设: {}".format(name))
|
||||
|
||||
def get_full_name(self, name: str) -> str:
|
||||
"""获取完整的情景预设名称"""
|
||||
for key in self.prompts:
|
||||
if key.startswith(name):
|
||||
return key
|
||||
raise Exception("没有找到情景预设: {}".format(name))
|
||||
|
||||
def get_using_name(self) -> str:
|
||||
"""获取默认情景预设"""
|
||||
return self.using_prompt_name
|
||||
|
||||
|
||||
class NormalScenarioMode(ScenarioMode):
|
||||
"""普通情景预设模式"""
|
||||
|
||||
def __init__(self):
|
||||
global __universal_first_reply__
|
||||
# 加载config中的default_prompt值
|
||||
if type(config.default_prompt) == str:
|
||||
self.using_prompt_name = "default"
|
||||
self.prompts = {"default": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": config.default_prompt
|
||||
},{
|
||||
"role": "assistant",
|
||||
"content": __universal_first_reply__
|
||||
}
|
||||
]}
|
||||
|
||||
elif type(config.default_prompt) == dict:
|
||||
for key in config.default_prompt:
|
||||
self.prompts[key] = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": config.default_prompt[key]
|
||||
},{
|
||||
"role": "assistant",
|
||||
"content": __universal_first_reply__
|
||||
}
|
||||
]
|
||||
|
||||
# 从prompts/目录下的文件中载入
|
||||
# 遍历文件
|
||||
for file in os.listdir("prompts"):
|
||||
with open(os.path.join("prompts", file), encoding="utf-8") as f:
|
||||
self.prompts[file] = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": f.read()
|
||||
},{
|
||||
"role": "assistant",
|
||||
"content": __universal_first_reply__
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
class FullScenarioMode(ScenarioMode):
|
||||
"""完整情景预设模式"""
|
||||
|
||||
def __init__(self):
|
||||
"""从json读取所有"""
|
||||
# 遍历scenario/目录下的所有文件,以文件名为键,文件内容中的prompt为值
|
||||
for file in os.listdir("scenario"):
|
||||
if file == "default-template.json":
|
||||
continue
|
||||
with open(os.path.join("scenario", file), encoding="utf-8") as f:
|
||||
self.prompts[file] = json.load(f)["prompt"]
|
||||
|
||||
super().__init__()
|
||||
|
||||
|
||||
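# 作为参考,full_scenario 模式读取的 scenario/*.json 大致形如下例
# (示意性示例,对话内容为假设),其中 "prompt" 即一段完整的 messages 列表:
#
#   {
#       "prompt": [
#           {"role": "user", "content": "你现在是一名客服,只回答与订单相关的问题"},
#           {"role": "assistant", "content": "好的,我只会回答与订单相关的问题。"}
#       ]
#   }
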
scenario_mode_mapping = {}
|
||||
"""情景预设模式名称与对象的映射"""
|
||||
|
||||
|
||||
def register_all():
|
||||
"""注册所有情景预设模式,不使用装饰器,因为装饰器的方式不支持热重载"""
|
||||
global scenario_mode_mapping
|
||||
scenario_mode_mapping = {
|
||||
"normal": NormalScenarioMode(),
|
||||
"full_scenario": FullScenarioMode()
|
||||
}
|
||||
|
||||
|
||||
def mode_inst() -> ScenarioMode:
|
||||
"""获取指定名称的情景预设模式对象"""
|
||||
import config
|
||||
default_prompt = config.default_prompt
|
||||
if type(default_prompt) == str:
|
||||
default_prompt = {"default": default_prompt}
|
||||
elif type(default_prompt) == dict:
|
||||
pass
|
||||
else:
|
||||
raise TypeError("default_prompt must be str or dict")
|
||||
|
||||
# 将文件中的预设值合并到default_prompt中
|
||||
for key in __prompts_from_files__:
|
||||
default_prompt[key] = __prompts_from_files__[key]
|
||||
if config.preset_mode == "default":
|
||||
config.preset_mode = "normal"
|
||||
|
||||
return default_prompt
|
||||
|
||||
|
||||
def set_current(name):
|
||||
global __current__
|
||||
for key in get_prompt_dict():
|
||||
if key.lower().startswith(name.lower()):
|
||||
__current__ = key
|
||||
return
|
||||
raise KeyError("未找到情景预设: " + name)
|
||||
|
||||
|
||||
def get_current():
|
||||
global __current__
|
||||
return __current__
|
||||
|
||||
|
||||
def set_to_default():
|
||||
global __current__
|
||||
default_dict = get_prompt_dict()
|
||||
|
||||
if "default" in default_dict:
|
||||
__current__ = "default"
|
||||
else:
|
||||
__current__ = list(default_dict.keys())[0]
|
||||
|
||||
|
||||
def get_prompt(name: str = None) -> str:
|
||||
"""获取预设值"""
|
||||
if name is None:
|
||||
name = get_current()
|
||||
|
||||
default_dict = get_prompt_dict()
|
||||
|
||||
for key in default_dict:
|
||||
if key.lower().startswith(name.lower()):
|
||||
return default_dict[key]
|
||||
|
||||
raise KeyError("未找到情景预设: " + name)
|
||||
return scenario_mode_mapping[config.preset_mode]
|
||||
|
||||
47 pkg/openai/funcmgr.py Normal file
@@ -0,0 +1,47 @@
|
||||
# 封装了function calling的一些支持函数
|
||||
import logging
|
||||
|
||||
|
||||
from pkg.plugin import host
|
||||
|
||||
|
||||
class ContentFunctionNotFoundError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def get_func_schema_list() -> list:
|
||||
"""从plugin包中的函数结构中获取并处理成受GPT支持的格式"""
|
||||
if not host.__enable_content_functions__:
|
||||
return []
|
||||
|
||||
schemas = []
|
||||
|
||||
for func in host.__callable_functions__:
|
||||
if func['enabled']:
|
||||
fun_cp = func.copy()
|
||||
|
||||
del fun_cp['enabled']
|
||||
|
||||
schemas.append(fun_cp)
|
||||
|
||||
return schemas
|
||||
|
||||
def get_func(name: str) -> callable:
|
||||
if name not in host.__function_inst_map__:
|
||||
raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))
|
||||
|
||||
return host.__function_inst_map__[name]
|
||||
|
||||
def get_func_schema(name: str) -> dict:
|
||||
for func in host.__callable_functions__:
|
||||
if func['name'] == name:
|
||||
return func
|
||||
raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))
|
||||
|
||||
def execute_function(name: str, kwargs: dict) -> any:
|
||||
"""执行函数调用"""
|
||||
|
||||
logging.debug("executing function: name='{}', kwargs={}".format(name, kwargs))
|
||||
|
||||
func = get_func(name)
|
||||
return func(**kwargs)
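funcmgr only gathers schemas and dispatches calls; how that plugs into a function-calling round trip is left to the caller. Below is a minimal sketch assuming the classic `openai.ChatCompletion` API; the loop, message handling and function names are illustrative and not code from this diff.

```python
# Illustrative glue code only: wire get_func_schema_list() and execute_function()
# into one OpenAI function-calling round trip.
import json
import openai

from pkg.openai import funcmgr


def answer_with_functions(messages: list) -> str:
    schemas = funcmgr.get_func_schema_list()
    resp = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-0613",
        messages=messages,
        **({"functions": schemas} if schemas else {}),  # only pass when any are enabled
    )
    msg = resp["choices"][0]["message"]

    if msg.get("function_call"):
        # GPT asked for a content function: run it, append the result, ask again.
        name = msg["function_call"]["name"]
        kwargs = json.loads(msg["function_call"]["arguments"])
        result = funcmgr.execute_function(name, kwargs)

        messages.append(msg)
        messages.append({"role": "function", "name": name, "content": str(result)})
        return answer_with_functions(messages)

    return msg["content"]
```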
|
||||
@@ -5,18 +5,25 @@ import logging
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
|
||||
|
||||
class KeysManager:
|
||||
api_key = {}
|
||||
"""所有api-key"""
|
||||
|
||||
# api-key的使用量
|
||||
# 其中键为api-key的md5值,值为使用量
|
||||
using_key = ""
|
||||
"""当前使用的api-key"""
|
||||
|
||||
alerted = []
|
||||
"""已提示过超额的key
|
||||
|
||||
记录在此以避免重复提示
|
||||
"""
|
||||
|
||||
# 在此list中的都是经超额报错标记过的api-key
|
||||
# 记录的是key值,仅在运行时有效
|
||||
exceeded = []
|
||||
"""已超额的key
|
||||
|
||||
供自动切换功能识别
|
||||
"""
|
||||
|
||||
def get_using_key(self):
|
||||
return self.using_key
|
||||
@@ -25,8 +32,6 @@ class KeysManager:
|
||||
return hashlib.md5(self.using_key.encode('utf-8')).hexdigest()
|
||||
|
||||
def __init__(self, api_key):
|
||||
# if hasattr(config, 'api_key_usage_threshold'):
|
||||
# self.api_key_usage_threshold = config.api_key_usage_threshold
|
||||
|
||||
if type(api_key) is dict:
|
||||
self.api_key = api_key
|
||||
@@ -42,10 +47,31 @@ class KeysManager:
|
||||
|
||||
self.auto_switch()
|
||||
|
||||
# 根据tested自动切换到可用的api-key
|
||||
# 返回是否切换成功, 切换后的api-key的别名
|
||||
def auto_switch(self) -> (bool, str):
|
||||
def auto_switch(self) -> tuple[bool, str]:
|
||||
"""尝试切换api-key
|
||||
|
||||
Returns:
|
||||
是否切换成功, 切换后的api-key的别名
|
||||
"""
|
||||
|
||||
index = 0
|
||||
|
||||
for key_name in self.api_key:
|
||||
if self.api_key[key_name] == self.using_key:
|
||||
break
|
||||
|
||||
index += 1
|
||||
|
||||
# 从当前key开始向后轮询
|
||||
start_index = index
|
||||
index += 1
|
||||
if index >= len(self.api_key):
|
||||
index = 0
|
||||
|
||||
while index != start_index:
|
||||
|
||||
key_name = list(self.api_key.keys())[index]
|
||||
|
||||
if self.api_key[key_name] not in self.exceeded:
|
||||
self.using_key = self.api_key[key_name]
|
||||
|
||||
@@ -60,20 +86,20 @@ class KeysManager:
|
||||
|
||||
return True, key_name
|
||||
|
||||
self.using_key = list(self.api_key.values())[0]
|
||||
logging.info("使用api-key:" + list(self.api_key.keys())[0])
|
||||
index += 1
|
||||
if index >= len(self.api_key):
|
||||
index = 0
|
||||
|
||||
return False, ""
|
||||
self.using_key = list(self.api_key.values())[start_index]
|
||||
logging.debug("使用api-key:" + list(self.api_key.keys())[start_index])
|
||||
|
||||
return False, list(self.api_key.keys())[start_index]
|
||||
|
||||
def add(self, key_name, key):
|
||||
self.api_key[key_name] = key
|
||||
|
||||
# 设置当前使用的api-key使用量超限
|
||||
# 这是在尝试调用api时发生超限异常时调用的
|
||||
def set_current_exceeded(self):
|
||||
# md5 = hashlib.md5(self.using_key.encode('utf-8')).hexdigest()
|
||||
# self.usage[md5] = self.api_key_usage_threshold
|
||||
# self.fee[md5] = self.api_key_fee_threshold
|
||||
"""设置当前使用的api-key使用量超限"""
|
||||
self.exceeded.append(self.using_key)
|
||||
|
||||
def get_key_name(self, api_key):
|
||||
@@ -81,4 +107,4 @@ class KeysManager:
|
||||
for key_name in self.api_key:
|
||||
if self.api_key[key_name] == api_key:
|
||||
return key_name
|
||||
return ""
|
||||
return ""
|
||||
|
||||
@@ -5,11 +5,16 @@ import openai
|
||||
import pkg.openai.keymgr
|
||||
import pkg.utils.context
|
||||
import pkg.audit.gatherer
|
||||
from pkg.openai.modelmgr import ModelRequest, create_openai_model_request
|
||||
from pkg.openai.modelmgr import select_request_cls
|
||||
|
||||
from pkg.openai.api.model import RequestBase
|
||||
|
||||
|
||||
# 为其他模块提供与OpenAI交互的接口
|
||||
class OpenAIInteract:
|
||||
api_params = {}
|
||||
"""OpenAI 接口封装
|
||||
|
||||
将文字接口和图片接口封装供调用方使用
|
||||
"""
|
||||
|
||||
key_mgr: pkg.openai.keymgr.KeysManager = None
|
||||
|
||||
@@ -20,48 +25,53 @@ class OpenAIInteract:
|
||||
}
|
||||
|
||||
def __init__(self, api_key: str):
|
||||
# self.api_key = api_key
|
||||
|
||||
self.key_mgr = pkg.openai.keymgr.KeysManager(api_key)
|
||||
self.audit_mgr = pkg.audit.gatherer.DataGatherer()
|
||||
|
||||
logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
|
||||
# logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
|
||||
|
||||
openai.api_key = self.key_mgr.get_using_key()
|
||||
|
||||
pkg.utils.context.set_openai_manager(self)
|
||||
|
||||
# 请求OpenAI Completion
|
||||
def request_completion(self, prompts):
|
||||
def request_completion(self, messages: list):
|
||||
"""请求补全接口回复=
|
||||
"""
|
||||
# 选择接口请求类
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
# 根据模型选择使用的接口
|
||||
ai: ModelRequest = create_openai_model_request(
|
||||
config.completion_api_params['model'],
|
||||
'user',
|
||||
config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None
|
||||
)
|
||||
ai.request(
|
||||
prompts,
|
||||
**config.completion_api_params
|
||||
)
|
||||
response = ai.get_response()
|
||||
request: RequestBase
|
||||
|
||||
logging.debug("OpenAI response: %s", response)
|
||||
model: str = config.completion_api_params['model']
|
||||
|
||||
if 'model' in config.completion_api_params:
|
||||
self.audit_mgr.report_text_model_usage(config.completion_api_params['model'],
|
||||
ai.get_total_tokens())
|
||||
elif 'engine' in config.completion_api_params:
|
||||
self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'],
|
||||
response['usage']['total_tokens'])
|
||||
cp_parmas = config.completion_api_params.copy()
|
||||
del cp_parmas['model']
|
||||
|
||||
return ai.get_message()
|
||||
request = select_request_cls(model, messages, cp_parmas)
|
||||
|
||||
def request_image(self, prompt):
|
||||
# 请求接口
|
||||
for resp in request:
|
||||
|
||||
if resp['usage']['total_tokens'] > 0:
|
||||
self.audit_mgr.report_text_model_usage(
|
||||
model,
|
||||
resp['usage']['total_tokens']
|
||||
)
|
||||
|
||||
yield resp
|
||||
|
||||
def request_image(self, prompt) -> dict:
|
||||
"""请求图片接口回复
|
||||
|
||||
Parameters:
|
||||
prompt (str): 提示语
|
||||
|
||||
Returns:
|
||||
dict: 响应
|
||||
"""
|
||||
config = pkg.utils.context.get_config()
|
||||
params = config.image_api_params if hasattr(config, "image_api_params") else self.default_image_api_params
|
||||
params = config.image_api_params
|
||||
|
||||
response = openai.Image.create(
|
||||
prompt=prompt,
|
||||
|
||||
@@ -1,5 +1,17 @@
|
||||
# 提供与模型交互的抽象接口
|
||||
"""OpenAI 接口底层封装
|
||||
|
||||
目前使用的对话接口有:
|
||||
ChatCompletion - gpt-3.5-turbo 等模型
|
||||
Completion - text-davinci-003 等模型
|
||||
此模块封装此两个接口的请求实现,为上层提供统一的调用方式
|
||||
"""
|
||||
import openai, logging, threading, asyncio
|
||||
import openai.error as aiE
|
||||
import tiktoken
|
||||
|
||||
from pkg.openai.api.model import RequestBase
|
||||
from pkg.openai.api.completion import CompletionRequest
|
||||
from pkg.openai.api.chat_completion import ChatCompletionRequest
|
||||
|
||||
COMPLETION_MODELS = {
|
||||
'text-davinci-003',
|
||||
@@ -13,7 +25,14 @@ COMPLETION_MODELS = {
|
||||
|
||||
CHAT_COMPLETION_MODELS = {
|
||||
'gpt-3.5-turbo',
|
||||
'gpt-3.5-turbo-0301',
|
||||
'gpt-3.5-turbo-16k',
|
||||
'gpt-3.5-turbo-0613',
|
||||
'gpt-3.5-turbo-16k-0613',
|
||||
# 'gpt-3.5-turbo-0301',
|
||||
'gpt-4',
|
||||
'gpt-4-0613',
|
||||
'gpt-4-32k',
|
||||
'gpt-4-32k-0613'
|
||||
}
|
||||
|
||||
EDIT_MODELS = {
|
||||
@@ -25,128 +44,77 @@ IMAGE_MODELS = {
|
||||
}
|
||||
|
||||
|
||||
class ModelRequest():
|
||||
"""GPT父类"""
|
||||
can_chat = False
|
||||
runtime:threading.Thread = None
|
||||
ret = ""
|
||||
proxy:str = None
|
||||
|
||||
def __init__(self, model_name, user_name, request_fun, http_proxy:str = None):
|
||||
self.model_name = model_name
|
||||
self.user_name = user_name
|
||||
self.request_fun = request_fun
|
||||
if http_proxy != None:
|
||||
self.proxy = http_proxy
|
||||
openai.proxy = self.proxy
|
||||
|
||||
async def __a_request__(self, **kwargs):
|
||||
self.ret = await self.request_fun(**kwargs)
|
||||
|
||||
def request(self, **kwargs):
|
||||
if self.proxy != None: #异步请求
|
||||
loop = asyncio.new_event_loop()
|
||||
self.runtime = threading.Thread(
|
||||
target=loop.run_until_complete,
|
||||
args=(self.__a_request__(**kwargs),)
|
||||
)
|
||||
self.runtime.start()
|
||||
else: #同步请求
|
||||
self.ret = self.request_fun(**kwargs)
|
||||
|
||||
def __msg_handle__(self, msg):
|
||||
"""将prompt dict转换成接口需要的格式"""
|
||||
return msg
|
||||
|
||||
def ret_handle(self):
|
||||
'''
|
||||
API消息返回处理函数
|
||||
若重写该方法,应检查异步线程状态,或在需要检查处super该方法
|
||||
'''
|
||||
if self.runtime != None and isinstance(self.runtime, threading.Thread):
|
||||
self.runtime.join()
|
||||
return
|
||||
|
||||
def get_total_tokens(self):
|
||||
try:
|
||||
return self.ret['usage']['total_tokens']
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
def get_message(self):
|
||||
return self.message
|
||||
|
||||
def get_response(self):
|
||||
return self.ret
|
||||
|
||||
class ChatCompletionModel(ModelRequest):
|
||||
"""ChatCompletion类模型"""
|
||||
Chat_role = ['system', 'user', 'assistant']
|
||||
def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
|
||||
if http_proxy == None:
|
||||
request_fun = openai.ChatCompletion.create
|
||||
else:
|
||||
request_fun = openai.ChatCompletion.acreate
|
||||
self.can_chat = True
|
||||
super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
|
||||
|
||||
def request(self, prompts, **kwargs):
|
||||
prompts = self.__msg_handle__(prompts)
|
||||
kwargs['messages'] = prompts
|
||||
super().request(**kwargs)
|
||||
self.ret_handle()
|
||||
|
||||
def __msg_handle__(self, msgs):
|
||||
temp_msgs = []
|
||||
# 把msgs拷贝进temp_msgs
|
||||
for msg in msgs:
|
||||
temp_msgs.append(msg.copy())
|
||||
return temp_msgs
|
||||
|
||||
def get_message(self):
|
||||
return self.ret["choices"][0]["message"]['content'] #需要时直接加载加快请求速度,降低内存消耗
|
||||
|
||||
|
||||
class CompletionModel(ModelRequest):
|
||||
"""Completion类模型"""
|
||||
def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
|
||||
if http_proxy == None:
|
||||
request_fun = openai.Completion.create
|
||||
else:
|
||||
request_fun = openai.Completion.acreate
|
||||
super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
|
||||
|
||||
def request(self, prompts, **kwargs):
|
||||
prompts = self.__msg_handle__(prompts)
|
||||
kwargs['prompt'] = prompts
|
||||
super().request(**kwargs)
|
||||
self.ret_handle()
|
||||
|
||||
def __msg_handle__(self, msgs):
|
||||
prompt = ''
|
||||
for msg in msgs:
|
||||
prompt = prompt + "{}: {}\n".format(msg['role'], msg['content'])
|
||||
# for msg in msgs:
|
||||
# if msg['role'] == 'assistant':
|
||||
# prompt = prompt + "{}\n".format(msg['content'])
|
||||
# else:
|
||||
# prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content'])
|
||||
prompt = prompt + "assistant: "
|
||||
return prompt
|
||||
|
||||
def get_message(self):
|
||||
return self.ret["choices"][0]["text"]
|
||||
|
||||
|
||||
def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest:
|
||||
"""使用给定的模型名称创建模型请求对象"""
|
||||
def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase:
|
||||
if model_name in CHAT_COMPLETION_MODELS:
|
||||
model = ChatCompletionModel(model_name, user_name, http_proxy)
|
||||
return ChatCompletionRequest(model_name, messages, **args)
|
||||
elif model_name in COMPLETION_MODELS:
|
||||
model = CompletionModel(model_name, user_name, http_proxy)
|
||||
else :
|
||||
log = "找不到模型[{}],请检查配置文件".format(model_name)
|
||||
logging.error(log)
|
||||
raise IndexError(log)
|
||||
logging.debug("使用接口[{}]创建模型请求[{}]".format(model.__class__.__name__, model_name))
|
||||
return model
|
||||
return CompletionRequest(model_name, messages, **args)
|
||||
raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
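For reference, a hedged sketch of how this dispatcher is consumed, mirroring `OpenAIInteract.request_completion` above. The message and extra parameters are illustrative, and the response layout shown is only what that caller relies on.

```python
# Illustrative only: pick a request class by model name and iterate its responses.
from pkg.openai.modelmgr import select_request_cls

messages = [{"role": "user", "content": "你好"}]
request = select_request_cls("gpt-3.5-turbo", messages, {"temperature": 0.9})

for resp in request:  # request objects are iterated, yielding response dicts
    msg = resp["choices"][0]["message"]
    if msg.get("content"):
        print(msg["content"])
    print("tokens used so far:", resp["usage"]["total_tokens"])
```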
|
||||
|
||||
|
||||
def count_chat_completion_tokens(messages: list, model: str) -> int:
|
||||
"""Return the number of tokens used by a list of messages."""
|
||||
try:
|
||||
encoding = tiktoken.encoding_for_model(model)
|
||||
except KeyError:
|
||||
print("Warning: model not found. Using cl100k_base encoding.")
|
||||
encoding = tiktoken.get_encoding("cl100k_base")
|
||||
if model in {
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k-0613",
|
||||
}:
|
||||
tokens_per_message = 3
|
||||
tokens_per_name = 1
|
||||
elif model == "gpt-3.5-turbo-0301":
|
||||
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
|
||||
tokens_per_name = -1 # if there's a name, the role is omitted
|
||||
elif "gpt-3.5-turbo" in model:
|
||||
# print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
|
||||
return count_chat_completion_tokens(messages, model="gpt-3.5-turbo-0613")
|
||||
elif "gpt-4" in model:
|
||||
# print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
|
||||
return count_chat_completion_tokens(messages, model="gpt-4-0613")
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"""count_chat_completion_tokens() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
|
||||
)
|
||||
num_tokens = 0
|
||||
for message in messages:
|
||||
num_tokens += tokens_per_message
|
||||
for key, value in message.items():
|
||||
num_tokens += len(encoding.encode(value))
|
||||
if key == "name":
|
||||
num_tokens += tokens_per_name
|
||||
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
|
||||
return num_tokens
|
||||
|
||||
|
||||
def count_completion_tokens(messages: list, model: str) -> int:
|
||||
|
||||
try:
|
||||
encoding = tiktoken.encoding_for_model(model)
|
||||
except KeyError:
|
||||
print("Warning: model not found. Using cl100k_base encoding.")
|
||||
encoding = tiktoken.get_encoding("cl100k_base")
|
||||
|
||||
text = ""
|
||||
|
||||
for message in messages:
|
||||
text += message['role'] + message['content'] + "\n"
|
||||
|
||||
text += "assistant: "
|
||||
|
||||
return len(encoding.encode(text))
|
||||
|
||||
|
||||
def count_tokens(messages: list, model: str):
|
||||
|
||||
if model in CHAT_COMPLETION_MODELS:
|
||||
return count_chat_completion_tokens(messages, model)
|
||||
elif model in COMPLETION_MODELS:
|
||||
return count_completion_tokens(messages, model)
|
||||
raise ValueError("不支持模型[{}],请检查配置文件".format(model))
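A quick usage sketch of the dispatcher above; the message list is illustrative.

```python
# Illustrative only: count tokens for a chat-style prompt before submitting it.
from pkg.openai.modelmgr import count_tokens

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "你好"},
]

# Dispatches to the chat or completion counter depending on the model name.
print("prompt tokens:", count_tokens(messages, "gpt-3.5-turbo"))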
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# 计费模块
|
||||
# 已弃用 https://github.com/RockChinQ/QChatGPT/issues/81
|
||||
|
||||
import logging
|
||||
|
||||
pricing = {
|
||||
"base": { # 文字模型单位是1000字符
|
||||
"text-davinci-003": 0.02,
|
||||
},
|
||||
"image": {
|
||||
"256x256": 0.016,
|
||||
"512x512": 0.018,
|
||||
"1024x1024": 0.02,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def language_base_price(model, text):
|
||||
salt_rate = 0.93
|
||||
length = ((len(text.encode('utf-8')) - len(text)) / 2 + len(text)) * salt_rate
|
||||
logging.debug("text length: %d" % length)
|
||||
|
||||
return pricing["base"][model] * length / 1000
|
||||
|
||||
|
||||
def image_price(size):
|
||||
logging.debug("image size: %s" % size)
|
||||
return pricing["image"][size]
|
||||
@@ -1,3 +1,8 @@
|
||||
"""主线使用的会话管理模块
|
||||
|
||||
每个人、每个群单独一个session,session内部保留了对话的上下文,
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
@@ -11,6 +16,8 @@ import pkg.utils.context
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
|
||||
from pkg.openai.modelmgr import count_tokens
|
||||
|
||||
# 运行时保存的所有session
|
||||
sessions = {}
|
||||
|
||||
@@ -19,6 +26,7 @@ class SessionOfflineStatus:
|
||||
ON_GOING = 'on_going'
|
||||
EXPLICITLY_CLOSED = 'explicitly_closed'
|
||||
|
||||
|
||||
# 重置session.prompt
|
||||
def reset_session_prompt(session_name, prompt):
|
||||
# 备份原始数据
|
||||
@@ -34,7 +42,7 @@ def reset_session_prompt(session_name, prompt):
|
||||
prompt = [
|
||||
{
|
||||
'role': 'system',
|
||||
'content': config.default_prompt['default']
|
||||
'content': config.default_prompt['default'] if type(config.default_prompt) == dict else config.default_prompt
|
||||
}
|
||||
]
|
||||
# 警告
|
||||
@@ -43,11 +51,14 @@ def reset_session_prompt(session_name, prompt):
|
||||
用户[{}]的数据已被重置,有可能是因为数据版本过旧或存储错误
|
||||
原始数据将备份在:
|
||||
{}""".format(session_name, bak_path)
|
||||
)
|
||||
) # 为保证多行文本格式正确故无缩进
|
||||
return prompt
|
||||
|
||||
|
||||
# 从数据加载session
|
||||
def load_sessions():
|
||||
"""从数据库加载sessions"""
|
||||
|
||||
global sessions
|
||||
|
||||
db_inst = pkg.utils.context.get_database_manager()
|
||||
@@ -55,7 +66,7 @@ def load_sessions():
|
||||
session_data = db_inst.load_valid_sessions()
|
||||
|
||||
for session_name in session_data:
|
||||
logging.info('加载session: {}'.format(session_name))
|
||||
logging.debug('加载session: {}'.format(session_name))
|
||||
|
||||
temp_session = Session(session_name)
|
||||
temp_session.name = session_name
|
||||
@@ -63,15 +74,18 @@ def load_sessions():
|
||||
temp_session.last_interact_timestamp = session_data[session_name]['last_interact_timestamp']
|
||||
try:
|
||||
temp_session.prompt = json.loads(session_data[session_name]['prompt'])
|
||||
temp_session.token_counts = json.loads(session_data[session_name]['token_counts'])
|
||||
except Exception:
|
||||
temp_session.prompt = reset_session_prompt(session_name, session_data[session_name]['prompt'])
|
||||
temp_session.persistence()
|
||||
temp_session.default_prompt = json.loads(session_data[session_name]['default_prompt']) if \
|
||||
session_data[session_name]['default_prompt'] else []
|
||||
|
||||
sessions[session_name] = temp_session
|
||||
|
||||
|
||||
# 获取指定名称的session,如果不存在则创建一个新的
|
||||
def get_session(session_name: str):
|
||||
def get_session(session_name: str) -> 'Session':
|
||||
global sessions
|
||||
if session_name not in sessions:
|
||||
sessions[session_name] = Session(session_name)
|
||||
@@ -93,10 +107,16 @@ class Session:
|
||||
name = ''
|
||||
|
||||
prompt = []
|
||||
"""使用list来保存会话中的回合"""
|
||||
|
||||
default_prompt = []
|
||||
"""本session的默认prompt"""
|
||||
|
||||
create_timestamp = 0
|
||||
"""会话创建时间"""
|
||||
|
||||
last_interact_timestamp = 0
|
||||
"""上次交互(产生回复)时间"""
|
||||
|
||||
just_switched_to_exist_session = False
|
||||
|
||||
@@ -116,34 +136,27 @@ class Session:
|
||||
logging.debug('{},lock release successfully,{}'.format(self.name, self.response_lock))
|
||||
|
||||
# 从配置文件获取会话预设信息
|
||||
def get_default_prompt(self, use_default: str=None):
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
def get_default_prompt(self, use_default: str = None):
|
||||
import pkg.openai.dprompt as dprompt
|
||||
|
||||
if use_default is None:
|
||||
current_default_prompt = dprompt.get_prompt(dprompt.get_current())
|
||||
else:
|
||||
current_default_prompt = dprompt.get_prompt(use_default)
|
||||
use_default = dprompt.mode_inst().get_using_name()
|
||||
|
||||
return [
|
||||
{
|
||||
'role': 'user',
|
||||
'content': current_default_prompt
|
||||
},{
|
||||
'role': 'assistant',
|
||||
'content': 'ok'
|
||||
}
|
||||
]
|
||||
current_default_prompt, _ = dprompt.mode_inst().get_prompt(use_default)
|
||||
return current_default_prompt
|
||||
|
||||
def __init__(self, name: str):
|
||||
self.name = name
|
||||
self.create_timestamp = int(time.time())
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
self.prompt = []
|
||||
self.token_counts = []
|
||||
self.schedule()
|
||||
|
||||
self.response_lock = threading.Lock()
|
||||
self.prompt = self.get_default_prompt()
|
||||
|
||||
self.default_prompt = self.get_default_prompt()
|
||||
logging.debug("prompt is: {}".format(self.default_prompt))
|
||||
|
||||
# 设定检查session最后一次对话是否超过过期时间的计时器
|
||||
def schedule(self):
|
||||
@@ -181,89 +194,223 @@ class Session:
|
||||
|
||||
# 请求回复
|
||||
# 这个函数是阻塞的
|
||||
def append(self, text: str) -> str:
|
||||
def query(self, text: str=None) -> tuple[str, str, list[str]]:
|
||||
"""向session中添加一条消息,返回接口回复
|
||||
|
||||
Args:
|
||||
text (str): 用户消息
|
||||
|
||||
Returns:
|
||||
tuple[str, str]: (接口回复, finish_reason, 已调用的函数列表)
|
||||
"""
|
||||
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
|
||||
# 触发插件事件
|
||||
if self.prompt == self.get_default_prompt():
|
||||
if not self.prompt:
|
||||
args = {
|
||||
'session_name': self.name,
|
||||
'session': self,
|
||||
'default_prompt': self.prompt,
|
||||
'default_prompt': self.default_prompt,
|
||||
}
|
||||
|
||||
event = pkg.plugin.host.emit(plugin_models.SessionFirstMessageReceived, **args)
|
||||
if event.is_prevented_default():
|
||||
return None
|
||||
return None, None, None
|
||||
|
||||
config = pkg.utils.context.get_config()
|
||||
max_length = config.prompt_submit_length if hasattr(config, "prompt_submit_length") else 1024
|
||||
max_length = config.prompt_submit_length
|
||||
|
||||
local_default_prompt = self.default_prompt.copy()
|
||||
local_prompt = self.prompt.copy()
|
||||
|
||||
# 触发PromptPreProcessing事件
|
||||
args = {
|
||||
'session_name': self.name,
|
||||
'default_prompt': self.default_prompt,
|
||||
'prompt': self.prompt,
|
||||
'text_message': text,
|
||||
}
|
||||
|
||||
event = pkg.plugin.host.emit(plugin_models.PromptPreProcessing, **args)
|
||||
|
||||
if event.get_return_value('default_prompt') is not None:
|
||||
local_default_prompt = event.get_return_value('default_prompt')
|
||||
|
||||
if event.get_return_value('prompt') is not None:
|
||||
local_prompt = event.get_return_value('prompt')
|
||||
|
||||
if event.get_return_value('text_message') is not None:
|
||||
text = event.get_return_value('text_message')
|
||||
|
||||
prompts, _ = self.cut_out(text, max_length, local_default_prompt, local_prompt)
|
||||
|
||||
res_text = ""
|
||||
|
||||
pending_msgs = []
|
||||
|
||||
total_tokens = 0
|
||||
|
||||
finish_reason: str = ""
|
||||
|
||||
funcs = []
|
||||
|
||||
trace_func_calls = config.trace_function_calls
|
||||
botmgr = pkg.utils.context.get_qqbot_manager()
|
||||
|
||||
session_name_spt: list[str] = self.name.split("_")
|
||||
|
||||
pending_res_text = ""
|
||||
|
||||
# TODO 对不起,我知道这样非常非常屎山,但我之后会重构的
|
||||
for resp in pkg.utils.context.get_openai_manager().request_completion(prompts):
|
||||
|
||||
if pending_res_text != "":
|
||||
botmgr.adapter.send_message(
|
||||
session_name_spt[0],
|
||||
session_name_spt[1],
|
||||
pending_res_text
|
||||
)
|
||||
pending_res_text = ""
|
||||
|
||||
finish_reason = resp['choices'][0]['finish_reason']
|
||||
|
||||
if resp['choices'][0]['message']['role'] == "assistant" and resp['choices'][0]['message']['content'] != None: # 包含纯文本响应
|
||||
|
||||
if not trace_func_calls:
|
||||
res_text += resp['choices'][0]['message']['content'] + "\n"
|
||||
else:
|
||||
res_text = resp['choices'][0]['message']['content']
|
||||
pending_res_text = resp['choices'][0]['message']['content']
|
||||
|
||||
total_tokens += resp['usage']['total_tokens']
|
||||
|
||||
msg = {
|
||||
"role": "assistant",
|
||||
"content": resp['choices'][0]['message']['content']
|
||||
}
|
||||
|
||||
if 'function_call' in resp['choices'][0]['message']:
|
||||
msg['function_call'] = json.dumps(resp['choices'][0]['message']['function_call'])
|
||||
|
||||
pending_msgs.append(msg)
|
||||
|
||||
if resp['choices'][0]['message']['type'] == 'function_call':
|
||||
# self.prompt.append(
|
||||
# {
|
||||
# "role": "assistant",
|
||||
# "content": "function call: "+json.dumps(resp['choices'][0]['message']['function_call'])
|
||||
# }
|
||||
# )
|
||||
if trace_func_calls:
|
||||
botmgr.adapter.send_message(
|
||||
session_name_spt[0],
|
||||
session_name_spt[1],
|
||||
"调用函数 "+resp['choices'][0]['message']['function_call']['name'] + "..."
|
||||
)
|
||||
|
||||
total_tokens += resp['usage']['total_tokens']
|
||||
elif resp['choices'][0]['message']['type'] == 'function_return':
|
||||
# self.prompt.append(
|
||||
# {
|
||||
# "role": "function",
|
||||
# "name": resp['choices'][0]['message']['function_name'],
|
||||
# "content": json.dumps(resp['choices'][0]['message']['content'])
|
||||
# }
|
||||
# )
|
||||
|
||||
# total_tokens += resp['usage']['total_tokens']
|
||||
funcs.append(
|
||||
resp['choices'][0]['message']['function_name']
|
||||
)
|
||||
pass
|
||||
|
||||
|
||||
# 向API请求补全
|
||||
message = pkg.utils.context.get_openai_manager().request_completion(
|
||||
self.cut_out(text, max_length),
|
||||
)
|
||||
# message, total_token = pkg.utils.context.get_openai_manager().request_completion(
|
||||
# prompts,
|
||||
# )
|
||||
|
||||
# 成功获取,处理回复
|
||||
res_test = message
|
||||
res_ans = res_test
|
||||
|
||||
# 去除开头可能的提示
|
||||
res_ans_spt = res_test.split("\n\n")
|
||||
if len(res_ans_spt) > 1:
|
||||
del (res_ans_spt[0])
|
||||
res_ans = '\n\n'.join(res_ans_spt)
|
||||
# res_test = message
|
||||
res_ans = res_text.strip()
|
||||
|
||||
# 将此次对话的双方内容加入到prompt中
|
||||
self.prompt.append({'role':'user', 'content':text})
|
||||
self.prompt.append({'role':'assistant', 'content':res_ans})
|
||||
# self.prompt.append({'role': 'user', 'content': text})
|
||||
# self.prompt.append({'role': 'assistant', 'content': res_ans})
|
||||
if text:
|
||||
self.prompt.append({'role': 'user', 'content': text})
|
||||
# 添加pending_msgs
|
||||
self.prompt += pending_msgs
|
||||
|
||||
# 向token_counts中添加本回合的token数量
|
||||
# self.token_counts.append(total_tokens-total_token_before_query)
|
||||
# logging.debug("本回合使用token: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts))
|
||||
|
||||
if self.just_switched_to_exist_session:
|
||||
self.just_switched_to_exist_session = False
|
||||
self.set_ongoing()
|
||||
|
||||
return res_ans if res_ans[0]!='\n' else res_ans[1:]
|
||||
return res_ans if res_ans[0] != '\n' else res_ans[1:], finish_reason, funcs
|
||||
|
||||
# 删除上一回合并返回上一回合的问题
|
||||
def undo(self) -> str:
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
|
||||
# 删除上一回合
|
||||
if self.prompt[-1]['role'] != 'user':
|
||||
res = self.prompt[-1]['content']
|
||||
self.prompt.remove(self.prompt[-2])
|
||||
else:
|
||||
res = self.prompt[-2]['content']
|
||||
self.prompt.remove(self.prompt[-1])
|
||||
# 删除最后两个消息
|
||||
if len(self.prompt) < 2:
|
||||
raise Exception('之前无对话,无法撤销')
|
||||
|
||||
question = self.prompt[-2]['content']
|
||||
self.prompt = self.prompt[:-2]
|
||||
self.token_counts = self.token_counts[:-1]
|
||||
|
||||
# 返回上一回合的问题
|
||||
return res
|
||||
return question
|
||||
|
||||
# 构建对话体
|
||||
def cut_out(self, msg: str, max_tokens: int) -> list:
|
||||
"""将现有prompt进行切割处理,使得新的prompt长度不超过max_tokens"""
|
||||
# 如果用户消息长度超过max_tokens,直接返回
|
||||
|
||||
temp_prompt = [
|
||||
def cut_out(self, msg: str, max_tokens: int, default_prompt: list, prompt: list) -> tuple[list, list]:
|
||||
"""将现有prompt进行切割处理,使得新的prompt长度不超过max_tokens
|
||||
|
||||
:return: (新的prompt, 新的token_counts)
|
||||
"""
|
||||
|
||||
# 最终由三个部分组成
|
||||
# - default_prompt 情景预设固定值
|
||||
# - changable_prompts 可变部分, 此会话中的历史对话回合
|
||||
# - current_question 当前问题
|
||||
|
||||
# 包装目前的对话回合内容
|
||||
changable_prompts = []
|
||||
|
||||
use_model = pkg.utils.context.get_config().completion_api_params['model']
|
||||
|
||||
ptr = len(prompt) - 1
|
||||
|
||||
# 直接从后向前扫描拼接,不管是否是整回合
|
||||
while ptr >= 0:
|
||||
if count_tokens(prompt[ptr:ptr+1]+changable_prompts, use_model) > max_tokens:
|
||||
break
|
||||
|
||||
changable_prompts.insert(0, prompt[ptr])
|
||||
|
||||
ptr -= 1
|
||||
|
||||
# 将default_prompt和changable_prompts合并
|
||||
result_prompt = default_prompt + changable_prompts
|
||||
|
||||
# 添加当前问题
|
||||
if msg:
|
||||
result_prompt.append(
|
||||
{
|
||||
'role': 'user',
|
||||
'content': msg
|
||||
}
|
||||
]
|
||||
)
|
||||
|
||||
token_count = len(msg)
|
||||
# 倒序遍历prompt
|
||||
for i in range(len(self.prompt) - 1, -1, -1):
|
||||
if token_count >= max_tokens:
|
||||
break
|
||||
logging.debug("cut_out: {}".format(json.dumps(result_prompt, ensure_ascii=False, indent=4)))
|
||||
|
||||
# 将prompt加到temp_prompt头部
|
||||
temp_prompt.insert(0, self.prompt[i])
|
||||
token_count += len(self.prompt[i]['content'])
|
||||
|
||||
logging.debug('cut_out: {}'.format(str(temp_prompt)))
|
||||
|
||||
return temp_prompt
|
||||
return result_prompt, count_tokens(changable_prompts, use_model)
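As a usage sketch (values illustrative, `session` assumed to be an existing Session instance): with a limited budget the oldest turns are dropped first while the scenario preset is always kept, matching the assembly described in the comments above.

```python
# Illustrative only: the preset survives, history is trimmed from the oldest end.
default_prompt = [{"role": "user", "content": "你是一个助手"},
                  {"role": "assistant", "content": "ok"}]
history = [{"role": "user", "content": "第一回合的问题"},
           {"role": "assistant", "content": "第一回合的回答"},
           {"role": "user", "content": "第二回合的问题"},
           {"role": "assistant", "content": "第二回合的回答"}]

# prompts == default_prompt + 最近的若干回合 + [{"role": "user", "content": "新问题"}]
prompts, history_tokens = session.cut_out("新问题", 512, default_prompt, history)
```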
|
||||
|
||||
# 持久化session
|
||||
def persistence(self):
|
||||
@@ -278,11 +425,11 @@ class Session:
|
||||
subject_number = int(name_spt[1])
|
||||
|
||||
db_inst.persistence_session(subject_type, subject_number, self.create_timestamp, self.last_interact_timestamp,
|
||||
json.dumps(self.prompt))
|
||||
json.dumps(self.prompt), json.dumps(self.default_prompt), json.dumps(self.token_counts))
|
||||
|
||||
# 重置session
|
||||
def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None):
|
||||
if self.prompt[-1]['role'] != "system":
|
||||
def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None, persist: bool = False):
|
||||
if self.prompt:
|
||||
self.persistence()
|
||||
if explicit:
|
||||
# 触发插件事件
|
||||
@@ -298,7 +445,11 @@ class Session:
|
||||
|
||||
if expired:
|
||||
pkg.utils.context.get_database_manager().set_session_expired(self.name, self.create_timestamp)
|
||||
self.prompt = self.get_default_prompt(use_prompt)
|
||||
|
||||
if not persist: # 不要求保持default prompt
|
||||
self.default_prompt = self.get_default_prompt(use_prompt)
|
||||
self.prompt = []
|
||||
self.token_counts = []
|
||||
self.create_timestamp = int(time.time())
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
self.just_switched_to_exist_session = False
|
||||
@@ -324,9 +475,11 @@ class Session:
|
||||
self.last_interact_timestamp = last_one['last_interact_timestamp']
|
||||
try:
|
||||
self.prompt = json.loads(last_one['prompt'])
|
||||
self.token_counts = json.loads(last_one['token_counts'])
|
||||
except json.decoder.JSONDecodeError:
|
||||
self.prompt = reset_session_prompt(self.name, last_one['prompt'])
|
||||
self.persistence()
|
||||
self.default_prompt = json.loads(last_one['default_prompt']) if last_one['default_prompt'] else []
|
||||
|
||||
self.just_switched_to_exist_session = True
|
||||
return self
|
||||
@@ -343,9 +496,11 @@ class Session:
|
||||
self.last_interact_timestamp = next_one['last_interact_timestamp']
|
||||
try:
|
||||
self.prompt = json.loads(next_one['prompt'])
|
||||
self.token_counts = json.loads(next_one['token_counts'])
|
||||
except json.decoder.JSONDecodeError:
|
||||
self.prompt = reset_session_prompt(self.name, next_one['prompt'])
|
||||
self.persistence()
|
||||
self.default_prompt = json.loads(next_one['default_prompt']) if next_one['default_prompt'] else []
|
||||
|
||||
self.just_switched_to_exist_session = True
|
||||
return self
|
||||
@@ -353,5 +508,11 @@ class Session:
|
||||
def list_history(self, capacity: int = 10, page: int = 0):
|
||||
return pkg.utils.context.get_database_manager().list_history(self.name, capacity, page)
|
||||
|
||||
def delete_history(self, index: int) -> bool:
|
||||
return pkg.utils.context.get_database_manager().delete_history(self.name, index)
|
||||
|
||||
def delete_all_history(self) -> bool:
|
||||
return pkg.utils.context.get_database_manager().delete_all_history(self.name)
|
||||
|
||||
def draw_image(self, prompt: str):
|
||||
return pkg.utils.context.get_openai_manager().request_image(prompt)
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
"""插件支持包
|
||||
|
||||
包含插件基类、插件宿主以及部分API接口
|
||||
"""
|
||||
@@ -5,17 +5,21 @@ import importlib
|
||||
import os
|
||||
import pkgutil
|
||||
import sys
|
||||
import shutil
|
||||
import traceback
|
||||
|
||||
import pkg.utils.updater as updater
|
||||
import pkg.utils.context as context
|
||||
import pkg.plugin.switch as switch
|
||||
import pkg.plugin.settings as settings
|
||||
import pkg.qqbot.adapter as msadapter
|
||||
|
||||
from mirai import Mirai
|
||||
|
||||
from CallingGPT.session.session import Session
|
||||
|
||||
__plugins__ = {}
|
||||
"""
|
||||
插件列表
|
||||
"""插件列表
|
||||
|
||||
示例:
|
||||
{
|
||||
@@ -34,14 +38,24 @@ __plugins__ = {}
|
||||
},
|
||||
"instance": None
|
||||
}
|
||||
}"""
|
||||
}
|
||||
"""
|
||||
|
||||
__plugins_order__ = []
|
||||
"""插件顺序"""
|
||||
|
||||
__enable_content_functions__ = True
|
||||
"""是否启用内容函数"""
|
||||
|
||||
__callable_functions__ = []
|
||||
"""供GPT调用的函数结构"""
|
||||
|
||||
__function_inst_map__: dict[str, callable] = {}
|
||||
"""函数名:实例 映射"""
|
||||
|
||||
|
||||
def generate_plugin_order():
|
||||
""" 根据__plugin__生成插件初始顺序,无视是否启用 """
|
||||
"""根据__plugin__生成插件初始顺序,无视是否启用"""
|
||||
global __plugins_order__
|
||||
__plugins_order__ = []
|
||||
for plugin_name in __plugins__:
|
||||
@@ -49,13 +63,13 @@ def generate_plugin_order():
|
||||
|
||||
|
||||
def iter_plugins():
|
||||
""" 按照顺序迭代插件 """
|
||||
"""按照顺序迭代插件"""
|
||||
for plugin_name in __plugins_order__:
|
||||
yield __plugins__[plugin_name]
|
||||
|
||||
|
||||
def iter_plugins_name():
|
||||
""" 迭代插件名 """
|
||||
"""迭代插件名"""
|
||||
for plugin_name in __plugins_order__:
|
||||
yield plugin_name
|
||||
|
||||
@@ -77,15 +91,15 @@ def walk_plugin_path(module, prefix='', path_prefix=''):
|
||||
__current_module_path__ = "plugins/"+path_prefix + item.name + '.py'
|
||||
|
||||
importlib.import_module(module.__name__ + '.' + item.name)
|
||||
logging.info('加载模块: plugins/{} 成功'.format(path_prefix + item.name + '.py'))
|
||||
logging.debug('加载模块: plugins/{} 成功'.format(path_prefix + item.name + '.py'))
|
||||
except:
|
||||
logging.error('加载模块: plugins/{} 失败: {}'.format(path_prefix + item.name + '.py', sys.exc_info()))
|
||||
traceback.print_exc()
|
||||
|
||||
|
||||
def load_plugins():
|
||||
""" 加载插件 """
|
||||
logging.info("加载插件")
|
||||
"""加载插件"""
|
||||
logging.debug("加载插件")
|
||||
PluginHost()
|
||||
walk_plugin_path(__import__('plugins'))
|
||||
|
||||
@@ -99,24 +113,35 @@ def load_plugins():
|
||||
# 加载插件顺序
|
||||
settings.load_settings()
|
||||
|
||||
# 输出已注册的内容函数列表
|
||||
logging.debug("registered content functions: {}".format(__callable_functions__))
|
||||
logging.debug("function instance map: {}".format(__function_inst_map__))
|
||||
|
||||
|
||||
def initialize_plugins():
|
||||
""" 初始化插件 """
|
||||
"""初始化插件"""
|
||||
logging.info("初始化插件")
|
||||
import pkg.plugin.models as models
|
||||
|
||||
successfully_initialized_plugins = []
|
||||
|
||||
for plugin in iter_plugins():
|
||||
if not plugin['enabled']:
|
||||
continue
|
||||
# if not plugin['enabled']:
|
||||
# continue
|
||||
try:
|
||||
models.__current_registering_plugin__ = plugin['name']
|
||||
plugin['instance'] = plugin["class"](plugin_host=context.get_plugin_host())
|
||||
logging.info("插件 {} 已初始化".format(plugin['name']))
|
||||
# logging.info("插件 {} 已初始化".format(plugin['name']))
|
||||
successfully_initialized_plugins.append(plugin['name'])
|
||||
except:
|
||||
logging.error("插件{}初始化时发生错误: {}".format(plugin['name'], sys.exc_info()))
|
||||
|
||||
logging.info("以下插件已初始化: {}".format(", ".join(successfully_initialized_plugins)))
|
||||
|
||||
|
||||
def unload_plugins():
|
||||
""" 卸载插件 """
|
||||
"""卸载插件"""
|
||||
# 不再显式卸载插件,因为当程序结束时,插件的析构函数会被系统执行
|
||||
# for plugin in __plugins__.values():
|
||||
# if plugin['enabled'] and plugin['instance'] is not None:
|
||||
# if not hasattr(plugin['instance'], '__del__'):
|
||||
@@ -131,7 +156,7 @@ def unload_plugins():
|
||||
|
||||
|
||||
def install_plugin(repo_url: str):
|
||||
""" 安装插件,从git储存库获取并解决依赖 """
|
||||
"""安装插件,从git储存库获取并解决依赖"""
|
||||
try:
|
||||
import pkg.utils.pkgmgr
|
||||
pkg.utils.pkgmgr.ensure_dulwich()
|
||||
@@ -154,22 +179,75 @@ def install_plugin(repo_url: str):
|
||||
import pkg.utils.pkgmgr
|
||||
pkg.utils.pkgmgr.install_requirements("plugins/"+repo_url.split(".git")[0].split("/")[-1]+"/requirements.txt")
|
||||
|
||||
import main
|
||||
main.reset_logging()
|
||||
import pkg.utils.log as log
|
||||
log.reset_logging()
|
||||
|
||||
|
||||
def uninstall_plugin(plugin_name: str) -> str:
|
||||
"""卸载插件"""
|
||||
if plugin_name not in __plugins__:
|
||||
raise Exception("插件不存在")
|
||||
|
||||
# 获取文件夹路径
|
||||
plugin_path = __plugins__[plugin_name]['path'].replace("\\", "/")
|
||||
|
||||
# 剪切路径为plugins/插件名
|
||||
plugin_path = plugin_path.split("plugins/")[1].split("/")[0]
|
||||
|
||||
# 删除文件夹
|
||||
shutil.rmtree("plugins/"+plugin_path)
|
||||
return "plugins/"+plugin_path
|
||||
|
||||
|
||||
def update_plugin(plugin_name: str):
|
||||
"""更新插件"""
|
||||
# 检查是否有远程地址记录
|
||||
target_plugin_dir = "plugins/" + __plugins__[plugin_name]['path'].replace("\\", "/").split("plugins/")[1].split("/")[0]
|
||||
|
||||
remote_url = updater.get_remote_url(target_plugin_dir)
|
||||
if remote_url == "https://github.com/RockChinQ/QChatGPT" or remote_url == "https://gitee.com/RockChin/QChatGPT" \
|
||||
or remote_url == "" or remote_url is None or remote_url == "http://github.com/RockChinQ/QChatGPT" or remote_url == "http://gitee.com/RockChin/QChatGPT":
|
||||
raise Exception("插件没有远程地址记录,无法更新")
|
||||
|
||||
# 把远程clone到temp/plugins/update/插件名
|
||||
logging.info("克隆插件储存库: {}".format(remote_url))
|
||||
|
||||
from dulwich import porcelain
|
||||
clone_target_dir = "temp/plugins/update/"+target_plugin_dir.split("/")[-1]+"/"
|
||||
|
||||
if os.path.exists(clone_target_dir):
|
||||
shutil.rmtree(clone_target_dir)
|
||||
|
||||
if not os.path.exists(clone_target_dir):
|
||||
os.makedirs(clone_target_dir)
|
||||
repo = porcelain.clone(remote_url, clone_target_dir, checkout=True)
|
||||
|
||||
# 检查此目录是否包含requirements.txt
|
||||
if os.path.exists(clone_target_dir+"requirements.txt"):
|
||||
logging.info("检测到requirements.txt,正在安装依赖")
|
||||
import pkg.utils.pkgmgr
|
||||
pkg.utils.pkgmgr.install_requirements(clone_target_dir+"requirements.txt")
|
||||
|
||||
import pkg.utils.log as log
|
||||
log.reset_logging()
|
||||
|
||||
# 将temp/plugins/update/插件名 覆盖到 plugins/插件名
|
||||
shutil.rmtree(target_plugin_dir)
|
||||
|
||||
shutil.copytree(clone_target_dir, target_plugin_dir)
|
||||
|
||||
class EventContext:
|
||||
""" 事件上下文 """
|
||||
"""事件上下文"""
|
||||
eid = 0
|
||||
"""事件编号"""
|
||||
|
||||
name = ""
|
||||
|
||||
__prevent_default__ = False
|
||||
""" 是否阻止默认行为 """
|
||||
"""是否阻止默认行为"""
|
||||
|
||||
__prevent_postorder__ = False
|
||||
""" 是否阻止后续插件的执行 """
|
||||
"""是否阻止后续插件的执行"""
|
||||
|
||||
__return_value__ = {}
|
||||
""" 返回值
|
||||
@@ -194,7 +272,7 @@ class EventContext:
|
||||
self.__return_value__[key] = []
|
||||
self.__return_value__[key].append(ret)
|
||||
|
||||
def get_return(self, key: str):
|
||||
def get_return(self, key: str) -> list:
|
||||
"""获取key的所有返回值"""
|
||||
if key in self.__return_value__:
|
||||
return self.__return_value__[key]
|
||||
@@ -232,7 +310,7 @@ class EventContext:
|
||||
|
||||
|
||||
def emit(event_name: str, **kwargs) -> EventContext:
|
||||
""" 触发事件 """
|
||||
"""触发事件"""
|
||||
import pkg.utils.context as context
|
||||
if context.get_plugin_host() is None:
|
||||
return None
|
||||
@@ -243,7 +321,9 @@ class PluginHost:
|
||||
"""插件宿主"""
|
||||
|
||||
def __init__(self):
|
||||
"""初始化插件宿主"""
|
||||
context.set_plugin_host(self)
|
||||
self.calling_gpt_session = Session([])
|
||||
|
||||
def get_runtime_context(self) -> context:
|
||||
"""获取运行时上下文(pkg.utils.context模块的对象)
|
||||
@@ -258,20 +338,24 @@ class PluginHost:
|
||||
"""获取机器人对象"""
|
||||
return context.get_qqbot_manager().bot
|
||||
|
||||
def get_bot_adapter(self) -> msadapter.MessageSourceAdapter:
|
||||
"""获取消息源适配器"""
|
||||
return context.get_qqbot_manager().adapter
|
||||
|
||||
def send_person_message(self, person, message):
|
||||
"""发送私聊消息"""
|
||||
asyncio.run(self.get_bot().send_friend_message(person, message))
|
||||
self.get_bot_adapter().send_message("person", person, message)
|
||||
|
||||
def send_group_message(self, group, message):
|
||||
"""发送群消息"""
|
||||
asyncio.run(self.get_bot().send_group_message(group, message))
|
||||
self.get_bot_adapter().send_message("group", group, message)
|
||||
|
||||
def notify_admin(self, message):
|
||||
"""通知管理员"""
|
||||
context.get_qqbot_manager().notify_admin(message)
|
||||
|
||||
def emit(self, event_name: str, **kwargs) -> EventContext:
|
||||
""" 触发事件 """
|
||||
"""触发事件"""
|
||||
import json
|
||||
|
||||
event_context = EventContext(event_name)
|
||||
@@ -309,7 +393,7 @@ class PluginHost:
|
||||
logging.debug("插件 {} 已要求阻止事件 {} 的默认行为".format(plugin['name'], event_name))
|
||||
|
||||
except Exception as e:
|
||||
logging.error("插件{}触发事件{}时发生错误".format(plugin['name'], event_name))
|
||||
logging.error("插件{}响应事件{}时发生错误".format(plugin['name'], event_name))
|
||||
logging.error(traceback.format_exc())
|
||||
|
||||
# print("done:{}".format(plugin['name']))
|
||||
@@ -321,3 +405,6 @@ class PluginHost:
|
||||
event_context.__return_value__))
|
||||
|
||||
return event_context
|
||||
|
||||
if __name__ == "__main__":
|
||||
pass
|
||||
|
||||
@@ -88,6 +88,8 @@ NormalMessageResponded = "normal_message_responded"
|
||||
session: pkg.openai.session.Session 会话对象
|
||||
prefix: str 回复文字消息的前缀
|
||||
response_text: str 响应文本
|
||||
finish_reason: str 响应结束原因
|
||||
funcs_called: list[str] 此次响应中调用的函数列表
|
||||
|
||||
returns (optional):
|
||||
prefix: str 修改后的回复文字消息的前缀
|
||||
@@ -132,19 +134,66 @@ KeySwitched = "key_switched"
|
||||
key_list: list[str] api-key列表
|
||||
"""
|
||||
|
||||
PromptPreProcessing = "prompt_pre_processing"
|
||||
"""每回合调用接口前对prompt进行预处理时触发,此事件不支持阻止默认行为
|
||||
kwargs:
|
||||
session_name: str 会话名称(<launcher_type>_<launcher_id>)
|
||||
default_prompt: list 此session使用的情景预设内容
|
||||
prompt: list 此session现有的prompt内容
|
||||
text_message: str 用户发送的消息文本
|
||||
|
||||
returns (optional):
|
||||
default_prompt: list 修改后的情景预设内容
|
||||
prompt: list 修改后的prompt内容
|
||||
text_message: str 修改后的消息文本
|
||||
"""
|
||||
|
||||
def on(event: str):
|
||||
|
||||
def on(*args, **kwargs):
|
||||
"""注册事件监听器
|
||||
:param event: str 事件名称
|
||||
"""
|
||||
return Plugin.on(event)
|
||||
return Plugin.on(*args, **kwargs)
|
||||
|
||||
def func(*args, **kwargs):
|
||||
"""注册内容函数,声明此函数为一个内容函数,在对话中将发送此函数给GPT以供其调用
|
||||
此函数可以具有任意的参数,但必须按照[此文档](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format)
|
||||
所述的格式编写函数的docstring。
|
||||
此功能仅支持在使用gpt-3.5或gpt-4系列模型时使用。
|
||||
"""
|
||||
return Plugin.func(*args, **kwargs)
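A hedged example of a content function registered this way. The function name and body are illustrative; in a real plugin it lives inside the plugin's module so it is registered under that plugin's name, and the docstring layout follows the CallingGPT format linked above.

```python
# Illustrative content function: GPT may call it during a conversation.
import requests

from pkg.plugin.models import func


@func(name="access_the_web")
def access_the_web(url: str):
    """Call this function to retrieve a web page when the user asks about a URL.

    Args:
        url(str): the full URL to fetch

    Returns:
        str: the first 2048 characters of the page body
    """
    resp = requests.get(url, timeout=10)
    return resp.text[:2048]
```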
|
||||
|
||||
|
||||
__current_registering_plugin__ = ""
|
||||
|
||||
|
||||
def require_ver(ge: str, le: str="v999.9.9") -> bool:
|
||||
"""插件版本要求装饰器
|
||||
|
||||
Args:
|
||||
ge (str): 最低版本要求
|
||||
le (str, optional): 最高版本要求
|
||||
|
||||
Returns:
|
||||
bool: 是否满足要求, False时为无法获取版本号,True时为满足要求,报错为不满足要求
|
||||
"""
|
||||
qchatgpt_version = ""
|
||||
|
||||
from pkg.utils.updater import get_current_tag, compare_version_str
|
||||
|
||||
try:
|
||||
qchatgpt_version = get_current_tag() # 从updater模块获取版本号
|
||||
except:
|
||||
return False
|
||||
|
||||
if compare_version_str(qchatgpt_version, ge) < 0 or \
|
||||
(compare_version_str(qchatgpt_version, le) > 0):
|
||||
raise Exception("QChatGPT 版本不满足要求,某些功能(可能是由插件提供的)无法正常使用。(要求版本:{}-{},但当前版本:{})".format(ge, le, qchatgpt_version))
|
||||
|
||||
return True
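Typical (illustrative) use inside a plugin; the guarded call is a hypothetical placeholder.

```python
# Illustrative only: guard a feature on the host version.
from pkg.plugin.models import require_ver

# require_ver raises when the running QChatGPT is outside [v2.6.0, v999.9.9]
# and returns False only if the version cannot be determined at all.
if require_ver("v2.6.0"):
    enable_newer_features()  # hypothetical function guarded by the version check
```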
|
||||
|
||||
|
||||
class Plugin:
|
||||
"""插件基类"""
|
||||
|
||||
host: host.PluginHost
|
||||
"""插件宿主,提供插件的一些基础功能"""
|
||||
@@ -175,6 +224,34 @@ class Plugin:
|
||||
|
||||
return wrapper
|
||||
|
||||
@classmethod
|
||||
def func(cls, name: str=None):
|
||||
"""内容函数装饰器
|
||||
"""
|
||||
global __current_registering_plugin__
|
||||
from CallingGPT.entities.namespace import get_func_schema
|
||||
|
||||
def wrapper(func):
|
||||
|
||||
function_schema = get_func_schema(func)
|
||||
function_schema['name'] = __current_registering_plugin__ + '-' + (func.__name__ if name is None else name)
|
||||
|
||||
function_schema['enabled'] = True
|
||||
|
||||
host.__function_inst_map__[function_schema['name']] = function_schema['function']
|
||||
|
||||
del function_schema['function']
|
||||
|
||||
# logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema))
|
||||
|
||||
host.__callable_functions__.append(
|
||||
function_schema
|
||||
)
|
||||
|
||||
return func
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def register(name: str, description: str, version: str, author: str):
|
||||
"""注册插件, 此函数作为装饰器使用
|
||||
|
||||
@@ -8,7 +8,10 @@ import logging
|
||||
def wrapper_dict_from_runtime_context() -> dict:
|
||||
"""从变量中包装settings.json的数据字典"""
|
||||
settings = {
|
||||
"order": []
|
||||
"order": [],
|
||||
"functions": {
|
||||
"enabled": host.__enable_content_functions__
|
||||
}
|
||||
}
|
||||
|
||||
for plugin_name in host.__plugins_order__:
|
||||
@@ -22,6 +25,11 @@ def apply_settings(settings: dict):
|
||||
if "order" in settings:
|
||||
host.__plugins_order__ = settings["order"]
|
||||
|
||||
if "functions" in settings:
|
||||
if "enabled" in settings["functions"]:
|
||||
host.__enable_content_functions__ = settings["functions"]["enabled"]
|
||||
# logging.debug("set content function enabled: {}".format(host.__enable_content_functions__))
|
||||
|
||||
|
||||
def dump_settings():
|
||||
"""保存settings.json数据"""
|
||||
@@ -78,6 +86,17 @@ def load_settings():
|
||||
settings["order"].append(plugin_name)
|
||||
settings_modified = True
|
||||
|
||||
if "functions" not in settings:
|
||||
settings["functions"] = {
|
||||
"enabled": host.__enable_content_functions__
|
||||
}
|
||||
settings_modified = True
|
||||
elif "enabled" not in settings["functions"]:
|
||||
settings["functions"]["enabled"] = host.__enable_content_functions__
|
||||
settings_modified = True
|
||||
|
||||
logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enabled"] else "禁用"))
|
||||
|
||||
apply_settings(settings)
|
||||
|
||||
if settings_modified:
|
||||
|
||||
@@ -7,7 +7,7 @@ import pkg.plugin.host as host
|
||||
|
||||
|
||||
def wrapper_dict_from_plugin_list() -> dict:
|
||||
""" 将插件列表转换为开关json """
|
||||
"""将插件列表转换为开关json"""
|
||||
switch = {}
|
||||
|
||||
for plugin_name in host.__plugins__:
|
||||
@@ -28,9 +28,14 @@ def apply_switch(switch: dict):
|
||||
for plugin_name in switch:
|
||||
host.__plugins__[plugin_name]["enabled"] = switch[plugin_name]["enabled"]
|
||||
|
||||
# 查找此插件的所有内容函数
|
||||
for func in host.__callable_functions__:
|
||||
if func['name'].startswith(plugin_name + '-'):
|
||||
func['enabled'] = switch[plugin_name]["enabled"]
|
||||
|
||||
|
||||
def dump_switch():
|
||||
""" 保存开关数据 """
|
||||
"""保存开关数据"""
|
||||
logging.debug("保存开关数据")
|
||||
# 将开关数据写入plugins/switch.json
|
||||
|
||||
@@ -41,7 +46,7 @@ def dump_switch():
|
||||
|
||||
|
||||
def load_switch():
|
||||
""" 加载开关数据 """
|
||||
"""加载开关数据"""
|
||||
logging.debug("加载开关数据")
|
||||
# 读取plugins/switch.json
|
||||
|
||||
|
||||
136  pkg/qqbot/adapter.py  Normal file
@@ -0,0 +1,136 @@
|
||||
# MessageSource的适配器
|
||||
import typing
|
||||
|
||||
import mirai
|
||||
|
||||
|
||||
class MessageSourceAdapter:
|
||||
def __init__(self, config: dict):
|
||||
pass
|
||||
|
||||
def send_message(
|
||||
self,
|
||||
target_type: str,
|
||||
target_id: str,
|
||||
message: mirai.MessageChain
|
||||
):
|
||||
"""发送消息
|
||||
|
||||
Args:
|
||||
target_type (str): 目标类型,`person`或`group`
|
||||
target_id (str): 目标ID
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def reply_message(
|
||||
self,
|
||||
message_source: mirai.MessageEvent,
|
||||
message: mirai.MessageChain,
|
||||
quote_origin: bool = False
|
||||
):
|
||||
"""回复消息
|
||||
|
||||
Args:
|
||||
message_source (mirai.MessageEvent): YiriMirai消息源事件
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
quote_origin (bool, optional): 是否引用原消息. Defaults to False.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def is_muted(self, group_id: int) -> bool:
|
||||
"""获取账号是否在指定群被禁言"""
|
||||
raise NotImplementedError
|
||||
|
||||
def register_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注册事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def unregister_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注销事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def run_sync(self):
|
||||
"""以阻塞的方式运行适配器"""
|
||||
raise NotImplementedError
|
||||
|
||||
def kill(self) -> bool:
|
||||
"""关闭适配器
|
||||
|
||||
Returns:
|
||||
bool: 是否成功关闭,热重载时若此函数返回False则不会重载MessageSource底层
|
||||
"""
|
||||
raise NotImplementedError
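A minimal, illustrative skeleton of a concrete adapter built on this interface. It is not part of this diff; a real adapter would translate between its platform and YiriMirai types via the converter classes below.

```python
# Illustrative skeleton only: echoes messages to stdout instead of a real IM platform.
import typing

import mirai

from pkg.qqbot.adapter import MessageSourceAdapter


class DummyAdapter(MessageSourceAdapter):
    def __init__(self, config: dict):
        self.listeners: dict = {}
        self.running = False

    def send_message(self, target_type: str, target_id: str, message: mirai.MessageChain):
        print(f"[{target_type}:{target_id}] {message}")

    def reply_message(self, message_source: mirai.MessageEvent,
                      message: mirai.MessageChain, quote_origin: bool = False):
        # a real adapter would address the reply using message_source
        print(f"reply: {message}")

    def is_muted(self, group_id: int) -> bool:
        return False

    def register_listener(self, event_type: typing.Type[mirai.Event], callback):
        self.listeners.setdefault(event_type, []).append(callback)

    def unregister_listener(self, event_type: typing.Type[mirai.Event], callback):
        self.listeners.get(event_type, []).remove(callback)

    def run_sync(self):
        self.running = True  # a real adapter would block here, polling its platform

    def kill(self) -> bool:
        self.running = False
        return True
```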
|
||||
|
||||
|
||||
class MessageConverter:
|
||||
"""消息链转换器基类"""
|
||||
@staticmethod
|
||||
def yiri2target(message_chain: mirai.MessageChain):
|
||||
"""将YiriMirai消息链转换为目标消息链
|
||||
|
||||
Args:
|
||||
message_chain (mirai.MessageChain): YiriMirai消息链
|
||||
|
||||
Returns:
|
||||
typing.Any: 目标消息链
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@staticmethod
|
||||
def target2yiri(message_chain: typing.Any) -> mirai.MessageChain:
|
||||
"""将目标消息链转换为YiriMirai消息链
|
||||
|
||||
Args:
|
||||
message_chain (typing.Any): 目标消息链
|
||||
|
||||
Returns:
|
||||
mirai.MessageChain: YiriMirai消息链
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class EventConverter:
|
||||
"""事件转换器基类"""
|
||||
|
||||
@staticmethod
|
||||
def yiri2target(event: typing.Type[mirai.Event]):
|
||||
"""将YiriMirai事件转换为目标事件
|
||||
|
||||
Args:
|
||||
event (typing.Type[mirai.Event]): YiriMirai事件
|
||||
|
||||
Returns:
|
||||
typing.Any: 目标事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@staticmethod
|
||||
def target2yiri(event: typing.Any) -> mirai.Event:
|
||||
"""将目标事件的调用参数转换为YiriMirai的事件参数对象
|
||||
|
||||
Args:
|
||||
event (typing.Any): 目标事件
|
||||
|
||||
Returns:
|
||||
typing.Type[mirai.Event]: YiriMirai事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
@@ -1,30 +1,34 @@
|
||||
import pkg.utils.context
|
||||
|
||||
|
||||
def is_banned(launcher_type: str, launcher_id: int) -> bool:
|
||||
def is_banned(launcher_type: str, launcher_id: int, sender_id: int) -> bool:
|
||||
if not pkg.utils.context.get_qqbot_manager().enable_banlist:
|
||||
return False
|
||||
|
||||
result = False
|
||||
|
||||
if launcher_type == 'group':
|
||||
for group_rule in pkg.utils.context.get_qqbot_manager().ban_group:
|
||||
if type(group_rule) == int:
|
||||
if group_rule == launcher_id: # 此群群号被禁用
|
||||
result = True
|
||||
elif type(group_rule) == str:
|
||||
if group_rule.startswith('!'):
|
||||
# 截取!后面的字符串作为表达式,判断是否匹配
|
||||
reg_str = group_rule[1:]
|
||||
import re
|
||||
if re.match(reg_str, str(launcher_id)): # 被豁免,最高级别
|
||||
result = False
|
||||
break
|
||||
else:
|
||||
# 判断是否匹配regexp
|
||||
import re
|
||||
if re.match(group_rule, str(launcher_id)): # 此群群号被禁用
|
||||
# 检查是否显式声明发起人QQ要被person忽略
|
||||
if sender_id in pkg.utils.context.get_qqbot_manager().ban_person:
|
||||
result = True
|
||||
else:
|
||||
for group_rule in pkg.utils.context.get_qqbot_manager().ban_group:
|
||||
if type(group_rule) == int:
|
||||
if group_rule == launcher_id: # 此群群号被禁用
|
||||
result = True
|
||||
elif type(group_rule) == str:
|
||||
if group_rule.startswith('!'):
|
||||
# 截取!后面的字符串作为表达式,判断是否匹配
|
||||
reg_str = group_rule[1:]
|
||||
import re
|
||||
if re.match(reg_str, str(launcher_id)): # 被豁免,最高级别
|
||||
result = False
|
||||
break
|
||||
else:
|
||||
# 判断是否匹配regexp
|
||||
import re
|
||||
if re.match(group_rule, str(launcher_id)): # 此群群号被禁用
|
||||
result = True
|
||||
|
||||
else:
|
||||
# ban_person, 与群规则相同
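For reference, an illustrative banlist configuration matching the rules above. The variable names mirror the manager attributes `ban_group` / `ban_person` used in this module; the concrete IDs and patterns are made up, so adapt them to your actual banlist file.

```python
# Illustrative values only.
ban_group = [
    12345678,       # ban this exact group number
    "!^9876.*$",    # '!' prefix: exempt matching groups (highest priority)
    "^123.*$",      # plain regex: ban every group number starting with 123
]

ban_person = [
    11111111,       # messages from this QQ are ignored even inside allowed groups
]
```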
|
||||
|
||||
97  pkg/qqbot/blob.py  Normal file
@@ -0,0 +1,97 @@
|
||||
# 长消息处理相关
|
||||
import os
|
||||
import time
|
||||
import base64
|
||||
|
||||
import config
|
||||
from mirai.models.message import MessageComponent, MessageChain, Image
|
||||
from mirai.models.message import ForwardMessageNode
|
||||
from mirai.models.base import MiraiBaseModel
|
||||
from typing import List
|
||||
import pkg.utils.context as context
|
||||
import pkg.utils.text2img as text2img
|
||||
|
||||
|
||||
class ForwardMessageDiaplay(MiraiBaseModel):
|
||||
title: str = "群聊的聊天记录"
|
||||
brief: str = "[聊天记录]"
|
||||
source: str = "聊天记录"
|
||||
preview: List[str] = []
|
||||
summary: str = "查看x条转发消息"
|
||||
|
||||
|
||||
class Forward(MessageComponent):
|
||||
"""合并转发。"""
|
||||
type: str = "Forward"
|
||||
"""消息组件类型。"""
|
||||
display: ForwardMessageDiaplay
|
||||
"""显示信息"""
|
||||
node_list: List[ForwardMessageNode]
|
||||
"""转发消息节点列表。"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
if len(args) == 1:
|
||||
self.node_list = args[0]
|
||||
super().__init__(**kwargs)
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return '[聊天记录]'
|
||||
|
||||
|
||||
def text_to_image(text: str) -> MessageComponent:
|
||||
"""将文本转换成图片"""
|
||||
# 检查temp文件夹是否存在
|
||||
if not os.path.exists('temp'):
|
||||
os.mkdir('temp')
|
||||
img_path = text2img.text_to_image(text_str=text, save_as='temp/{}.png'.format(int(time.time())))
|
||||
|
||||
compressed_path, size = text2img.compress_image(img_path, outfile="temp/{}_compressed.png".format(int(time.time())))
|
||||
# 读取图片,转换成base64
|
||||
with open(compressed_path, 'rb') as f:
|
||||
img = f.read()
|
||||
|
||||
b64 = base64.b64encode(img)
|
||||
|
||||
# 删除图片
|
||||
os.remove(img_path)
|
||||
|
||||
# 判断compressed_path是否存在
|
||||
if os.path.exists(compressed_path):
|
||||
os.remove(compressed_path)
|
||||
# 返回图片
|
||||
return Image(base64=b64.decode('utf-8'))
|
||||
|
||||
|
||||
def check_text(text: str) -> list:
|
||||
"""检查文本是否为长消息,并转换成该使用的消息链组件"""
|
||||
if len(text) > config.blob_message_threshold:
|
||||
|
||||
# logging.info("长消息: {}".format(text))
|
||||
if config.blob_message_strategy == 'image':
|
||||
# 转换成图片
|
||||
return [text_to_image(text)]
|
||||
elif config.blob_message_strategy == 'forward':
|
||||
|
||||
# 包装转发消息
|
||||
display = ForwardMessageDiaplay(
|
||||
title='群聊的聊天记录',
|
||||
brief='[聊天记录]',
|
||||
source='聊天记录',
|
||||
preview=["bot: "+text],
|
||||
summary="查看1条转发消息"
|
||||
)
|
||||
|
||||
node = ForwardMessageNode(
|
||||
sender_id=config.mirai_http_api_config['qq'],
|
||||
sender_name='bot',
|
||||
message_chain=MessageChain([text])
|
||||
)
|
||||
|
||||
forward = Forward(
|
||||
display=display,
|
||||
node_list=[node]
|
||||
)
|
||||
|
||||
return [forward]
|
||||
else:
|
||||
return [text]
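Illustrative use from the reply path; the adapter call mirrors how `Session.query` sends messages above, and the target IDs are placeholders.

```python
# Illustrative only: wrap a long model reply before sending it out.
import pkg.utils.context as context
from pkg.qqbot.blob import check_text

reply_text = "(一段超过 blob_message_threshold 的长回复)"
components = check_text(reply_text)   # -> [Image(...)], [Forward(...)] or [reply_text]

context.get_qqbot_manager().adapter.send_message("group", 12345678, components)
```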
|
||||
0  pkg/qqbot/cmds/__init__.py  Normal file
337  pkg/qqbot/cmds/aamgr.py  Normal file
@@ -0,0 +1,337 @@
|
||||
import importlib
|
||||
import inspect
|
||||
import logging
|
||||
import copy
|
||||
import pkgutil
|
||||
import traceback
|
||||
import types
|
||||
import json
|
||||
|
||||
|
||||
__command_list__ = {}
|
||||
|
||||
import tips as tips_custom
|
||||
|
||||
"""命令树
|
||||
|
||||
结构:
|
||||
{
|
||||
'cmd1': {
|
||||
'description': 'cmd1 description',
|
||||
'usage': 'cmd1 usage',
|
||||
'aliases': ['cmd1 alias1', 'cmd1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': None,
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd1.CommandCmd1'>,
|
||||
'sub': [
|
||||
'cmd1-1'
|
||||
]
|
||||
},
|
||||
'cmd1.cmd1-1: {
|
||||
'description': 'cmd1-1 description',
|
||||
'usage': 'cmd1-1 usage',
|
||||
'aliases': ['cmd1-1 alias1', 'cmd1-1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': 'cmd1',
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd1.CommandCmd1_1'>,
|
||||
'sub': []
|
||||
},
|
||||
'cmd2': {
|
||||
'description': 'cmd2 description',
|
||||
'usage': 'cmd2 usage',
|
||||
'aliases': ['cmd2 alias1', 'cmd2 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': None,
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd2.CommandCmd2'>,
|
||||
'sub': [
|
||||
'cmd2-1'
|
||||
]
|
||||
},
|
||||
'cmd2.cmd2-1': {
|
||||
'description': 'cmd2-1 description',
|
||||
'usage': 'cmd2-1 usage',
|
||||
'aliases': ['cmd2-1 alias1', 'cmd2-1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': 'cmd2',
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd2.CommandCmd2_1'>,
|
||||
'sub': [
|
||||
'cmd2-1-1'
|
||||
]
|
||||
},
|
||||
'cmd2.cmd2-1.cmd2-1-1': {
|
||||
'description': 'cmd2-1-1 description',
|
||||
'usage': 'cmd2-1-1 usage',
|
||||
'aliases': ['cmd2-1-1 alias1', 'cmd2-1-1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': 'cmd2.cmd2-1',
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd2.CommandCmd2_1_1'>,
|
||||
'sub': []
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
__tree_index__: dict[str, list] = {}
|
||||
"""命令树索引
|
||||
|
||||
结构:
|
||||
{
|
||||
'pkg.qqbot.cmds.cmd1.CommandCmd1': 'cmd1', # 顶级指令
|
||||
'pkg.qqbot.cmds.cmd1.CommandCmd1_1': 'cmd1.cmd1-1', # 类名: 节点路径
|
||||
'pkg.qqbot.cmds.cmd2.CommandCmd2': 'cmd2',
|
||||
'pkg.qqbot.cmds.cmd2.CommandCmd2_1': 'cmd2.cmd2-1',
|
||||
'pkg.qqbot.cmds.cmd2.CommandCmd2_1_1': 'cmd2.cmd2-1.cmd2-1-1',
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
class Context:
|
||||
"""命令执行上下文"""
|
||||
command: str
|
||||
"""顶级指令文本"""
|
||||
|
||||
crt_command: str
|
||||
"""当前子指令文本"""
|
||||
|
||||
params: list
|
||||
"""完整参数列表"""
|
||||
|
||||
crt_params: list
|
||||
"""当前子指令参数列表"""
|
||||
|
||||
session_name: str
|
||||
"""会话名"""
|
||||
|
||||
text_message: str
|
||||
"""指令完整文本"""
|
||||
|
||||
launcher_type: str
|
||||
"""指令发起者类型"""
|
||||
|
||||
launcher_id: int
|
||||
"""指令发起者ID"""
|
||||
|
||||
sender_id: int
|
||||
"""指令发送者ID"""
|
||||
|
||||
is_admin: bool
|
||||
"""[过时]指令发送者是否为管理员"""
|
||||
|
||||
privilege: int
|
||||
"""指令发送者权限等级"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.__dict__.update(kwargs)
|
||||
|
||||
|
||||
class AbstractCommandNode:
|
||||
"""指令抽象类"""
|
||||
|
||||
parent: type
|
||||
"""父指令类"""
|
||||
|
||||
name: str
|
||||
"""指令名"""
|
||||
|
||||
description: str
|
||||
"""指令描述"""
|
||||
|
||||
usage: str
|
||||
"""指令用法"""
|
||||
|
||||
aliases: list[str]
|
||||
"""指令别名"""
|
||||
|
||||
privilege: int
|
||||
"""指令权限等级, 权限大于等于此值的用户才能执行指令"""
|
||||
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
"""指令处理函数
|
||||
|
||||
:param ctx: 指令执行上下文
|
||||
|
||||
:return: (是否执行, 回复列表(若执行))
|
||||
|
||||
若未执行,将自动以下一个参数查找并执行子指令
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def help(cls) -> str:
|
||||
"""获取指令帮助信息"""
|
||||
return '指令: {}\n描述: {}\n用法: \n{}\n别名: {}\n权限: {}'.format(
|
||||
cls.name,
|
||||
cls.description,
|
||||
cls.usage,
|
||||
', '.join(cls.aliases),
|
||||
cls.privilege
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def register(
|
||||
parent: type = None,
|
||||
name: str = None,
|
||||
description: str = None,
|
||||
usage: str = None,
|
||||
aliases: list[str] = None,
|
||||
privilege: int = 0
|
||||
):
|
||||
"""注册指令
|
||||
|
||||
:param cls: 指令类
|
||||
:param name: 指令名
|
||||
:param parent: 父指令类
|
||||
"""
|
||||
global __command_list__, __tree_index__
|
||||
|
||||
def wrapper(cls):
|
||||
cls.name = name
|
||||
cls.parent = parent
|
||||
cls.description = description
|
||||
cls.usage = usage
|
||||
cls.aliases = aliases
|
||||
cls.privilege = privilege
|
||||
|
||||
logging.debug("cls: {}, name: {}, parent: {}".format(cls, name, parent))
|
||||
|
||||
if parent is None:
|
||||
# 顶级指令注册
|
||||
__command_list__[name] = {
|
||||
'description': cls.description,
|
||||
'usage': cls.usage,
|
||||
'aliases': cls.aliases,
|
||||
'privilege': cls.privilege,
|
||||
'parent': None,
|
||||
'cls': cls,
|
||||
'sub': []
|
||||
}
|
||||
# 更新索引
|
||||
__tree_index__[cls.__module__ + '.' + cls.__name__] = name
|
||||
else:
|
||||
# 获取父节点名称
|
||||
path = __tree_index__[parent.__module__ + '.' + parent.__name__]
|
||||
|
||||
parent_node = __command_list__[path]
|
||||
# 链接父子指令
|
||||
__command_list__[path]['sub'].append(name)
|
||||
# 注册子指令
|
||||
__command_list__[path + '.' + name] = {
|
||||
'description': cls.description,
|
||||
'usage': cls.usage,
|
||||
'aliases': cls.aliases,
|
||||
'privilege': cls.privilege,
|
||||
'parent': path,
|
||||
'cls': cls,
|
||||
'sub': []
|
||||
}
|
||||
# 更新索引
|
||||
__tree_index__[cls.__module__ + '.' + cls.__name__] = path + '.' + name
|
||||
|
||||
return cls
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class CommandPrivilegeError(Exception):
|
||||
"""指令权限不足或不存在异常"""
|
||||
pass
|
||||
|
||||
|
||||
# 传入Context对象,广搜命令树,返回执行结果
|
||||
# 若命令被处理,返回reply列表
|
||||
# 若命令未被处理,继续执行下一级指令
|
||||
# 若命令不存在,报异常
|
||||
def execute(context: Context) -> list:
|
||||
"""执行指令
|
||||
|
||||
:param ctx: 指令执行上下文
|
||||
|
||||
:return: 回复列表
|
||||
"""
|
||||
global __command_list__
|
||||
|
||||
# 拷贝ctx
|
||||
ctx: Context = copy.deepcopy(context)
|
||||
|
||||
# 从树取出顶级指令
|
||||
node = __command_list__
|
||||
|
||||
path = ctx.command
|
||||
|
||||
while True:
|
||||
try:
|
||||
node = __command_list__[path]
|
||||
logging.debug('执行指令: {}'.format(path))
|
||||
|
||||
# 检查权限
|
||||
if ctx.privilege < node['privilege']:
|
||||
raise CommandPrivilegeError(tips_custom.command_admin_message+"{}".format(path))
|
||||
|
||||
# 执行
|
||||
execed, reply = node['cls'].process(ctx)
|
||||
if execed:
|
||||
return reply
|
||||
else:
|
||||
# 删除crt_params第一个参数
|
||||
ctx.crt_command = ctx.crt_params.pop(0)
|
||||
# 下一个path
|
||||
path = path + '.' + ctx.crt_command
|
||||
except KeyError:
|
||||
traceback.print_exc()
|
||||
raise CommandPrivilegeError(tips_custom.command_err_message+"{}".format(path))
|
||||
|
||||
|
||||
def register_all():
|
||||
"""启动时调用此函数注册所有指令
|
||||
|
||||
递归处理pkg.qqbot.cmds包下及其子包下所有模块的所有继承于AbstractCommand的类
|
||||
"""
|
||||
# 模块:遍历其中的继承于AbstractCommand的类,进行注册
|
||||
# 包:递归处理包下的模块
|
||||
# 排除__开头的属性
|
||||
global __command_list__, __tree_index__
|
||||
|
||||
import pkg.qqbot.cmds
|
||||
|
||||
def walk(module, prefix, path_prefix):
|
||||
# 排除不处于pkg.qqbot.cmds中的包
|
||||
if not module.__name__.startswith('pkg.qqbot.cmds'):
|
||||
return
|
||||
|
||||
logging.debug('walk: {}, path: {}'.format(module.__name__, module.__path__))
|
||||
for item in pkgutil.iter_modules(module.__path__):
|
||||
if item.name.startswith('__'):
|
||||
continue
|
||||
|
||||
if item.ispkg:
|
||||
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.', path_prefix + item.name + '/')
|
||||
else:
|
||||
m = __import__(module.__name__ + '.' + item.name, fromlist=[''])
|
||||
# for name, cls in inspect.getmembers(m, inspect.isclass):
|
||||
# # 检查是否为指令类
|
||||
# if cls.__module__ == m.__name__ and issubclass(cls, AbstractCommandNode) and cls != AbstractCommandNode:
|
||||
# cls.register(cls, cls.name, cls.parent)
|
||||
|
||||
walk(pkg.qqbot.cmds, '', '')
|
||||
logging.debug(__command_list__)
|
||||
|
||||
|
||||
def apply_privileges():
|
||||
"""读取cmdpriv.json并应用指令权限"""
|
||||
# 读取内容
|
||||
json_str = ""
|
||||
with open('cmdpriv.json', 'r', encoding="utf-8") as f:
|
||||
json_str = f.read()
|
||||
|
||||
data = json.loads(json_str)
|
||||
for path, priv in data.items():
|
||||
if path == 'comment':
|
||||
continue
|
||||
|
||||
if path not in __command_list__:
|
||||
continue
|
||||
|
||||
if __command_list__[path]['privilege'] != priv:
|
||||
logging.debug('应用权限: {} -> {}(default: {})'.format(path, priv, __command_list__[path]['privilege']))
|
||||
|
||||
__command_list__[path]['privilege'] = priv
|
||||
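For reference, the decorator-based registration and the dispatch loop above amount to maintaining a flat dict keyed by dotted command paths and walking it with a privilege check. The following is a standalone sketch of that mechanism with made-up names (demo_tree, EchoCommand); it is not the project's API.

# Standalone sketch of the register/execute mechanism; all names here are invented.
demo_tree: dict = {}


def register(name: str, parent_path: str = None, privilege: int = 0):
    def wrapper(cls):
        path = name if parent_path is None else parent_path + '.' + name
        demo_tree[path] = {'cls': cls, 'privilege': privilege, 'parent': parent_path, 'sub': []}
        if parent_path is not None:
            demo_tree[parent_path]['sub'].append(name)   # link parent -> child, as in aamgr
        return cls
    return wrapper


@register(name='echo', privilege=1)
class EchoCommand:
    @classmethod
    def process(cls, params: list) -> tuple:
        return True, ['echo: ' + ' '.join(params)]


def execute(command: str, params: list, privilege: int) -> list:
    node = demo_tree[command]                 # KeyError here means "unknown command"
    if privilege < node['privilege']:
        raise PermissionError('insufficient privilege for ' + command)
    handled, reply = node['cls'].process(params)
    return reply if handled else []


if __name__ == '__main__':
    print(execute('echo', ['hello'], privilege=2))   # -> ['echo: hello']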
0 pkg/qqbot/cmds/funcs/__init__.py Normal file
36 pkg/qqbot/cmds/funcs/draw.py Normal file
@@ -0,0 +1,36 @@
from ..aamgr import AbstractCommandNode, Context
import logging

from mirai import Image
import config


@AbstractCommandNode.register(
    parent=None,
    name="draw",
    description="使用DALL·E生成图片",
    usage="!draw <图片提示语>",
    aliases=[],
    privilege=1
)
class DrawCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        import pkg.openai.session

        reply = []

        if len(ctx.params) == 0:
            reply = ["[bot]err: 未提供图片描述文字"]
        else:
            session = pkg.openai.session.get_session(ctx.session_name)

            res = session.draw_image(" ".join(ctx.params))

            logging.debug("draw_image result:{}".format(res))
            reply = [Image(url=res['data'][0]['url'])]
            if not (hasattr(config, 'include_image_description')
                    and not config.include_image_description):
                reply.append(" ".join(ctx.params))

        return True, reply
33 pkg/qqbot/cmds/funcs/func.py Normal file
@@ -0,0 +1,33 @@
from ..aamgr import AbstractCommandNode, Context
import logging

import json


@AbstractCommandNode.register(
    parent=None,
    name="func",
    description="管理内容函数",
    usage="!func",
    aliases=[],
    privilege=1
)
class FuncCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        from pkg.plugin.models import host

        reply = []

        reply_str = "当前已加载的内容函数:\n\n"

        logging.debug("host.__callable_functions__: {}".format(json.dumps(host.__callable_functions__, indent=4)))

        index = 1
        for func in host.__callable_functions__:
            reply_str += "{}. {}{}:\n{}\n\n".format(index, ("(已禁用) " if not func['enabled'] else ""), func['name'], func['description'])
            index += 1

        reply = [reply_str]

        return True, reply
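The listing format used by !func is a plain enumeration over host.__callable_functions__, a list of dicts carrying at least name, description and enabled. A small self-contained sketch with invented sample data:

# Sketch only; the sample entries below are made up, not real content functions.
def render_functions(functions: list) -> str:
    out = "loaded content functions:\n\n"
    for index, func in enumerate(functions, start=1):
        flag = "" if func['enabled'] else "(disabled) "
        out += "{}. {}{}:\n{}\n\n".format(index, flag, func['name'], func['description'])
    return out


if __name__ == '__main__':
    demo = [
        {'name': 'Weather-get_weather', 'description': 'query the weather', 'enabled': True},
        {'name': 'Search-web_search', 'description': 'search the web', 'enabled': False},
    ]
    print(render_functions(demo))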
0 pkg/qqbot/cmds/plugin/__init__.py Normal file
202 pkg/qqbot/cmds/plugin/plugin.py Normal file
@@ -0,0 +1,202 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
|
||||
import os
|
||||
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.utils.updater as updater
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="plugin",
|
||||
description="插件管理",
|
||||
usage="!plugin\n!plugin get <插件仓库地址>\n!plugin update\n!plugin del <插件名>\n!plugin on <插件名>\n!plugin off <插件名>",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class PluginCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
plugin_list = plugin_host.__plugins__
|
||||
if len(ctx.params) == 0:
|
||||
# 列出所有插件
|
||||
|
||||
reply_str = "[bot]所有插件({}):\n".format(len(plugin_host.__plugins__))
|
||||
idx = 0
|
||||
for key in plugin_host.iter_plugins_name():
|
||||
plugin = plugin_list[key]
|
||||
reply_str += "\n#{} {} {}\n{}\nv{}\n作者: {}\n"\
|
||||
.format((idx+1), plugin['name'],
|
||||
"[已禁用]" if not plugin['enabled'] else "",
|
||||
plugin['description'],
|
||||
plugin['version'], plugin['author'])
|
||||
|
||||
if updater.is_repo("/".join(plugin['path'].split('/')[:-1])):
|
||||
remote_url = updater.get_remote_url("/".join(plugin['path'].split('/')[:-1]))
|
||||
if remote_url != "https://github.com/RockChinQ/QChatGPT" and remote_url != "https://gitee.com/RockChin/QChatGPT":
|
||||
reply_str += "源码: "+remote_url+"\n"
|
||||
|
||||
idx += 1
|
||||
|
||||
reply = [reply_str]
|
||||
return True, reply
|
||||
elif ctx.params[0].startswith("http"):
|
||||
reply = ["[bot]err: 此命令已弃用,请使用 !plugin get <插件仓库地址> 进行安装"]
|
||||
return True, reply
|
||||
else:
|
||||
return False, []
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="get",
|
||||
description="安装插件",
|
||||
usage="!plugin get <插件仓库地址>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginGetCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import threading
|
||||
import logging
|
||||
import pkg.utils.context
|
||||
|
||||
if len(ctx.crt_params) == 0:
|
||||
reply = ["[bot]err: 请提供插件仓库地址"]
|
||||
return True, reply
|
||||
|
||||
reply = []
|
||||
def closure():
|
||||
try:
|
||||
plugin_host.install_plugin(ctx.crt_params[0])
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件安装成功,请发送 !reload 指令重载插件")
|
||||
except Exception as e:
|
||||
logging.error("插件安装失败:{}".format(e))
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件安装失败:{}".format(e))
|
||||
|
||||
threading.Thread(target=closure, args=()).start()
|
||||
reply = ["[bot]正在安装插件..."]
|
||||
return True, reply
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="update",
|
||||
description="更新所有插件",
|
||||
usage="!plugin update",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginUpdateCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import threading
|
||||
import logging
|
||||
plugin_list = plugin_host.__plugins__
|
||||
|
||||
reply = []
|
||||
|
||||
if len(ctx.crt_params) > 0:
|
||||
def closure():
|
||||
try:
|
||||
import pkg.utils.context
|
||||
|
||||
updated = []
|
||||
|
||||
if ctx.crt_params[0] == 'all':
|
||||
for key in plugin_list:
|
||||
plugin_host.update_plugin(key)
|
||||
updated.append(key)
|
||||
else:
|
||||
if ctx.crt_params[0] in plugin_list:
|
||||
plugin_host.update_plugin(ctx.crt_params[0])
|
||||
updated.append(ctx.crt_params[0])
|
||||
else:
|
||||
raise Exception("未找到插件: {}".format(ctx.crt_params[0]))
|
||||
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("已更新插件: {}, 请发送 !reload 重载插件".format(", ".join(updated)))
|
||||
except Exception as e:
|
||||
logging.error("插件更新失败:{}".format(e))
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件更新失败:{} 请尝试手动更新插件".format(e))
|
||||
|
||||
reply = ["[bot]正在更新插件,请勿重复发起..."]
|
||||
threading.Thread(target=closure).start()
|
||||
else:
|
||||
reply = ["[bot]请指定要更新的插件, 或使用 !plugin update all 更新所有插件"]
|
||||
return True, reply
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="del",
|
||||
description="删除插件",
|
||||
usage="!plugin del <插件名>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginDelCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
plugin_list = plugin_host.__plugins__
|
||||
reply = []
|
||||
|
||||
if len(ctx.crt_params) < 1:
|
||||
reply = ["[bot]err: 未指定插件名"]
|
||||
else:
|
||||
plugin_name = ctx.crt_params[0]
|
||||
if plugin_name in plugin_list:
|
||||
unin_path = plugin_host.uninstall_plugin(plugin_name)
|
||||
reply = ["[bot]已删除插件: {} ({}), 请发送 !reload 重载插件".format(plugin_name, unin_path)]
|
||||
else:
|
||||
reply = ["[bot]err:未找到插件: {}, 请使用!plugin指令查看插件列表".format(plugin_name)]
|
||||
|
||||
return True, reply
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="on",
|
||||
description="启用指定插件",
|
||||
usage="!plugin on <插件名>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
@AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="off",
|
||||
description="禁用指定插件",
|
||||
usage="!plugin off <插件名>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginOnOffCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import pkg.plugin.switch as plugin_switch
|
||||
|
||||
plugin_list = plugin_host.__plugins__
|
||||
reply = []
|
||||
|
||||
print(ctx.params)
|
||||
new_status = ctx.params[0] == 'on'
|
||||
|
||||
if len(ctx.crt_params) < 1:
|
||||
reply = ["[bot]err: 未指定插件名"]
|
||||
else:
|
||||
plugin_name = ctx.crt_params[0]
|
||||
if plugin_name in plugin_list:
|
||||
plugin_list[plugin_name]['enabled'] = new_status
|
||||
|
||||
for func in plugin_host.__callable_functions__:
|
||||
if func['name'].startswith(plugin_name+"-"):
|
||||
func['enabled'] = new_status
|
||||
|
||||
plugin_switch.dump_switch()
|
||||
reply = ["[bot]已{}插件: {}".format("启用" if new_status else "禁用", plugin_name)]
|
||||
else:
|
||||
reply = ["[bot]err:未找到插件: {}, 请使用!plugin指令查看插件列表".format(plugin_name)]
|
||||
|
||||
return True, reply
|
||||
|
||||
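Several of the sub-commands above (!plugin get, !plugin update) run the slow work in a background thread, return a short acknowledgement immediately, and report the outcome to the admin later. A minimal sketch of that pattern, with notify_admin standing in for pkg.utils.context.get_qqbot_manager().notify_admin and the install step reduced to a log line:

import logging
import threading


def notify_admin(message: str):
    # stand-in for pkg.utils.context.get_qqbot_manager().notify_admin(message)
    logging.warning('notify admin: %s', message)


def start_background_install(repo_url: str) -> list:
    def closure():
        try:
            # the real handler calls plugin_host.install_plugin(repo_url) here
            logging.info('installing plugin from %s', repo_url)
            notify_admin('plugin installed, send !reload to load it')
        except Exception as e:
            notify_admin('plugin install failed: {}'.format(e))

    threading.Thread(target=closure, daemon=True).start()
    return ['[bot]installing plugin...']     # the reply goes back to the user right away


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    print(start_background_install('https://example.com/some-plugin.git'))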
0 pkg/qqbot/cmds/session/__init__.py Normal file
73 pkg/qqbot/cmds/session/default.py Normal file
@@ -0,0 +1,73 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="default",
|
||||
description="操作情景预设",
|
||||
usage="!default\n!default set [指定情景预设为默认]",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class DefaultCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
params = ctx.params
|
||||
reply = []
|
||||
import config
|
||||
|
||||
if len(params) == 0:
|
||||
# 输出目前所有情景预设
|
||||
import pkg.openai.dprompt as dprompt
|
||||
reply_str = "[bot]当前所有情景预设({}模式):\n\n".format(config.preset_mode)
|
||||
|
||||
prompts = dprompt.mode_inst().list()
|
||||
|
||||
for key in prompts:
|
||||
pro = prompts[key]
|
||||
reply_str += "名称: {}".format(key)
|
||||
|
||||
for r in pro:
|
||||
reply_str += "\n - [{}]: {}".format(r['role'], r['content'])
|
||||
|
||||
reply_str += "\n\n"
|
||||
|
||||
reply_str += "\n当前默认情景预设:{}\n".format(dprompt.mode_inst().get_using_name())
|
||||
reply_str += "请使用 !default set <情景预设名称> 来设置默认情景预设"
|
||||
reply = [reply_str]
|
||||
elif params[0] != "set":
|
||||
reply = ["[bot]err: 已弃用,请使用!default set <情景预设名称> 来设置默认情景预设"]
|
||||
else:
|
||||
return False, []
|
||||
|
||||
return True, reply
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=DefaultCommand,
|
||||
name="set",
|
||||
description="设置默认情景预设",
|
||||
usage="!default set <情景预设名称>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class DefaultSetCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
|
||||
if len(ctx.crt_params) == 0:
|
||||
reply = ["[bot]err: 请指定情景预设名称"]
|
||||
elif len(ctx.crt_params) > 0:
|
||||
import pkg.openai.dprompt as dprompt
|
||||
try:
|
||||
full_name = dprompt.mode_inst().set_using_name(ctx.crt_params[0])
|
||||
reply = ["[bot]已设置默认情景预设为:{}".format(full_name)]
|
||||
except Exception as e:
|
||||
reply = ["[bot]err: {}".format(e)]
|
||||
else:
|
||||
reply = ["[bot]err: 仅管理员可设置默认情景预设"]
|
||||
|
||||
return True, reply
|
||||
52 pkg/qqbot/cmds/session/del.py Normal file
@@ -0,0 +1,52 @@
from ..aamgr import AbstractCommandNode, Context
import datetime


@AbstractCommandNode.register(
    parent=None,
    name="del",
    description="删除当前会话的历史记录",
    usage="!del <序号>\n!del all",
    aliases=[],
    privilege=1
)
class DelCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        import pkg.openai.session
        session_name = ctx.session_name
        params = ctx.params
        reply = []
        if len(params) == 0:
            reply = ["[bot]参数不足, 格式: !del <序号>\n可以通过!list查看序号"]
        else:
            if params[0] == 'all':
                return False, []
            elif params[0].isdigit():
                if pkg.openai.session.get_session(session_name).delete_history(int(params[0])):
                    reply = ["[bot]已删除历史会话 #{}".format(params[0])]
                else:
                    reply = ["[bot]没有历史会话 #{}".format(params[0])]
            else:
                reply = ["[bot]参数错误, 格式: !del <序号>\n可以通过!list查看序号"]

        return True, reply


@AbstractCommandNode.register(
    parent=DelCommand,
    name="all",
    description="删除当前会话的全部历史记录",
    usage="!del all",
    aliases=[],
    privilege=1
)
class DelAllCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        import pkg.openai.session
        session_name = ctx.session_name
        reply = []
        pkg.openai.session.get_session(session_name).delete_all_history()
        reply = ["[bot]已删除所有历史会话"]
        return True, reply
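!del also illustrates how a parent command hands control to a sub-command: for the argument "all" it returns (False, []), which tells execute() in aamgr.py to pop the next token and look up the del.all node. A standalone sketch of that fall-through with simplified handlers:

def del_process(params: list) -> tuple:
    if not params:
        return True, ['usage: !del <index> or !del all']
    if params[0] == 'all':
        return False, []          # not handled here: the dispatcher tries the 'del.all' sub-node
    if params[0].isdigit():
        return True, ['deleted history #{}'.format(params[0])]
    return True, ['bad argument: {}'.format(params[0])]


def del_all_process(params: list) -> tuple:
    return True, ['deleted all history']


if __name__ == '__main__':
    handled, reply = del_process(['all'])
    if not handled:               # mimic execute() walking down to the sub-command
        handled, reply = del_all_process([])
    print(reply)                  # -> ['deleted all history']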
50 pkg/qqbot/cmds/session/delhst.py Normal file
@@ -0,0 +1,50 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="delhst",
|
||||
description="删除指定会话的所有历史记录",
|
||||
usage="!delhst <会话名称>\n!delhst all",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class DelHistoryCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
import pkg.utils.context
|
||||
params = ctx.params
|
||||
reply = []
|
||||
if len(params) == 0:
|
||||
reply = [
|
||||
"[bot]err:请输入要删除的会话名: group_<群号> 或者 person_<QQ号>, 或使用 !delhst all 删除所有会话的历史记录"]
|
||||
else:
|
||||
if params[0] == 'all':
|
||||
return False, []
|
||||
else:
|
||||
if pkg.utils.context.get_database_manager().delete_all_history(params[0]):
|
||||
reply = ["[bot]已删除会话 {} 的所有历史记录".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]未找到会话 {} 的历史记录".format(params[0])]
|
||||
|
||||
return True, reply
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=DelHistoryCommand,
|
||||
name="all",
|
||||
description="删除所有会话的全部历史记录",
|
||||
usage="!delhst all",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class DelAllHistoryCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import pkg.utils.context
|
||||
reply = []
|
||||
pkg.utils.context.get_database_manager().delete_all_session_history()
|
||||
reply = ["[bot]已删除所有会话的历史记录"]
|
||||
return True, reply
|
||||
|
||||
28 pkg/qqbot/cmds/session/last.py Normal file
@@ -0,0 +1,28 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
import datetime
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="last",
|
||||
description="切换前一次对话",
|
||||
usage="!last",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class LastCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
|
||||
reply = []
|
||||
result = pkg.openai.session.get_session(session_name).last_session()
|
||||
if result is None:
|
||||
reply = ["[bot]没有前一次的对话"]
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到前一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
|
||||
return True, reply
|
||||
67 pkg/qqbot/cmds/session/list.py Normal file
@@ -0,0 +1,67 @@
from ..aamgr import AbstractCommandNode, Context
import datetime
import json


@AbstractCommandNode.register(
    parent=None,
    name='list',
    description='列出当前会话的所有历史记录',
    usage='!list\n!list [页数]',
    aliases=[],
    privilege=1
)
class ListCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        import pkg.openai.session
        session_name = ctx.session_name
        params = ctx.params
        reply = []

        pkg.openai.session.get_session(session_name).persistence()
        page = 0

        if len(params) > 0:
            try:
                page = int(params[0])
            except ValueError:
                pass

        results = pkg.openai.session.get_session(session_name).list_history(page=page)
        if len(results) == 0:
            reply = ["[bot]第{}页没有历史会话".format(page)]
        else:
            reply_str = "[bot]历史会话 第{}页:\n".format(page)
            current = -1
            for i in range(len(results)):
                # time (converted from create_timestamp), index, partial content
                datetime_obj = datetime.datetime.fromtimestamp(results[i]['create_timestamp'])
                msg = ""
                try:
                    msg = json.loads(results[i]['prompt'])
                except json.decoder.JSONDecodeError:
                    msg = pkg.openai.session.reset_session_prompt(session_name, results[i]['prompt'])
                    # persist the repaired prompt
                    pkg.openai.session.get_session(session_name).persistence()
                if len(msg) >= 2:
                    reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
                                                         datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
                                                         msg[0]['content'])
                else:
                    reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
                                                         datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
                                                         "无内容")
                if results[i]['create_timestamp'] == pkg.openai.session.get_session(
                        session_name).create_timestamp:
                    current = i + page * 10

            reply_str += "\n以上信息倒序排列"
            if current != -1:
                reply_str += ",当前会话是 #{}\n".format(current)
            else:
                reply_str += ",当前处于全新会话或不在此页"

            reply = [reply_str]

        return True, reply
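The paging arithmetic in !list is worth calling out: history comes back in pages and every entry is numbered i + page * 10, with the current session marked by a matching create_timestamp. The sketch below reproduces just that rendering with invented records; the page size of 10 is an assumption about list_history().

import datetime

PAGE_SIZE = 10   # assumption: list_history() returns at most 10 records per page


def render_page(results: list, page: int, current_timestamp: int) -> str:
    lines = ["history page {}:".format(page)]
    current = -1
    for i, rec in enumerate(results):
        created = datetime.datetime.fromtimestamp(rec['create_timestamp'])
        lines.append("#{} created {} {}".format(
            i + page * PAGE_SIZE,
            created.strftime("%Y-%m-%d %H:%M:%S"),
            rec.get('preview', '')))
        if rec['create_timestamp'] == current_timestamp:
            current = i + page * PAGE_SIZE
    lines.append("current session is #{}".format(current) if current != -1
                 else "current session is new or not on this page")
    return "\n".join(lines)


if __name__ == '__main__':
    fake = [{'create_timestamp': 1700000000 + n, 'preview': 'msg {}'.format(n)} for n in range(3)]
    print(render_page(fake, page=0, current_timestamp=1700000001))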
28 pkg/qqbot/cmds/session/next.py Normal file
@@ -0,0 +1,28 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
import datetime
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="next",
|
||||
description="切换后一次对话",
|
||||
usage="!next",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class NextCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
reply = []
|
||||
|
||||
result = pkg.openai.session.get_session(session_name).next_session()
|
||||
if result is None:
|
||||
reply = ["[bot]没有后一次的对话"]
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到后一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
|
||||
return True, reply
|
||||
32 pkg/qqbot/cmds/session/prompt.py Normal file
@@ -0,0 +1,32 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
import datetime
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="prompt",
|
||||
description="获取当前会话的前文",
|
||||
usage="!prompt",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class PromptCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
params = ctx.params
|
||||
reply = []
|
||||
|
||||
msgs = ""
|
||||
session: list = pkg.openai.session.get_session(session_name).prompt
|
||||
for msg in session:
|
||||
if len(params) != 0 and params[0] in ['-all', '-a']:
|
||||
msgs = msgs + "{}: {}\n\n".format(msg['role'], msg['content'])
|
||||
elif len(msg['content']) > 30:
|
||||
msgs = msgs + "[{}]: {}...\n\n".format(msg['role'], msg['content'][:30])
|
||||
else:
|
||||
msgs = msgs + "[{}]: {}\n\n".format(msg['role'], msg['content'])
|
||||
reply = ["[bot]当前对话所有内容:\n{}".format(msgs)]
|
||||
|
||||
return True, reply
|
||||
30 pkg/qqbot/cmds/session/resend.py Normal file
@@ -0,0 +1,30 @@
from ..aamgr import AbstractCommandNode, Context
import datetime


@AbstractCommandNode.register(
    parent=None,
    name="resend",
    description="重新获取上一次问题的回复",
    usage="!resend",
    aliases=[],
    privilege=1
)
class ResendCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        import pkg.openai.session
        import config
        session_name = ctx.session_name
        reply = []

        session = pkg.openai.session.get_session(session_name)
        to_send = session.undo()

        mgr = pkg.utils.context.get_qqbot_manager()

        reply = pkg.qqbot.message.process_normal_message(to_send, mgr, config,
                                                         ctx.launcher_type, ctx.launcher_id,
                                                         ctx.sender_id)

        return True, reply
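The resend flow is: undo the last exchange, get back the text that should be sent again, and push it through process_normal_message as if it had just arrived. The sketch below assumes undo() returns the previous user prompt; DemoSession is a made-up stand-in, not pkg.openai.session.Session.

class DemoSession:
    """Made-up stand-in; only sketches the undo-then-resend idea."""

    def __init__(self):
        self.prompt = []           # list of {'role': ..., 'content': ...} dicts

    def append(self, role: str, content: str):
        self.prompt.append({'role': role, 'content': content})

    def undo(self) -> str:
        # drop the last assistant reply, then hand back the user message that
        # produced it so the caller can process it again
        if self.prompt and self.prompt[-1]['role'] == 'assistant':
            self.prompt.pop()
        return self.prompt.pop()['content']


if __name__ == '__main__':
    s = DemoSession()
    s.append('user', 'tell me a joke')
    s.append('assistant', '...')
    to_send = s.undo()
    print(to_send)                 # -> 'tell me a joke', ready to be re-sent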
35 pkg/qqbot/cmds/session/reset.py Normal file
@@ -0,0 +1,35 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
import tips as tips_custom
|
||||
|
||||
import pkg.openai.session
|
||||
import pkg.utils.context
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name='reset',
|
||||
description='重置当前会话',
|
||||
usage='!reset',
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class ResetCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
params = ctx.params
|
||||
session_name = ctx.session_name
|
||||
|
||||
reply = ""
|
||||
|
||||
if len(params) == 0:
|
||||
pkg.openai.session.get_session(session_name).reset(explicit=True)
|
||||
reply = [tips_custom.command_reset_message]
|
||||
else:
|
||||
try:
|
||||
import pkg.openai.dprompt as dprompt
|
||||
pkg.openai.session.get_session(session_name).reset(explicit=True, use_prompt=params[0])
|
||||
reply = [tips_custom.command_reset_name_message+"{}".format(dprompt.mode_inst().get_full_name(params[0]))]
|
||||
except Exception as e:
|
||||
reply = ["[bot]会话重置失败:{}".format(e)]
|
||||
|
||||
return True, reply
|
||||
0 pkg/qqbot/cmds/system/__init__.py Normal file
100 pkg/qqbot/cmds/system/cconfig.py Normal file
@@ -0,0 +1,100 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
import json
|
||||
|
||||
|
||||
def config_operation(cmd, params):
|
||||
reply = []
|
||||
import pkg.utils.context
|
||||
config = pkg.utils.context.get_config()
|
||||
reply_str = ""
|
||||
if len(params) == 0:
|
||||
reply = ["[bot]err:请输入!cmd cfg查看使用方法"]
|
||||
else:
|
||||
cfg_name = params[0]
|
||||
if cfg_name == 'all':
|
||||
reply_str = "[bot]所有配置项:\n\n"
|
||||
for cfg in dir(config):
|
||||
if not cfg.startswith('__') and not cfg == 'logging':
|
||||
# 根据配置项类型进行格式化,如果是字典则转换为json并格式化
|
||||
if isinstance(getattr(config, cfg), str):
|
||||
reply_str += "{}: \"{}\"\n".format(cfg, getattr(config, cfg))
|
||||
elif isinstance(getattr(config, cfg), dict):
|
||||
# 不进行unicode转义,并格式化
|
||||
reply_str += "{}: {}\n".format(cfg,
|
||||
json.dumps(getattr(config, cfg),
|
||||
ensure_ascii=False, indent=4))
|
||||
else:
|
||||
reply_str += "{}: {}\n".format(cfg, getattr(config, cfg))
|
||||
reply = [reply_str]
|
||||
else:
|
||||
cfg_entry_path = cfg_name.split('.')
|
||||
|
||||
try:
|
||||
if len(params) == 1:
|
||||
cfg_entry = getattr(config, cfg_entry_path[0])
|
||||
if len(cfg_entry_path) > 1:
|
||||
for i in range(1, len(cfg_entry_path)):
|
||||
cfg_entry = cfg_entry[cfg_entry_path[i]]
|
||||
|
||||
if isinstance(cfg_entry, str):
|
||||
reply_str = "[bot]配置项{}: \"{}\"\n".format(cfg_name, cfg_entry)
|
||||
elif isinstance(cfg_entry, dict):
|
||||
reply_str = "[bot]配置项{}: {}\n".format(cfg_name,
|
||||
json.dumps(cfg_entry,
|
||||
ensure_ascii=False, indent=4))
|
||||
else:
|
||||
reply_str = "[bot]配置项{}: {}\n".format(cfg_name, cfg_entry)
|
||||
reply = [reply_str]
|
||||
else:
|
||||
cfg_value = " ".join(params[1:])
|
||||
# 类型转换,如果是json则转换为字典
|
||||
# if cfg_value == 'true':
|
||||
# cfg_value = True
|
||||
# elif cfg_value == 'false':
|
||||
# cfg_value = False
|
||||
# elif cfg_value.isdigit():
|
||||
# cfg_value = int(cfg_value)
|
||||
# elif cfg_value.startswith('{') and cfg_value.endswith('}'):
|
||||
# cfg_value = json.loads(cfg_value)
|
||||
# else:
|
||||
# try:
|
||||
# cfg_value = float(cfg_value)
|
||||
# except ValueError:
|
||||
# pass
|
||||
cfg_value = eval(cfg_value)
|
||||
|
||||
cfg_entry = getattr(config, cfg_entry_path[0])
|
||||
if len(cfg_entry_path) > 1:
|
||||
for i in range(1, len(cfg_entry_path) - 1):
|
||||
cfg_entry = cfg_entry[cfg_entry_path[i]]
|
||||
if isinstance(cfg_entry[cfg_entry_path[-1]], type(cfg_value)):
|
||||
cfg_entry[cfg_entry_path[-1]] = cfg_value
|
||||
reply = ["[bot]配置项{}修改成功".format(cfg_name)]
|
||||
else:
|
||||
reply = ["[bot]err:配置项{}类型不匹配".format(cfg_name)]
|
||||
else:
|
||||
setattr(config, cfg_entry_path[0], cfg_value)
|
||||
reply = ["[bot]配置项{}修改成功".format(cfg_name)]
|
||||
except AttributeError:
|
||||
reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
|
||||
except ValueError:
|
||||
reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
|
||||
# else:
|
||||
# reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
|
||||
|
||||
return reply
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="cfg",
|
||||
description="配置项管理",
|
||||
usage="!cfg <配置项> [配置值]\n!cfg all",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class CfgCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
return True, config_operation(ctx.command, ctx.params)
|
||||
|
||||
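!cfg resolves a dotted name such as mirai_http_api_config.adapter by taking the first segment as an attribute of the config module and the remaining segments as nested dict keys, and the handler above parses the new value with eval(). The sketch below shows the same traversal, using ast.literal_eval as a safer stand-in for that parsing step; DemoConfig and its fields are invented for illustration.

import ast


class DemoConfig:
    # invented config object; the real one is the project's config module
    mirai_http_api_config = {'adapter': 'WebSocketAdapter', 'qq': 123456789}
    retry_times = 3


def get_by_path(config, dotted: str):
    parts = dotted.split('.')
    entry = getattr(config, parts[0])        # first segment: attribute lookup
    for key in parts[1:]:                    # remaining segments: dict keys
        entry = entry[key]
    return entry


def set_by_path(config, dotted: str, raw_value: str):
    value = ast.literal_eval(raw_value)      # parse "3", "'x'", "{'a': 1}" without eval()
    parts = dotted.split('.')
    if len(parts) == 1:
        setattr(config, parts[0], value)
        return
    entry = getattr(config, parts[0])
    for key in parts[1:-1]:
        entry = entry[key]
    entry[parts[-1]] = value


if __name__ == '__main__':
    cfg = DemoConfig()
    print(get_by_path(cfg, 'mirai_http_api_config.qq'))   # -> 123456789
    set_by_path(cfg, 'retry_times', '5')
    print(cfg.retry_times)                                 # -> 5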
39 pkg/qqbot/cmds/system/cmd.py Normal file
@@ -0,0 +1,39 @@
|
||||
from ..aamgr import AbstractCommandNode, Context, __command_list__
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="cmd",
|
||||
description="显示指令列表",
|
||||
usage="!cmd\n!cmd <指令名称>",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class CmdCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
command_list = __command_list__
|
||||
|
||||
reply = []
|
||||
|
||||
if len(ctx.params) == 0:
|
||||
reply_str = "[bot]当前所有指令:\n\n"
|
||||
|
||||
# 遍历顶级指令
|
||||
for key in command_list:
|
||||
command = command_list[key]
|
||||
if command['parent'] is None:
|
||||
reply_str += "!{} - {}\n".format(key, command['description'])
|
||||
|
||||
reply_str += "\n请使用 !cmd <指令名称> 来查看指令的详细信息"
|
||||
|
||||
reply = [reply_str]
|
||||
else:
|
||||
command_name = ctx.params[0]
|
||||
if command_name in command_list:
|
||||
reply = [command_list[command_name]['cls'].help()]
|
||||
else:
|
||||
reply = ["[bot]指令 {} 不存在".format(command_name)]
|
||||
|
||||
return True, reply
|
||||
|
||||
24 pkg/qqbot/cmds/system/help.py Normal file
@@ -0,0 +1,24 @@
from ..aamgr import AbstractCommandNode, Context


@AbstractCommandNode.register(
    parent=None,
    name="help",
    description="显示自定义的帮助信息",
    usage="!help",
    aliases=[],
    privilege=1
)
class HelpCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        import tips
        reply = ["[bot] "+tips.help_message + "\n请输入 !cmd 查看指令列表"]

        # warn that config.help_message is deprecated
        import config
        if hasattr(config, "help_message"):
            reply[0] += "\n\n警告:config.py中的help_message已过时,不再生效,请使用tips.py中的help_message替代"

        return True, reply
23 pkg/qqbot/cmds/system/reload.py Normal file
@@ -0,0 +1,23 @@
from ..aamgr import AbstractCommandNode, Context
import threading

@AbstractCommandNode.register(
    parent=None,
    name="reload",
    description="执行热重载",
    usage="!reload",
    aliases=[],
    privilege=2
)
class ReloadCommand(AbstractCommandNode):
    @classmethod
    def process(cls, ctx: Context) -> tuple[bool, list]:
        reply = []

        import pkg.utils.reloader
        def reload_task():
            pkg.utils.reloader.reload_all()

        threading.Thread(target=reload_task, daemon=True).start()

        return True, reply
38 pkg/qqbot/cmds/system/update.py Normal file
@@ -0,0 +1,38 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="update",
|
||||
description="更新程序",
|
||||
usage="!update",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class UpdateCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
import pkg.utils.updater
|
||||
import pkg.utils.reloader
|
||||
import pkg.utils.context
|
||||
|
||||
def update_task():
|
||||
try:
|
||||
if pkg.utils.updater.update_all():
|
||||
pkg.utils.reloader.reload_all(notify=False)
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("更新完成")
|
||||
else:
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("无新版本")
|
||||
except Exception as e0:
|
||||
traceback.print_exc()
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("更新失败:{}".format(e0))
|
||||
return
|
||||
|
||||
threading.Thread(target=update_task, daemon=True).start()
|
||||
|
||||
reply = ["[bot]正在更新,请耐心等待,请勿重复发起更新..."]
|
||||
|
||||
return True, reply
|
||||
35 pkg/qqbot/cmds/system/usage.py Normal file
@@ -0,0 +1,35 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
import logging
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="usage",
|
||||
description="获取使用情况",
|
||||
usage="!usage",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class UsageCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
import config
|
||||
import pkg.utils.credit as credit
|
||||
import pkg.utils.context
|
||||
|
||||
reply = []
|
||||
|
||||
reply_str = "[bot]各api-key使用情况:\n\n"
|
||||
|
||||
api_keys = pkg.utils.context.get_openai_manager().key_mgr.api_key
|
||||
for key_name in api_keys:
|
||||
text_length = pkg.utils.context.get_openai_manager().audit_mgr \
|
||||
.get_text_length_of_key(api_keys[key_name])
|
||||
image_count = pkg.utils.context.get_openai_manager().audit_mgr \
|
||||
.get_image_count_of_key(api_keys[key_name])
|
||||
reply_str += "{}:\n - 文本长度:{}\n - 图片数量:{}\n".format(key_name, int(text_length),
|
||||
int(image_count))
|
||||
|
||||
reply = [reply_str]
|
||||
|
||||
return True, reply
|
||||
27 pkg/qqbot/cmds/system/version.py Normal file
@@ -0,0 +1,27 @@
|
||||
from ..aamgr import AbstractCommandNode, Context
|
||||
|
||||
|
||||
@AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="version",
|
||||
description="查看版本信息",
|
||||
usage="!version",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class VersionCommand(AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
import pkg.utils.updater
|
||||
|
||||
reply_str = "[bot]当前版本:\n{}\n".format(pkg.utils.updater.get_current_version_info())
|
||||
try:
|
||||
if pkg.utils.updater.is_new_version_available():
|
||||
reply_str += "\n有新版本可用,请使用命令 !update 进行更新"
|
||||
except:
|
||||
pass
|
||||
|
||||
reply = [reply_str]
|
||||
|
||||
return True, reply
|
||||
@@ -4,6 +4,7 @@ import json
|
||||
import datetime
|
||||
import os
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
import pkg.openai.session
|
||||
import pkg.openai.manager
|
||||
@@ -12,151 +13,12 @@ import pkg.utils.updater
|
||||
import pkg.utils.context
|
||||
import pkg.qqbot.message
|
||||
import pkg.utils.credit as credit
|
||||
# import pkg.qqbot.cmds.model as cmdmodel
|
||||
import pkg.qqbot.cmds.aamgr as cmdmgr
|
||||
|
||||
from mirai import Image
|
||||
|
||||
|
||||
def config_operation(cmd, params):
|
||||
reply = []
|
||||
config = pkg.utils.context.get_config()
|
||||
reply_str = ""
|
||||
if len(params) == 0:
|
||||
reply = ["[bot]err:请输入配置项"]
|
||||
else:
|
||||
cfg_name = params[0]
|
||||
if cfg_name == 'all':
|
||||
reply_str = "[bot]所有配置项:\n\n"
|
||||
for cfg in dir(config):
|
||||
if not cfg.startswith('__') and not cfg == 'logging':
|
||||
# 根据配置项类型进行格式化,如果是字典则转换为json并格式化
|
||||
if isinstance(getattr(config, cfg), str):
|
||||
reply_str += "{}: \"{}\"\n".format(cfg, getattr(config, cfg))
|
||||
elif isinstance(getattr(config, cfg), dict):
|
||||
# 不进行unicode转义,并格式化
|
||||
reply_str += "{}: {}\n".format(cfg,
|
||||
json.dumps(getattr(config, cfg),
|
||||
ensure_ascii=False, indent=4))
|
||||
else:
|
||||
reply_str += "{}: {}\n".format(cfg, getattr(config, cfg))
|
||||
reply = [reply_str]
|
||||
elif cfg_name in dir(config):
|
||||
if len(params) == 1:
|
||||
# 按照配置项类型进行格式化
|
||||
if isinstance(getattr(config, cfg_name), str):
|
||||
reply_str = "[bot]配置项{}: \"{}\"\n".format(cfg_name, getattr(config, cfg_name))
|
||||
elif isinstance(getattr(config, cfg_name), dict):
|
||||
reply_str = "[bot]配置项{}: {}\n".format(cfg_name,
|
||||
json.dumps(getattr(config, cfg_name),
|
||||
ensure_ascii=False, indent=4))
|
||||
else:
|
||||
reply_str = "[bot]配置项{}: {}\n".format(cfg_name, getattr(config, cfg_name))
|
||||
reply = [reply_str]
|
||||
else:
|
||||
cfg_value = " ".join(params[1:])
|
||||
# 类型转换,如果是json则转换为字典
|
||||
if cfg_value == 'true':
|
||||
cfg_value = True
|
||||
elif cfg_value == 'false':
|
||||
cfg_value = False
|
||||
elif cfg_value.isdigit():
|
||||
cfg_value = int(cfg_value)
|
||||
elif cfg_value.startswith('{') and cfg_value.endswith('}'):
|
||||
cfg_value = json.loads(cfg_value)
|
||||
else:
|
||||
try:
|
||||
cfg_value = float(cfg_value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# 检查类型是否匹配
|
||||
if isinstance(getattr(config, cfg_name), type(cfg_value)):
|
||||
setattr(config, cfg_name, cfg_value)
|
||||
pkg.utils.context.set_config(config)
|
||||
reply = ["[bot]配置项{}修改成功".format(cfg_name)]
|
||||
else:
|
||||
reply = ["[bot]err:配置项{}类型不匹配".format(cfg_name)]
|
||||
|
||||
else:
|
||||
reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
|
||||
|
||||
return reply
|
||||
|
||||
|
||||
def plugin_operation(cmd, params, is_admin):
|
||||
reply = []
|
||||
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.utils.updater as updater
|
||||
|
||||
plugin_list = plugin_host.__plugins__
|
||||
|
||||
if len(params) == 0:
|
||||
reply_str = "[bot]所有插件({}):\n".format(len(plugin_host.__plugins__))
|
||||
idx = 0
|
||||
for key in plugin_host.iter_plugins_name():
|
||||
plugin = plugin_list[key]
|
||||
reply_str += "\n#{} {} {}\n{}\nv{}\n作者: {}\n"\
|
||||
.format((idx+1), plugin['name'],
|
||||
"[已禁用]" if not plugin['enabled'] else "",
|
||||
plugin['description'],
|
||||
plugin['version'], plugin['author'])
|
||||
|
||||
if updater.is_repo("/".join(plugin['path'].split('/')[:-1])):
|
||||
remote_url = updater.get_remote_url("/".join(plugin['path'].split('/')[:-1]))
|
||||
if remote_url != "https://github.com/RockChinQ/QChatGPT" and remote_url != "https://gitee.com/RockChin/QChatGPT":
|
||||
reply_str += "源码: "+remote_url+"\n"
|
||||
|
||||
idx += 1
|
||||
|
||||
reply = [reply_str]
|
||||
elif params[0] == 'update':
|
||||
# 更新所有插件
|
||||
if is_admin:
|
||||
def closure():
|
||||
import pkg.utils.context
|
||||
updated = []
|
||||
for key in plugin_list:
|
||||
plugin = plugin_list[key]
|
||||
if updater.is_repo("/".join(plugin['path'].split('/')[:-1])):
|
||||
success = updater.pull_latest("/".join(plugin['path'].split('/')[:-1]))
|
||||
if success:
|
||||
updated.append(plugin['name'])
|
||||
|
||||
# 检查是否有requirements.txt
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("正在安装依赖...")
|
||||
for key in plugin_list:
|
||||
plugin = plugin_list[key]
|
||||
if os.path.exists("/".join(plugin['path'].split('/')[:-1])+"/requirements.txt"):
|
||||
logging.info("{}检测到requirements.txt,安装依赖".format(plugin['name']))
|
||||
import pkg.utils.pkgmgr
|
||||
pkg.utils.pkgmgr.install_requirements("/".join(plugin['path'].split('/')[:-1])+"/requirements.txt")
|
||||
|
||||
import main
|
||||
main.reset_logging()
|
||||
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("已更新插件: {}".format(", ".join(updated)))
|
||||
|
||||
threading.Thread(target=closure).start()
|
||||
reply = ["[bot]正在更新所有插件,请勿重复发起..."]
|
||||
else:
|
||||
reply = ["[bot]err:权限不足"]
|
||||
elif params[0].startswith("http"):
|
||||
if is_admin:
|
||||
|
||||
def closure():
|
||||
try:
|
||||
plugin_host.install_plugin(params[0])
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件安装成功,请发送 !reload 指令重载插件")
|
||||
except Exception as e:
|
||||
logging.error("插件安装失败:{}".format(e))
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件安装失败:{}".format(e))
|
||||
|
||||
threading.Thread(target=closure, args=()).start()
|
||||
reply = ["[bot]正在安装插件..."]
|
||||
else:
|
||||
reply = ["[bot]err:权限不足,请使用管理员账号私聊发起"]
|
||||
return reply
|
||||
|
||||
|
||||
def process_command(session_name: str, text_message: str, mgr, config,
|
||||
launcher_type: str, launcher_id: int, sender_id: int, is_admin: bool) -> list:
|
||||
@@ -169,188 +31,32 @@ def process_command(session_name: str, text_message: str, mgr, config,
|
||||
cmd = text_message[1:].strip().split(' ')[0]
|
||||
|
||||
params = text_message[1:].strip().split(' ')[1:]
|
||||
if cmd == 'help':
|
||||
reply = ["[bot]" + config.help_message]
|
||||
elif cmd == 'reset':
|
||||
if len(params) == 0:
|
||||
pkg.openai.session.get_session(session_name).reset(explicit=True)
|
||||
reply = ["[bot]会话已重置"]
|
||||
else:
|
||||
pkg.openai.session.get_session(session_name).reset(explicit=True, use_prompt=params[0])
|
||||
reply = ["[bot]会话已重置,使用场景预设:{}".format(params[0])]
|
||||
elif cmd == 'last':
|
||||
result = pkg.openai.session.get_session(session_name).last_session()
|
||||
if result is None:
|
||||
reply = ["[bot]没有前一次的对话"]
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到前一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
elif cmd == 'next':
|
||||
result = pkg.openai.session.get_session(session_name).next_session()
|
||||
if result is None:
|
||||
reply = ["[bot]没有后一次的对话"]
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到后一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
elif cmd == 'prompt':
|
||||
msgs = ""
|
||||
session:list = pkg.openai.session.get_session(session_name).prompt
|
||||
for msg in session:
|
||||
if len(params) != 0 and params[0] in ['-all', '-a']:
|
||||
msgs = msgs + "{}: {}\n\n".format(msg['role'], msg['content'])
|
||||
elif len(msg['content']) > 30:
|
||||
msgs = msgs + "[{}]: {}...\n\n".format(msg['role'], msg['content'][:30])
|
||||
else:
|
||||
msgs = msgs + "[{}]: {}\n\n".format(msg['role'], msg['content'])
|
||||
reply = ["[bot]当前对话所有内容:\n{}".format(msgs)]
|
||||
elif cmd == 'list':
|
||||
pkg.openai.session.get_session(session_name).persistence()
|
||||
page = 0
|
||||
|
||||
if len(params) > 0:
|
||||
try:
|
||||
page = int(params[0])
|
||||
except ValueError:
|
||||
pass
|
||||
# 把!~开头的转换成!cfg
|
||||
if cmd.startswith('~'):
|
||||
params = [cmd[1:]] + params
|
||||
cmd = 'cfg'
|
||||
|
||||
results = pkg.openai.session.get_session(session_name).list_history(page=page)
|
||||
if len(results) == 0:
|
||||
reply = ["[bot]第{}页没有历史会话".format(page)]
|
||||
else:
|
||||
reply_str = "[bot]历史会话 第{}页:\n".format(page)
|
||||
current = -1
|
||||
for i in range(len(results)):
|
||||
# 时间(使用create_timestamp转换) 序号 部分内容
|
||||
datetime_obj = datetime.datetime.fromtimestamp(results[i]['create_timestamp'])
|
||||
msg = ""
|
||||
try:
|
||||
msg = json.loads(results[i]['prompt'])
|
||||
except json.decoder.JSONDecodeError:
|
||||
msg = pkg.openai.session.reset_session_prompt(session_name, results[i]['prompt'])
|
||||
# 持久化
|
||||
pkg.openai.session.get_session(session_name).persistence()
|
||||
if len(msg) >= 2:
|
||||
reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
|
||||
datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
msg[1]['content'])
|
||||
else:
|
||||
reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
|
||||
datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"无内容")
|
||||
if results[i]['create_timestamp'] == pkg.openai.session.get_session(
|
||||
session_name).create_timestamp:
|
||||
current = i + page * 10
|
||||
# 包装参数
|
||||
context = cmdmgr.Context(
|
||||
command=cmd,
|
||||
crt_command=cmd,
|
||||
params=params,
|
||||
crt_params=params[:],
|
||||
session_name=session_name,
|
||||
text_message=text_message,
|
||||
launcher_type=launcher_type,
|
||||
launcher_id=launcher_id,
|
||||
sender_id=sender_id,
|
||||
is_admin=is_admin,
|
||||
privilege=2 if is_admin else 1, # 普通用户1,管理员2
|
||||
)
|
||||
try:
|
||||
reply = cmdmgr.execute(context)
|
||||
except cmdmgr.CommandPrivilegeError as e:
|
||||
reply = ["{}".format(e)]
|
||||
|
||||
reply_str += "\n以上信息倒序排列"
|
||||
if current != -1:
|
||||
reply_str += ",当前会话是 #{}\n".format(current)
|
||||
else:
|
||||
reply_str += ",当前处于全新会话或不在此页"
|
||||
|
||||
reply = [reply_str]
|
||||
elif cmd == 'resend':
|
||||
session = pkg.openai.session.get_session(session_name)
|
||||
to_send = session.undo()
|
||||
|
||||
reply = pkg.qqbot.message.process_normal_message(to_send, mgr, config,
|
||||
launcher_type, launcher_id, sender_id)
|
||||
elif cmd == 'usage':
|
||||
reply_str = "[bot]各api-key使用情况:\n\n"
|
||||
|
||||
api_keys = pkg.utils.context.get_openai_manager().key_mgr.api_key
|
||||
for key_name in api_keys:
|
||||
text_length = pkg.utils.context.get_openai_manager().audit_mgr \
|
||||
.get_text_length_of_key(api_keys[key_name])
|
||||
image_count = pkg.utils.context.get_openai_manager().audit_mgr \
|
||||
.get_image_count_of_key(api_keys[key_name])
|
||||
reply_str += "{}:\n - 文本长度:{}\n - 图片数量:{}\n".format(key_name, int(text_length),
|
||||
int(image_count))
|
||||
# 获取此key的额度
|
||||
try:
|
||||
credit_data = credit.fetch_credit_data(api_keys[key_name])
|
||||
reply_str += " - 使用额度:{:.2f}/{:.2f}\n".format(credit_data['total_used'],credit_data['total_granted'])
|
||||
except Exception as e:
|
||||
logging.warning("获取额度失败:{}".format(e))
|
||||
|
||||
reply = [reply_str]
|
||||
elif cmd == 'draw':
|
||||
if len(params) == 0:
|
||||
reply = ["[bot]err:请输入图片描述文字"]
|
||||
else:
|
||||
session = pkg.openai.session.get_session(session_name)
|
||||
|
||||
res = session.draw_image(" ".join(params))
|
||||
|
||||
logging.debug("draw_image result:{}".format(res))
|
||||
reply = [Image(url=res['data'][0]['url'])]
|
||||
if not (hasattr(config, 'include_image_description')
|
||||
and not config.include_image_description):
|
||||
reply.append(" ".join(params))
|
||||
elif cmd == 'version':
|
||||
reply_str = "[bot]当前版本:\n{}\n".format(pkg.utils.updater.get_current_version_info())
|
||||
try:
|
||||
if pkg.utils.updater.is_new_version_available():
|
||||
reply_str += "\n有新版本可用,请使用命令 !update 进行更新"
|
||||
except:
|
||||
pass
|
||||
|
||||
reply = [reply_str]
|
||||
|
||||
elif cmd == 'plugin':
|
||||
reply = plugin_operation(cmd, params, is_admin)
|
||||
|
||||
elif cmd == 'default':
|
||||
if len(params) == 0:
|
||||
# 输出目前所有情景预设
|
||||
import pkg.openai.dprompt as dprompt
|
||||
reply_str = "[bot]当前所有情景预设:\n\n"
|
||||
for key,value in dprompt.get_prompt_dict().items():
|
||||
reply_str += " - {}: {}\n".format(key,value)
|
||||
|
||||
reply_str += "\n当前默认情景预设:{}\n".format(dprompt.get_current())
|
||||
reply_str += "请使用!default <情景预设>来设置默认情景预设"
|
||||
reply = [reply_str]
|
||||
elif len(params) >0 and is_admin:
|
||||
# 设置默认情景
|
||||
import pkg.openai.dprompt as dprompt
|
||||
try:
|
||||
dprompt.set_current(params[0])
|
||||
reply = ["[bot]已设置默认情景预设为:{}".format(dprompt.get_current())]
|
||||
except KeyError:
|
||||
reply = ["[bot]err: 未找到情景预设:{}".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]err: 仅管理员可设置默认情景预设"]
|
||||
elif cmd == 'reload' and is_admin:
|
||||
def reload_task():
|
||||
pkg.utils.reloader.reload_all()
|
||||
|
||||
threading.Thread(target=reload_task, daemon=True).start()
|
||||
elif cmd == 'update' and is_admin:
|
||||
def update_task():
|
||||
try:
|
||||
if pkg.utils.updater.update_all():
|
||||
pkg.utils.reloader.reload_all(notify=False)
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("更新完成")
|
||||
else:
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("无新版本")
|
||||
except Exception as e0:
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("更新失败:{}".format(e0))
|
||||
return
|
||||
|
||||
threading.Thread(target=update_task, daemon=True).start()
|
||||
|
||||
reply = ["[bot]正在更新,请耐心等待,请勿重复发起更新..."]
|
||||
elif cmd == 'cfg' and is_admin:
|
||||
reply = config_operation(cmd, params)
|
||||
else:
|
||||
if cmd.startswith("~") and is_admin:
|
||||
config_item = cmd[1:]
|
||||
params = [config_item] + params
|
||||
reply = config_operation("cfg", params)
|
||||
else:
|
||||
reply = ["[bot]err:未知的指令或权限不足: " + cmd]
|
||||
return reply
|
||||
except Exception as e:
|
||||
mgr.notify_admin("{}指令执行失败:{}".format(session_name, e))
|
||||
logging.exception(e)
|
||||
|
||||
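On the dispatch side, process_command above strips the leading '!', splits the command word from its parameters, and rewrites the '!~key value' shorthand into '!cfg key value' before building the Context. A standalone sketch of just that parsing, with no project imports:

def parse_command(text_message: str) -> tuple:
    body = text_message[1:].strip()           # drop the leading '!'
    parts = body.split(' ')
    cmd, params = parts[0], parts[1:]
    if cmd.startswith('~'):                   # '!~key value' is shorthand for '!cfg key value'
        params = [cmd[1:]] + params
        cmd = 'cfg'
    return cmd, params


if __name__ == '__main__':
    print(parse_command('!draw a cat in the rain'))   # -> ('draw', ['a', 'cat', 'in', 'the', 'rain'])
    print(parse_command('!~retry_times 5'))           # -> ('cfg', ['retry_times', '5'])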
@@ -1,19 +1,84 @@
|
||||
# 敏感词过滤模块
|
||||
import re
|
||||
import requests
|
||||
import json
|
||||
import logging
|
||||
|
||||
|
||||
class ReplyFilter:
|
||||
|
||||
sensitive_words = []
|
||||
mask = "*"
|
||||
mask_word = ""
|
||||
|
||||
def __init__(self, sensitive_words: list):
|
||||
# 默认值( 兼容性考虑 )
|
||||
baidu_check = False
|
||||
baidu_api_key = ""
|
||||
baidu_secret_key = ""
|
||||
inappropriate_message_tips = "[百度云]请珍惜机器人,当前返回内容不合规"
|
||||
|
||||
def __init__(self, sensitive_words: list, mask: str = "*", mask_word: str = ""):
|
||||
self.sensitive_words = sensitive_words
|
||||
self.mask = mask
|
||||
self.mask_word = mask_word
|
||||
import config
|
||||
|
||||
self.baidu_check = config.baidu_check
|
||||
self.baidu_api_key = config.baidu_api_key
|
||||
self.baidu_secret_key = config.baidu_secret_key
|
||||
self.inappropriate_message_tips = config.inappropriate_message_tips
|
||||
|
||||
def is_illegal(self, message: str) -> bool:
|
||||
processed = self.process(message)
|
||||
if processed != message:
|
||||
return True
|
||||
return False
|
||||
|
||||
def process(self, message: str) -> str:
|
||||
|
||||
# 本地关键词屏蔽
|
||||
for word in self.sensitive_words:
|
||||
match = re.findall(word, message)
|
||||
if len(match) > 0:
|
||||
for i in range(len(match)):
|
||||
message = message.replace(match[i], "*" * len(match[i]))
|
||||
if self.mask_word == "":
|
||||
message = message.replace(match[i], self.mask * len(match[i]))
|
||||
else:
|
||||
message = message.replace(match[i], self.mask_word)
|
||||
|
||||
# 百度云审核
|
||||
if self.baidu_check:
|
||||
|
||||
# 百度云审核URL
|
||||
baidu_url = "https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined?access_token=" + \
|
||||
str(requests.post("https://aip.baidubce.com/oauth/2.0/token",
|
||||
params={"grant_type": "client_credentials",
|
||||
"client_id": self.baidu_api_key,
|
||||
"client_secret": self.baidu_secret_key}).json().get("access_token"))
|
||||
|
||||
# 百度云审核
|
||||
payload = "text=" + message
|
||||
logging.info("向百度云发送:" + payload)
|
||||
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'}
|
||||
|
||||
if isinstance(payload, str):
|
||||
payload = payload.encode('utf-8')
|
||||
|
||||
response = requests.request("POST", baidu_url, headers=headers, data=payload)
|
||||
response_dict = json.loads(response.text)
|
||||
|
||||
if "error_code" in response_dict:
|
||||
error_msg = response_dict.get("error_msg")
|
||||
logging.warning(f"百度云判定出错,错误信息:{error_msg}")
|
||||
conclusion = f"百度云判定出错,错误信息:{error_msg}\n以下是原消息:{message}"
|
||||
else:
|
||||
conclusion = response_dict["conclusion"]
|
||||
if conclusion in ("合规"):
|
||||
logging.info(f"百度云判定结果:{conclusion}")
|
||||
return message
|
||||
else:
|
||||
logging.warning(f"百度云判定结果:{conclusion}")
|
||||
conclusion = self.inappropriate_message_tips
|
||||
# 返回百度云审核结果
|
||||
return conclusion
|
||||
|
||||
return message
|
||||
|
||||
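The local keyword filter above searches each configured pattern with re.findall and replaces every match either with the mask character repeated to the match length or with a fixed mask_word. A self-contained sketch of that masking step (the sample words are invented; the Baidu cloud audit branch is not reproduced here):

import re


def mask_message(message: str, sensitive_words: list, mask: str = '*', mask_word: str = '') -> str:
    for word in sensitive_words:
        for match in re.findall(word, message):
            replacement = mask_word if mask_word else mask * len(match)
            message = message.replace(match, replacement)
    return message


if __name__ == '__main__':
    print(mask_message('please ignore the secret token', ['secret', r'tok\w+']))
    # -> 'please ignore the ****** *****'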
@@ -5,9 +5,6 @@ def ignore(msg: str) -> bool:
|
||||
"""检查消息是否应该被忽略"""
|
||||
import config
|
||||
|
||||
if not hasattr(config, 'ignore_rules'):
|
||||
return False
|
||||
|
||||
if 'prefix' in config.ignore_rules:
|
||||
for rule in config.ignore_rules['prefix']:
|
||||
if msg.startswith(rule):
|
||||
|
||||
@@ -3,9 +3,9 @@ import json
|
||||
import os
|
||||
import threading
|
||||
|
||||
import mirai.models.bus
|
||||
|
||||
from mirai import At, GroupMessage, MessageEvent, Mirai, StrangerMessage, WebSocketAdapter, HTTPAdapter, \
|
||||
FriendMessage, Image
|
||||
FriendMessage, Image, MessageChain, Plain
|
||||
from func_timeout import func_set_timeout
|
||||
|
||||
import pkg.openai.session
|
||||
@@ -19,21 +19,24 @@ import pkg.utils.context
|
||||
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
import tips as tips_custom
|
||||
|
||||
|
||||
# 并行运行
|
||||
def go(func, args=()):
|
||||
thread = threading.Thread(target=func, args=args, daemon=True)
|
||||
thread.start()
|
||||
import pkg.qqbot.adapter as msadapter
|
||||
|
||||
|
||||
# 检查消息是否符合泛响应匹配机制
|
||||
def check_response_rule(text: str):
|
||||
def check_response_rule(group_id:int, text: str):
|
||||
config = pkg.utils.context.get_config()
|
||||
if not hasattr(config, 'response_rules'):
|
||||
return False, ''
|
||||
|
||||
rules = config.response_rules
|
||||
|
||||
# 检查是否有特定规则
|
||||
if 'prefix' not in config.response_rules:
|
||||
if str(group_id) in config.response_rules:
|
||||
rules = config.response_rules[str(group_id)]
|
||||
else:
|
||||
rules = config.response_rules['default']
|
||||
|
||||
# 检查前缀匹配
|
||||
if 'prefix' in rules:
|
||||
for rule in rules['prefix']:
|
||||
@@ -51,57 +54,92 @@ def check_response_rule(text: str):
|
||||
return False, ""
|
||||
|
||||
|
||||
def response_at(group_id: int):
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
use_response_rule = config.response_rules
|
||||
|
||||
# 检查是否有特定规则
|
||||
if 'prefix' not in config.response_rules:
|
||||
if str(group_id) in config.response_rules:
|
||||
use_response_rule = config.response_rules[str(group_id)]
|
||||
else:
|
||||
use_response_rule = config.response_rules['default']
|
||||
|
||||
if 'at' not in use_response_rule:
|
||||
return True
|
||||
|
||||
return use_response_rule['at']
|
||||
|
||||
|
||||
def random_responding(group_id):
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
use_response_rule = config.response_rules
|
||||
|
||||
# 检查是否有特定规则
|
||||
if 'prefix' not in config.response_rules:
|
||||
if str(group_id) in config.response_rules:
|
||||
use_response_rule = config.response_rules[str(group_id)]
|
||||
else:
|
||||
use_response_rule = config.response_rules['default']
|
||||
|
||||
if 'random_rate' in use_response_rule:
|
||||
import random
|
||||
return random.random() < use_response_rule['random_rate']
|
||||
return False
|
||||
|
||||
|
||||
# 控制QQ消息输入输出的类
|
||||
class QQBotManager:
|
||||
retry = 3
|
||||
|
||||
bot: Mirai = None
|
||||
adapter: msadapter.MessageSourceAdapter = None
|
||||
|
||||
bot_account_id: int = 0
|
||||
|
||||
reply_filter = None
|
||||
|
||||
enable_banlist = False
|
||||
|
||||
enable_private = True
|
||||
enable_group = True
|
||||
|
||||
ban_person = []
|
||||
ban_group = []
|
||||
|
||||
def __init__(self, mirai_http_api_config: dict, timeout: int = 60, retry: int = 3, first_time_init=True):
|
||||
def __init__(self, first_time_init=True):
|
||||
import config
|
||||
|
||||
self.timeout = timeout
|
||||
self.retry = retry
|
||||
|
||||
# 加载禁用列表
|
||||
if os.path.exists("banlist.py"):
|
||||
import banlist
|
||||
self.enable_banlist = banlist.enable
|
||||
self.ban_person = banlist.person
|
||||
self.ban_group = banlist.group
|
||||
logging.info("加载禁用列表: person: {}, group: {}".format(self.ban_person, self.ban_group))
|
||||
|
||||
config = pkg.utils.context.get_config()
|
||||
if os.path.exists("sensitive.json") \
|
||||
and config.sensitive_word_filter is not None \
|
||||
and config.sensitive_word_filter:
|
||||
with open("sensitive.json", "r", encoding="utf-8") as f:
|
||||
self.reply_filter = pkg.qqbot.filter.ReplyFilter(json.load(f)['words'])
|
||||
else:
|
||||
self.reply_filter = pkg.qqbot.filter.ReplyFilter([])
|
||||
self.timeout = config.process_message_timeout
|
||||
self.retry = config.retry_times
|
||||
|
||||
# 由于YiriMirai的bot对象是单例的,且shutdown方法暂时无法使用
|
||||
# 故只在第一次初始化时创建bot对象,重载之后使用原bot对象
|
||||
# 因此,bot的配置不支持热重载
|
||||
if first_time_init:
|
||||
self.first_time_init(mirai_http_api_config)
|
||||
logging.debug("Use adapter:" + config.msg_source_adapter)
|
||||
if config.msg_source_adapter == 'yirimirai':
|
||||
from pkg.qqbot.sources.yirimirai import YiriMiraiAdapter
|
||||
|
||||
mirai_http_api_config = config.mirai_http_api_config
|
||||
self.bot_account_id = config.mirai_http_api_config['qq']
|
||||
self.adapter = YiriMiraiAdapter(mirai_http_api_config)
|
||||
elif config.msg_source_adapter == 'nakuru':
|
||||
from pkg.qqbot.sources.nakuru import NakuruProjectAdapter
|
||||
self.adapter = NakuruProjectAdapter(config.nakuru_config)
|
||||
self.bot_account_id = self.adapter.bot_account_id
|
||||
else:
|
||||
self.bot = pkg.utils.context.get_qqbot_manager().bot
|
||||
self.adapter = pkg.utils.context.get_qqbot_manager().adapter
|
||||
self.bot_account_id = pkg.utils.context.get_qqbot_manager().bot_account_id
|
||||
|
||||
pkg.utils.context.set_qqbot_manager(self)
|
||||
|
||||
# 注册诸事件
|
||||
# Caution: 注册新的事件处理器之后,请务必在unsubscribe_all中编写相应的取消订阅代码
|
||||
@self.bot.on(FriendMessage)
|
||||
async def on_friend_message(event: FriendMessage):
|
||||
|
||||
def friend_message_handler(event: FriendMessage):
|
||||
def on_friend_message(event: FriendMessage):
|
||||
|
||||
def friend_message_handler():
|
||||
# 触发事件
|
||||
args = {
|
||||
"launcher_type": "person",
|
||||
@@ -116,12 +154,17 @@ class QQBotManager:
|
||||
|
||||
self.on_person_message(event)
|
||||
|
||||
go(friend_message_handler, (event,))
|
||||
pkg.utils.context.get_thread_ctl().submit_user_task(
|
||||
friend_message_handler,
|
||||
)
|
||||
self.adapter.register_listener(
|
||||
FriendMessage,
|
||||
on_friend_message
|
||||
)
|
||||
|
||||
@self.bot.on(StrangerMessage)
|
||||
async def on_stranger_message(event: StrangerMessage):
|
||||
def on_stranger_message(event: StrangerMessage):
|
||||
|
||||
def stranger_message_handler(event: StrangerMessage):
|
||||
def stranger_message_handler():
|
||||
# 触发事件
|
||||
args = {
|
||||
"launcher_type": "person",
|
||||
@@ -136,10 +179,17 @@ class QQBotManager:
|
||||
|
||||
self.on_person_message(event)
|
||||
|
||||
go(stranger_message_handler, (event,))
|
||||
pkg.utils.context.get_thread_ctl().submit_user_task(
|
||||
stranger_message_handler,
|
||||
)
|
||||
# nakuru不区分好友和陌生人,故仅为yirimirai注册陌生人事件
|
||||
if config.msg_source_adapter == 'yirimirai':
|
||||
self.adapter.register_listener(
|
||||
StrangerMessage,
|
||||
on_stranger_message
|
||||
)
|
||||
|
||||
@self.bot.on(GroupMessage)
|
||||
async def on_group_message(event: GroupMessage):
|
||||
def on_group_message(event: GroupMessage):
|
||||
|
||||
def group_message_handler(event: GroupMessage):
|
||||
# 触发事件
|
||||
@@ -156,62 +206,96 @@ class QQBotManager:
|
||||
|
||||
self.on_group_message(event)
|
||||
|
||||
go(group_message_handler, (event,))
|
||||
pkg.utils.context.get_thread_ctl().submit_user_task(
|
||||
group_message_handler,
|
||||
event
|
||||
)
|
||||
self.adapter.register_listener(
|
||||
GroupMessage,
|
||||
on_group_message
|
||||
)
|
||||
|
||||
def unsubscribe_all():
|
||||
"""取消所有订阅
|
||||
|
||||
用于在热重载流程中卸载所有事件处理器
|
||||
"""
|
||||
assert isinstance(self.bot, Mirai)
|
||||
bus = self.bot.bus
|
||||
assert isinstance(bus, mirai.models.bus.ModelEventBus)
|
||||
|
||||
bus.unsubscribe(FriendMessage, on_friend_message)
|
||||
bus.unsubscribe(StrangerMessage, on_stranger_message)
|
||||
bus.unsubscribe(GroupMessage, on_group_message)
|
||||
import config
|
||||
self.adapter.unregister_listener(
|
||||
FriendMessage,
|
||||
on_friend_message
|
||||
)
|
||||
if config.msg_source_adapter == 'yirimirai':
|
||||
self.adapter.unregister_listener(
|
||||
StrangerMessage,
|
||||
on_stranger_message
|
||||
)
|
||||
self.adapter.unregister_listener(
|
||||
GroupMessage,
|
||||
on_group_message
|
||||
)
|
||||
|
||||
self.unsubscribe_all = unsubscribe_all
|
||||
|
||||
def first_time_init(self, mirai_http_api_config: dict):
|
||||
"""热重载后不再运行此函数"""
|
||||
# 加载禁用列表
|
||||
if os.path.exists("banlist.py"):
|
||||
import banlist
|
||||
self.enable_banlist = banlist.enable
|
||||
self.ban_person = banlist.person
|
||||
self.ban_group = banlist.group
|
||||
logging.info("加载禁用列表: person: {}, group: {}".format(self.ban_person, self.ban_group))
|
||||
|
||||
if 'adapter' not in mirai_http_api_config or mirai_http_api_config['adapter'] == "WebSocketAdapter":
|
||||
bot = Mirai(
|
||||
qq=mirai_http_api_config['qq'],
|
||||
adapter=WebSocketAdapter(
|
||||
verify_key=mirai_http_api_config['verifyKey'],
|
||||
host=mirai_http_api_config['host'],
|
||||
port=mirai_http_api_config['port']
|
||||
)
|
||||
)
|
||||
elif mirai_http_api_config['adapter'] == "HTTPAdapter":
|
||||
bot = Mirai(
|
||||
qq=mirai_http_api_config['qq'],
|
||||
adapter=HTTPAdapter(
|
||||
verify_key=mirai_http_api_config['verifyKey'],
|
||||
host=mirai_http_api_config['host'],
|
||||
port=mirai_http_api_config['port']
|
||||
)
|
||||
)
|
||||
if hasattr(banlist, "enable_private"):
|
||||
self.enable_private = banlist.enable_private
|
||||
if hasattr(banlist, "enable_group"):
|
||||
self.enable_group = banlist.enable_group
|
||||
|
||||
else:
|
||||
raise Exception("未知的适配器类型")
|
||||
|
||||
self.bot = bot
|
||||
|
||||
def send(self, event, msg, check_quote=True):
|
||||
config = pkg.utils.context.get_config()
|
||||
asyncio.run(
|
||||
self.bot.send(event, msg, quote=True if hasattr(config,
|
||||
"quote_origin") and config.quote_origin and check_quote else False))
|
||||
if os.path.exists("sensitive.json") \
|
||||
and config.sensitive_word_filter is not None \
|
||||
and config.sensitive_word_filter:
|
||||
with open("sensitive.json", "r", encoding="utf-8") as f:
|
||||
sensitive_json = json.load(f)
|
||||
self.reply_filter = pkg.qqbot.filter.ReplyFilter(
|
||||
sensitive_words=sensitive_json['words'],
|
||||
mask=sensitive_json['mask'] if 'mask' in sensitive_json else '*',
|
||||
mask_word=sensitive_json['mask_word'] if 'mask_word' in sensitive_json else ''
|
||||
)
|
||||
else:
|
||||
self.reply_filter = pkg.qqbot.filter.ReplyFilter([])
|
||||
|
||||
def send(self, event, msg, check_quote=True, check_at_sender=True):
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
if check_at_sender and config.at_sender:
|
||||
msg.insert(
|
||||
0,
|
||||
Plain(" \n")
|
||||
)
|
||||
|
||||
# 当回复的正文中包含换行时,quote可能会自带at,此时就不再单独添加at,只添加换行
|
||||
if "\n" not in str(msg[1]) or config.msg_source_adapter == 'nakuru':
|
||||
msg.insert(
|
||||
0,
|
||||
At(
|
||||
event.sender.id
|
||||
)
|
||||
)
|
||||
|
||||
self.adapter.reply_message(
|
||||
event,
|
||||
msg,
|
||||
quote_origin=True if config.quote_origin and check_quote else False
|
||||
)
|
||||
|
||||
# 私聊消息处理
|
||||
def on_person_message(self, event: MessageEvent):
|
||||
import config
|
||||
reply = ''
|
||||
|
||||
if event.sender.id == self.bot.qq:
|
||||
if not self.enable_private:
|
||||
logging.debug("已在banlist.py中禁用所有私聊")
|
||||
elif event.sender.id == self.bot_account_id:
|
||||
pass
|
||||
else:
|
||||
if Image in event.message_chain:
|
||||
@@ -242,20 +326,19 @@ class QQBotManager:
|
||||
if failed == self.retry:
|
||||
pkg.openai.session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
|
||||
self.notify_admin("{} 请求超时".format("person_{}".format(event.sender.id)))
|
||||
reply = ["[bot]err:请求超时"]
|
||||
reply = [tips_custom.reply_message]
|
||||
|
||||
if reply:
|
||||
return self.send(event, reply, check_quote=False)
|
||||
return self.send(event, reply, check_quote=False, check_at_sender=False)
|
||||
|
||||
# 群消息处理
|
||||
def on_group_message(self, event: GroupMessage):
|
||||
import config
|
||||
reply = ''
|
||||
|
||||
def process(text=None) -> str:
|
||||
replys = ""
|
||||
if At(self.bot.qq) in event.message_chain:
|
||||
event.message_chain.remove(At(self.bot.qq))
|
||||
if At(self.bot_account_id) in event.message_chain:
|
||||
event.message_chain.remove(At(self.bot_account_id))
|
||||
|
||||
# 超时则重试,重试超过次数则放弃
|
||||
failed = 0
|
||||
@@ -282,20 +365,27 @@ class QQBotManager:
|
||||
if failed == self.retry:
|
||||
pkg.openai.session.get_session('group_{}'.format(event.group.id)).release_response_lock()
|
||||
self.notify_admin("{} 请求超时".format("group_{}".format(event.group.id)))
|
||||
replys = ["[bot]err:请求超时"]
|
||||
replys = [tips_custom.replys_message]
|
||||
|
||||
return replys
|
||||
|
||||
if Image in event.message_chain:
|
||||
|
||||
if not self.enable_group:
|
||||
logging.debug("已在banlist.py中禁用所有群聊")
|
||||
elif Image in event.message_chain:
|
||||
pass
|
||||
elif At(self.bot.qq) not in event.message_chain:
|
||||
check, result = check_response_rule(str(event.message_chain).strip())
|
||||
|
||||
if check:
|
||||
reply = process(result.strip())
|
||||
else:
|
||||
# 直接调用
|
||||
reply = process()
|
||||
if At(self.bot_account_id) in event.message_chain and response_at(event.group.id):
|
||||
# 直接调用
|
||||
reply = process()
|
||||
else:
|
||||
check, result = check_response_rule(event.group.id, str(event.message_chain).strip())
|
||||
|
||||
if check:
|
||||
reply = process(result.strip())
|
||||
# 检查是否随机响应
|
||||
elif random_responding(event.group.id):
|
||||
logging.info("随机响应group_{}消息".format(event.group.id))
|
||||
reply = process()
|
||||
|
||||
if reply:
|
||||
return self.send(event, reply)
|
||||
@@ -303,25 +393,36 @@ class QQBotManager:
|
||||
# 通知系统管理员
|
||||
def notify_admin(self, message: str):
|
||||
config = pkg.utils.context.get_config()
|
||||
if hasattr(config, "admin_qq") and config.admin_qq != 0 and config.admin_qq != []:
|
||||
if config.admin_qq != 0 and config.admin_qq != []:
|
||||
logging.info("通知管理员:{}".format(message))
|
||||
if type(config.admin_qq) == int:
|
||||
send_task = self.bot.send_friend_message(config.admin_qq, "[bot]{}".format(message))
|
||||
threading.Thread(target=asyncio.run, args=(send_task,)).start()
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
config.admin_qq,
|
||||
MessageChain([Plain("[bot]{}".format(message))])
|
||||
)
|
||||
else:
|
||||
for adm in config.admin_qq:
|
||||
send_task = self.bot.send_friend_message(adm, "[bot]{}".format(message))
|
||||
threading.Thread(target=asyncio.run, args=(send_task,)).start()
|
||||
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
adm,
|
||||
MessageChain([Plain("[bot]{}".format(message))])
|
||||
)
|
||||
|
||||
def notify_admin_message_chain(self, message):
|
||||
config = pkg.utils.context.get_config()
|
||||
if hasattr(config, "admin_qq") and config.admin_qq != 0 and config.admin_qq != []:
|
||||
if config.admin_qq != 0 and config.admin_qq != []:
|
||||
logging.info("通知管理员:{}".format(message))
|
||||
if type(config.admin_qq) == int:
|
||||
send_task = self.bot.send_friend_message(config.admin_qq, message)
|
||||
threading.Thread(target=asyncio.run, args=(send_task,)).start()
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
config.admin_qq,
|
||||
message
|
||||
)
|
||||
else:
|
||||
for adm in config.admin_qq:
|
||||
send_task = self.bot.send_friend_message(adm, message)
|
||||
threading.Thread(target=asyncio.run, args=(send_task,)).start()
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
adm,
|
||||
message
|
||||
)
|
||||
|
||||
@@ -1,23 +1,21 @@
|
||||
# 普通消息处理模块
|
||||
import logging
|
||||
import time
|
||||
import openai
|
||||
import pkg.utils.context
|
||||
import pkg.openai.session
|
||||
|
||||
import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
import pkg.qqbot.blob as blob
|
||||
import tips as tips_custom
|
||||
|
||||
|
||||
def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
|
||||
"""处理异常,当notify_admin不为空时,会通知管理员,返回通知用户的消息"""
|
||||
import config
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin(notify_admin)
|
||||
if hasattr(config, 'hide_exce_info_to_user') and config.hide_exce_info_to_user:
|
||||
if hasattr(config, 'alter_tip_message'):
|
||||
return [config.alter_tip_message] if config.alter_tip_message else []
|
||||
else:
|
||||
return ["[bot]出错了,请重试或联系管理员"]
|
||||
if config.hide_exce_info_to_user:
|
||||
return [tips_custom.alter_tip_message] if tips_custom.alter_tip_message else []
|
||||
else:
|
||||
return [set_reply]
|
||||
|
||||
@@ -40,9 +38,9 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
||||
reply = handle_exception(notify_admin=f"{session_name},多次尝试失败。", set_reply=f"[bot]多次尝试失败,请重试或联系管理员")
|
||||
break
|
||||
try:
|
||||
prefix = "[GPT]" if hasattr(config, "show_prefix") and config.show_prefix else ""
|
||||
prefix = "[GPT]" if config.show_prefix else ""
|
||||
|
||||
text = session.append(text_message)
|
||||
text, finish_reason, funcs = session.query(text_message)
|
||||
|
||||
# 触发插件事件
|
||||
args = {
|
||||
@@ -51,7 +49,9 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
||||
"sender_id": sender_id,
|
||||
"session": session,
|
||||
"prefix": prefix,
|
||||
"response_text": text
|
||||
"response_text": text,
|
||||
"finish_reason": finish_reason,
|
||||
"funcs_called": funcs,
|
||||
}
|
||||
|
||||
event = pkg.plugin.host.emit(plugin_models.NormalMessageResponded, **args)
|
||||
@@ -64,10 +64,11 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
||||
|
||||
if not event.is_prevented_default():
|
||||
reply = [prefix + text]
|
||||
|
||||
except openai.error.APIConnectionError as e:
|
||||
err_msg = str(e)
|
||||
if err_msg.__contains__('Error communicating with OpenAI'):
|
||||
reply = handle_exception("{}会话调用API失败:{}\n请尝试关闭网络代理来解决此问题。".format(session_name, e),
|
||||
reply = handle_exception("{}会话调用API失败:{}\n您的网络无法访问OpenAI接口或网络代理不正常".format(session_name, e),
|
||||
"[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
||||
else:
|
||||
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
||||
@@ -116,9 +117,12 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
|
||||
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
|
||||
"[bot]err:RateLimitError,请重试或联系作者,或等待修复")
|
||||
except openai.error.InvalidRequestError as e:
|
||||
reply = handle_exception("{}API调用参数错误:{}\n\n这可能是由于config.py中的prompt_submit_length参数或"
|
||||
"completion_api_params中的max_tokens参数数值过大导致的,请尝试将其降低".format(
|
||||
session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
|
||||
if config.auto_reset and "This model's maximum context length is" in str(e):
|
||||
session.reset(persist=True)
|
||||
reply = [tips_custom.session_auto_reset_message]
|
||||
else:
|
||||
reply = handle_exception("{}API调用参数错误:{}\n".format(
|
||||
session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
|
||||
except openai.error.ServiceUnavailableError as e:
|
||||
reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
|
||||
except Exception as e:
|
||||
|
||||
@@ -26,6 +26,8 @@ import pkg.plugin.host as plugin_host
|
||||
import pkg.plugin.models as plugin_models
|
||||
import pkg.qqbot.ignore as ignore
|
||||
import pkg.qqbot.banlist as banlist
|
||||
import pkg.qqbot.blob as blob
|
||||
import tips as tips_custom
|
||||
|
||||
processing = []
|
||||
|
||||
@@ -49,7 +51,7 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
session_name = "{}_{}".format(launcher_type, launcher_id)
|
||||
|
||||
# 检查发送方是否被禁用
|
||||
if banlist.is_banned(launcher_type, launcher_id):
|
||||
if banlist.is_banned(launcher_type, launcher_id, sender_id):
|
||||
logging.info("根据禁用列表忽略{}_{}的消息".format(launcher_type, launcher_id))
|
||||
return []
|
||||
|
||||
@@ -57,24 +59,33 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
logging.info("根据忽略规则忽略消息: {}".format(text_message))
|
||||
return []
|
||||
|
||||
import config
|
||||
|
||||
if not config.wait_last_done and session_name in processing:
|
||||
return MessageChain([Plain(tips_custom.message_drop_tip)])
|
||||
|
||||
# 检查是否被禁言
|
||||
if launcher_type == 'group':
|
||||
result = mgr.bot.member_info(target=launcher_id, member_id=mgr.bot.qq).get()
|
||||
result = asyncio.run(result)
|
||||
if result.mute_time_remaining > 0:
|
||||
logging.info("机器人被禁言,跳过消息处理(group_{},剩余{}s)".format(launcher_id,
|
||||
result.mute_time_remaining))
|
||||
is_muted = mgr.adapter.is_muted(launcher_id)
|
||||
if is_muted:
|
||||
logging.info("机器人被禁言,跳过消息处理(group_{})".format(launcher_id))
|
||||
return reply
|
||||
|
||||
import config
|
||||
if config.income_msg_check:
|
||||
if mgr.reply_filter.is_illegal(text_message):
|
||||
return MessageChain(Plain("[bot] 消息中存在不合适的内容, 请更换措辞"))
|
||||
|
||||
pkg.openai.session.get_session(session_name).acquire_response_lock()
|
||||
|
||||
text_message = text_message.strip()
|
||||
|
||||
|
||||
# 为强制消息延迟计时
|
||||
start_time = time.time()
|
||||
|
||||
# 处理消息
|
||||
try:
|
||||
if session_name in processing:
|
||||
pkg.openai.session.get_session(session_name).release_response_lock()
|
||||
return MessageChain([Plain("[bot]err:正在处理中,请稍后再试")])
|
||||
|
||||
config = pkg.utils.context.get_config()
|
||||
|
||||
@@ -109,10 +120,11 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
else: # 消息
|
||||
# 限速丢弃检查
|
||||
# print(ratelimit.__crt_minute_usage__[session_name])
|
||||
if hasattr(config, "rate_limitation") and config.rate_limit_strategy == "drop":
|
||||
if config.rate_limit_strategy == "drop":
|
||||
if ratelimit.is_reach_limit(session_name):
|
||||
logging.info("根据限速策略丢弃[{}]消息: {}".format(session_name, text_message))
|
||||
return MessageChain(["[bot]"+config.rate_limit_drop_tip]) if hasattr(config, "rate_limit_drop_tip") and config.rate_limit_drop_tip != "" else []
|
||||
|
||||
return MessageChain(["[bot]"+tips_custom.rate_limit_drop_tip]) if tips_custom.rate_limit_drop_tip != "" else []
|
||||
|
||||
before = time.time()
|
||||
# 触发插件事件
|
||||
@@ -138,11 +150,10 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
mgr, config, launcher_type, launcher_id, sender_id)
|
||||
|
||||
# 限速等待时间
|
||||
if hasattr(config, "rate_limitation") and config.rate_limit_strategy == "wait":
|
||||
if config.rate_limit_strategy == "wait":
|
||||
time.sleep(ratelimit.get_rest_wait_time(session_name, time.time() - before))
|
||||
|
||||
if hasattr(config, "rate_limitation"):
|
||||
ratelimit.add_usage(session_name)
|
||||
ratelimit.add_usage(session_name)
|
||||
|
||||
if reply is not None and len(reply) > 0 and (type(reply[0]) == str or type(reply[0]) == mirai.Plain):
|
||||
if type(reply[0]) == mirai.Plain:
|
||||
@@ -152,12 +163,32 @@ def process_message(launcher_type: str, launcher_id: int, text_message: str, mes
|
||||
reply[0][:min(100, len(reply[0]))] + (
|
||||
"..." if len(reply[0]) > 100 else "")))
|
||||
reply = [mgr.reply_filter.process(reply[0])]
|
||||
reply = blob.check_text(reply[0])
|
||||
else:
|
||||
logging.info("回复[{}]图片消息:{}".format(session_name, reply))
|
||||
logging.info("回复[{}]消息".format(session_name))
|
||||
|
||||
finally:
|
||||
processing.remove(session_name)
|
||||
finally:
|
||||
pkg.openai.session.get_session(session_name).release_response_lock()
|
||||
|
||||
# 检查延迟时间
|
||||
if config.force_delay_range[1] == 0:
|
||||
delay_time = 0
|
||||
else:
|
||||
import random
|
||||
|
||||
# 从延迟范围中随机取一个值(浮点)
|
||||
rdm = random.uniform(config.force_delay_range[0], config.force_delay_range[1])
|
||||
|
||||
spent = time.time() - start_time
|
||||
|
||||
# 如果花费时间小于延迟时间,则延迟
|
||||
delay_time = rdm - spent if rdm - spent > 0 else 0
|
||||
|
||||
# 延迟
|
||||
if delay_time > 0:
|
||||
logging.info("[风控] 强制延迟{:.2f}秒(如需关闭,请到config.py修改force_delay_range字段)".format(delay_time))
|
||||
time.sleep(delay_time)
|
||||
|
||||
return MessageChain(reply)
|
||||
|
||||
@@ -10,6 +10,20 @@ __crt_minute_usage__ = {}
|
||||
__timer_thr__: threading.Thread = None
|
||||
|
||||
|
||||
def get_limitation(session_name: str) -> int:
    """获取会话的限制次数"""
    import config

    if type(config.rate_limitation) == dict:
        # 如果被指定了
        if session_name in config.rate_limitation:
            return config.rate_limitation[session_name]
        else:
            return config.rate_limitation["default"]
    elif type(config.rate_limitation) == int:
        return config.rate_limitation
||||
|
||||
def add_usage(session_name: str):
|
||||
"""增加会话的对话次数"""
|
||||
global __crt_minute_usage__
|
||||
@@ -56,12 +70,7 @@ def get_rest_wait_time(session_name: str, spent: float) -> float:
|
||||
"""获取会话此回合的剩余等待时间"""
|
||||
global __crt_minute_usage__
|
||||
|
||||
import config
|
||||
|
||||
if not hasattr(config, 'rate_limitation'):
|
||||
return 0
|
||||
|
||||
min_seconds_per_round = 60.0 / config.rate_limitation
|
||||
min_seconds_per_round = 60.0 / get_limitation(session_name)
|
||||
|
||||
if session_name in __crt_minute_usage__:
|
||||
return max(0, min_seconds_per_round - spent)
|
||||
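A worked example of the wait strategy above, using illustrative numbers:

# assume get_limitation(session_name) == 10  ->  min_seconds_per_round = 60.0 / 10 = 6.0
# if this round already took spent = 2.5 s, get_rest_wait_time() returns max(0, 6.0 - 2.5) = 3.5 s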
@@ -73,13 +82,8 @@ def is_reach_limit(session_name: str) -> bool:
|
||||
"""判断会话是否超过限制"""
|
||||
global __crt_minute_usage__
|
||||
|
||||
import config
|
||||
|
||||
if not hasattr(config, 'rate_limitation'):
|
||||
return False
|
||||
|
||||
if session_name in __crt_minute_usage__:
|
||||
return __crt_minute_usage__[session_name] >= config.rate_limitation
|
||||
return __crt_minute_usage__[session_name] >= get_limitation(session_name)
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
pkg/qqbot/sources/__init__.py (new file, 0 lines)
pkg/qqbot/sources/nakuru.py (new file, 321 lines)
@@ -0,0 +1,321 @@
|
||||
import mirai
|
||||
|
||||
from ..adapter import MessageSourceAdapter, MessageConverter, EventConverter
|
||||
import nakuru
|
||||
import nakuru.entities.components as nkc
|
||||
|
||||
import asyncio
|
||||
import typing
|
||||
import traceback
|
||||
import logging
|
||||
import json
|
||||
|
||||
from pkg.qqbot.blob import Forward, ForwardMessageNode, ForwardMessageDiaplay
|
||||
|
||||
|
||||
class NakuruProjectMessageConverter(MessageConverter):
|
||||
"""消息转换器"""
|
||||
@staticmethod
|
||||
def yiri2target(message_chain: mirai.MessageChain) -> list:
|
||||
msg_list = []
|
||||
if type(message_chain) is mirai.MessageChain:
|
||||
msg_list = message_chain.__root__
|
||||
elif type(message_chain) is list:
|
||||
msg_list = message_chain
|
||||
else:
|
||||
raise Exception("Unknown message type: " + str(message_chain) + str(type(message_chain)))
|
||||
|
||||
nakuru_msg_list = []
|
||||
|
||||
# 遍历并转换
|
||||
for component in msg_list:
|
||||
if type(component) is mirai.Plain:
|
||||
nakuru_msg_list.append(nkc.Plain(component.text, False))
|
||||
elif type(component) is mirai.Image:
|
||||
if component.url is not None:
|
||||
nakuru_msg_list.append(nkc.Image.fromURL(component.url))
|
||||
elif component.base64 is not None:
|
||||
nakuru_msg_list.append(nkc.Image.fromBase64(component.base64))
|
||||
elif component.path is not None:
|
||||
nakuru_msg_list.append(nkc.Image.fromFileSystem(component.path))
|
||||
elif type(component) is mirai.Face:
|
||||
nakuru_msg_list.append(nkc.Face(id=component.face_id))
|
||||
elif type(component) is mirai.At:
|
||||
nakuru_msg_list.append(nkc.At(qq=component.target))
|
||||
elif type(component) is mirai.AtAll:
|
||||
nakuru_msg_list.append(nkc.AtAll())
|
||||
elif type(component) is mirai.Voice:
|
||||
if component.url is not None:
|
||||
nakuru_msg_list.append(nkc.Record.fromURL(component.url))
|
||||
elif component.path is not None:
|
||||
nakuru_msg_list.append(nkc.Record.fromFileSystem(component.path))
|
||||
elif type(component) is Forward:
|
||||
# 转发消息
|
||||
yiri_forward_node_list = component.node_list
|
||||
nakuru_forward_node_list = []
|
||||
|
||||
# 遍历并转换
|
||||
for yiri_forward_node in yiri_forward_node_list:
|
||||
try:
|
||||
content_list = NakuruProjectMessageConverter.yiri2target(yiri_forward_node.message_chain)
|
||||
nakuru_forward_node = nkc.Node(
|
||||
name=yiri_forward_node.sender_name,
|
||||
uin=yiri_forward_node.sender_id,
|
||||
time=int(yiri_forward_node.time.timestamp()) if yiri_forward_node.time is not None else None,
|
||||
content=content_list
|
||||
)
|
||||
nakuru_forward_node_list.append(nakuru_forward_node)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
nakuru_msg_list.append(nakuru_forward_node_list)
|
||||
else:
|
||||
nakuru_msg_list.append(nkc.Plain(str(component)))
|
||||
|
||||
return nakuru_msg_list
|
||||
|
||||
@staticmethod
|
||||
def target2yiri(message_chain: typing.Any, message_id: int = -1) -> mirai.MessageChain:
|
||||
"""将Yiri的消息链转换为YiriMirai的消息链"""
|
||||
assert type(message_chain) is list
|
||||
|
||||
yiri_msg_list = []
|
||||
import datetime
|
||||
# 添加Source组件以标记message_id等信息
|
||||
yiri_msg_list.append(mirai.models.message.Source(id=message_id, time=datetime.datetime.now()))
|
||||
for component in message_chain:
|
||||
if type(component) is nkc.Plain:
|
||||
yiri_msg_list.append(mirai.Plain(text=component.text))
|
||||
elif type(component) is nkc.Image:
|
||||
yiri_msg_list.append(mirai.Image(url=component.url))
|
||||
elif type(component) is nkc.Face:
|
||||
yiri_msg_list.append(mirai.Face(face_id=component.id))
|
||||
elif type(component) is nkc.At:
|
||||
yiri_msg_list.append(mirai.At(target=component.qq))
|
||||
elif type(component) is nkc.AtAll:
|
||||
yiri_msg_list.append(mirai.AtAll())
|
||||
else:
|
||||
pass
|
||||
logging.debug("转换后的消息链: " + str(yiri_msg_list))
|
||||
chain = mirai.MessageChain(yiri_msg_list)
|
||||
return chain
|
||||
|
||||
|
||||
class NakuruProjectEventConverter(EventConverter):
|
||||
"""事件转换器"""
|
||||
@staticmethod
|
||||
def yiri2target(event: typing.Type[mirai.Event]):
|
||||
if event is mirai.GroupMessage:
|
||||
return nakuru.GroupMessage
|
||||
elif event is mirai.FriendMessage:
|
||||
return nakuru.FriendMessage
|
||||
else:
|
||||
raise Exception("未支持转换的事件类型: " + str(event))
|
||||
|
||||
@staticmethod
|
||||
def target2yiri(event: typing.Any) -> mirai.Event:
|
||||
yiri_chain = NakuruProjectMessageConverter.target2yiri(event.message, event.message_id)
|
||||
if type(event) is nakuru.FriendMessage: # 私聊消息事件
|
||||
return mirai.FriendMessage(
|
||||
sender=mirai.models.entities.Friend(
|
||||
id=event.sender.user_id,
|
||||
nickname=event.sender.nickname,
|
||||
remark=event.sender.nickname
|
||||
),
|
||||
message_chain=yiri_chain,
|
||||
time=event.time
|
||||
)
|
||||
elif type(event) is nakuru.GroupMessage: # 群聊消息事件
|
||||
permission = "MEMBER"
|
||||
|
||||
if event.sender.role == "admin":
|
||||
permission = "ADMINISTRATOR"
|
||||
elif event.sender.role == "owner":
|
||||
permission = "OWNER"
|
||||
|
||||
import mirai.models.entities as entities
|
||||
return mirai.GroupMessage(
|
||||
sender=mirai.models.entities.GroupMember(
|
||||
id=event.sender.user_id,
|
||||
member_name=event.sender.nickname,
|
||||
permission=permission,
|
||||
group=mirai.models.entities.Group(
|
||||
id=event.group_id,
|
||||
name=event.sender.nickname,
|
||||
permission=entities.Permission.Member
|
||||
),
|
||||
special_title=event.sender.title,
|
||||
join_timestamp=0,
|
||||
last_speak_timestamp=0,
|
||||
mute_time_remaining=0,
|
||||
),
|
||||
message_chain=yiri_chain,
|
||||
time=event.time
|
||||
)
|
||||
else:
|
||||
raise Exception("未支持转换的事件类型: " + str(event))
|
||||
|
||||
|
||||
class NakuruProjectAdapter(MessageSourceAdapter):
|
||||
"""nakuru-project适配器"""
|
||||
bot: nakuru.CQHTTP
|
||||
bot_account_id: int
|
||||
|
||||
message_converter: NakuruProjectMessageConverter = NakuruProjectMessageConverter()
|
||||
event_converter: NakuruProjectEventConverter = NakuruProjectEventConverter()
|
||||
|
||||
listener_list: list[dict]
|
||||
|
||||
def __init__(self, cfg: dict):
|
||||
"""初始化nakuru-project的对象"""
|
||||
self.bot = nakuru.CQHTTP(**cfg)
|
||||
self.listener_list = []
|
||||
# nakuru库有bug,这个接口没法带access_token,会失败
|
||||
# 所以目前自行发请求
|
||||
import config
|
||||
import requests
|
||||
resp = requests.get(
|
||||
url="http://{}:{}/get_login_info".format(config.nakuru_config['host'], config.nakuru_config['http_port']),
|
||||
headers={
|
||||
'Authorization': "Bearer " + config.nakuru_config['token'] if 'token' in config.nakuru_config else ""
|
||||
},
|
||||
timeout=5
|
||||
)
|
||||
if resp.status_code == 403:
|
||||
logging.error("go-cqhttp拒绝访问,请检查config.py中nakuru_config的token是否与go-cqhttp设置的access-token匹配")
|
||||
raise Exception("go-cqhttp拒绝访问,请检查config.py中nakuru_config的token是否与go-cqhttp设置的access-token匹配")
|
||||
self.bot_account_id = int(resp.json()['data']['user_id'])
|
||||
|
||||
def send_message(
|
||||
self,
|
||||
target_type: str,
|
||||
target_id: str,
|
||||
message: typing.Union[mirai.MessageChain, list],
|
||||
converted: bool = False
|
||||
):
|
||||
task = None
|
||||
|
||||
converted_msg = self.message_converter.yiri2target(message) if not converted else message
|
||||
|
||||
# 检查是否有转发消息
|
||||
has_forward = False
|
||||
for msg in converted_msg:
|
||||
if type(msg) is list: # 转发消息,仅回复此消息组件
|
||||
has_forward = True
|
||||
converted_msg = msg
|
||||
break
|
||||
if has_forward:
|
||||
if target_type == "group":
|
||||
task = self.bot.sendGroupForwardMessage(int(target_id), converted_msg)
|
||||
elif target_type == "person":
|
||||
task = self.bot.sendPrivateForwardMessage(int(target_id), converted_msg)
|
||||
else:
|
||||
raise Exception("Unknown target type: " + target_type)
|
||||
else:
|
||||
if target_type == "group":
|
||||
task = self.bot.sendGroupMessage(int(target_id), converted_msg)
|
||||
elif target_type == "person":
|
||||
task = self.bot.sendFriendMessage(int(target_id), converted_msg)
|
||||
else:
|
||||
raise Exception("Unknown target type: " + target_type)
|
||||
|
||||
asyncio.run(task)
|
||||
|
||||
def reply_message(
|
||||
self,
|
||||
message_source: mirai.MessageEvent,
|
||||
message: mirai.MessageChain,
|
||||
quote_origin: bool = False
|
||||
):
|
||||
message = self.message_converter.yiri2target(message)
|
||||
if quote_origin:
|
||||
# 在前方添加引用组件
|
||||
message.insert(0, nkc.Reply(
|
||||
id=message_source.message_chain.message_id,
|
||||
)
|
||||
)
|
||||
if type(message_source) is mirai.GroupMessage:
|
||||
self.send_message(
|
||||
"group",
|
||||
message_source.sender.group.id,
|
||||
message,
|
||||
converted=True
|
||||
)
|
||||
elif type(message_source) is mirai.FriendMessage:
|
||||
self.send_message(
|
||||
"person",
|
||||
message_source.sender.id,
|
||||
message,
|
||||
converted=True
|
||||
)
|
||||
else:
|
||||
raise Exception("Unknown message source type: " + str(type(message_source)))
|
||||
|
||||
def is_muted(self, group_id: int) -> bool:
|
||||
import time
|
||||
# 检查是否被禁言
|
||||
group_member_info = asyncio.run(self.bot.getGroupMemberInfo(group_id, self.bot_account_id))
|
||||
return group_member_info.shut_up_timestamp > int(time.time())
|
||||
|
||||
def register_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
try:
|
||||
logging.debug("注册监听器: " + str(event_type) + " -> " + str(callback))
|
||||
|
||||
# 包装函数
|
||||
async def listener_wrapper(app: nakuru.CQHTTP, source: self.event_converter.yiri2target(event_type)):
|
||||
callback(self.event_converter.target2yiri(source))
|
||||
|
||||
# 将包装函数和原函数的对应关系存入列表
|
||||
self.listener_list.append(
|
||||
{
|
||||
"event_type": event_type,
|
||||
"callable": callback,
|
||||
"wrapper": listener_wrapper,
|
||||
}
|
||||
)
|
||||
|
||||
# 注册监听器
|
||||
self.bot.receiver(self.event_converter.yiri2target(event_type).__name__)(listener_wrapper)
|
||||
logging.debug("注册完成")
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
raise e
|
||||
|
||||
def unregister_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
nakuru_event_name = self.event_converter.yiri2target(event_type).__name__
|
||||
|
||||
new_event_list = []
|
||||
|
||||
# 从本对象的监听器列表中查找并删除
|
||||
target_wrapper = None
|
||||
for listener in self.listener_list:
|
||||
if listener["event_type"] == event_type and listener["callable"] == callback:
|
||||
target_wrapper = listener["wrapper"]
|
||||
self.listener_list.remove(listener)
|
||||
break
|
||||
|
||||
if target_wrapper is None:
|
||||
raise Exception("未找到对应的监听器")
|
||||
|
||||
for func in self.bot.event[nakuru_event_name]:
|
||||
if func.callable != target_wrapper:
|
||||
new_event_list.append(func)
|
||||
|
||||
self.bot.event[nakuru_event_name] = new_event_list
|
||||
|
||||
def run_sync(self):
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
self.bot.run()
|
||||
|
||||
def kill(self) -> bool:
|
||||
return False
|
||||
pkg/qqbot/sources/yirimirai.py (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
from ..adapter import MessageSourceAdapter
|
||||
import mirai
|
||||
import mirai.models.bus
|
||||
from mirai.bot import MiraiRunner
|
||||
|
||||
import asyncio
|
||||
import typing
|
||||
|
||||
|
||||
class YiriMiraiAdapter(MessageSourceAdapter):
|
||||
"""YiriMirai适配器"""
|
||||
bot: mirai.Mirai
|
||||
|
||||
def __init__(self, config: dict):
|
||||
"""初始化YiriMirai的对象"""
|
||||
if 'adapter' not in config or \
|
||||
config['adapter'] == 'WebSocketAdapter':
|
||||
self.bot = mirai.Mirai(
|
||||
qq=config['qq'],
|
||||
adapter=mirai.WebSocketAdapter(
|
||||
host=config['host'],
|
||||
port=config['port'],
|
||||
verify_key=config['verifyKey']
|
||||
)
|
||||
)
|
||||
elif config['adapter'] == 'HTTPAdapter':
|
||||
self.bot = mirai.Mirai(
|
||||
qq=config['qq'],
|
||||
adapter=mirai.HTTPAdapter(
|
||||
host=config['host'],
|
||||
port=config['port'],
|
||||
verify_key=config['verifyKey']
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise Exception('Unknown adapter for YiriMirai: ' + config['adapter'])
|
||||
|
||||
def send_message(
|
||||
self,
|
||||
target_type: str,
|
||||
target_id: str,
|
||||
message: mirai.MessageChain
|
||||
):
|
||||
"""发送消息
|
||||
|
||||
Args:
|
||||
target_type (str): 目标类型,`person`或`group`
|
||||
target_id (str): 目标ID
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
"""
|
||||
task = None
|
||||
if target_type == 'person':
|
||||
task = self.bot.send_friend_message(int(target_id), message)
|
||||
elif target_type == 'group':
|
||||
task = self.bot.send_group_message(int(target_id), message)
|
||||
else:
|
||||
raise Exception('Unknown target type: ' + target_type)
|
||||
|
||||
asyncio.run(task)
|
||||
|
||||
def reply_message(
|
||||
self,
|
||||
message_source: mirai.MessageEvent,
|
||||
message: mirai.MessageChain,
|
||||
quote_origin: bool = False
|
||||
):
|
||||
"""回复消息
|
||||
|
||||
Args:
|
||||
message_source (mirai.MessageEvent): YiriMirai消息源事件
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
quote_origin (bool, optional): 是否引用原消息. Defaults to False.
|
||||
"""
|
||||
asyncio.run(self.bot.send(message_source, message, quote_origin))
|
||||
|
||||
def is_muted(self, group_id: int) -> bool:
|
||||
result = self.bot.member_info(target=group_id, member_id=self.bot.qq).get()
|
||||
result = asyncio.run(result)
|
||||
if result.mute_time_remaining > 0:
|
||||
return True
|
||||
return False
|
||||
|
||||
def register_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注册事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
self.bot.on(event_type)(callback)
|
||||
|
||||
def unregister_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注销事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
assert isinstance(self.bot, mirai.Mirai)
|
||||
bus = self.bot.bus
|
||||
assert isinstance(bus, mirai.models.bus.ModelEventBus)
|
||||
|
||||
bus.unsubscribe(event_type, callback)
|
||||
|
||||
def run_sync(self):
|
||||
"""运行YiriMirai"""
|
||||
|
||||
# 创建新的
|
||||
loop = asyncio.new_event_loop()
|
||||
|
||||
loop.run_until_complete(MiraiRunner(self.bot)._run())
|
||||
|
||||
def kill(self) -> bool:
|
||||
return False
|
||||
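A minimal sketch of driving the YiriMirai adapter directly, assuming a mirai-api-http backend; host, port, verifyKey and the QQ number are placeholders:

import mirai
from pkg.qqbot.sources.yirimirai import YiriMiraiAdapter

adapter = YiriMiraiAdapter({
    "adapter": "WebSocketAdapter",   # or "HTTPAdapter"
    "qq": 123456789,                 # placeholder bot account
    "host": "localhost",
    "port": 8080,
    "verifyKey": "yirimirai",
})

def on_friend(event: mirai.FriendMessage):
    adapter.reply_message(event, mirai.MessageChain([mirai.Plain("pong")]))

adapter.register_listener(mirai.FriendMessage, on_friend)
adapter.run_sync()   # blocking; the bot runs this in a worker thread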
@@ -0,0 +1 @@
|
||||
from .threadctl import ThreadCtl
|
||||
pkg/utils/announcement.py (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
import base64
|
||||
import os
|
||||
import json
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
def read_latest() -> list:
|
||||
import pkg.utils.network as network
|
||||
resp = requests.get(
|
||||
url="https://api.github.com/repos/RockChinQ/QChatGPT/contents/res/announcement.json",
|
||||
proxies=network.wrapper_proxies()
|
||||
)
|
||||
obj_json = resp.json()
|
||||
b64_content = obj_json["content"]
|
||||
# 解码
|
||||
content = base64.b64decode(b64_content).decode("utf-8")
|
||||
return json.loads(content)
|
||||
|
||||
|
||||
def read_saved() -> list:
|
||||
# 已保存的在res/announcement_saved
|
||||
# 检查是否存在
|
||||
if not os.path.exists("res/announcement_saved.json"):
|
||||
with open("res/announcement_saved.json", "w", encoding="utf-8") as f:
|
||||
f.write("[]")
|
||||
|
||||
with open("res/announcement_saved.json", "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
|
||||
return json.loads(content)
|
||||
|
||||
|
||||
def write_saved(content: list):
|
||||
# 已保存的在res/announcement_saved
|
||||
with open("res/announcement_saved.json", "w", encoding="utf-8") as f:
|
||||
f.write(json.dumps(content, indent=4, ensure_ascii=False))
|
||||
|
||||
|
||||
def fetch_new() -> list:
|
||||
latest = read_latest()
|
||||
saved = read_saved()
|
||||
|
||||
to_show: list = []
|
||||
|
||||
for item in latest:
|
||||
# 遍历saved检查是否有相同id的公告
|
||||
for saved_item in saved:
|
||||
if saved_item["id"] == item["id"]:
|
||||
break
|
||||
else:
|
||||
# 没有相同id的公告
|
||||
to_show.append(item)
|
||||
|
||||
write_saved(latest)
|
||||
return to_show
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
resp = requests.get(
|
||||
url="https://api.github.com/repos/RockChinQ/QChatGPT/contents/res/announcement.json",
|
||||
)
|
||||
obj_json = resp.json()
|
||||
b64_content = obj_json["content"]
|
||||
# 解码
|
||||
content = base64.b64decode(b64_content).decode("utf-8")
|
||||
print(json.dumps(json.loads(content), indent=4, ensure_ascii=False))
|
||||
File diff suppressed because one or more lines are too long
@@ -1,50 +1,94 @@
|
||||
import threading
|
||||
from pkg.utils import ThreadCtl
|
||||
|
||||
|
||||
context = {
|
||||
'inst': {
|
||||
'database.manager.DatabaseManager': None,
|
||||
'openai.manager.OpenAIInteract': None,
|
||||
'qqbot.manager.QQBotManager': None,
|
||||
},
|
||||
'pool_ctl': None,
|
||||
'logger_handler': None,
|
||||
'config': None,
|
||||
'plugin_host': None,
|
||||
}
|
||||
context_lock = threading.Lock()
|
||||
|
||||
|
||||
### context耦合度非常高,需要大改 ###
|
||||
def set_config(inst):
|
||||
context_lock.acquire()
|
||||
context['config'] = inst
|
||||
context_lock.release()
|
||||
|
||||
|
||||
def get_config():
|
||||
return context['config']
|
||||
context_lock.acquire()
|
||||
t = context['config']
|
||||
context_lock.release()
|
||||
return t
|
||||
|
||||
|
||||
def set_database_manager(inst):
|
||||
context_lock.acquire()
|
||||
context['inst']['database.manager.DatabaseManager'] = inst
|
||||
context_lock.release()
|
||||
|
||||
|
||||
def get_database_manager():
|
||||
return context['inst']['database.manager.DatabaseManager']
|
||||
context_lock.acquire()
|
||||
t = context['inst']['database.manager.DatabaseManager']
|
||||
context_lock.release()
|
||||
return t
|
||||
|
||||
|
||||
def set_openai_manager(inst):
|
||||
context_lock.acquire()
|
||||
context['inst']['openai.manager.OpenAIInteract'] = inst
|
||||
context_lock.release()
|
||||
|
||||
|
||||
def get_openai_manager():
|
||||
return context['inst']['openai.manager.OpenAIInteract']
|
||||
context_lock.acquire()
|
||||
t = context['inst']['openai.manager.OpenAIInteract']
|
||||
context_lock.release()
|
||||
return t
|
||||
|
||||
|
||||
def set_qqbot_manager(inst):
|
||||
context_lock.acquire()
|
||||
context['inst']['qqbot.manager.QQBotManager'] = inst
|
||||
context_lock.release()
|
||||
|
||||
|
||||
def get_qqbot_manager():
|
||||
return context['inst']['qqbot.manager.QQBotManager']
|
||||
context_lock.acquire()
|
||||
t = context['inst']['qqbot.manager.QQBotManager']
|
||||
context_lock.release()
|
||||
return t
|
||||
|
||||
|
||||
def set_plugin_host(inst):
|
||||
context_lock.acquire()
|
||||
context['plugin_host'] = inst
|
||||
context_lock.release()
|
||||
|
||||
|
||||
def get_plugin_host():
|
||||
return context['plugin_host']
|
||||
context_lock.acquire()
|
||||
t = context['plugin_host']
|
||||
context_lock.release()
|
||||
return t
|
||||
|
||||
|
||||
def set_thread_ctl(inst):
|
||||
context_lock.acquire()
|
||||
context['pool_ctl'] = inst
|
||||
context_lock.release()
|
||||
|
||||
|
||||
def get_thread_ctl() -> ThreadCtl:
|
||||
context_lock.acquire()
|
||||
t: ThreadCtl = context['pool_ctl']
|
||||
context_lock.release()
|
||||
return t
|
||||
|
||||
@@ -1,13 +1,19 @@
|
||||
# OpenAI账号免费额度剩余查询
|
||||
import requests
|
||||
|
||||
|
||||
def fetch_credit_data(api_key: str) -> dict:
|
||||
def fetch_credit_data(api_key: str, http_proxy: str) -> dict:
|
||||
"""OpenAI账号免费额度剩余查询"""
|
||||
proxies = {
|
||||
"http":http_proxy,
|
||||
"https":http_proxy
|
||||
} if http_proxy is not None else None
|
||||
|
||||
resp = requests.get(
|
||||
url="https://api.openai.com/dashboard/billing/credit_grants",
|
||||
headers={
|
||||
"Authorization": "Bearer {}".format(api_key),
|
||||
}
|
||||
},
|
||||
proxies=proxies
|
||||
)
|
||||
|
||||
return resp.json()
|
||||
pkg/utils/log.py (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
import os
|
||||
import time
|
||||
import logging
|
||||
import shutil
|
||||
|
||||
|
||||
log_file_name = "qchatgpt.log"
|
||||
|
||||
|
||||
log_colors_config = {
|
||||
'DEBUG': 'green', # cyan white
|
||||
'INFO': 'white',
|
||||
'WARNING': 'yellow',
|
||||
'ERROR': 'red',
|
||||
'CRITICAL': 'cyan',
|
||||
}
|
||||
|
||||
|
||||
def init_runtime_log_file():
|
||||
"""为此次运行生成日志文件
|
||||
格式: qchatgpt-yyyy-MM-dd-HH-mm-ss.log
|
||||
"""
|
||||
global log_file_name
|
||||
|
||||
# 检查logs目录是否存在
|
||||
if not os.path.exists("logs"):
|
||||
os.mkdir("logs")
|
||||
|
||||
# 检查本目录是否有qchatgpt.log,若有,移动到logs目录
|
||||
if os.path.exists("qchatgpt.log"):
|
||||
shutil.move("qchatgpt.log", "logs/qchatgpt.legacy.log")
|
||||
|
||||
log_file_name = "logs/qchatgpt-%s.log" % time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
|
||||
|
||||
|
||||
def reset_logging():
|
||||
global log_file_name
|
||||
|
||||
import config
|
||||
import pkg.utils.context
|
||||
import colorlog
|
||||
|
||||
if pkg.utils.context.context['logger_handler'] is not None:
|
||||
logging.getLogger().removeHandler(pkg.utils.context.context['logger_handler'])
|
||||
|
||||
for handler in logging.getLogger().handlers:
|
||||
logging.getLogger().removeHandler(handler)
|
||||
|
||||
logging.basicConfig(level=config.logging_level, # 设置日志输出格式
|
||||
filename=log_file_name, # log日志输出的文件位置和文件名
|
||||
format="[%(asctime)s.%(msecs)03d] %(pathname)s (%(lineno)d) - [%(levelname)s] :\n%(message)s",
|
||||
# 日志输出的格式
|
||||
# -8表示占位符,让输出左对齐,输出长度都为8位
|
||||
datefmt="%Y-%m-%d %H:%M:%S" # 时间输出的格式
|
||||
)
|
||||
sh = logging.StreamHandler()
|
||||
sh.setLevel(config.logging_level)
|
||||
sh.setFormatter(colorlog.ColoredFormatter(
|
||||
fmt="%(log_color)s[%(asctime)s.%(msecs)03d] %(filename)s (%(lineno)d) - [%(levelname)s] : "
|
||||
"%(message)s",
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
log_colors=log_colors_config
|
||||
))
|
||||
logging.getLogger().addHandler(sh)
|
||||
pkg.utils.context.context['logger_handler'] = sh
|
||||
return sh
|
||||
pkg/utils/network.py (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
|
||||
def wrapper_proxies() -> dict:
    """获取代理"""
    import config

    return {
        "http": config.openai_config['proxy'],
        "https": config.openai_config['proxy']
    } if 'proxy' in config.openai_config and (config.openai_config['proxy'] is not None) else None
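Typical call site for this helper, as in the announcement fetcher above; the returned mapping (or None) is handed straight to requests:

import requests
import pkg.utils.network as network

resp = requests.get(
    url="https://api.github.com/repos/RockChinQ/QChatGPT/contents/res/announcement.json",
    proxies=network.wrapper_proxies()   # None when no proxy is configured
)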
@@ -1,16 +1,25 @@
|
||||
from pip._internal import main as pipmain
|
||||
|
||||
import main
|
||||
import pkg.utils.log as log
|
||||
|
||||
|
||||
def install(package):
|
||||
pipmain(['install', package])
|
||||
main.reset_logging()
|
||||
log.reset_logging()
|
||||
|
||||
def install_upgrade(package):
|
||||
pipmain(['install', '--upgrade', package])
|
||||
log.reset_logging()
|
||||
|
||||
|
||||
def run_pip(params: list):
|
||||
pipmain(params)
|
||||
log.reset_logging()
|
||||
|
||||
|
||||
def install_requirements(file):
|
||||
pipmain(['install', '-r', file, "--upgrade"])
|
||||
main.reset_logging()
|
||||
log.reset_logging()
|
||||
|
||||
|
||||
def ensure_dulwich():
|
||||
|
||||
@@ -3,46 +3,67 @@ import threading
|
||||
|
||||
import importlib
|
||||
import pkgutil
|
||||
import pkg.utils.context
|
||||
import pkg.utils.context as context
|
||||
import pkg.plugin.host
|
||||
|
||||
|
||||
def walk(module, prefix=''):
|
||||
def walk(module, prefix='', path_prefix=''):
|
||||
"""遍历并重载所有模块"""
|
||||
for item in pkgutil.iter_modules(module.__path__):
|
||||
if item.ispkg:
|
||||
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.')
|
||||
|
||||
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.', path_prefix + item.name + '/')
|
||||
else:
|
||||
logging.info('reload module: {}'.format(prefix + item.name))
|
||||
logging.info('reload module: {}, path: {}'.format(prefix + item.name, path_prefix + item.name + '.py'))
|
||||
pkg.plugin.host.__current_module_path__ = "plugins/" + path_prefix + item.name + '.py'
|
||||
importlib.reload(__import__(module.__name__ + '.' + item.name, fromlist=['']))
|
||||
|
||||
|
||||
def reload_all(notify=True):
|
||||
# 解除bot的事件注册
|
||||
import pkg
|
||||
pkg.utils.context.get_qqbot_manager().unsubscribe_all()
|
||||
context.get_qqbot_manager().unsubscribe_all()
|
||||
# 执行关闭流程
|
||||
logging.info("执行程序关闭流程")
|
||||
import main
|
||||
main.stop()
|
||||
|
||||
# 删除所有已注册的指令
|
||||
import pkg.qqbot.cmds.aamgr as cmdsmgr
|
||||
cmdsmgr.__command_list__ = {}
|
||||
cmdsmgr.__tree_index__ = {}
|
||||
|
||||
# 重载所有模块
|
||||
pkg.utils.context.context['exceeded_keys'] = pkg.utils.context.get_openai_manager().key_mgr.exceeded
|
||||
context = pkg.utils.context.context
|
||||
context.context['exceeded_keys'] = context.get_openai_manager().key_mgr.exceeded
|
||||
this_context = context.context
|
||||
walk(pkg)
|
||||
importlib.reload(__import__("config-template"))
|
||||
importlib.reload(__import__('config'))
|
||||
importlib.reload(__import__('main'))
|
||||
importlib.reload(__import__('banlist'))
|
||||
pkg.utils.context.context = context
|
||||
importlib.reload(__import__('tips'))
|
||||
context.context = this_context
|
||||
|
||||
# 重载插件
|
||||
import plugins
|
||||
walk(plugins)
|
||||
|
||||
# 初始化相关文件
|
||||
main.check_file()
|
||||
|
||||
# 执行启动流程
|
||||
logging.info("执行程序启动流程")
|
||||
threading.Thread(target=main.main, args=(False,), daemon=False).start()
|
||||
main.load_config()
|
||||
main.complete_tips()
|
||||
context.get_thread_ctl().reload(
|
||||
admin_pool_num=context.get_config().admin_pool_num,
|
||||
user_pool_num=context.get_config().user_pool_num
|
||||
)
|
||||
context.get_thread_ctl().submit_sys_task(
|
||||
main.start,
|
||||
False
|
||||
)
|
||||
|
||||
logging.info('程序启动完成')
|
||||
if notify:
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("重载完成")
|
||||
context.get_qqbot_manager().notify_admin("重载完成")
|
||||
|
||||
pkg/utils/text2img.py (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
import logging
|
||||
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
import re
|
||||
import os
|
||||
import config
|
||||
import traceback
|
||||
|
||||
text_render_font: ImageFont = None
|
||||
|
||||
if config.blob_message_strategy == "image": # 仅在启用了image时才加载字体
|
||||
use_font = config.font_path
|
||||
try:
|
||||
|
||||
# 检查是否存在
|
||||
if not os.path.exists(use_font):
|
||||
# 若是windows系统,使用微软雅黑
|
||||
if os.name == "nt":
|
||||
use_font = "C:/Windows/Fonts/msyh.ttc"
|
||||
if not os.path.exists(use_font):
|
||||
logging.warn("未找到字体文件,且无法使用Windows自带字体,更换为转发消息组件以发送长消息,您可以在config.py中调整相关设置。")
|
||||
config.blob_message_strategy = "forward"
|
||||
else:
|
||||
logging.info("使用Windows自带字体:" + use_font)
|
||||
text_render_font = ImageFont.truetype(use_font, 32, encoding="utf-8")
|
||||
else:
|
||||
logging.warn("未找到字体文件,且无法使用Windows自带字体,更换为转发消息组件以发送长消息,您可以在config.py中调整相关设置。")
|
||||
config.blob_message_strategy = "forward"
|
||||
else:
|
||||
text_render_font = ImageFont.truetype(use_font, 32, encoding="utf-8")
|
||||
except:
|
||||
traceback.print_exc()
|
||||
logging.error("加载字体文件失败({}),更换为转发消息组件以发送长消息,您可以在config.py中调整相关设置。".format(use_font))
|
||||
config.blob_message_strategy = "forward"
|
||||
|
||||
|
||||
def indexNumber(path=''):
|
||||
"""
|
||||
查找字符串中数字所在串中的位置
|
||||
:param path:目标字符串
|
||||
:return:<class 'list'>: <class 'list'>: [['1', 16], ['2', 35], ['1', 51]]
|
||||
"""
|
||||
kv = []
|
||||
nums = []
|
||||
beforeDatas = re.findall('[\d]+', path)
|
||||
for num in beforeDatas:
|
||||
indexV = []
|
||||
times = path.count(num)
|
||||
if times > 1:
|
||||
if num not in nums:
|
||||
indexs = re.finditer(num, path)
|
||||
for index in indexs:
|
||||
iV = []
|
||||
i = index.span()[0]
|
||||
iV.append(num)
|
||||
iV.append(i)
|
||||
kv.append(iV)
|
||||
nums.append(num)
|
||||
else:
|
||||
index = path.find(num)
|
||||
indexV.append(num)
|
||||
indexV.append(index)
|
||||
kv.append(indexV)
|
||||
# 根据数字位置排序
|
||||
indexSort = []
|
||||
resultIndex = []
|
||||
for vi in kv:
|
||||
indexSort.append(vi[1])
|
||||
indexSort.sort()
|
||||
for i in indexSort:
|
||||
for v in kv:
|
||||
if i == v[1]:
|
||||
resultIndex.append(v)
|
||||
return resultIndex
|
||||
|
||||
|
||||
def get_size(file):
|
||||
# 获取文件大小:KB
|
||||
size = os.path.getsize(file)
|
||||
return size / 1024
|
||||
|
||||
|
||||
def get_outfile(infile, outfile):
|
||||
if outfile:
|
||||
return outfile
|
||||
dir, suffix = os.path.splitext(infile)
|
||||
outfile = '{}-out{}'.format(dir, suffix)
|
||||
return outfile
|
||||
|
||||
|
||||
def compress_image(infile, outfile='', kb=100, step=20, quality=90):
|
||||
"""不改变图片尺寸压缩到指定大小
|
||||
:param infile: 压缩源文件
|
||||
:param outfile: 压缩文件保存地址
|
||||
:param mb: 压缩目标,KB
|
||||
:param step: 每次调整的压缩比率
|
||||
:param quality: 初始压缩比率
|
||||
:return: 压缩文件地址,压缩文件大小
|
||||
"""
|
||||
o_size = get_size(infile)
|
||||
if o_size <= kb:
|
||||
return infile, o_size
|
||||
outfile = get_outfile(infile, outfile)
|
||||
while o_size > kb:
|
||||
im = Image.open(infile)
|
||||
im.save(outfile, quality=quality)
|
||||
if quality - step < 0:
|
||||
break
|
||||
quality -= step
|
||||
o_size = get_size(outfile)
|
||||
return outfile, get_size(outfile)
|
||||
|
||||
|
||||
def text_to_image(text_str: str, save_as="temp.png", width=800):
    global text_render_font

    text_str = text_str.replace("\t", " ")

    # Split into lines
    lines = text_str.split('\n')

    # Measure each line and wrap the ones that are too long
    final_lines = []

    text_width = width - 80
    for line in lines:
        # Wrap the line if it is wider than the drawable area
        line_width = text_render_font.getlength(line)
        if line_width < text_width:
            final_lines.append(line)
            continue
        else:
            rest_text = line
            while True:
                # Cut off the leading part that fits on one line
                point = int(len(rest_text) * (text_width / line_width))

                # Check whether the break point falls in the middle of a number
                numbers = indexNumber(rest_text)

                for number in numbers:
                    if number[1] < point < number[1] + len(number[0]) and number[1] != 0:
                        point = number[1]
                        break

                final_lines.append(rest_text[:point])
                rest_text = rest_text[point:]
                line_width = text_render_font.getlength(rest_text)
                if line_width < text_width:
                    final_lines.append(rest_text)
                    break
                else:
                    continue
    # Prepare the canvas
    img = Image.new('RGBA', (width, max(280, len(final_lines) * 35 + 65)), (255, 255, 255, 255))
    draw = ImageDraw.Draw(img, mode='RGBA')

    # Draw the body text
    line_number = 0
    offset_x = 20
    offset_y = 30
    for final_line in final_lines:
        draw.text((offset_x, offset_y + 35 * line_number), final_line, fill=(0, 0, 0), font=text_render_font)
        # Walk through this line and check for emoji
        idx_in_line = 0
        for ch in final_line:
            # if self.is_emoji(ch):
            #     emoji_img_valid = ensure_emoji(hex(ord(ch))[2:])
            #     if emoji_img_valid:  # the emoji image is available, draw it at the computed position
            #         emoji_image = Image.open("emojis/{}.png".format(hex(ord(ch))[2:]), mode='r').convert('RGBA')
            #         emoji_image = emoji_image.resize((32, 32))

            #         x, y = emoji_image.size

            #         final_emoji_img = Image.new('RGBA', emoji_image.size, (255, 255, 255))
            #         final_emoji_img.paste(emoji_image, (0, 0, x, y), emoji_image)

            #         img.paste(final_emoji_img, box=(int(offset_x + idx_in_line * 32), offset_y + 35 * line_number))

            # Account for the display width of the character
            char_code = ord(ch)
            if char_code >= 127:
                idx_in_line += 1
            else:
                idx_in_line += 0.5

        line_number += 1

    img.save(save_as)

    return save_as

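A minimal sketch of how the renderer above might be called, assuming `text_render_font` has already been initialised elsewhere in this module (the text and output path are illustrative):

```python
# Hypothetical call: render a reply into an image and get the saved path back.
path = text_to_image("first line\nsecond, much longer line that will be wrapped", save_as="reply.png")
print("image written to", path)
```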
93  pkg/utils/threadctl.py  Normal file
@@ -0,0 +1,93 @@
import threading
import time
from concurrent.futures import ThreadPoolExecutor


class Pool:
    """Thread pool wrapper"""
    pool_num: int = None
    ctl: ThreadPoolExecutor = None
    task_list: list = None
    task_list_lock: threading.Lock = None
    monitor_type = True

    def __init__(self, pool_num):
        self.pool_num = pool_num
        self.ctl = ThreadPoolExecutor(max_workers=self.pool_num)
        self.task_list = []
        self.task_list_lock = threading.Lock()

    def __thread_monitor__(self):
        while self.monitor_type:
            for t in self.task_list:
                if not t.done():
                    continue
                try:
                    self.task_list.pop(self.task_list.index(t))
                except Exception:
                    continue
            time.sleep(1)


class ThreadCtl:
    def __init__(self, sys_pool_num, admin_pool_num, user_pool_num):
        """Thread pool controller

        sys_pool_num: number of threads reserved for system tasks (>= 8)
        admin_pool_num: number of threads for handling admin messages (>= 1)
        user_pool_num: number of threads for handling user messages (>= 1)
        """
        if sys_pool_num < 8:
            raise Exception("Too few system threads (sys_pool_num needs >= 8, but received {})".format(sys_pool_num))
        if admin_pool_num < 1:
            raise Exception("Too few admin threads (admin_pool_num needs >= 1, but received {})".format(admin_pool_num))
        if user_pool_num < 1:
            raise Exception("Too few user threads (user_pool_num needs >= 1, but received {})".format(user_pool_num))
        self.__sys_pool__ = Pool(sys_pool_num)
        self.__admin_pool__ = Pool(admin_pool_num)
        self.__user_pool__ = Pool(user_pool_num)
        self.submit_sys_task(self.__sys_pool__.__thread_monitor__)
        self.submit_sys_task(self.__admin_pool__.__thread_monitor__)
        self.submit_sys_task(self.__user_pool__.__thread_monitor__)

    def __submit__(self, pool: Pool, fn, /, *args, **kwargs):
        t = pool.ctl.submit(fn, *args, **kwargs)
        pool.task_list_lock.acquire()
        pool.task_list.append(t)
        pool.task_list_lock.release()
        return t

    def submit_sys_task(self, fn, /, *args, **kwargs):
        return self.__submit__(
            self.__sys_pool__,
            fn, *args, **kwargs
        )

    def submit_admin_task(self, fn, /, *args, **kwargs):
        return self.__submit__(
            self.__admin_pool__,
            fn, *args, **kwargs
        )

    def submit_user_task(self, fn, /, *args, **kwargs):
        return self.__submit__(
            self.__user_pool__,
            fn, *args, **kwargs
        )

    def shutdown(self):
        self.__user_pool__.ctl.shutdown(cancel_futures=True)
        self.__user_pool__.monitor_type = False
        self.__admin_pool__.ctl.shutdown(cancel_futures=True)
        self.__admin_pool__.monitor_type = False
        self.__sys_pool__.monitor_type = False
        self.__sys_pool__.ctl.shutdown(wait=True, cancel_futures=False)

    def reload(self, admin_pool_num, user_pool_num):
        self.__user_pool__.ctl.shutdown(cancel_futures=True)
        self.__user_pool__.monitor_type = False
        self.__admin_pool__.ctl.shutdown(cancel_futures=True)
        self.__admin_pool__.monitor_type = False
        self.__admin_pool__ = Pool(admin_pool_num)
        self.__user_pool__ = Pool(user_pool_num)
        self.submit_sys_task(self.__admin_pool__.__thread_monitor__)
        self.submit_sys_task(self.__user_pool__.__thread_monitor__)
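A minimal usage sketch for the new controller, assuming it is imported as below (the pool sizes and the example task are illustrative; tasks are plain callables and the returned objects are `concurrent.futures.Future` instances):

```python
from pkg.utils.threadctl import ThreadCtl

def handle_message(text):
    # Stand-in for real message handling.
    return text.upper()

ctl = ThreadCtl(sys_pool_num=8, admin_pool_num=2, user_pool_num=4)
future = ctl.submit_user_task(handle_message, "hello")
print(future.result())  # -> "HELLO"
ctl.shutdown()
```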
@@ -1,6 +1,12 @@
import datetime
import logging
import os.path

import pkg.utils.context
import requests
import json

import pkg.utils.constants
import pkg.utils.network as network


def check_dulwich_closure():
@@ -28,34 +34,180 @@ def pull_latest(repo_path: str) -> bool:
    return True


def update_all() -> bool:
    """Update the source code with dulwich"""
    check_dulwich_closure()
    import dulwich
    try:
        before_commit_id = get_current_commit_id()
        from dulwich import porcelain
        repo = porcelain.open_repo('.')
        porcelain.pull(repo)
def is_newer(new_tag: str, old_tag: str):
    """Decide whether new_tag is newer, ignoring the fourth segment and the major version"""
    if new_tag == old_tag:
        return False

        change_log = ""
    new_tag = new_tag.split(".")
    old_tag = old_tag.split(".")

    # The major versions must match
    if new_tag[0] != old_tag[0]:
        return False

        for entry in repo.get_walker():
            if str(entry.commit.id)[2:-1] == before_commit_id:
                break
            tz = datetime.timezone(datetime.timedelta(hours=entry.commit.commit_timezone // 3600))
            dt = datetime.datetime.fromtimestamp(entry.commit.commit_time, tz)
            change_log += dt.strftime('%Y-%m-%d %H:%M:%S') + " [" + str(entry.commit.message, encoding="utf-8").strip() + "]\n"
    if len(new_tag) < 4:
        return True

        if change_log != "":
            pkg.utils.context.get_qqbot_manager().notify_admin("Code pull finished, changes:\n" + change_log)
            return True
        else:
            return False
    except ModuleNotFoundError:
        raise Exception("The dulwich module is not installed, see https://github.com/RockChinQ/QChatGPT/issues/77")
    except dulwich.porcelain.DivergedBranches:
        raise Exception("Branches have diverged; automatic updates only support the master branch, please update manually (https://github.com/RockChinQ/QChatGPT/issues/76)")
    # Join the first three segments and compare them
    new_tag = ".".join(new_tag[:3])
    old_tag = ".".join(old_tag[:3])

    return new_tag != old_tag


def get_release_list() -> list:
    """Fetch the release list"""
    rls_list_resp = requests.get(
        url="https://api.github.com/repos/RockChinQ/QChatGPT/releases",
        proxies=network.wrapper_proxies()
    )

    rls_list = rls_list_resp.json()

    return rls_list


def get_current_tag() -> str:
    """Get the current tag"""
    current_tag = pkg.utils.constants.semantic_version
    if os.path.exists("current_tag"):
        with open("current_tag", "r") as f:
            current_tag = f.read()

    return current_tag


def compare_version_str(v0: str, v1: str) -> int:
    """Compare two version strings"""

    # Strip a leading "v" if present
    if v0.startswith("v"):
        v0 = v0[1:]
    if v1.startswith("v"):
        v1 = v1[1:]

    v0: list = v0.split(".")
    v1: list = v1.split(".")

    # If the versions have a different number of segments, pad the shorter one with zeros
    if len(v0) < len(v1):
        v0.extend(["0"] * (len(v1) - len(v0)))
    elif len(v0) > len(v1):
        v1.extend(["0"] * (len(v0) - len(v1)))

    # Compare from the most significant segment down
    for i in range(len(v0)):
        if int(v0[i]) > int(v1[i]):
            return 1
        elif int(v0[i]) < int(v1[i]):
            return -1

    return 0


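A short illustration of how the two version helpers behave (the tags are made up; note that `is_newer` deliberately ignores anything past the third segment and returns False when the major versions differ):

```python
# compare_version_str pads the shorter version with zeros before comparing.
assert compare_version_str("v2.3.1", "2.3") == 1
assert compare_version_str("2.3.0", "v2.3") == 0

# is_newer only looks at the first three segments within the same major version.
assert is_newer("v2.4.0", "v2.3.5")
assert not is_newer("v2.3.5.1", "v2.3.5")
```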
def update_all(cli: bool = False) -> bool:
    """Check for updates and download the source code"""
    current_tag = get_current_tag()

    rls_list = get_release_list()

    latest_rls = {}
    rls_notes = []
    latest_tag_name = ""
    for rls in rls_list:
        rls_notes.append(rls['name'])  # use the release name as the note
        if latest_tag_name == "":
            latest_tag_name = rls['tag_name']

        if rls['tag_name'] == current_tag:
            break

        if latest_rls == {}:
            latest_rls = rls
    if not cli:
        logging.info("Changelog: {}".format(rls_notes))
    else:
        print("Changelog: {}".format(rls_notes))

    if latest_rls == {} and not is_newer(latest_tag_name, current_tag):  # no new version
        return False

    # Download the zip of the latest release into the temp directory
    if not cli:
        logging.info("Downloading the latest release: {}".format(latest_rls['zipball_url']))
    else:
        print("Downloading the latest release: {}".format(latest_rls['zipball_url']))
    zip_url = latest_rls['zipball_url']
    zip_resp = requests.get(
        url=zip_url,
        proxies=network.wrapper_proxies()
    )
    zip_data = zip_resp.content

    # Make sure the temp/updater directory exists
    if not os.path.exists("temp"):
        os.mkdir("temp")
    if not os.path.exists("temp/updater"):
        os.mkdir("temp/updater")
    with open("temp/updater/{}.zip".format(latest_rls['tag_name']), "wb") as f:
        f.write(zip_data)

    if not cli:
        logging.info("Finished downloading the latest release: {}".format("temp/updater/{}.zip".format(latest_rls['tag_name'])))
    else:
        print("Finished downloading the latest release: {}".format("temp/updater/{}.zip".format(latest_rls['tag_name'])))

    # Extract the zip into temp/updater/<tag_name>/
    import zipfile
    # Make sure the target folder is empty
    if os.path.exists("temp/updater/{}".format(latest_rls['tag_name'])):
        import shutil
        shutil.rmtree("temp/updater/{}".format(latest_rls['tag_name']))
    os.mkdir("temp/updater/{}".format(latest_rls['tag_name']))
    with zipfile.ZipFile("temp/updater/{}.zip".format(latest_rls['tag_name']), 'r') as zip_ref:
        zip_ref.extractall("temp/updater/{}".format(latest_rls['tag_name']))

    # Overwrite the source code
    source_root = ""
    # Find the first subdirectory inside temp/updater/<tag_name>/
    for root, dirs, files in os.walk("temp/updater/{}".format(latest_rls['tag_name'])):
        if root != "temp/updater/{}".format(latest_rls['tag_name']):
            source_root = root
            break

    # Overwrite the source code
    import shutil
    for root, dirs, files in os.walk(source_root):
        # Copy every file and subdirectory over
        for file in files:
            src = os.path.join(root, file)
            dst = src.replace(source_root, ".")
            if os.path.exists(dst):
                os.remove(dst)

            # Make sure the target directory exists
            if not os.path.exists(os.path.dirname(dst)):
                os.makedirs(os.path.dirname(dst))
            # Make sure the target file exists
            if not os.path.exists(dst):
                # Create the target file
                open(dst, "w").close()

            shutil.copy(src, dst)

    # Write current_tag to a file
    current_tag = latest_rls['tag_name']
    with open("current_tag", "w") as f:
        f.write(current_tag)

    # Notify the admins
    if not cli:
        import pkg.utils.context
        pkg.utils.context.get_qqbot_manager().notify_admin("Updated to the latest version: {}\nChangelog:\n{}\nFor the full changelog, see https://github.com/RockChinQ/QChatGPT/releases".format(current_tag, "\n".join(rls_notes[:-1])))
    else:
        print("Updated to the latest version: {}\nChangelog:\n{}\nFor the full changelog, see https://github.com/RockChinQ/QChatGPT/releases".format(current_tag, "\n".join(rls_notes[:-1])))
    return True


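A hedged sketch of running the updater by hand from the project root (it assumes network access to the GitHub API and a published release newer than the local `current_tag`):

```python
# Hypothetical manual run; update_all(cli=True) prints progress instead of logging it.
if update_all(cli=True):
    print("updated; restart the bot to pick up the new version")
else:
    print("already on the latest release")
```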
def is_repo(path: str) -> bool:
@@ -81,24 +233,12 @@ def get_remote_url(repo_path: str) -> str:


def get_current_version_info() -> str:
    """Get information about the current version"""
    check_dulwich_closure()

    from dulwich import porcelain

    repo = porcelain.open_repo('.')

    version_str = ""

    for entry in repo.get_walker():
        version_str += "Commit ID: " + str(entry.commit.id)[2:9] + "\n"
        tz = datetime.timezone(datetime.timedelta(hours=entry.commit.commit_timezone // 3600))
        dt = datetime.datetime.fromtimestamp(entry.commit.commit_time, tz)
        version_str += "Time: " + dt.strftime('%m-%d %H:%M:%S') + "\n"
        version_str += "Message: " + str(entry.commit.message, encoding="utf-8").strip() + "\n"
        version_str += "Author: '" + str(entry.commit.author)[2:-1] + "'"
        break

    return version_str
    rls_list = get_release_list()
    current_tag = get_current_tag()
    for rls in rls_list:
        if rls['tag_name'] == current_tag:
            return rls['name'] + "\n" + rls['body']
    return "Unknown version"


def get_commit_id_and_time_and_msg() -> str:
@@ -132,15 +272,44 @@ def get_current_commit_id() -> str:


def is_new_version_available() -> bool:
    """Check whether a new version is available"""
    check_dulwich_closure()
    # Fetch the release list from GitHub
    rls_list = get_release_list()
    if rls_list is None:
        return False

    from dulwich import porcelain
    # Get the current version
    current_tag = get_current_tag()

    repo = porcelain.open_repo('.')
    fetch_res = porcelain.ls_remote(porcelain.get_remote_repo(repo, "origin")[1])
    # Check whether there is a newer version
    latest_tag_name = ""
    for rls in rls_list:
        if latest_tag_name == "":
            latest_tag_name = rls['tag_name']
            break

    current_commit_id = get_current_commit_id()
    return is_newer(latest_tag_name, current_tag)

    latest_commit_id = str(fetch_res[b'HEAD'])[2:-1]

    return current_commit_id != latest_commit_id
def get_rls_notes() -> list:
    """Get the changelog"""
    # Fetch the release list from GitHub
    rls_list = get_release_list()
    if rls_list is None:
        return None

    # Get the current version
    current_tag = get_current_tag()

    # Collect the notes of releases newer than the current one
    rls_notes = []
    for rls in rls_list:
        if rls['tag_name'] == current_tag:
            break

        rls_notes.append(rls['name'])

    return rls_notes


if __name__ == "__main__":
    update_all()

@@ -1,9 +1,12 @@
requests~=2.28.1
openai~=0.27.0
pip~=22.3.1
dulwich~=0.21.3
requests~=2.31.0
openai~=0.27.8
dulwich~=0.21.5
colorlog~=6.6.0
yiri-mirai~=0.2.6.1
websockets~=10.4
yiri-mirai
websockets
urllib3~=1.26.10
func_timeout~=4.3.5
func_timeout~=4.3.5
Pillow
nakuru-project-idk
CallingGPT
tiktoken
1  res/announcement  Normal file
@@ -0,0 +1 @@
2023/3/31 21:35 [Plugin compatibility notice] If you use the revLibs plugin and have upgraded the main program to v2.3.0, please immediately send the !plugin update command to the bot account from an administrator account to update the reverse-library plugin, so as to resolve the compatibility issue caused by the scenario-preset refactor.
8  res/announcement.json  Normal file
@@ -0,0 +1,8 @@
[
    {
        "id": 2,
        "time": "2023-08-01 10:49:26",
        "timestamp": 1690858166,
        "content": "GPT function calling is now supported, see: https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8-%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0"
    }
]
95  res/docs/docker_deploy.md  Normal file
@@ -0,0 +1,95 @@

## Steps

### 1. Install Docker and Docker Compose

[How to install Docker on various platforms](https://yeasy.gitbook.io/docker_practice/install)

[How to install Compose](https://yeasy.gitbook.io/docker_practice/compose)

> `Docker Desktop for Mac/Windows` ships with the `docker-compose` binary, so it is available as soon as Docker is installed.
>
> Any installation method works, as long as Docker ends up installed.

### 2. Log in to QQ (run all of the following steps inside the project folder)

#### 2.1 Run the command

```
docker run -d -it --name mcl --network host -v ${PWD}/qq/plugins:/app/plugins -v ${PWD}/qq/config:/app/config -v ${PWD}/qq/data:/app/data -v ${PWD}/qq/bots:/app/bots --restart unless-stopped kagurazakanyaa/mcl:latest
```

This uses the image from [KagurazakaNyaa/mirai-console-loader-docker](https://github.com/KagurazakaNyaa/mirai-console-loader-docker).

#### 2.2 Enter the container

```
docker ps
```
Find the container ID in the output, for example:
```sh
CONTAINER ID   IMAGE                COMMAND      CREATED          STATUS          PORTS                                       NAMES
bce1e5568f46   kagurazakanyaa/mcl   "./mcl -u"   10 minutes ago   Up 10 minutes   0.0.0.0:8080->8080/tcp, :::8080->8080/tcp   admiring_mendeleev
```
Take the `CONTAINER ID` of the container whose `IMAGE` is `kagurazakanyaa/mcl` (here it is `bce1e5568f46`) and attach to it:
```
docker attach bce1e5568f46
```
To send it back to the background, press `Ctrl+P+Q`.

#### 2.3 Write the configuration file

- In the `qq/config/net.mamoe.mirai-api-http` folder, find `setting.yml`; this is the configuration file of `mirai-api-http`
- Change the content of this file to:

```
adapters:
  - ws
debug: true
enableVerify: true
verifyKey: yirimirai
singleMode: false
cacheSize: 4096
adapterSettings:
  ws:
    host: localhost
    port: 8080
    reservedSyncId: -1
```

`verifyKey` must match the `verifyKey` in the bot's `config.py`.

`port`: 8080 must match the port number configured in `config.py` (see 2.4).
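For reference, a sketch of what the matching section of QChatGPT's `config.py` might look like (the field names follow the project's config template as far as I can tell; treat this as an assumption and check your own `config-template.py`):

```python
# Hypothetical excerpt from config.py; values must mirror setting.yml above.
mirai_http_api_config = {
    "adapter": "WebSocketAdapter",
    "host": "localhost",
    "port": 8080,              # same port as in setting.yml
    "verifyKey": "yirimirai",  # same verifyKey as in setting.yml
}
```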
#### 2.4 Log in

#### Log in to QQ in mirai

```
login <bot QQ number> <bot QQ password>
```

> See [this tutorial](https://yiri-mirai.wybxc.cc/tutorials/01/configuration#4-登录-qq) for details.

#### Configure auto-login (optional)

Once the bot account has logged in successfully, run

```
autologin add <bot QQ number> <bot password>
autologin setConfig <bot QQ number> protocol ANDROID_PAD
```

> If you hit a "cannot log in" error, see [the temporary workaround](https://mirai.mamoe.net/topic/223/无法登录的临时处理方案).

**When you are done, press `Ctrl+P+Q` to detach (this does not stop the container; it keeps running).**

### 3. Deploy QChatGPT

Edit `config.py`, save it to the current directory, and run:

```
docker run -it -d --name QChatGPT --network host -v ${PWD}/config.py:/QChatGPT/config.py -v ${PWD}/banlist.py:/QChatGPT/banlist.py -v ${PWD}/sensitive.json:/QChatGPT/sensitive.json mikumifa/qchatgpt-docker
```

BIN  res/logo.png  Normal file  (binary file not shown; 35 KiB)
BIN  res/screenshots/group_gpt3.5.png  Normal file  (binary file not shown; 129 KiB)
BIN  res/screenshots/person_gpt3.5.png  Normal file  (binary file not shown; 101 KiB)
BIN  res/screenshots/person_newbing.png  Normal file  (binary file not shown; 98 KiB)
BIN  res/screenshots/webwlkr_plugin.png  Normal file  (binary file not shown; 22 KiB)
17  res/scripts/generate_cmdpriv_template.py  Normal file
@@ -0,0 +1,17 @@
import pkg.qqbot.cmds.aamgr as cmdsmgr
import json

# Register all command modules
cmdsmgr.register_all()

# Generate the command-privilege template file
template: dict[str, int] = {
    "comment": "The following are the command privileges; copy them into cmdpriv.json. For documentation on this feature, see: https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%91%BD%E4%BB%A4%E6%9D%83%E9%99%90%E6%8E%A7%E5%88%B6",
}

for key in cmdsmgr.__command_list__:
    template[key] = cmdsmgr.__command_list__[key]['privilege']

# Write cmdpriv-template.json
with open('res/templates/cmdpriv-template.json', 'w') as f:
    f.write(json.dumps(template, indent=4, ensure_ascii=False))
23  res/scripts/generate_override_all.py  Normal file
@@ -0,0 +1,23 @@
# Generate override-all.json, the template listing every field that override.json supports, from config-template
# For the override.json mechanism, see: https://github.com/RockChinQ/QChatGPT/pull/271
import json
import importlib


template = importlib.import_module("config-template")
output_json = {
    "comment": "This is the full set of fields supported by override.json. For the override.json mechanism, see https://github.com/RockChinQ/QChatGPT/pull/271"
}


for k, v in template.__dict__.items():
    if k.startswith("__"):
        continue
    # Skip anything that is itself a module
    if type(v) == type(template):
        continue
    print(k, v, type(v))
    output_json[k] = v

with open("override-all.json", "w", encoding="utf-8") as f:
    json.dump(output_json, f, indent=4, ensure_ascii=False)
32  res/scripts/publish_announcement.py  Normal file
@@ -0,0 +1,32 @@
# Print the working directory
import os
print("Working directory: " + os.getcwd())
announcement = input("Enter the announcement content: ")

import json

# Read the existing announcement file res/announcement.json
with open("res/announcement.json", "r", encoding="utf-8") as f:
    announcement_json = json.load(f)

# Append the new announcement to the file

# Current wall-clock time
import time
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

# Get the id of the last announcement
last_id = announcement_json[-1]["id"] if len(announcement_json) > 0 else -1

announcement = {
    "id": last_id + 1,
    "time": now,
    "timestamp": int(time.time()),
    "content": announcement
}

announcement_json.append(announcement)

# Write the announcements back to the file
with open("res/announcement.json", "w", encoding="utf-8") as f:
    json.dump(announcement_json, f, indent=4, ensure_ascii=False)
BIN  res/social.png  Normal file  (binary file not shown; 70 KiB)
Some files were not shown because too many files have changed in this diff.