Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-25 03:15:06 +08:00)
Compare commits: v2...5c8523e4ef (1752 commits)
50 .github/ISSUE_TEMPLATE/bug-report.yml (vendored)
@@ -1,64 +1,30 @@
name: 漏洞反馈
description: 报错或漏洞请使用这个模板创建,不使用此模板创建的异常、漏洞相关issue将被直接关闭
description: 【供中文用户】报错或漏洞请使用这个模板创建,不使用此模板创建的异常、漏洞相关issue将被直接关闭。由于自己操作不当/不甚了解所用技术栈引起的网络连接问题恕无法解决,请勿提 issue。容器间网络连接问题,参考文档 https://docs.langbot.app/zh/workshop/network-details.html
title: "[Bug]: "
labels: ["bug?"]
body:
  - type: dropdown
    attributes:
      label: 部署方式
      description: "主程序使用的部署方式"
      options:
        - 手动部署
        - 安装器部署
        - 一键安装包部署
        - Docker部署
    validations:
      required: true
  - type: dropdown
    attributes:
      label: 登录框架
      description: "连接QQ使用的框架"
      options:
        - Mirai
        - go-cqhttp
    validations:
      required: false
  - type: input
    attributes:
      label: 系统环境
      description: 操作系统、系统架构、**主机地理位置**,地理位置最好写清楚,涉及网络问题排查。
      placeholder: 例如: CentOS x64 中国大陆、Windows11 美国
    validations:
      required: true
  - type: input
    attributes:
      label: Python环境
      description: 运行程序的Python版本
      placeholder: 例如: Python 3.10
    validations:
      required: true
  - type: input
    attributes:
      label: QChatGPT版本
      description: QChatGPT版本号
      placeholder: 例如: v2.6.0,可以使用`!version`命令查看
      label: 运行环境
      description: LangBot 版本、操作系统、系统架构、**Python版本**、**主机地理位置**
      placeholder: 例如:v3.3.0、CentOS x64 Python 3.10.3、Docker
    validations:
      required: true
  - type: textarea
    attributes:
      label: 异常情况
      description: 完整描述异常情况,什么时候发生的、发生了什么,尽可能详细
      description: 完整描述异常情况,什么时候发生的、发生了什么。**请附带日志信息。**
    validations:
      required: true
  - type: textarea
    attributes:
      label: 日志信息
      description: 请提供完整的 **登录框架 和 QChatGPT控制台**的相关日志信息(若有),不提供日志信息**无法**为您排查问题,请尽可能详细
      label: 复现步骤
      description: 提供越多信息,我们会越快解决问题,建议多提供配置截图;**如果你不认真填写(只一两句话概括),我们会很生气并且立即关闭 issue 或两年后才回复你**
    validations:
      required: false
  - type: textarea
    attributes:
      label: 启用的插件
      description: 有些情况可能和插件功能有关,建议提供插件启用情况。可以使用`!plugin`命令查看已启用的插件
      description: 有些情况可能和插件功能有关,建议提供插件启用情况。
    validations:
      required: false
30 .github/ISSUE_TEMPLATE/bug-report_en.yml (vendored, new file)
@@ -0,0 +1,30 @@
name: Bug report
description: Report bugs or vulnerabilities using this template. For container network connection issues, refer to the documentation https://docs.langbot.app/en/workshop/network-details.html
title: "[Bug]: "
labels: ["bug?"]
body:
  - type: input
    attributes:
      label: Runtime environment
      description: LangBot version, operating system, system architecture, **Python version**, **host location**
      placeholder: "For example: v3.3.0, CentOS x64 Python 3.10.3, Docker"
    validations:
      required: true
  - type: textarea
    attributes:
      label: Exception
      description: Describe the exception in detail, what happened and when it happened. **Please include log information.**
    validations:
      required: true
  - type: textarea
    attributes:
      label: Reproduction steps
      description: How to reproduce this problem, the more detailed the better; the more information you provide, the faster we will solve the problem. 【注意】请务必认真填写此部分,若不提供完整信息(如只有一两句话的概括),我们将不会回复!
    validations:
      required: false
  - type: textarea
    attributes:
      label: Enabled plugins
      description: Some cases may be related to plugin functionality, so please provide the plugin enablement status.
    validations:
      required: false
4 .github/ISSUE_TEMPLATE/feature-request.yml (vendored)
@@ -1,7 +1,7 @@
name: 需求建议
title: "[Feature]: "
labels: ["改进"]
description: "新功能或现有功能优化请使用这个模板;不符合类别的issue将被直接关闭"
labels: []
description: "【供中文用户】新功能或现有功能优化请使用这个模板;不符合类别的issue将被直接关闭"
body:
  - type: dropdown
    attributes:
21 .github/ISSUE_TEMPLATE/feature-request_en.yml (vendored, new file)
@@ -0,0 +1,21 @@
name: Feature request
title: "[Feature]: "
labels: []
description: "New features or existing feature improvements should use this template; issues that do not match will be closed directly"
body:
  - type: dropdown
    attributes:
      label: This is a?
      description: New feature request or existing feature improvement
      options:
        - New feature
        - Existing feature improvement
    validations:
      required: true
  - type: textarea
    attributes:
      label: Detailed description
      description: Detailed description, the more detailed the better
    validations:
      required: true
2 .github/ISSUE_TEMPLATE/submit-plugin.yml (vendored)
@@ -1,7 +1,7 @@
name: 提交新插件
title: "[Plugin]: 请求登记新插件"
labels: ["独立插件"]
description: "本模板供且仅供提交新插件使用"
description: "【供中文用户】本模板供且仅供提交新插件使用"
body:
  - type: input
    attributes:
24 .github/ISSUE_TEMPLATE/submit-plugin_en.yml (vendored, new file)
@@ -0,0 +1,24 @@
name: Submit a new plugin
title: "[Plugin]: Request to register a new plugin"
labels: ["Independent Plugin"]
description: "This template is only for submitting new plugins"
body:
  - type: input
    attributes:
      label: Plugin name
      description: Fill in the name of the plugin
    validations:
      required: true
  - type: textarea
    attributes:
      label: Plugin code repository address
      description: Only support Github
    validations:
      required: true
  - type: textarea
    attributes:
      label: Plugin description
      description: The description of the plugin
    validations:
      required: true
2 .github/dependabot.yml (vendored)
@@ -10,6 +10,4 @@ updates:
    schedule:
      interval: "weekly"
    allow:
      - dependency-name: "yiri-mirai-rc"
      - dependency-name: "dulwich"
      - dependency-name: "openai"
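The hunk above removes the `allow:` list, so Dependabot will propose updates for every dependency in the configured ecosystem rather than only the three named packages. For context, a minimal sketch of a complete config with this shape; the `version`, `package-ecosystem`, and `directory` keys are assumptions, since the hunk only shows the lines around the removal:

version: 2
updates:
  - package-ecosystem: "pip"   # assumed; the hunk does not show this line
    directory: "/"
    schedule:
      interval: "weekly"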
39 .github/pull_request_template.md (vendored)
@@ -1,25 +1,32 @@
## 概述
## 概述 / Overview

实现/解决/优化的内容:
> 请在此部分填写你实现/解决/优化的内容:
> Summary of what you implemented/solved/optimized:
>

### 事务
### 更改前后对比截图 / Screenshots

- [ ] 已阅读仓库[贡献指引](https://github.com/RockChinQ/QChatGPT/blob/master/CONTRIBUTING.md)
- [ ] 已与维护者在issues或其他平台沟通此PR大致内容
> 请在此部分粘贴更改前后对比截图(可以是界面截图、控制台输出、对话截图等):
> Please paste the screenshots of changes before and after here (can be interface screenshots, console output, conversation screenshots, etc.):
>
> 修改前 / Before:
>
> 修改后 / After:
>

## 以下内容可在起草PR后、合并PR前逐步完成
## 检查清单 / Checklist

### 功能
### PR 作者完成 / For PR author

- [ ] 已编写完善的配置文件字段说明(若有新增)
- [ ] 已编写面向用户的新功能说明(若有必要)
- [ ] 已测试新功能或更改
*请在方括号间写`x`以打勾 / Please tick the box with `x`*

### 兼容性
- [ ] 阅读仓库[贡献指引](https://github.com/langbot-app/LangBot/blob/master/CONTRIBUTING.md)了吗? / Have you read the [contribution guide](https://github.com/langbot-app/LangBot/blob/master/CONTRIBUTING.md)?
- [ ] 与项目所有者沟通过了吗? / Have you communicated with the project maintainer?
- [ ] 我确定已自行测试所作的更改,确保功能符合预期。 / I have tested the changes and ensured they work as expected.

- [ ] 已处理版本兼容性
- [ ] 已处理插件兼容问题
### 项目维护者完成 / For project maintainer

### 风险

可能导致或已知的问题:
- [ ] 相关 issues 链接了吗? / Have you linked the related issues?
- [ ] 配置项写好了吗?迁移写好了吗?生效了吗? / Have you written the configuration items? Have you written the migration? Has it taken effect?
- [ ] 依赖加到 pyproject.toml 和 core/bootutils/deps.py 了吗 / Have you added the dependencies to pyproject.toml and core/bootutils/deps.py?
- [ ] 文档编写了吗? / Have you written the documentation?
29 .github/workflows/build-dev-image.yaml (vendored, new file)
@@ -0,0 +1,29 @@
name: Build Dev Image

on:
  push:
  workflow_dispatch:

jobs:
  build-dev-image:
    runs-on: ubuntu-latest
    # 如果是tag则跳过
    if: ${{ !startsWith(github.ref, 'refs/tags/') }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          persist-credentials: false

      - name: Generate Tag
        id: generate_tag
        run: |
          # 获取分支名称,把/替换为-
          echo ${{ github.ref }} | sed 's/refs\/heads\///g' | sed 's/\//-/g'
          echo ::set-output name=tag::$(echo ${{ github.ref }} | sed 's/refs\/heads\///g' | sed 's/\//-/g')
      - name: Login to Registry
        run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}
      - name: Build Docker Image
        run: |
          docker buildx create --name mybuilder --use
          docker build -t rockchin/langbot:${{ steps.generate_tag.outputs.tag }} . --push
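The `Generate Tag` step above writes its result with the legacy `::set-output` workflow command, which GitHub Actions has deprecated in favour of the `$GITHUB_OUTPUT` environment file. A minimal sketch of the same step using the newer mechanism (step id, output name, and the sed pipeline are kept from the workflow above):

- name: Generate Tag
  id: generate_tag
  run: |
    # Derive the image tag from the branch name, replacing "/" with "-"
    tag=$(echo "${{ github.ref }}" | sed 's/refs\/heads\///g' | sed 's/\//-/g')
    echo "tag=$tag" >> "$GITHUB_OUTPUT"

Later steps can keep reading it as `${{ steps.generate_tag.outputs.tag }}`.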
48 .github/workflows/build-docker-image.yml (vendored, new file)
@@ -0,0 +1,48 @@
name: Build Docker Image
on:
  ## 发布release的时候会自动构建
  release:
    types: [published]
  workflow_dispatch:
jobs:
  publish-docker-image:
    runs-on: ubuntu-latest
    name: Build image

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          persist-credentials: false

      - name: judge has env GITHUB_REF # 如果没有GITHUB_REF环境变量,则把github.ref变量赋值给GITHUB_REF
        run: |
          if [ -z "$GITHUB_REF" ]; then
            export GITHUB_REF=${{ github.ref }}
            echo $GITHUB_REF
          fi
      - name: Check version
        id: check_version
        run: |
          echo $GITHUB_REF
          # 如果是tag,则去掉refs/tags/前缀
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            echo "It's a tag"
            echo $GITHUB_REF
            echo $GITHUB_REF | awk -F '/' '{print $3}'
            echo ::set-output name=version::$(echo $GITHUB_REF | awk -F '/' '{print $3}')
          else
            echo "It's not a tag"
            echo $GITHUB_REF
            echo ::set-output name=version::${GITHUB_REF}
          fi
      - name: Login to Registry
        run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}
      - name: Create Buildx
        run: docker buildx create --name mybuilder --use
      - name: Build for Release # only relase, exlude pre-release
        if: ${{ github.event.release.prerelease == false }}
        run: docker buildx build --platform linux/amd64 -t rockchin/langbot:${{ steps.check_version.outputs.version }} -t rockchin/langbot:latest . --push
      - name: Build for Pre-release # no update for latest tag
        if: ${{ github.event.release.prerelease == true }}
        run: docker buildx build --platform linux/amd64 -t rockchin/langbot:${{ steps.check_version.outputs.version }} . --push
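The `Check version` step extracts the tag name by piping the ref through awk. An equivalent sketch using plain parameter expansion plus the `$GITHUB_OUTPUT` file (same step id and output name as above; the non-tag fallback to the full ref is kept):

- name: Check version
  id: check_version
  run: |
    if [[ "$GITHUB_REF" == refs/tags/* ]]; then
      version="${GITHUB_REF#refs/tags/}"   # e.g. refs/tags/v4.0.0 -> v4.0.0
    else
      version="$GITHUB_REF"                # not a tag: keep the full ref
    fi
    echo "version=$version" >> "$GITHUB_OUTPUT"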
62 .github/workflows/build-release-artifacts.yaml (vendored, new file)
@@ -0,0 +1,62 @@
name: Build Release Artifacts

on:
  workflow_dispatch:
  ## 发布release的时候会自动构建
  release:
    types: [published]

jobs:
  build-artifacts:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          persist-credentials: false

      - name: Check version
        id: check_version
        run: |
          echo $GITHUB_REF
          # 如果是tag,则去掉refs/tags/前缀
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            echo "It's a tag"
            echo $GITHUB_REF
            echo $GITHUB_REF | awk -F '/' '{print $3}'
            echo ::set-output name=version::$(echo $GITHUB_REF | awk -F '/' '{print $3}')
          else
            echo "It's not a tag"
            echo $GITHUB_REF
            echo ::set-output name=version::${GITHUB_REF}
          fi

      - name: Make Temp Directory
        run: |
          mkdir -p /tmp/langbot_build_web
          cp -r . /tmp/langbot_build_web
      - name: Setup Node
        uses: actions/setup-node@v2
        with:
          node-version: '22'
      - name: Build Web
        run: |
          cd /tmp/langbot_build_web/web
          npm install
          npm run build
      - name: Package Output
        run: |
          cp -r /tmp/langbot_build_web/web/out ./web
      - name: Upload Artifact
        uses: actions/upload-artifact@v4
        with:
          name: langbot-${{ steps.check_version.outputs.version }}-all
          path: .

      - name: Upload To Release
        env:
          GH_TOKEN: ${{ secrets.RELEASE_UPLOAD_GITHUB_TOKEN }}
        run: |
          # 本目录下所有文件打包成zip
          zip -r langbot-${{ steps.check_version.outputs.version }}-all.zip .
          gh release upload ${{ github.event.release.tag_name }} langbot-${{ steps.check_version.outputs.version }}-all.zip
38 .github/workflows/build_docker_image.yml (vendored)
@@ -1,38 +0,0 @@
name: Build Docker Image
on:
  #防止fork乱用action设置只能手动触发构建
  workflow_dispatch:
  ## 发布release的时候会自动构建
  release:
    types: [published]
jobs:
  publish-docker-image:
    runs-on: ubuntu-latest
    name: Build image

    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: judge has env GITHUB_REF # 如果没有GITHUB_REF环境变量,则把github.ref变量赋值给GITHUB_REF
        run: |
          if [ -z "$GITHUB_REF" ]; then
            export GITHUB_REF=${{ github.ref }}
          fi
      - name: Check GITHUB_REF env
        run: echo $GITHUB_REF
      - name: Get version
        id: get_version
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
      - name: Build # image name: rockchin/qchatgpt:<VERSION>
        run: docker build --network=host -t rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }} -t rockchin/qchatgpt:latest .
      - name: Login to Registry
        run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}

      - name: Push image
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: docker push rockchin/qchatgpt:${{ steps.get_version.outputs.VERSION }}

      - name: Push latest image
        if: (startsWith(env.GITHUB_REF, 'refs/tags/')||startsWith(github.ref, 'refs/tags/')) && startsWith(github.repository, 'RockChinQ/QChatGPT')
        run: docker push rockchin/qchatgpt:latest
46 .github/workflows/publish-to-pypi.yml (vendored, new file)
@@ -0,0 +1,46 @@
name: Build and Publish to PyPI

on:
  workflow_dispatch:
  release:
    types: [published]

jobs:
  build-and-publish:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      id-token: write # Required for trusted publishing to PyPI

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '22'

      - name: Build frontend
        run: |
          cd web
          npm install -g pnpm
          pnpm install
          pnpm build
          mkdir -p ../src/langbot/web/out
          cp -r out ../src/langbot/web/

      - name: Install the latest version of uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "latest"

      - name: Build package
        run: |
          uv build

      - name: Publish to PyPI
        run: |
          uv publish --token ${{ secrets.PYPI_TOKEN }}
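The job already requests `id-token: write`, the permission used for PyPI trusted publishing, yet the final step still uploads with a long-lived `PYPI_TOKEN` secret. If the package is registered as a trusted publisher on PyPI, the upload could instead go through the official `pypa/gh-action-pypi-publish` action, which exchanges the OIDC token automatically and needs no stored secret. A sketch, assuming `uv build` has left the wheel and sdist in the default `dist/` directory the action uploads from:

- name: Publish to PyPI
  # Relies on PyPI trusted publishing (OIDC); no API token required
  uses: pypa/gh-action-pypi-publish@release/v1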
71 .github/workflows/run-tests.yml (vendored, new file)
@@ -0,0 +1,71 @@
name: Unit Tests

on:
  pull_request:
    types: [opened, ready_for_review, synchronize]
    paths:
      - 'pkg/**'
      - 'tests/**'
      - '.github/workflows/run-tests.yml'
      - 'pyproject.toml'
      - 'run_tests.sh'
  push:
    branches:
      - master
      - develop
    paths:
      - 'pkg/**'
      - 'tests/**'
      - '.github/workflows/run-tests.yml'
      - 'pyproject.toml'
      - 'run_tests.sh'

jobs:
  test:
    name: Run Unit Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12']
      fail-fast: false

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install uv
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          echo "$HOME/.cargo/bin" >> $GITHUB_PATH

      - name: Install dependencies
        run: |
          uv sync --dev

      - name: Run unit tests
        run: |
          bash run_tests.sh

      - name: Upload coverage to Codecov
        if: matrix.python-version == '3.12'
        uses: codecov/codecov-action@v5
        with:
          files: ./coverage.xml
          flags: unit-tests
          name: unit-tests-coverage
          fail_ci_if_error: false
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

      - name: Test Summary
        if: always()
        run: |
          echo "## Unit Tests Results" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "Python Version: ${{ matrix.python-version }}" >> $GITHUB_STEP_SUMMARY
          echo "Test Status: ${{ job.status }}" >> $GITHUB_STEP_SUMMARY
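The test job delegates to `bash run_tests.sh`, and the Codecov step expects a `coverage.xml` at the repository root. The script itself is not part of this diff; a hypothetical minimal version consistent with those two facts (the pytest invocation, the `tests/` path, and the `pkg` coverage target are assumptions, not taken from the repository):

#!/usr/bin/env bash
set -euo pipefail

# Run the unit tests inside the uv-managed environment and
# emit the XML coverage report that the Codecov upload step reads.
uv run pytest tests/ --cov=pkg --cov-report=xml:coverage.xml --cov-report=term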
43 .github/workflows/sync-wiki.yml (vendored)
@@ -1,43 +0,0 @@
name: Update Wiki

on:
  push:
    branches:
      - master
    paths:
      - 'res/wiki/**'

jobs:
  update-wiki:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Setup Git
        run: |
          git config --global user.name "GitHub Actions"
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
      - name: Clone Wiki Repository
        uses: actions/checkout@v2
        with:
          repository: RockChinQ/QChatGPT.wiki
          path: wiki
      - name: Delete old wiki content
        run: |
          rm -rf wiki/*
      - name: Copy res/wiki content to wiki
        run: |
          cp -r res/wiki/* wiki/
      - name: Check for changes
        run: |
          cd wiki
          if git diff --quiet; then
            echo "No changes to commit."
            exit 0
          fi
      - name: Commit and Push Changes
        run: |
          cd wiki
          git add .
          git commit -m "Update wiki"
          git push
108 .github/workflows/test-dev-image.yaml (vendored, new file)
@@ -0,0 +1,108 @@
name: Test Dev Image

on:
  workflow_run:
    workflows: ["Build Dev Image"]
    types:
      - completed
    branches:
      - master

jobs:
  test-dev-image:
    runs-on: ubuntu-latest
    # Only run if the build workflow succeeded
    if: ${{ github.event.workflow_run.conclusion == 'success' }}

    permissions:
      contents: read

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Update Docker Compose to use master tag
        working-directory: ./docker
        run: |
          # Replace 'latest' with 'master' tag for testing the dev image
          sed -i 's/rockchin\/langbot:latest/rockchin\/langbot:master/g' docker-compose.yaml
          echo "Updated docker-compose.yaml to use master tag:"
          cat docker-compose.yaml

      - name: Start Docker Compose
        working-directory: ./docker
        run: docker compose up -d

      - name: Wait and Test API
        run: |
          # Function to test API endpoint
          test_api() {
            echo "Testing API endpoint..."
            response=$(curl -s --connect-timeout 10 --max-time 30 -w "\n%{http_code}" http://localhost:5300/api/v1/system/info 2>&1)
            curl_exit_code=$?

            if [ $curl_exit_code -ne 0 ]; then
              echo "Curl failed with exit code: $curl_exit_code"
              echo "Error: $response"
              return 1
            fi

            http_code=$(echo "$response" | tail -n 1)
            response_body=$(echo "$response" | head -n -1)

            if [ "$http_code" = "200" ]; then
              echo "API is healthy! Response code: $http_code"
              echo "Response: $response_body"
              return 0
            else
              echo "API returned non-200 response: $http_code"
              echo "Response body: $response_body"
              return 1
            fi
          }

          # Wait 30 seconds before first attempt
          echo "Waiting 30 seconds for services to start..."
          sleep 30

          # Try up to 3 times with 30-second intervals
          max_attempts=3
          attempt=1

          while [ $attempt -le $max_attempts ]; do
            echo "Attempt $attempt of $max_attempts"

            if test_api; then
              echo "Success! API is responding correctly."
              exit 0
            fi

            if [ $attempt -lt $max_attempts ]; then
              echo "Retrying in 30 seconds..."
              sleep 30
            fi

            attempt=$((attempt + 1))
          done

          # All attempts failed
          echo "Failed to get healthy response after $max_attempts attempts"
          exit 1

      - name: Show Container Logs on Failure
        if: failure()
        working-directory: ./docker
        run: |
          echo "=== Docker Compose Status ==="
          docker compose ps
          echo ""
          echo "=== LangBot Logs ==="
          docker compose logs langbot
          echo ""
          echo "=== Plugin Runtime Logs ==="
          docker compose logs langbot_plugin_runtime

      - name: Cleanup
        if: always()
        working-directory: ./docker
        run: docker compose down
80
.github/workflows/test-pr.yml
vendored
@@ -1,80 +0,0 @@
|
||||
name: Test Pull Request
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [ready_for_review]
|
||||
paths:
|
||||
# Any change to a .py file triggers this
|
||||
- '**.py'
|
||||
pull_request_review:
|
||||
types: [submitted]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
# Allow manual triggering
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
perform-test:
|
||||
runs-on: ubuntu-latest
|
||||
# Run if the event is pull_request_review and the review state is approved
|
||||
if: >
|
||||
github.event_name == 'pull_request' ||
|
||||
(github.event_name == 'pull_request_review' && github.event.review.state == 'APPROVED') ||
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event_name == 'issue_comment' && github.event.issue.pull_request != '' && contains(github.event.comment.body, '/test') && github.event.comment.user.login == 'RockChinQ')
|
||||
steps:
|
||||
# Check out the test project repository
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# Repository to check out
|
||||
repository: RockChinQ/qcg-tester
|
||||
# Path to check out into
|
||||
path: qcg-tester
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
cd qcg-tester
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
|
||||
- name: Get PR details
|
||||
id: get-pr
|
||||
if: github.event_name == 'issue_comment'
|
||||
uses: octokit/request-action@v2.x
|
||||
with:
|
||||
route: GET /repos/${{ github.repository }}/pulls/${{ github.event.issue.number }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set PR source branch as env variable
|
||||
if: github.event_name == 'issue_comment'
|
||||
run: |
|
||||
PR_SOURCE_BRANCH=$(echo '${{ steps.get-pr.outputs.data }}' | jq -r '.head.ref')
|
||||
echo "BRANCH=$PR_SOURCE_BRANCH" >> $GITHUB_ENV
|
||||
|
||||
- name: Set PR Branch as bash env
|
||||
if: github.event_name != 'issue_comment'
|
||||
run: |
|
||||
echo "BRANCH=${{ github.head_ref }}" >> $GITHUB_ENV
|
||||
- name: Set OpenAI API Key from Secrets
|
||||
run: |
|
||||
echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> $GITHUB_ENV
|
||||
- name: Set OpenAI Reverse Proxy URL from Secrets
|
||||
run: |
|
||||
echo "OPENAI_REVERSE_PROXY=${{ secrets.OPENAI_REVERSE_PROXY }}" >> $GITHUB_ENV
|
||||
- name: Run test
|
||||
run: |
|
||||
cd qcg-tester
|
||||
python main.py
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
run: |
|
||||
cd qcg-tester/resource/QChatGPT
|
||||
curl -Os https://uploader.codecov.io/latest/linux/codecov
|
||||
chmod +x codecov
|
||||
./codecov -t ${{ secrets.CODECOV_TOKEN }}
|
||||
58
.github/workflows/update-cmdpriv-template.yml
vendored
@@ -1,58 +0,0 @@
|
||||
name: Update cmdpriv-template
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- 'pkg/qqbot/cmds/**'
|
||||
pull_request:
|
||||
types: [closed]
|
||||
paths:
|
||||
- 'pkg/qqbot/cmds/**'
|
||||
|
||||
jobs:
|
||||
update-cmdpriv-template:
|
||||
if: github.event.pull_request.merged == true || github.event_name == 'push'
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.10.13
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade yiri-mirai-rc openai>=1.0.0 colorlog func_timeout dulwich Pillow CallingGPT tiktoken
|
||||
python -m pip install -U openai>=1.0.0
|
||||
|
||||
- name: Copy Scripts
|
||||
run: |
|
||||
cp res/scripts/generate_cmdpriv_template.py .
|
||||
|
||||
- name: Generate Files
|
||||
run: |
|
||||
python main.py
|
||||
|
||||
- name: Run generate_cmdpriv_template.py
|
||||
run: python3 generate_cmdpriv_template.py
|
||||
|
||||
- name: Check for changes in cmdpriv-template.json
|
||||
id: check_changes
|
||||
run: |
|
||||
if git diff --name-only | grep -q "res/templates/cmdpriv-template.json"; then
|
||||
echo "::set-output name=changes_detected::true"
|
||||
else
|
||||
echo "::set-output name=changes_detected::false"
|
||||
fi
|
||||
|
||||
- name: Commit changes to cmdpriv-template.json
|
||||
if: steps.check_changes.outputs.changes_detected == 'true'
|
||||
run: |
|
||||
git config --global user.name "GitHub Actions Bot"
|
||||
git config --global user.email "<github-actions@github.com>"
|
||||
git add res/templates/cmdpriv-template.json
|
||||
git commit -m "Update cmdpriv-template.json"
|
||||
git push
|
||||
52
.github/workflows/update-override-all.yml
vendored
@@ -1,52 +0,0 @@
|
||||
name: Check and Update override_all
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- 'config-template.py'
|
||||
pull_request:
|
||||
types:
|
||||
- closed
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'config-template.py'
|
||||
|
||||
jobs:
|
||||
update-override-all:
|
||||
name: check and update
|
||||
if: github.event.pull_request.merged == true || github.event_name == 'push'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.x
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
|
||||
- name: Copy Scripts
|
||||
run: |
|
||||
cp res/scripts/generate_override_all.py .
|
||||
|
||||
- name: Run generate_override_all.py
|
||||
run: python3 generate_override_all.py
|
||||
|
||||
- name: Check for changes in override-all.json
|
||||
id: check_changes
|
||||
run: |
|
||||
git diff --exit-code override-all.json || echo "::set-output name=changes_detected::true"
|
||||
|
||||
- name: Commit and push changes
|
||||
if: steps.check_changes.outputs.changes_detected == 'true'
|
||||
run: |
|
||||
git config --global user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config --global user.name "GitHub Actions"
|
||||
git add override-all.json
|
||||
git commit -m "Update override-all.json"
|
||||
git push
|
||||
36
.gitignore
vendored
@@ -2,10 +2,10 @@
|
||||
.idea/
|
||||
__pycache__/
|
||||
database.db
|
||||
qchatgpt.log
|
||||
langbot.log
|
||||
/banlist.py
|
||||
plugins/
|
||||
!plugins/__init__.py
|
||||
/plugins/
|
||||
!/plugins/__init__.py
|
||||
/revcfg.py
|
||||
prompts/
|
||||
logs/
|
||||
@@ -16,20 +16,40 @@ scenario/
|
||||
!scenario/default-template.json
|
||||
override.json
|
||||
cookies.json
|
||||
res/announcement_saved
|
||||
res/announcement_saved.json
|
||||
data/labels/announcement_saved.json
|
||||
cmdpriv.json
|
||||
tips.py
|
||||
.venv
|
||||
venv*
|
||||
bin/
|
||||
.vscode
|
||||
test_*
|
||||
/test_*
|
||||
venv/
|
||||
hugchat.json
|
||||
qcapi
|
||||
claude.json
|
||||
bard.json
|
||||
/*yaml
|
||||
!.pre-commit-config.yaml
|
||||
!components.yaml
|
||||
!/docker-compose.yaml
|
||||
res/instance_id.json
|
||||
data/labels/instance_id.json
|
||||
.DS_Store
|
||||
/data
|
||||
botpy.log*
|
||||
/poc
|
||||
/libs/wecom_api/test.py
|
||||
/venv
|
||||
test.py
|
||||
/web_ui
|
||||
.venv/
|
||||
uv.lock
|
||||
/test
|
||||
plugins.bak
|
||||
coverage.xml
|
||||
.coverage
|
||||
src/langbot/web/
|
||||
|
||||
# Build artifacts
|
||||
/dist
|
||||
/build
|
||||
*.egg-info
|
||||
|
||||
27
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
repos:
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
# Ruff version.
|
||||
rev: v0.11.7
|
||||
hooks:
|
||||
# Run the linter of backend.
|
||||
- id: ruff
|
||||
args: [--fix]
|
||||
# Run the formatter of backend.
|
||||
- id: ruff-format
|
||||
|
||||
- repo: https://github.com/pre-commit/mirrors-prettier
|
||||
rev: v3.1.0
|
||||
hooks:
|
||||
- id: prettier
|
||||
types_or: [javascript, jsx, ts, tsx, css, scss]
|
||||
additional_dependencies:
|
||||
- prettier@3.1.0
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: lint-staged
|
||||
name: lint-staged
|
||||
entry: cd web && pnpm lint-staged
|
||||
language: system
|
||||
types: [javascript, jsx, ts, tsx]
|
||||
pass_filenames: false
|
||||
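For context on the config above: contributors would typically enable these hooks locally with the standard pre-commit workflow. A minimal sketch, assuming `pre-commit` is installed from PyPI and `pnpm` is available for the web-related hook:

```bash
pip install pre-commit
pre-commit install          # register the hooks with git
pre-commit run --all-files  # optionally run every hook once over the repo
```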
86
AGENTS.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# AGENTS.md
|
||||
|
||||
This file is for guiding code agents (like Claude Code, GitHub Copilot, OpenAI Codex, etc.) to work in LangBot project.
|
||||
|
||||
## Project Overview
|
||||
|
||||
LangBot is an open-source, LLM-native instant messaging bot development platform. It aims to provide an out-of-the-box IM bot development experience, ships with Agent, RAG, MCP and other LLM application features, supports instant messaging platforms worldwide, and offers rich API interfaces for custom development.

LangBot has a comprehensive frontend, and all operations can be performed through it. The project is split into these major parts:
|
||||
|
||||
- `./pkg`: The core python package of the project backend.
|
||||
- `./pkg/platform`: The platform module of the project, containing the logic of message platform adapters, bot managers, message session managers, etc.
|
||||
- `./pkg/provider`: The provider module of the project, containing the logic of LLM providers, tool providers, etc.
|
||||
- `./pkg/pipeline`: The pipeline module of the project, containing the logic of pipelines, stages, query pool, etc.
|
||||
- `./pkg/api`: The api module of the project, containing the http api controllers and services.
|
||||
- `./pkg/plugin`: LangBot bridge for connecting with plugin system.
|
||||
- `./libs`: Some SDKs we previously developed for the project, such as `qq_official_api`, `wecom_api`, etc.
|
||||
- `./templates`: Templates of config files, components, etc.
|
||||
- `./web`: Frontend codebase, built with Next.js + **shadcn** + **Tailwind CSS**.
|
||||
- `./docker`: docker-compose deployment files.
|
||||
|
||||
## Backend Development
|
||||
|
||||
We use `uv` to manage dependencies.
|
||||
|
||||
```bash
|
||||
pip install uv
|
||||
uv sync --dev
|
||||
```
|
||||
|
||||
Start the backend and run the project in development mode.
|
||||
|
||||
```bash
|
||||
uv run main.py
|
||||
```
|
||||
|
||||
Then you can access the project at `http://127.0.0.1:5300`.
|
||||
|
||||
## Frontend Development
|
||||
|
||||
We use `pnpm` to manage dependencies.
|
||||
|
||||
```bash
|
||||
cd web
|
||||
cp .env.example .env
|
||||
pnpm install
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
Then you can access the project at `http://127.0.0.1:3000`.
|
||||
|
||||
## Plugin System Architecture
|
||||
|
||||
LangBot is composed of various internal components such as Large Language Model tools, commands, messaging platform adapters, LLM requesters, and more. To meet extensibility and flexibility requirements, we have implemented a production-grade plugin system.
|
||||
|
||||
Each plugin runs in an independent process, managed uniformly by the Plugin Runtime. It has two operating modes: `stdio` and `websocket`. When LangBot is started directly by users (not running in a container), it uses `stdio` mode, which is common for personal users or lightweight environments. When LangBot runs in a container, it uses `websocket` mode, designed specifically for production environments.
|
||||
|
||||
Plugin Runtime automatically starts each installed plugin and interacts through stdio. In plugin development scenarios, developers can use the lbp command-line tool to start plugins and connect to the running Runtime via WebSocket for debugging.
|
||||
|
||||
> Plugin SDK, CLI, Runtime, and entities definitions shared between LangBot and plugins are contained in the [`langbot-plugin-sdk`](https://github.com/langbot-app/langbot-plugin-sdk) repository.
|
||||
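As a rough illustration of the mode split described above (this is not repository code; the container check via `/.dockerenv` is an assumption based on the new Dockerfile in this diff running `touch /.dockerenv`):

```bash
# Hedged sketch only: assumes container detection via /.dockerenv,
# which the new Dockerfile creates explicitly.
if [ -f /.dockerenv ]; then
  echo "running in a container -> plugin system uses websocket mode"
else
  echo "started directly -> plugin system uses stdio mode"
fi
```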
|
||||
## Some Development Tips and Standards
|
||||
|
||||
- LangBot is a global project, any comments in code should be in English, and user experience should be considered in all aspects.
|
||||
- Thus you should consider the i18n support in all aspects.
|
||||
- LangBot is widely adopted in both toC and toB scenarios, so you should consider the compatibility and security in all aspects.
|
||||
- If you are asked to make a commit, please follow the commit message format (an example is shown after this list):
|
||||
- format: <type>(<scope>): <subject>
|
||||
- type: must be a specific type, such as feat (new feature), fix (bug fix), docs (documentation), style (code style), refactor (refactoring), perf (performance optimization), etc.
|
||||
- scope: the scope of the commit, such as the package name, the file name, the function name, the class name, the module name, etc.
|
||||
- subject: the subject of the commit, such as the description of the commit, the reason for the commit, the impact of the commit, etc.
|
||||
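A hypothetical commit following this format (the scope and subject are made up for illustration):

```bash
git commit -m "feat(pipeline): add retry stage for failed queries"
```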
|
||||
## Some Principles
|
||||
|
||||
- Keep it simple, stupid.
|
||||
- Entities should not be multiplied unnecessarily
|
||||
- The Eight Honors and Eight Shames (八荣八耻):

Shame in guessing at interfaces; honor in researching them carefully.
Shame in vague execution; honor in seeking confirmation.
Shame in imagining business logic; honor in confirming with a human.
Shame in inventing new interfaces; honor in reusing existing ones.
Shame in skipping validation; honor in proactive testing.
Shame in breaking the architecture; honor in following conventions.
Shame in pretending to understand; honor in honest ignorance.
Shame in blind modification; honor in careful refactoring.
|
||||
@@ -5,22 +5,27 @@
|
||||
### Forms of Contribution

- Submit PRs that fix bugs or implement features requested in issues
- Submit PRs implementing features you have in mind (please open an issue first to discuss with the author)
- Improve the code architecture so that modules are organized more cleanly and elegantly
- Report bugs you have found, or features you would like, in issues
- Submit PRs implementing features you have in mind (please open an issue first to discuss with the project maintainers)
- Write articles or make videos about this project on other social platforms
- Contribute to derivative projects of this project, or develop plugins to add features

### How to Get Started
### Language Conventions

- Join the project discussion group and discuss project matters together
- Work on pressing problems in the issues of this project or its derivative projects
- Read and improve the project documentation
- Write tutorials about this project on various social media platforms
- Use English only in PRs and commit messages
- Chinese users may use Chinese in issues

### Code Standards
<hr/>

- Comments in code `must` follow the Google style guide
- Imports at the top of a module should follow the order `standard library modules`, `third-party modules`, `project modules`
- Do `not` import specific attributes of a module directly; import the module and access attributes as `xxx.yyy`
- Fields in any scope `must` be declared before use, with a type hint at the declaration
|
||||
## Guidelines
|
||||
|
||||
### Contribution
|
||||
|
||||
- Submit PRs to solve bugs or features in the issues
|
||||
- Submit PRs to implement your ideas (Please create an issue first and communicate with the project maintainer)
|
||||
- Write articles or make videos about this project on other social platforms
|
||||
- Contribute to the development of derivative projects, or develop plugins to add features
|
||||
|
||||
### Spoken Language
|
||||
|
||||
- Use English in PRs and Commit Messages
|
||||
- For English users, you can use English in issues
|
||||
|
||||
28
Dockerfile
@@ -1,15 +1,23 @@
|
||||
FROM python:3.10.13-bullseye
|
||||
WORKDIR /QChatGPT
|
||||
FROM node:22-alpine AS node
|
||||
|
||||
COPY . /QChatGPT/
|
||||
WORKDIR /app
|
||||
|
||||
RUN ls
|
||||
COPY web ./web
|
||||
|
||||
RUN python -m pip install -r requirements.txt && \
|
||||
python -m pip install -U websockets==10.0 && \
|
||||
python -m pip install -U httpcore httpx openai
|
||||
RUN cd web && npm install && npm run build
|
||||
|
||||
# Generate config files
|
||||
RUN python main.py
|
||||
FROM python:3.12.7-slim
|
||||
|
||||
CMD [ "python", "main.py" ]
|
||||
WORKDIR /app
|
||||
|
||||
COPY . .
|
||||
|
||||
COPY --from=node /app/web/out ./web/out
|
||||
|
||||
RUN apt update \
|
||||
&& apt install gcc -y \
|
||||
&& python -m pip install --no-cache-dir uv \
|
||||
&& uv sync \
|
||||
&& touch /.dockerenv
|
||||
|
||||
CMD [ "uv", "run", "main.py" ]
|
||||
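The rewritten Dockerfile above is a multi-stage build: a Node stage builds the web frontend, and a slim Python stage installs backend dependencies with uv and runs `main.py`. A hedged sketch of building and running it locally (the image tag is illustrative; port 5300 matches the backend address mentioned in AGENTS.md):

```bash
docker build -t langbot:local .
docker run --rm -p 5300:5300 langbot:local
```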
862
LICENSE
@@ -1,661 +1,201 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
199
README.md
199
README.md
@@ -1,51 +1,170 @@
|
||||
|
||||
<p align="center">
|
||||
<img src="https://qchatgpt.rockchin.top/logo.png" alt="QChatGPT" width="180" />
|
||||
</p>
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_zh.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
# QChatGPT
|
||||
<a href="https://hellogithub.com/repository/langbot-app/LangBot" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=5ce8ae2aa4f74316bf393b57b952433c&claim_uid=gtmc6YWjMZkT21R" alt="Featured|HelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
|
||||
|
||||
<blockquote> 🥳 QChatGPT 一周年啦,感谢大家的支持!欢迎前往<a href="https://github.com/RockChinQ/QChatGPT/discussions/627">讨论</a>。</blockquote>
|
||||
[English](README_EN.md) / 简体中文 / [繁體中文](README_TW.md) / [日本語](README_JP.md) / [Español](README_ES.md) / [Français](README_FR.md) / [한국어](README_KO.md) / [Русский](README_RU.md) / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://github.com/RockChinQ/QChatGPT/releases/latest)
|
||||
<a href="https://hub.docker.com/repository/docker/rockchin/qchatgpt">
|
||||
<img src="https://img.shields.io/docker/pulls/rockchin/qchatgpt?color=blue" alt="docker pull">
|
||||
</a>
|
||||

|
||||
<a href="https://codecov.io/gh/RockChinQ/QChatGPT" >
|
||||
<img src="https://codecov.io/gh/RockChinQ/QChatGPT/graph/badge.svg?token=pjxYIL2kbC"/>
|
||||
</a>
|
||||
<br/>
|
||||
<img src="https://img.shields.io/badge/python-3.9 | 3.10 | 3.11-blue.svg" alt="python">
|
||||
<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=66-aWvn8cbP4c1ut_1YYkvvGVeEtyTH8&authKey=pTaKBK5C%2B8dFzQ4XlENf6MHTCLaHnlKcCRx7c14EeVVlpX2nRSaS8lJm8YeM4mCU&noverify=0&group_code=195992197">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/%E5%AE%98%E6%96%B9%E7%BE%A4-195992197-purple">
|
||||
</a>
|
||||
<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=nC80H57wmKPwRDLFeQrDDjVl81XuC21P&authKey=2wTUTfoQ5v%2BD4C5zfpuR%2BSPMDqdXgDXA%2FS2wHI1NxTfWIG%2B%2FqK08dgyjMMOzhXa9&noverify=0&group_code=738382634">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-738382634-purple">
|
||||
</a>
|
||||
<a href="https://www.bilibili.com/video/BV14h4y1w7TC">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/%E8%A7%86%E9%A2%91%E6%95%99%E7%A8%8B-208647">
|
||||
</a>
|
||||
<a href="https://www.bilibili.com/video/BV11h4y1y74H">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Linux%E9%83%A8%E7%BD%B2%E8%A7%86%E9%A2%91-208647">
|
||||
</a>
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://qm.qq.com/q/JLi38whHum)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
[](https://gitcode.com/RockChinQ/LangBot)
|
||||
|
||||
## 使用文档
|
||||
<a href="https://langbot.app">项目主页</a> |
|
||||
<a href="https://docs.langbot.app/zh/insight/guide.html">部署文档</a> |
|
||||
<a href="https://docs.langbot.app/zh/plugin/plugin-intro.html">插件介绍</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">提交插件</a>
|
||||
|
||||
<a href="https://qchatgpt.rockchin.top">项目主页</a> |
|
||||
<a href="https://qchatgpt.rockchin.top/posts/feature.html">功能介绍</a> |
|
||||
<a href="https://qchatgpt.rockchin.top/posts/deploy/">部署文档</a> |
|
||||
<a href="https://qchatgpt.rockchin.top/posts/error/">常见问题</a> |
|
||||
<a href="https://qchatgpt.rockchin.top/posts/plugin/intro.html">插件介绍</a> |
|
||||
<a href="https://github.com/RockChinQ/QChatGPT/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">提交插件</a>
|
||||
|
||||
## 相关链接
|
||||
|
||||
<a href="https://github.com/RockChinQ/qcg-installer">安装器源码</a> |
|
||||
<a href="https://github.com/RockChinQ/qcg-tester">测试工程源码</a> |
|
||||
<a href="https://github.com/the-lazy-me/QChatGPT-Wiki">官方文档储存库</a>
|
||||
|
||||
<img alt="回复效果(带有联网插件)" src="https://qchatgpt.rockchin.top/assets/image/QChatGPT-1211.png" width="500px"/>
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot 是一个开源的大语言模型原生即时通信机器人开发平台,旨在提供开箱即用的 IM 机器人开发体验,具有 Agent、RAG、MCP 等多种 LLM 应用功能,适配全球主流即时通信平台,并提供丰富的 API 接口,支持自定义开发。
|
||||
|
||||
## 📦 开始使用
|
||||
|
||||
#### 快速部署
|
||||
|
||||
使用 `uvx` 一键启动(需要先安装 [uv](https://docs.astral.sh/uv/getting-started/installation/)):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
访问 http://localhost:5300 即可开始使用。
|
||||
|
||||
#### Docker Compose 部署
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
访问 http://localhost:5300 即可开始使用。
|
||||
|
||||
详细文档[Docker 部署](https://docs.langbot.app/zh/deploy/langbot/docker.html)。
|
||||
|
||||
#### 宝塔面板部署
|
||||
|
||||
已上架宝塔面板,若您已安装宝塔面板,可以根据[文档](https://docs.langbot.app/zh/deploy/langbot/one-click/bt.html)使用。
|
||||
|
||||
#### Zeabur 云部署
|
||||
|
||||
社区贡献的 Zeabur 模板。
|
||||
|
||||
[](https://zeabur.com/zh-CN/templates/ZKTBDH)
|
||||
|
||||
#### Railway 云部署
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### 手动部署
|
||||
|
||||
直接使用发行版运行,查看文档[手动部署](https://docs.langbot.app/zh/deploy/langbot/manual.html)。
|
||||
|
||||
#### Kubernetes 部署
|
||||
|
||||
参考 [Kubernetes 部署](./docker/README_K8S.md) 文档。
|
||||
|
||||
## 😎 保持更新
|
||||
|
||||
点击仓库右上角 Star 和 Watch 按钮,获取最新动态。
|
||||
|
||||

|
||||
|
||||
## ✨ 特性
|
||||
|
||||
- 💬 大模型对话、Agent:支持多种大模型,适配群聊和私聊;具有多轮对话、工具调用、多模态、流式输出能力,自带 RAG(知识库)实现,并深度适配 [Dify](https://dify.ai)。
|
||||
- 🤖 多平台支持:目前支持 QQ、QQ频道、企业微信、个人微信、飞书、Discord、Telegram 等平台。
|
||||
- 🛠️ 高稳定性、功能完备:原生支持访问控制、限速、敏感词过滤等机制;配置简单,支持多种部署方式。支持多流水线配置,不同机器人用于不同应用场景。
|
||||
- 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;适配 Anthropic [MCP 协议](https://modelcontextprotocol.io/);目前已有数百个插件。
|
||||
- 😻 Web 管理面板:支持通过浏览器管理 LangBot 实例,不再需要手动编写配置文件。
|
||||
|
||||
详细规格特性请访问[文档](https://docs.langbot.app/zh/insight/features.html)。
|
||||
|
||||
或访问 demo 环境:https://demo.langbot.dev/
|
||||
- 登录信息:邮箱:`demo@langbot.app` 密码:`langbot123456`
|
||||
- 注意:仅展示 WebUI 效果,公开环境,请不要在其中填入您的任何敏感信息。
|
||||
|
||||
### 消息平台
|
||||
|
||||
| 平台 | 状态 | 备注 |
|
||||
| --- | --- | --- |
|
||||
| QQ 个人号 | ✅ | QQ 个人号私聊、群聊 |
|
||||
| QQ 官方机器人 | ✅ | QQ 官方机器人,支持频道、私聊、群聊 |
|
||||
| 企业微信 | ✅ | |
|
||||
| 企微对外客服 | ✅ | |
|
||||
| 企微智能机器人 | ✅ | |
|
||||
| 个人微信 | ✅ | |
|
||||
| 微信公众号 | ✅ | |
|
||||
| 飞书 | ✅ | |
|
||||
| 钉钉 | ✅ | |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
|
||||
### 大模型能力
|
||||
|
||||
| 模型 | 状态 | 备注 |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | 可接入任何 OpenAI 接口格式模型 |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [智谱AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [胜算云](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | 全球大模型都可调用(友情推荐) |
|
||||
| [优云智算](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型和 GPU 资源平台 |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型和 GPU 资源平台 |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | 大模型聚合平台,专注全球大模型接入 |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | 大模型聚合平台 |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOps 平台 |
|
||||
| [Ollama](https://ollama.com/) | ✅ | 本地大模型运行平台 |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | 本地大模型运行平台 |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | 大模型接口聚合平台 |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | 大模型聚合平台 |
|
||||
| [小马算力](https://www.tokenpony.cn/453z1) | ✅ | 大模型聚合平台 |
|
||||
| [阿里云百炼](https://bailian.console.aliyun.com/) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | 大模型聚合平台 |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | 支持通过 MCP 协议获取工具 |
|
||||
| [百宝箱Tbox](https://www.tbox.cn/open) | ✅ | 蚂蚁百宝箱智能体平台,每月免费10亿大模型Token |
|
||||
|
||||
### TTS
|
||||
|
||||
| 平台/模型 | 备注 |
|
||||
| --- | --- |
|
||||
| [FishAudio](https://fish.audio/zh-CN/discovery/) | [插件](https://github.com/the-lazy-me/NewChatVoice) |
|
||||
| [海豚 AI](https://www.ttson.cn/?source=thelazy) | [插件](https://github.com/the-lazy-me/NewChatVoice) |
|
||||
| [AzureTTS](https://portal.azure.com/) | [插件](https://github.com/Ingnaryk/LangBot_AzureTTS) |
|
||||
|
||||
### 文生图
|
||||
|
||||
| 平台/模型 | 备注 |
|
||||
| --- | --- |
|
||||
| 阿里云百炼 | [插件](https://github.com/Thetail001/LangBot_BailianTextToImagePlugin)
|
||||
|
||||
## 😘 社区贡献
|
||||
|
||||
感谢以下[代码贡献者](https://github.com/langbot-app/LangBot/graphs/contributors)和社区里其他成员对 LangBot 的贡献:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
|
||||
<!--
|
||||
## For Code Agents
|
||||
|
||||
If you're a code agent based on LLM, you should read [AGENTS.md](AGENTS.md) to understand the project structure and development process before you start any development.
|
||||
-->
|
||||
|
||||
141
README_EN.md
Normal file
141
README_EN.md
Normal file
@@ -0,0 +1,141 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
English / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / [Español](README_ES.md) / [Français](README_FR.md) / [한국어](README_KO.md) / [Русский](README_RU.md) / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">Home</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">Deployment</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">Plugin</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">Submit Plugin</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot is an open-source LLM native instant messaging robot development platform, aiming to provide out-of-the-box IM robot development experience, with Agent, RAG, MCP and other LLM application functions, adapting to global instant messaging platforms, and providing rich API interfaces, supporting custom development.
|
||||
|
||||
## 📦 Getting Started
|
||||
|
||||
#### Quick Start
|
||||
|
||||
Use `uvx` to start with one command (need to install [uv](https://docs.astral.sh/uv/getting-started/installation/)):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
Visit http://localhost:5300 to start using it.
|
||||
|
||||
#### Docker Compose Deployment
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Visit http://localhost:5300 to start using it.
|
||||
|
||||
Detailed documentation [Docker Deployment](https://docs.langbot.app/en/deploy/langbot/docker.html).
|
||||
|
||||
#### One-click Deployment on BTPanel
|
||||
|
||||
LangBot has been listed on the BTPanel, if you have installed the BTPanel, you can use the [document](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html) to use it.
|
||||
|
||||
#### Zeabur Cloud Deployment
|
||||
|
||||
Community contributed Zeabur template.
|
||||
|
||||
[](https://zeabur.com/en-US/templates/ZKTBDH)
|
||||
|
||||
#### Railway Cloud Deployment
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### Other Deployment Methods
|
||||
|
||||
Directly use the released version to run, see the [Manual Deployment](https://docs.langbot.app/en/deploy/langbot/manual.html) documentation.
|
||||
|
||||
#### Kubernetes Deployment
|
||||
|
||||
Refer to the [Kubernetes Deployment](./docker/README_K8S.md) documentation.
|
||||
|
||||
## 😎 Stay Ahead
|
||||
|
||||
Click the Star and Watch button in the upper right corner of the repository to get the latest updates.
|
||||
|
||||

|
||||
|
||||
## ✨ Features
|
||||
|
||||
- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapt to group chats and private chats; Supports multi-round conversations, tool calls, multi-modal, and streaming output capabilities. Built-in RAG (knowledge base) implementation, and deeply integrates with [Dify](https://dify.ai).
|
||||
- 🤖 Multi-platform Support: Currently supports QQ, QQ Channel, WeCom, personal WeChat, Lark, DingTalk, Discord, Telegram, etc.
|
||||
- 🛠️ High Stability, Feature-rich: Native access control, rate limiting, sensitive word filtering, etc. mechanisms; Easy to use, supports multiple deployment methods. Supports multiple pipeline configurations, different bots can be used for different scenarios.
|
||||
- 🧩 Plugin Extension, Active Community: Support event-driven, component extension, etc. plugin mechanisms; Integrate Anthropic [MCP protocol](https://modelcontextprotocol.io/); Currently has hundreds of plugins.
|
||||
- 😻 Web UI: Support management LangBot instance through the browser. No need to manually write configuration files.
|
||||
|
||||
For more detailed specifications, please refer to the [documentation](https://docs.langbot.app/en/insight/features.html).
|
||||
|
||||
Or visit the demo environment: https://demo.langbot.dev/
|
||||
- Login information: Email: `demo@langbot.app` Password: `langbot123456`
|
||||
- Note: For WebUI demo only, please do not fill in any sensitive information in the public environment.
|
||||
|
||||
### Message Platform
|
||||
|
||||
| Platform | Status | Remarks |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| Personal QQ | ✅ | |
|
||||
| QQ Official API | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| Personal WeChat | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### LLMs
|
||||
|
||||
| LLM | Status | Remarks |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | Available for any OpenAI interface format model |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | LLM and GPU resource platform |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOps platform |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | LLM and GPU resource platform |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | LLM aggregation platform, dedicated to global LLMs |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | LLM and GPU resource platform |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | LLM gateway(MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Ollama](https://ollama.com/) | ✅ | Local LLM running platform |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | Local LLM running platform |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | LLM interface gateway(MaaS) |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | LLM gateway(MaaS) |
|
||||
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | LLM gateway(MaaS), LLMOps platform |
|
||||
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | LLM gateway(MaaS), LLMOps platform |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | LLM gateway(MaaS) |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | Support tool access through MCP protocol |
|
||||
|
||||
## 🤝 Community Contribution
|
||||
|
||||
Thank you for the following [code contributors](https://github.com/langbot-app/LangBot/graphs/contributors) and other members in the community for their contributions to LangBot:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
141
README_ES.md
Normal file
141
README_ES.md
Normal file
@@ -0,0 +1,141 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / Español / [Français](README_FR.md) / [한국어](README_KO.md) / [Русский](README_RU.md) / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">Inicio</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">Despliegue</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">Plugin</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">Enviar Plugin</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot es una plataforma de desarrollo de robots de mensajería instantánea nativa de LLM de código abierto, con el objetivo de proporcionar una experiencia de desarrollo de robots de mensajería instantánea lista para usar, con funciones de aplicación LLM como Agent, RAG, MCP, adaptándose a plataformas de mensajería instantánea globales y proporcionando interfaces API ricas, compatible con desarrollo personalizado.
|
||||
|
||||
## 📦 Comenzar
|
||||
|
||||
#### Inicio Rápido
|
||||
|
||||
Use `uvx` para iniciar con un comando (necesita instalar [uv](https://docs.astral.sh/uv/getting-started/installation/)):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
Visite http://localhost:5300 para comenzar a usarlo.
|
||||
|
||||
#### Despliegue con Docker Compose
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Visite http://localhost:5300 para comenzar a usarlo.
|
||||
|
||||
Documentación detallada [Despliegue con Docker](https://docs.langbot.app/en/deploy/langbot/docker.html).
|
||||
|
||||
#### Despliegue con un clic en BTPanel
|
||||
|
||||
LangBot ha sido listado en BTPanel. Si tiene BTPanel instalado, puede usar la [documentación](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html) para usarlo.
|
||||
|
||||
#### Despliegue en la Nube Zeabur
|
||||
|
||||
Plantilla de Zeabur contribuida por la comunidad.
|
||||
|
||||
[](https://zeabur.com/en-US/templates/ZKTBDH)
|
||||
|
||||
#### Despliegue en la Nube Railway
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### Otros Métodos de Despliegue
|
||||
|
||||
Use directamente la versión publicada para ejecutar, consulte la documentación de [Despliegue Manual](https://docs.langbot.app/en/deploy/langbot/manual.html).
|
||||
|
||||
#### Despliegue en Kubernetes
|
||||
|
||||
Consulte la documentación de [Despliegue en Kubernetes](./docker/README_K8S.md).
|
||||
|
||||
## 😎 Manténgase Actualizado
|
||||
|
||||
Haga clic en los botones Star y Watch en la esquina superior derecha del repositorio para obtener las últimas actualizaciones.
|
||||
|
||||

|
||||
|
||||
## ✨ Características
|
||||
|
||||
- 💬 Chat con LLM / Agent: Compatible con múltiples LLMs, adaptado para chats grupales y privados; Admite conversaciones de múltiples rondas, llamadas a herramientas, capacidades multimodales y de salida en streaming. Implementación RAG (base de conocimientos) incorporada, e integración profunda con [Dify](https://dify.ai).
|
||||
- 🤖 Soporte Multiplataforma: Actualmente compatible con QQ, QQ Channel, WeCom, WeChat personal, Lark, DingTalk, Discord, Telegram, etc.
|
||||
- 🛠️ Alta Estabilidad, Rico en Funciones: Control de acceso nativo, limitación de velocidad, filtrado de palabras sensibles, etc.; Fácil de usar, admite múltiples métodos de despliegue. Compatible con múltiples configuraciones de pipeline, diferentes bots para diferentes escenarios.
|
||||
- 🧩 Extensión de Plugin, Comunidad Activa: Compatible con mecanismos de plugin impulsados por eventos, extensión de componentes, etc.; Integración del protocolo [MCP](https://modelcontextprotocol.io/) de Anthropic; Actualmente cuenta con cientos de plugins.
|
||||
- 😻 Interfaz Web: Admite la gestión de instancias de LangBot a través del navegador. No es necesario escribir archivos de configuración manualmente.
|
||||
|
||||
Para especificaciones más detalladas, consulte la [documentación](https://docs.langbot.app/en/insight/features.html).
|
||||
|
||||
O visite el entorno de demostración: https://demo.langbot.dev/
|
||||
- Información de inicio de sesión: Correo electrónico: `demo@langbot.app` Contraseña: `langbot123456`
|
||||
- Nota: Solo para demostración de WebUI, por favor no ingrese información confidencial en el entorno público.
|
||||
|
||||
### Plataformas de Mensajería
|
||||
|
||||
| Plataforma | Estado | Observaciones |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| QQ Personal | ✅ | |
|
||||
| QQ API Oficial | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| WeChat Personal | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### LLMs
|
||||
|
||||
| LLM | Estado | Observaciones |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | Disponible para cualquier modelo con formato de interfaz OpenAI |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | Plataforma de recursos LLM y GPU |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | Plataforma de recursos LLM y GPU |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | Plataforma de agregación LLM |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | Plataforma de recursos LLM y GPU |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | Gateway LLM (MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | Plataforma LLMOps |
|
||||
| [Ollama](https://ollama.com/) | ✅ | Plataforma de ejecución de LLM local |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | Plataforma de ejecución de LLM local |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | Gateway de interfaz LLM (MaaS) |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | Gateway LLM (MaaS) |
|
||||
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | Gateway LLM (MaaS), plataforma LLMOps |
|
||||
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | Gateway LLM (MaaS), plataforma LLMOps |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | Gateway LLM (MaaS) |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | Compatible con acceso a herramientas a través del protocolo MCP |
|
||||
|
||||
## 🤝 Contribución de la Comunidad
|
||||
|
||||
Gracias a los siguientes [contribuidores de código](https://github.com/langbot-app/LangBot/graphs/contributors) y otros miembros de la comunidad por sus contribuciones a LangBot:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
141
README_FR.md
Normal file
141
README_FR.md
Normal file
@@ -0,0 +1,141 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / [Español](README_ES.md) / Français / [한국어](README_KO.md) / [Русский](README_RU.md) / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">Accueil</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">Déploiement</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">Plugin</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">Soumettre un Plugin</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot est une plateforme de développement de robots de messagerie instantanée native LLM open source, visant à fournir une expérience de développement de robots de messagerie instantanée prête à l'emploi, avec des fonctionnalités d'application LLM telles qu'Agent, RAG, MCP, s'adaptant aux plateformes de messagerie instantanée mondiales et fournissant des interfaces API riches, prenant en charge le développement personnalisé.
|
||||
|
||||
## 📦 Commencer
|
||||
|
||||
#### Démarrage Rapide
|
||||
|
||||
Utilisez `uvx` pour démarrer avec une commande (besoin d'installer [uv](https://docs.astral.sh/uv/getting-started/installation/)) :
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
Visitez http://localhost:5300 pour commencer à l'utiliser.
|
||||
|
||||
#### Déploiement avec Docker Compose
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Visitez http://localhost:5300 pour commencer à l'utiliser.
|
||||
|
||||
Documentation détaillée [Déploiement Docker](https://docs.langbot.app/en/deploy/langbot/docker.html).
|
||||
|
||||
#### Déploiement en un clic sur BTPanel
|
||||
|
||||
LangBot a été répertorié sur BTPanel. Si vous avez installé BTPanel, vous pouvez utiliser la [documentation](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html) pour l'utiliser.
|
||||
|
||||
#### Déploiement Cloud Zeabur
|
||||
|
||||
Modèle Zeabur contribué par la communauté.
|
||||
|
||||
[](https://zeabur.com/en-US/templates/ZKTBDH)
|
||||
|
||||
#### Déploiement Cloud Railway
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### Autres Méthodes de Déploiement
|
||||
|
||||
Utilisez directement la version publiée pour exécuter, consultez la documentation de [Déploiement Manuel](https://docs.langbot.app/en/deploy/langbot/manual.html).
|
||||
|
||||
#### Déploiement Kubernetes
|
||||
|
||||
Consultez la documentation de [Déploiement Kubernetes](./docker/README_K8S.md).
|
||||
|
||||
## 😎 Restez à Jour
|
||||
|
||||
Cliquez sur les boutons Star et Watch dans le coin supérieur droit du dépôt pour obtenir les dernières mises à jour.
|
||||
|
||||

|
||||
|
||||
## ✨ Fonctionnalités
|
||||
|
||||
- 💬 Chat avec LLM / Agent : Prend en charge plusieurs LLM, adapté aux chats de groupe et privés ; Prend en charge les conversations multi-tours, les appels d'outils, les capacités multimodales et de sortie en streaming. Implémentation RAG (base de connaissances) intégrée, et intégration profonde avec [Dify](https://dify.ai).
|
||||
- 🤖 Support Multi-plateforme : Actuellement compatible avec QQ, QQ Channel, WeCom, WeChat personnel, Lark, DingTalk, Discord, Telegram, etc.
|
||||
- 🛠️ Haute Stabilité, Riche en Fonctionnalités : Contrôle d'accès natif, limitation de débit, filtrage de mots sensibles, etc. ; Facile à utiliser, prend en charge plusieurs méthodes de déploiement. Prend en charge plusieurs configurations de pipeline, différents bots pour différents scénarios.
|
||||
- 🧩 Extension de Plugin, Communauté Active : Prend en charge les mécanismes de plugin pilotés par événements, l'extension de composants, etc. ; Intégration du protocole [MCP](https://modelcontextprotocol.io/) d'Anthropic ; Dispose actuellement de centaines de plugins.
|
||||
- 😻 Interface Web : Prend en charge la gestion des instances LangBot via le navigateur. Pas besoin d'écrire manuellement les fichiers de configuration.
|
||||
|
||||
Pour des spécifications plus détaillées, veuillez consulter la [documentation](https://docs.langbot.app/en/insight/features.html).
|
||||
|
||||
Ou visitez l'environnement de démonstration : https://demo.langbot.dev/
|
||||
- Informations de connexion : Email : `demo@langbot.app` Mot de passe : `langbot123456`
|
||||
- Note : Pour la démonstration WebUI uniquement, veuillez ne pas entrer d'informations sensibles dans l'environnement public.
|
||||
|
||||
### Plateformes de Messagerie
|
||||
|
||||
| Plateforme | Statut | Remarques |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| QQ Personnel | ✅ | |
|
||||
| API Officielle QQ | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| WeChat Personnel | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### LLMs
|
||||
|
||||
| LLM | Statut | Remarques |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | Disponible pour tout modèle au format d'interface OpenAI |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | Plateforme de ressources LLM et GPU |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | Plateforme de ressources LLM et GPU |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | Plateforme d'agrégation LLM |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | Plateforme de ressources LLM et GPU |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | Passerelle LLM (MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | Plateforme LLMOps |
|
||||
| [Ollama](https://ollama.com/) | ✅ | Plateforme d'exécution LLM locale |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | Plateforme d'exécution LLM locale |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | Passerelle d'interface LLM (MaaS) |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | Passerelle LLM (MaaS) |
|
||||
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | Passerelle LLM (MaaS), plateforme LLMOps |
|
||||
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | Passerelle LLM (MaaS), plateforme LLMOps |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | Passerelle LLM (MaaS) |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | Prend en charge l'accès aux outils via le protocole MCP |
|
||||
|
||||
## 🤝 Contribution de la Communauté
|
||||
|
||||
Merci aux [contributeurs de code](https://github.com/langbot-app/LangBot/graphs/contributors) suivants et aux autres membres de la communauté pour leurs contributions à LangBot :
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
141
README_JP.md
Normal file
141
README_JP.md
Normal file
@@ -0,0 +1,141 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / 日本語 / [Español](README_ES.md) / [Français](README_FR.md) / [한국어](README_KO.md) / [Русский](README_RU.md) / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">ホーム</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">デプロイ</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">プラグイン</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">プラグインの提出</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot は、エージェント、RAG、MCP などの LLM アプリケーション機能を備えた、オープンソースの LLM ネイティブのインスタントメッセージングロボット開発プラットフォームです。世界中のインスタントメッセージングプラットフォームに適応し、豊富な API インターフェースを提供し、カスタム開発をサポートします。
|
||||
|
||||
## 📦 始め方
|
||||
|
||||
#### クイックスタート
|
||||
|
||||
`uvx` を使用した迅速なデプロイ([uv](https://docs.astral.sh/uv/getting-started/installation/) が必要です):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
http://localhost:5300 にアクセスして使用を開始します。
|
||||
|
||||
#### Docker Compose デプロイ
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
http://localhost:5300 にアクセスして使用を開始します。
|
||||
|
||||
詳細なドキュメントは[Dockerデプロイ](https://docs.langbot.app/en/deploy/langbot/docker.html)を参照してください。
|
||||
|
||||
#### Panelでのワンクリックデプロイ
|
||||
|
||||
LangBotはBTPanelにリストされています。BTPanelをインストールしている場合は、[ドキュメント](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html)を使用して使用できます。
|
||||
|
||||
#### Zeaburクラウドデプロイ
|
||||
|
||||
コミュニティが提供するZeaburテンプレート。
|
||||
|
||||
[](https://zeabur.com/en-US/templates/ZKTBDH)
|
||||
|
||||
#### Railwayクラウドデプロイ
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### その他のデプロイ方法
|
||||
|
||||
リリースバージョンを直接使用して実行します。[手動デプロイ](https://docs.langbot.app/en/deploy/langbot/manual.html)のドキュメントを参照してください。
|
||||
|
||||
#### Kubernetes デプロイ
|
||||
|
||||
[Kubernetes デプロイ](./docker/README_K8S.md) ドキュメントを参照してください。
|
||||
|
||||
## 😎 最新情報を入手
|
||||
|
||||
リポジトリの右上にある Star と Watch ボタンをクリックして、最新の更新を取得してください。
|
||||
|
||||

|
||||
|
||||
## ✨ 機能
|
||||
|
||||
- 💬 LLM / エージェントとのチャット: 複数のLLMをサポートし、グループチャットとプライベートチャットに対応。マルチラウンドの会話、ツールの呼び出し、マルチモーダル、ストリーミング出力機能をサポート、RAG(知識ベース)を組み込み、[Dify](https://dify.ai) と深く統合。
|
||||
- 🤖 多プラットフォーム対応: 現在、QQ、QQ チャンネル、WeChat、個人 WeChat、Lark、DingTalk、Discord、Telegram など、複数のプラットフォームをサポートしています。
|
||||
- 🛠️ 高い安定性、豊富な機能: ネイティブのアクセス制御、レート制限、敏感な単語のフィルタリングなどのメカニズムをサポート。使いやすく、複数のデプロイ方法をサポート。複数のパイプライン設定をサポートし、異なるボットを異なる用途に使用できます。
|
||||
- 🧩 プラグイン拡張、活発なコミュニティ: イベント駆動、コンポーネント拡張などのプラグインメカニズムをサポート。適配 Anthropic [MCP プロトコル](https://modelcontextprotocol.io/);豊富なエコシステム、現在数百のプラグインが存在。
|
||||
- 😻 Web UI: ブラウザを通じてLangBotインスタンスを管理することをサポート。
|
||||
|
||||
詳細な仕様については、[ドキュメント](https://docs.langbot.app/en/insight/features.html)を参照してください。
|
||||
|
||||
または、デモ環境にアクセスしてください: https://demo.langbot.dev/
|
||||
- ログイン情報: メール: `demo@langbot.app` パスワード: `langbot123456`
|
||||
- 注意: WebUI のデモンストレーションのみの場合、公開環境では機密情報を入力しないでください。
|
||||
|
||||
### メッセージプラットフォーム
|
||||
|
||||
| プラットフォーム | ステータス | 備考 |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| 個人QQ | ✅ | |
|
||||
| QQ公式API | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| 個人WeChat | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### LLMs
|
||||
|
||||
| LLM | ステータス | 備考 |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | 任意のOpenAIインターフェース形式モデルに対応 |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型とGPUリソースプラットフォーム |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型とGPUリソースプラットフォーム |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | LLMゲートウェイ(MaaS) |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | LLMとGPUリソースプラットフォーム |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | LLMゲートウェイ(MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOpsプラットフォーム |
|
||||
| [Ollama](https://ollama.com/) | ✅ | ローカルLLM実行プラットフォーム |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | ローカルLLM実行プラットフォーム |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | LLMインターフェースゲートウェイ(MaaS) |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | LLMゲートウェイ(MaaS) |
|
||||
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | LLMゲートウェイ(MaaS), LLMOpsプラットフォーム |
|
||||
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | LLMゲートウェイ(MaaS), LLMOpsプラットフォーム |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | LLMゲートウェイ(MaaS) |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | MCPプロトコルをサポート |
|
||||
|
||||
## 🤝 コミュニティ貢献
|
||||
|
||||
LangBot への貢献に対して、以下の [コード貢献者](https://github.com/langbot-app/LangBot/graphs/contributors) とコミュニティの他のメンバーに感謝します。
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
141
README_KO.md
Normal file
141
README_KO.md
Normal file
@@ -0,0 +1,141 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / [Español](README_ES.md) / [Français](README_FR.md) / 한국어 / [Русский](README_RU.md) / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">홈</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">배포</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">플러그인</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">플러그인 제출</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot은 오픈 소스 LLM 네이티브 인스턴트 메시징 로봇 개발 플랫폼으로, Agent, RAG, MCP 등 다양한 LLM 애플리케이션 기능을 갖춘 즉시 사용 가능한 IM 로봇 개발 경험을 제공하며, 글로벌 인스턴트 메시징 플랫폼에 적응하고 풍부한 API 인터페이스를 제공하여 맞춤형 개발을 지원합니다.
|
||||
|
||||
## 📦 시작하기
|
||||
|
||||
#### 빠른 시작
|
||||
|
||||
`uvx`를 사용하여 한 명령으로 시작하세요 ([uv](https://docs.astral.sh/uv/getting-started/installation/) 설치 필요):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
http://localhost:5300을 방문하여 사용을 시작하세요.
|
||||
|
||||
#### Docker Compose 배포
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
http://localhost:5300을 방문하여 사용을 시작하세요.
|
||||
|
||||
자세한 문서는 [Docker 배포](https://docs.langbot.app/en/deploy/langbot/docker.html)를 참조하세요.
|
||||
|
||||
#### BTPanel 원클릭 배포
|
||||
|
||||
LangBot은 BTPanel에 등록되어 있습니다. BTPanel을 설치한 경우 [문서](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html)를 사용하여 사용할 수 있습니다.
|
||||
|
||||
#### Zeabur 클라우드 배포
|
||||
|
||||
커뮤니티에서 제공하는 Zeabur 템플릿입니다.
|
||||
|
||||
[](https://zeabur.com/en-US/templates/ZKTBDH)
|
||||
|
||||
#### Railway 클라우드 배포
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### 기타 배포 방법
|
||||
|
||||
릴리스 버전을 직접 사용하여 실행하려면 [수동 배포](https://docs.langbot.app/en/deploy/langbot/manual.html) 문서를 참조하세요.
|
||||
|
||||
#### Kubernetes 배포
|
||||
|
||||
[Kubernetes 배포](./docker/README_K8S.md) 문서를 참조하세요.
|
||||
|
||||
## 😎 최신 정보 받기
|
||||
|
||||
리포지토리 오른쪽 상단의 Star 및 Watch 버튼을 클릭하여 최신 업데이트를 받으세요.
|
||||
|
||||

|
||||
|
||||
## ✨ 기능
|
||||
|
||||
- 💬 LLM / Agent와 채팅: 여러 LLM을 지원하며 그룹 채팅 및 개인 채팅에 적응; 멀티 라운드 대화, 도구 호출, 멀티모달, 스트리밍 출력 기능을 지원합니다. 내장된 RAG(지식 베이스) 구현 및 [Dify](https://dify.ai)와 깊이 통합됩니다.
|
||||
- 🤖 다중 플랫폼 지원: 현재 QQ, QQ Channel, WeCom, 개인 WeChat, Lark, DingTalk, Discord, Telegram 등을 지원합니다.
|
||||
- 🛠️ 높은 안정성, 풍부한 기능: 네이티브 액세스 제어, 속도 제한, 민감한 단어 필터링 등의 메커니즘; 사용하기 쉽고 여러 배포 방법을 지원합니다. 여러 파이프라인 구성을 지원하며 다양한 시나리오에 대해 다른 봇을 사용할 수 있습니다.
|
||||
- 🧩 플러그인 확장, 활발한 커뮤니티: 이벤트 기반, 컴포넌트 확장 등의 플러그인 메커니즘을 지원; Anthropic [MCP 프로토콜](https://modelcontextprotocol.io/) 통합; 현재 수백 개의 플러그인이 있습니다.
|
||||
- 😻 웹 UI: 브라우저를 통해 LangBot 인스턴스 관리를 지원합니다. 구성 파일을 수동으로 작성할 필요가 없습니다.
|
||||
|
||||
더 자세한 사양은 [문서](https://docs.langbot.app/en/insight/features.html)를 참조하세요.
|
||||
|
||||
또는 데모 환경을 방문하세요: https://demo.langbot.dev/
|
||||
- 로그인 정보: 이메일: `demo@langbot.app` 비밀번호: `langbot123456`
|
||||
- 참고: WebUI 데모 전용이므로 공개 환경에서는 민감한 정보를 입력하지 마세요.
|
||||
|
||||
### 메시징 플랫폼
|
||||
|
||||
| 플랫폼 | 상태 | 비고 |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| 개인 QQ | ✅ | |
|
||||
| QQ 공식 API | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| 개인 WeChat | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### LLMs
|
||||
|
||||
| LLM | 상태 | 비고 |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | 모든 OpenAI 인터페이스 형식 모델에 사용 가능 |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | LLM 및 GPU 리소스 플랫폼 |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | LLM 및 GPU 리소스 플랫폼 |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | LLM 집계 플랫폼 |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | LLM 및 GPU 리소스 플랫폼 |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | LLM 게이트웨이(MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOps 플랫폼 |
|
||||
| [Ollama](https://ollama.com/) | ✅ | 로컬 LLM 실행 플랫폼 |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | 로컬 LLM 실행 플랫폼 |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | LLM 인터페이스 게이트웨이(MaaS) |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | LLM 게이트웨이(MaaS) |
|
||||
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | LLM 게이트웨이(MaaS), LLMOps 플랫폼 |
|
||||
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | LLM 게이트웨이(MaaS), LLMOps 플랫폼 |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | LLM 게이트웨이(MaaS) |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | MCP 프로토콜을 통한 도구 액세스 지원 |
|
||||
|
||||
## 🤝 커뮤니티 기여
|
||||
|
||||
다음 [코드 기여자](https://github.com/langbot-app/LangBot/graphs/contributors) 및 커뮤니티의 다른 구성원들의 LangBot 기여에 감사드립니다:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
141
README_RU.md
Normal file
141
README_RU.md
Normal file
@@ -0,0 +1,141 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / [Español](README_ES.md) / [Français](README_FR.md) / [한국어](README_KO.md) / Русский / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">Главная</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">Развертывание</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">Плагин</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">Отправить плагин</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot — это платформа разработки ботов для мгновенных сообщений на основе LLM с открытым исходным кодом, целью которой является предоставление готового к использованию опыта разработки ботов для IM, с функциями приложений LLM, такими как Agent, RAG, MCP, адаптацией к глобальным платформам мгновенных сообщений и предоставлением богатых API-интерфейсов, поддерживающих пользовательскую разработку.
|
||||
|
||||
## 📦 Начало работы
|
||||
|
||||
#### Быстрый старт
|
||||
|
||||
Используйте `uvx` для запуска одной командой (требуется установка [uv](https://docs.astral.sh/uv/getting-started/installation/)):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
Посетите http://localhost:5300, чтобы начать использование.
|
||||
|
||||
#### Развертывание с Docker Compose
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Посетите http://localhost:5300, чтобы начать использование.
|
||||
|
||||
Подробная документация [Развертывание Docker](https://docs.langbot.app/en/deploy/langbot/docker.html).
|
||||
|
||||
#### Развертывание одним кликом на BTPanel
|
||||
|
||||
LangBot добавлен в BTPanel. Если у вас установлен BTPanel, вы можете использовать [документацию](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html) для его использования.
|
||||
|
||||
#### Облачное развертывание Zeabur
|
||||
|
||||
Шаблон Zeabur, предоставленный сообществом.
|
||||
|
||||
[](https://zeabur.com/en-US/templates/ZKTBDH)
|
||||
|
||||
#### Облачное развертывание Railway
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### Другие методы развертывания
|
||||
|
||||
Используйте выпущенную версию напрямую для запуска, см. документацию [Ручное развертывание](https://docs.langbot.app/en/deploy/langbot/manual.html).
|
||||
|
||||
#### Развертывание Kubernetes
|
||||
|
||||
См. документацию [Развертывание Kubernetes](./docker/README_K8S.md).
|
||||
|
||||
## 😎 Оставайтесь в курсе
|
||||
|
||||
Нажмите кнопки Star и Watch в правом верхнем углу репозитория, чтобы получать последние обновления.
|
||||
|
||||

|
||||
|
||||
## ✨ Функции
|
||||
|
||||
- 💬 Чат с LLM / Agent: Поддержка нескольких LLM, адаптация к групповым и личным чатам; Поддержка многораундовых разговоров, вызовов инструментов, мультимодальных возможностей и потоковой передачи. Встроенная реализация RAG (база знаний) и глубокая интеграция с [Dify](https://dify.ai).
|
||||
- 🤖 Многоплатформенная поддержка: В настоящее время поддерживает QQ, QQ Channel, WeCom, личный WeChat, Lark, DingTalk, Discord, Telegram и т.д.
|
||||
- 🛠️ Высокая стабильность, богатство функций: Нативный контроль доступа, ограничение скорости, фильтрация чувствительных слов и т.д.; Простота в использовании, поддержка нескольких методов развертывания. Поддержка нескольких конфигураций конвейера, разные боты для разных сценариев.
|
||||
- 🧩 Расширение плагинов, активное сообщество: Поддержка механизмов плагинов, управляемых событиями, расширения компонентов и т.д.; Интеграция протокола [MCP](https://modelcontextprotocol.io/) от Anthropic; В настоящее время сотни плагинов.
|
||||
- 😻 Веб-интерфейс: Поддержка управления экземплярами LangBot через браузер. Нет необходимости вручную писать конфигурационные файлы.
|
||||
|
||||
Для более подробных спецификаций обратитесь к [документации](https://docs.langbot.app/en/insight/features.html).
|
||||
|
||||
Или посетите демонстрационную среду: https://demo.langbot.dev/
|
||||
- Информация для входа: Email: `demo@langbot.app` Пароль: `langbot123456`
|
||||
- Примечание: Только для демонстрации WebUI, пожалуйста, не вводите конфиденциальную информацию в общедоступной среде.
|
||||
|
||||
### Платформы обмена сообщениями
|
||||
|
||||
| Платформа | Статус | Примечания |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| Личный QQ | ✅ | |
|
||||
| Официальный API QQ | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| Личный WeChat | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### LLMs
|
||||
|
||||
| LLM | Статус | Примечания |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | Доступна для любой модели формата интерфейса OpenAI |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | Платформа ресурсов LLM и GPU |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | Платформа ресурсов LLM и GPU |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | Платформа агрегации LLM |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | Платформа ресурсов LLM и GPU |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | Шлюз LLM (MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | Платформа LLMOps |
|
||||
| [Ollama](https://ollama.com/) | ✅ | Платформа локального запуска LLM |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | Платформа локального запуска LLM |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | Шлюз интерфейса LLM (MaaS) |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | Шлюз LLM (MaaS) |
|
||||
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | Шлюз LLM (MaaS), платформа LLMOps |
|
||||
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | Шлюз LLM (MaaS), платформа LLMOps |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | Шлюз LLM (MaaS) |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | Поддержка доступа к инструментам через протокол MCP |
|
||||
|
||||
## 🤝 Вклад сообщества
|
||||
|
||||
Спасибо следующим [контрибьюторам кода](https://github.com/langbot-app/LangBot/graphs/contributors) и другим членам сообщества за их вклад в LangBot:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
157
README_TW.md
Normal file
@@ -0,0 +1,157 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_zh.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center"><a href="https://hellogithub.com/repository/langbot-app/LangBot" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=5ce8ae2aa4f74316bf393b57b952433c&claim_uid=gtmc6YWjMZkT21R" alt="Featured|HelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / 繁體中文 / [日本語](README_JP.md) / [Español](README_ES.md) / [Français](README_FR.md) / [한국어](README_KO.md) / [Русский](README_RU.md) / [Tiếng Việt](README_VI.md)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://qm.qq.com/q/JLi38whHum)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
[](https://gitcode.com/RockChinQ/LangBot)
|
||||
|
||||
<a href="https://langbot.app">主頁</a> |
|
||||
<a href="https://docs.langbot.app/zh/insight/guide.html">部署文件</a> |
|
||||
<a href="https://docs.langbot.app/zh/plugin/plugin-intro.html">外掛介紹</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">提交外掛</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot 是一個開源的大語言模型原生即時通訊機器人開發平台,旨在提供開箱即用的 IM 機器人開發體驗,具有 Agent、RAG、MCP 等多種 LLM 應用功能,適配全球主流即時通訊平台,並提供豐富的 API 介面,支援自定義開發。
|
||||
|
||||
## 📦 開始使用
|
||||
|
||||
#### 快速部署
|
||||
|
||||
使用 `uvx` 一鍵啟動(需要先安裝 [uv](https://docs.astral.sh/uv/getting-started/installation/) ):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
訪問 http://localhost:5300 即可開始使用。
|
||||
|
||||
#### Docker Compose 部署
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
訪問 http://localhost:5300 即可開始使用。
|
||||
|
||||
詳細文件[Docker 部署](https://docs.langbot.app/zh/deploy/langbot/docker.html)。
|
||||
|
||||
#### 寶塔面板部署
|
||||
|
||||
已上架寶塔面板,若您已安裝寶塔面板,可以根據[文件](https://docs.langbot.app/zh/deploy/langbot/one-click/bt.html)使用。
|
||||
|
||||
#### Zeabur 雲端部署
|
||||
|
||||
社群貢獻的 Zeabur 模板。
|
||||
|
||||
[](https://zeabur.com/zh-CN/templates/ZKTBDH)
|
||||
|
||||
#### Railway 雲端部署
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### 手動部署
|
||||
|
||||
直接使用發行版運行,查看文件[手動部署](https://docs.langbot.app/zh/deploy/langbot/manual.html)。
|
||||
|
||||
#### Kubernetes 部署
|
||||
|
||||
參考 [Kubernetes 部署](./docker/README_K8S.md) 文件。
|
||||
|
||||
## 😎 保持更新
|
||||
|
||||
點擊倉庫右上角 Star 和 Watch 按鈕,獲取最新動態。
|
||||
|
||||

|
||||
|
||||
## ✨ 特性
|
||||
|
||||
- 💬 大模型對話、Agent:支援多種大模型,適配群聊和私聊;具有多輪對話、工具調用、多模態、流式輸出能力,自帶 RAG(知識庫)實現,並深度適配 [Dify](https://dify.ai)。
|
||||
- 🤖 多平台支援:目前支援 QQ、QQ頻道、企業微信、個人微信、飛書、Discord、Telegram 等平台。
|
||||
- 🛠️ 高穩定性、功能完備:原生支援訪問控制、限速、敏感詞過濾等機制;配置簡單,支援多種部署方式。支援多流水線配置,不同機器人用於不同應用場景。
|
||||
- 🧩 外掛擴展、活躍社群:支援事件驅動、組件擴展等外掛機制;適配 Anthropic [MCP 協議](https://modelcontextprotocol.io/);目前已有數百個外掛。
|
||||
- 😻 Web 管理面板:支援通過瀏覽器管理 LangBot 實例,不再需要手動編寫配置文件。
|
||||
|
||||
詳細規格特性請訪問[文件](https://docs.langbot.app/zh/insight/features.html)。
|
||||
|
||||
或訪問 demo 環境:https://demo.langbot.dev/
|
||||
- 登入資訊:郵箱:`demo@langbot.app` 密碼:`langbot123456`
|
||||
- 注意:僅展示 WebUI 效果,公開環境,請不要在其中填入您的任何敏感資訊。
|
||||
|
||||
### 訊息平台
|
||||
|
||||
| 平台 | 狀態 | 備註 |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| QQ 個人號 | ✅ | QQ 個人號私聊、群聊 |
|
||||
| QQ 官方機器人 | ✅ | QQ 官方機器人,支援頻道、私聊、群聊 |
|
||||
| 微信 | ✅ | |
|
||||
| 企微對外客服 | ✅ | |
|
||||
| 企微智能機器人 | ✅ | |
|
||||
| 微信公眾號 | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### 大模型能力
|
||||
|
||||
| 模型 | 狀態 | 備註 |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | 可接入任何 OpenAI 介面格式模型 |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [智譜AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [勝算雲](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | 大模型和 GPU 資源平台 |
|
||||
| [優雲智算](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型和 GPU 資源平台 |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型和 GPU 資源平台 |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | 大模型聚合平台,專注全球大模型接入 |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | 大模型聚合平台 |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOps 平台 |
|
||||
| [Ollama](https://ollama.com/) | ✅ | 本地大模型運行平台 |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | 本地大模型運行平台 |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | 大模型介面聚合平台 |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | 大模型聚合平台 |
|
||||
| [阿里雲百煉](https://bailian.console.aliyun.com/) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | 大模型聚合平台 |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | 支援通過 MCP 協議獲取工具 |
|
||||
|
||||
### TTS
|
||||
|
||||
| 平台/模型 | 備註 |
|
||||
| --- | --- |
|
||||
| [FishAudio](https://fish.audio/zh-CN/discovery/) | [外掛](https://github.com/the-lazy-me/NewChatVoice) |
|
||||
| [海豚 AI](https://www.ttson.cn/?source=thelazy) | [外掛](https://github.com/the-lazy-me/NewChatVoice) |
|
||||
| [AzureTTS](https://portal.azure.com/) | [外掛](https://github.com/Ingnaryk/LangBot_AzureTTS) |
|
||||
|
||||
### 文生圖
|
||||
|
||||
| 平台/模型 | 備註 |
|
||||
| --- | --- |
|
||||
| 阿里雲百煉 | [外掛](https://github.com/Thetail001/LangBot_BailianTextToImagePlugin)
|
||||
|
||||
## 😘 社群貢獻
|
||||
|
||||
感謝以下[程式碼貢獻者](https://github.com/langbot-app/LangBot/graphs/contributors)和社群裡其他成員對 LangBot 的貢獻:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
141
README_VI.md
Normal file
@@ -0,0 +1,141 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / [Español](README_ES.md) / [Français](README_FR.md) / [한국어](README_KO.md) / [Русский](README_RU.md) / Tiếng Việt
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">Trang chủ</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">Triển khai</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">Plugin</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">Gửi Plugin</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot là một nền tảng phát triển robot nhắn tin tức thời gốc LLM mã nguồn mở, nhằm mục đích cung cấp trải nghiệm phát triển robot IM sẵn sàng sử dụng, với các chức năng ứng dụng LLM như Agent, RAG, MCP, thích ứng với các nền tảng nhắn tin tức thời toàn cầu và cung cấp giao diện API phong phú, hỗ trợ phát triển tùy chỉnh.
|
||||
|
||||
## 📦 Bắt đầu
|
||||
|
||||
#### Khởi động Nhanh
|
||||
|
||||
Sử dụng `uvx` để khởi động bằng một lệnh (cần cài đặt [uv](https://docs.astral.sh/uv/getting-started/installation/)):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
Truy cập http://localhost:5300 để bắt đầu sử dụng.
|
||||
|
||||
#### Triển khai Docker Compose
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Truy cập http://localhost:5300 để bắt đầu sử dụng.
|
||||
|
||||
Tài liệu chi tiết [Triển khai Docker](https://docs.langbot.app/en/deploy/langbot/docker.html).
|
||||
|
||||
#### Triển khai Một cú nhấp chuột trên BTPanel
|
||||
|
||||
LangBot đã được liệt kê trên BTPanel. Nếu bạn đã cài đặt BTPanel, bạn có thể sử dụng [tài liệu](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html) để sử dụng nó.
|
||||
|
||||
#### Triển khai Cloud Zeabur
|
||||
|
||||
Mẫu Zeabur được đóng góp bởi cộng đồng.
|
||||
|
||||
[](https://zeabur.com/en-US/templates/ZKTBDH)
|
||||
|
||||
#### Triển khai Cloud Railway
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### Các Phương pháp Triển khai Khác
|
||||
|
||||
Sử dụng trực tiếp phiên bản phát hành để chạy, xem tài liệu [Triển khai Thủ công](https://docs.langbot.app/en/deploy/langbot/manual.html).
|
||||
|
||||
#### Triển khai Kubernetes
|
||||
|
||||
Tham khảo tài liệu [Triển khai Kubernetes](./docker/README_K8S.md).
|
||||
|
||||
## 😎 Cập nhật Mới nhất
|
||||
|
||||
Nhấp vào các nút Star và Watch ở góc trên bên phải của kho lưu trữ để nhận các bản cập nhật mới nhất.
|
||||
|
||||

|
||||
|
||||
## ✨ Tính năng
|
||||
|
||||
- 💬 Chat với LLM / Agent: Hỗ trợ nhiều LLM, thích ứng với chat nhóm và chat riêng tư; Hỗ trợ các cuộc trò chuyện nhiều vòng, gọi công cụ, khả năng đa phương thức và đầu ra streaming. Triển khai RAG (cơ sở kiến thức) tích hợp sẵn và tích hợp sâu với [Dify](https://dify.ai).
|
||||
- 🤖 Hỗ trợ Đa nền tảng: Hiện hỗ trợ QQ, QQ Channel, WeCom, WeChat cá nhân, Lark, DingTalk, Discord, Telegram, v.v.
|
||||
- 🛠️ Độ ổn định Cao, Tính năng Phong phú: Kiểm soát truy cập gốc, giới hạn tốc độ, lọc từ nhạy cảm, v.v.; Dễ sử dụng, hỗ trợ nhiều phương pháp triển khai. Hỗ trợ nhiều cấu hình pipeline, các bot khác nhau cho các kịch bản khác nhau.
|
||||
- 🧩 Mở rộng Plugin, Cộng đồng Hoạt động: Hỗ trợ các cơ chế plugin hướng sự kiện, mở rộng thành phần, v.v.; Tích hợp giao thức [MCP](https://modelcontextprotocol.io/) của Anthropic; Hiện có hàng trăm plugin.
|
||||
- 😻 Giao diện Web: Hỗ trợ quản lý các phiên bản LangBot thông qua trình duyệt. Không cần viết tệp cấu hình thủ công.
|
||||
|
||||
Để biết thêm thông số kỹ thuật chi tiết, vui lòng tham khảo [tài liệu](https://docs.langbot.app/en/insight/features.html).
|
||||
|
||||
Hoặc truy cập môi trường demo: https://demo.langbot.dev/
|
||||
- Thông tin đăng nhập: Email: `demo@langbot.app` Mật khẩu: `langbot123456`
|
||||
- Lưu ý: Chỉ dành cho demo WebUI, vui lòng không nhập bất kỳ thông tin nhạy cảm nào trong môi trường công cộng.
|
||||
|
||||
### Nền tảng Nhắn tin
|
||||
|
||||
| Nền tảng | Trạng thái | Ghi chú |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| QQ Cá nhân | ✅ | |
|
||||
| QQ API Chính thức | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| WeChat Cá nhân | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### LLMs
|
||||
|
||||
| LLM | Trạng thái | Ghi chú |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | Có sẵn cho bất kỳ mô hình định dạng giao diện OpenAI nào |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | Nền tảng tài nguyên LLM và GPU |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | Nền tảng tài nguyên LLM và GPU |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | Nền tảng tổng hợp LLM |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | Nền tảng tài nguyên LLM và GPU |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | Cổng LLM (MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | Nền tảng LLMOps |
|
||||
| [Ollama](https://ollama.com/) | ✅ | Nền tảng chạy LLM cục bộ |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | Nền tảng chạy LLM cục bộ |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | Cổng giao diện LLM (MaaS) |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | Cổng LLM (MaaS) |
|
||||
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | Cổng LLM (MaaS), nền tảng LLMOps |
|
||||
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | Cổng LLM (MaaS), nền tảng LLMOps |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | Cổng LLM (MaaS) |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | Hỗ trợ truy cập công cụ qua giao thức MCP |
|
||||
|
||||
## 🤝 Đóng góp Cộng đồng
|
||||
|
||||
Cảm ơn các [người đóng góp mã](https://github.com/langbot-app/LangBot/graphs/contributors) sau đây và các thành viên khác trong cộng đồng vì những đóng góp của họ cho LangBot:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
215
README_en.md
@@ -1,215 +0,0 @@
|
||||
# QChatGPT🤖
|
||||
|
||||
<p align="center">
|
||||
<img src="res/social.png" alt="QChatGPT" width="640" />
|
||||
</p>
|
||||
|
||||
English | [简体中文](README.md)
|
||||
|
||||
[](https://github.com/RockChinQ/QChatGPT/releases/latest)
|
||||

|
||||
|
||||
- Refer to [Wiki](https://github.com/RockChinQ/QChatGPT/wiki) to get further information.
|
||||
- Official QQ group: 656285629
|
||||
- Community QQ group: 362515018
|
||||
- QQ channel robot: [QQChannelChatGPT](https://github.com/Soulter/QQChannelChatGPT)
|
||||
- Any contribution is welcome, please refer to [CONTRIBUTING.md](CONTRIBUTING.md)
|
||||
|
||||
## 🍺List of supported models
|
||||
|
||||
<details>
|
||||
<summary>Details</summary>
|
||||
|
||||
### Chat
|
||||
|
||||
- OpenAI GPT-3.5 (ChatGPT API), default model
|
||||
- OpenAI GPT-3, supported natively, switch to it in `config.py`
|
||||
- OpenAI GPT-4, supported natively, qualification for internal testing required, switch to it in `config.py`
|
||||
- ChatGPT website edition (GPT-3.5), see [revLibs plugin](https://github.com/RockChinQ/revLibs)
|
||||
- ChatGPT website edition (GPT-4), ChatGPT plus subscription required, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
|
||||
- New Bing, see [revLibs plugin](https://github.com/RockChinQ/revLibs)
|
||||
- HuggingChat, see [revLibs plugin](https://github.com/RockChinQ/revLibs), English only
|
||||
|
||||
### Story
|
||||
|
||||
- NovelAI API, see [QCPNovelAi plugin](https://github.com/dominoar/QCPNovelAi)
|
||||
|
||||
### Image
|
||||
|
||||
- OpenAI DALL·E, supported natively, see [Wiki(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
|
||||
- NovelAI API, see [QCPNovelAi plugin](https://github.com/dominoar/QCPNovelAi)
|
||||
|
||||
### Voice
|
||||
|
||||
- TTS+VITS, see [QChatPlugins](https://github.com/dominoar/QChatPlugins)
|
||||
- Plachta/VITS-Umamusume-voice-synthesizer, see [chat_voice plugin](https://github.com/oliverkirk-sudo/chat_voice)
|
||||
|
||||
|
||||
</details>
|
||||
|
||||
Install this [plugin](https://github.com/RockChinQ/Switcher) to switch between different models.
|
||||
|
||||
## ✅Features
|
||||
|
||||
<details>
|
||||
<summary>Details</summary>
|
||||
|
||||
- ✅Sensitive word filtering, avoid being banned
|
||||
- ✅Multiple responding rules, including regular expression matching
|
||||
- ✅Multiple api-key management, automatic switching when the quota is exceeded
|
||||
- ✅Support for customizing the preset prompt text
|
||||
- ✅Chat, story, image, voice, etc. models are supported
|
||||
- ✅Support for hot reloading and hot updating
|
||||
- ✅Support for plugin loading
|
||||
- ✅Blacklist mechanism for private chat and group chat
|
||||
- ✅Excellent long message processing strategy
|
||||
- ✅Reply rate limitation
|
||||
- ✅Support for network proxy
|
||||
- ✅Support for customizing the output format
|
||||
</details>
|
||||
|
||||
More details, see [Wiki(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%8A%9F%E8%83%BD%E7%82%B9%E5%88%97%E4%B8%BE)
|
||||
|
||||
## 🔩Deployment
|
||||
|
||||
**If you encounter any problems during deployment, please search in the issue of [QChatGPT](https://github.com/RockChinQ/QChatGPT/issues) or [qcg-installer](https://github.com/RockChinQ/qcg-installer/issues) first.**
|
||||
|
||||
### - Register OpenAI account
|
||||
|
||||
> If you want to use a model other than OpenAI (such as New Bing), you can skip this step, follow the steps below directly, and then configure it according to the relevant plugin documentation.
|
||||
|
||||
To register an OpenAI account, please refer to the following articles (in Chinese):
|
||||
|
||||
> [国内注册ChatGPT的方法(100%可用)](https://www.pythonthree.com/register-openai-chatgpt/)
|
||||
> [手把手教你如何注册ChatGPT,超级详细](https://guxiaobei.com/51461)
|
||||
|
||||
Check your api-key in the [personal center](https://beta.openai.com/account/api-keys) after registration, and then follow the steps below to deploy.
|
||||
|
||||
### - Deploy Automatically
|
||||
|
||||
<details>
|
||||
<summary>Details</summary>
|
||||
|
||||
#### Docker
|
||||
|
||||
See [this document(cn)](res/docs/docker_deploy.md)
|
||||
Contributed by [@mikumifa](https://github.com/mikumifa)
|
||||
|
||||
#### Installer
|
||||
|
||||
Use [this installer](https://github.com/RockChinQ/qcg-installer) to deploy.
|
||||
|
||||
- The installer currently supports only some platforms; refer to the repository documentation for details and deploy manually on other platforms
|
||||
|
||||
</details>
|
||||
|
||||
### - Deploy Manually
|
||||
<details>
|
||||
<summary>Manual deployment supports any platform</summary>
|
||||
|
||||
- Python 3.9.x or higher
|
||||
|
||||
#### Configure the QQ login framework
|
||||
|
||||
Currently mirai and go-cqhttp are supported; configure either one.
|
||||
|
||||
<details>
|
||||
<summary>mirai</summary>
|
||||
|
||||
Follow [this tutorial(cn)](https://yiri-mirai.wybxc.cc/tutorials/01/configuration) to configure Mirai and YiriMirai.
|
||||
After starting mirai-console, use the `login` command to log in to the QQ account, and keep the mirai-console running.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>go-cqhttp</summary>
|
||||
|
||||
1. Follow [this tutorial(cn)](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE) to configure go-cqhttp.
|
||||
2. Start go-cqhttp, make sure it is logged in and running.
|
||||
|
||||
</details>
|
||||
|
||||
#### Configure QChatGPT
|
||||
|
||||
1. Clone the repository
|
||||
|
||||
```bash
|
||||
git clone https://github.com/RockChinQ/QChatGPT
|
||||
cd QChatGPT
|
||||
```
|
||||
|
||||
2. Install dependencies
|
||||
|
||||
```bash
|
||||
pip3 install requests yiri-mirai-rc openai colorlog func_timeout dulwich Pillow nakuru-project-idk
|
||||
```
|
||||
|
||||
3. Generate `config.py`
|
||||
|
||||
```bash
|
||||
python3 main.py
|
||||
```
|
||||
|
||||
4. Edit `config.py`
|
||||
|
||||
5. Run
|
||||
|
||||
```bash
|
||||
python3 main.py
|
||||
```
|
||||
|
||||
If you run into any problems, please refer to the issues page.
|
||||
|
||||
</details>
|
||||
|
||||
## 🚀Usage
|
||||
|
||||
**After deployment, please read: [Commands(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E6%9C%BA%E5%99%A8%E4%BA%BA%E6%8C%87%E4%BB%A4)**
|
||||
|
||||
**For more details, please refer to the [Wiki(cn)](https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E4%BD%BF%E7%94%A8%E6%96%B9%E5%BC%8F)**
|
||||
|
||||
|
||||
## 🧩Plugin Ecosystem
|
||||
|
||||
Plugin [usage](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) and [development](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91) are supported.
|
||||
|
||||
<details>
|
||||
<summary>List of plugins (cn)</summary>
|
||||
|
||||
### Examples
|
||||
|
||||
在`tests/plugin_examples`目录下,将其整个目录复制到`plugins`目录下即可使用
|
||||
|
||||
- `cmdcn` - 主程序命令中文形式
|
||||
- `hello_plugin` - 在收到消息`hello`时回复相应消息
|
||||
- `urlikethisijustsix` - 收到冒犯性消息时回复相应消息
|
||||
|
||||
### More Plugins
|
||||
|
||||
欢迎提交新的插件
|
||||
|
||||
- [revLibs](https://github.com/RockChinQ/revLibs) - 将ChatGPT网页版接入此项目,关于[官方接口和网页版有什么区别](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E4%B8%8EChatGPT%E7%BD%91%E9%A1%B5%E7%89%88)
|
||||
- [Switcher](https://github.com/RockChinQ/Switcher) - 支持通过命令切换使用的模型
|
||||
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - `hello_plugin` 的储存库形式,插件开发模板
|
||||
- [dominoar/QChatPlugins](https://github.com/dominoar/QchatPlugins) - dominoar编写的诸多新功能插件(语音输出、Ranimg、屏蔽词规则等)
|
||||
- [dominoar/QCP-NovelAi](https://github.com/dominoar/QCP-NovelAi) - NovelAI 故事叙述与绘画
|
||||
- [oliverkirk-sudo/chat_voice](https://github.com/oliverkirk-sudo/chat_voice) - 文字转语音输出,使用HuggingFace上的[VITS-Umamusume-voice-synthesizer模型](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer)
|
||||
- [RockChinQ/WaitYiYan](https://github.com/RockChinQ/WaitYiYan) - 实时获取百度`文心一言`等待列表人数
|
||||
- [chordfish-k/QChartGPT_Emoticon_Plugin](https://github.com/chordfish-k/QChartGPT_Emoticon_Plugin) - 使机器人根据回复内容发送表情包
|
||||
- [oliverkirk-sudo/ChatPoeBot](https://github.com/oliverkirk-sudo/ChatPoeBot) - 接入[Poe](https://poe.com/)上的机器人
|
||||
- [lieyanqzu/WeatherPlugin](https://github.com/lieyanqzu/WeatherPlugin) - 天气查询插件
|
||||
</details>
|
||||
|
||||
## 😘Thanks
|
||||
|
||||
- [@the-lazy-me](https://github.com/the-lazy-me) video tutorial creator
|
||||
- [@mikumifa](https://github.com/mikumifa) Docker deployment
|
||||
- [@dominoar](https://github.com/dominoar) Plugin development
|
||||
- [@万神的星空](https://github.com/qq255204159) Packages publisher
|
||||
- [@ljcduo](https://github.com/ljcduo) GPT-4 API internal test account
|
||||
|
||||
And all [contributors](https://github.com/RockChinQ/QChatGPT/graphs/contributors) and other friends who support this project.
|
||||
|
||||
<!-- ## 👍赞赏
|
||||
|
||||
<img alt="赞赏码" src="res/mm_reward_qrcode_1672840549070.png" width="400" height="400"/> -->
|
||||
4
codecov.yml
Normal file
@@ -0,0 +1,4 @@

coverage:
  status:
    project: off
    patch: off
|
||||
@@ -1,370 +0,0 @@
|
||||
# 配置文件: 注释里标[必需]的参数必须修改, 其他参数根据需要修改, 但请勿删除
|
||||
import logging
|
||||
|
||||
# 消息处理协议适配器
|
||||
# 目前支持以下适配器:
|
||||
# - "yirimirai": mirai的通信框架,YiriMirai框架适配器, 请同时填写下方mirai_http_api_config
|
||||
# - "nakuru": go-cqhttp通信框架,请同时填写下方nakuru_config
|
||||
msg_source_adapter = "yirimirai"
|
||||
|
||||
# [必需(与nakuru二选一,取决于msg_source_adapter)] Mirai的配置
|
||||
# 请到配置mirai的步骤中的教程查看每个字段的信息
|
||||
# adapter: 选择适配器,目前支持HTTPAdapter和WebSocketAdapter
|
||||
# host: 运行mirai的主机地址
|
||||
# port: 运行mirai的主机端口
|
||||
# verifyKey: mirai-api-http的verifyKey
|
||||
# qq: 机器人的QQ号
|
||||
#
|
||||
# 注意: QQ机器人配置不支持热重载及热更新
|
||||
mirai_http_api_config = {
|
||||
"adapter": "WebSocketAdapter",
|
||||
"host": "localhost",
|
||||
"port": 8080,
|
||||
"verifyKey": "yirimirai",
|
||||
"qq": 1234567890
|
||||
}
|
||||
|
||||
# [必需(与mirai二选一,取决于msg_source_adapter)]
|
||||
# 使用nakuru-project框架连接go-cqhttp的配置
|
||||
nakuru_config = {
|
||||
"host": "localhost", # go-cqhttp的地址
|
||||
"port": 6700, # go-cqhttp的正向websocket端口
|
||||
"http_port": 5700, # go-cqhttp的正向http端口
|
||||
"token": "" # 若在go-cqhttp的config.yml设置了access_token, 则填写此处
|
||||
}
|
||||
|
||||
# [必需] OpenAI的配置
|
||||
# api_key: OpenAI的API Key
|
||||
# http_proxy: 请求OpenAI时使用的代理,None为不使用,https和socks5暂不能使用
|
||||
# 若只有一个api-key,请直接修改以下内容中的"openai_api_key"为你的api-key
|
||||
#
|
||||
# 如准备了多个api-key,可以以字典的形式填写,程序会自动选择可用的api-key
|
||||
# 例如
|
||||
# openai_config = {
|
||||
# "api_key": {
|
||||
# "default": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||
# "key1": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||
# "key2": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||
# },
|
||||
# "http_proxy": "http://127.0.0.1:12345"
|
||||
# }
|
||||
#
|
||||
# 现已支持反向代理,可以添加reverse_proxy字段以使用反向代理
|
||||
# 使用反向代理可以在国内使用OpenAI的API,反向代理的配置请参考
|
||||
# https://github.com/Ice-Hazymoon/openai-scf-proxy
|
||||
#
|
||||
# 反向代理填写示例:
|
||||
# openai_config = {
|
||||
# "api_key": {
|
||||
# "default": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||
# "key1": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||
# "key2": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||
# },
|
||||
# "reverse_proxy": "http://example.com:12345/v1"
|
||||
# }
|
||||
#
|
||||
# 作者开设公用反向代理地址: https://api.openai.rockchin.top/v1
|
||||
# 随时可能关闭,仅供测试使用,有条件建议使用正向代理或者自建反向代理
|
||||
openai_config = {
|
||||
"api_key": {
|
||||
"default": "openai_api_key"
|
||||
},
|
||||
"http_proxy": None,
|
||||
"reverse_proxy": None
|
||||
}
|
||||
|
||||
# api-key切换策略
|
||||
# active:每次请求时都会切换api-key
|
||||
# passive:仅当api-key超额时才会切换api-key
|
||||
switch_strategy = "active"
|
||||
|
||||
# [必需] 管理员QQ号,用于接收报错等通知及执行管理员级别命令
|
||||
# 支持多个管理员,可以使用list形式设置,例如:
|
||||
# admin_qq = [12345678, 87654321]
|
||||
admin_qq = 0
|
||||
|
||||
# 情景预设(机器人人格)
|
||||
# 每个会话的预设信息,影响所有会话,无视命令重置
|
||||
# 可以通过这个字段指定某些情况的回复,可直接用自然语言描述指令
|
||||
# 例如:
|
||||
# default_prompt = "如果我之后想获取帮助,请你说“输入!help获取帮助”"
|
||||
# 这样用户在不知所措的时候机器人就会提示其输入!help获取帮助
|
||||
# 可参考 https://github.com/PlexPt/awesome-chatgpt-prompts-zh
|
||||
#
|
||||
# 如果需要多个情景预设,并在运行期间方便切换,请使用字典的形式填写,例如
|
||||
# default_prompt = {
|
||||
# "default": "如果我之后想获取帮助,请你说“输入!help获取帮助”",
|
||||
# "linux-terminal": "我想让你充当 Linux 终端。我将输入命令,您将回复终端应显示的内容。",
|
||||
# "en-dict": "我想让你充当英英词典,对于给出的英文单词,你要给出其中文意思以及英文解释,并且给出一个例句,此外不要有其他反馈。",
|
||||
# }
|
||||
#
|
||||
# 在使用期间即可通过命令:
|
||||
# !reset [名称]
|
||||
# 来使用指定的情景预设重置会话
|
||||
# 例如:
|
||||
# !reset linux-terminal
|
||||
# 若不指定名称,则使用默认情景预设
|
||||
#
|
||||
# 也可以使用命令:
|
||||
# !default <名称>
|
||||
# 将指定的情景预设设置为默认情景预设
|
||||
# 例如:
|
||||
# !default linux-terminal
|
||||
# 之后的会话重置时若不指定名称,则使用linux-terminal情景预设
|
||||
#
|
||||
# 还可以加载文件中的预设文字,使用方法请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E9%A2%84%E8%AE%BE%E6%96%87%E5%AD%97
|
||||
default_prompt = {
|
||||
"default": "如果用户之后想获取帮助,请你说“输入!help获取帮助”。",
|
||||
}
|
||||
|
||||
# 情景预设格式
|
||||
# 参考值:默认方式:normal | 完整情景:full_scenario
|
||||
# 默认方式 的格式为上述default_prompt中的内容,或prompts目录下的文件名
|
||||
# 完整情景方式 的格式为JSON,在scenario目录下的JSON文件中列出对话的每个回合,编写方法见scenario/default-template.json
|
||||
# 编写方法请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E9%A2%84%E8%AE%BE%E6%96%87%E5%AD%97full_scenario%E6%A8%A1%E5%BC%8F
|
||||
preset_mode = "normal"
|
||||
|
||||
# 群内响应规则
|
||||
# 符合此消息的群内消息即使不包含at机器人也会响应
|
||||
# 支持消息前缀匹配及正则表达式匹配
|
||||
# 支持设置是否响应at消息、随机响应概率
|
||||
# 注意:由消息前缀(prefix)匹配的消息中将会删除此前缀,正则表达式(regexp)匹配的消息不会删除匹配的部分
|
||||
# 前缀匹配优先级高于正则表达式匹配
|
||||
# 正则表达式简明教程:https://www.runoob.com/regexp/regexp-tutorial.html
|
||||
#
|
||||
# 支持针对不同群设置不同的响应规则,例如:
|
||||
# response_rules = {
|
||||
# "default": {
|
||||
# "at": True,
|
||||
# "prefix": ["/ai", "!ai", "!ai", "ai"],
|
||||
# "regexp": [],
|
||||
# "random_rate": 0.0,
|
||||
# },
|
||||
# "12345678": {
|
||||
# "at": False,
|
||||
# "prefix": ["/ai", "!ai", "!ai", "ai"],
|
||||
# "regexp": [],
|
||||
# "random_rate": 0.0,
|
||||
# },
|
||||
# }
|
||||
#
|
||||
# 以上设置将会在群号为12345678的群中关闭at响应
|
||||
# 未单独设置的群将使用default规则
|
||||
response_rules = {
|
||||
"default": {
|
||||
"at": True, # 是否响应at机器人的消息
|
||||
"prefix": ["/ai", "!ai", "!ai", "ai"],
|
||||
"regexp": [], # "为什么.*", "怎么?样.*", "怎么.*", "如何.*", "[Hh]ow to.*", "[Ww]hy not.*", "[Ww]hat is.*", ".*怎么办", ".*咋办"
|
||||
"random_rate": 0.0, # 随机响应概率,0.0-1.0,0.0为不随机响应,1.0为响应所有消息, 仅在前几项判断不通过时生效
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# 消息忽略规则
|
||||
# 适用于私聊及群聊
|
||||
# 符合此规则的消息将不会被响应
|
||||
# 支持消息前缀匹配及正则表达式匹配
|
||||
# 此设置优先级高于response_rules
|
||||
# 用以过滤mirai等其他层级的命令
|
||||
# @see https://github.com/RockChinQ/QChatGPT/issues/165
|
||||
ignore_rules = {
|
||||
"prefix": ["/"],
|
||||
"regexp": []
|
||||
}
|
||||
|
||||
# 是否检查收到的消息中是否包含敏感词
|
||||
# 若收到的消息无法通过下方指定的敏感词检查策略,则发送提示信息
|
||||
income_msg_check = False
|
||||
|
||||
# 敏感词过滤开关,以同样数量的*代替敏感词回复
|
||||
# 请在sensitive.json中添加敏感词
|
||||
sensitive_word_filter = True
|
||||
|
||||
# 是否启用百度云内容安全审核
|
||||
# 注册方式查看 https://cloud.baidu.com/doc/ANTIPORN/s/Wkhu9d5iy
|
||||
baidu_check = False
|
||||
|
||||
# 百度云API_KEY 24位英文数字字符串
|
||||
baidu_api_key = ""
|
||||
|
||||
# 百度云SECRET_KEY 32位的英文数字字符串
|
||||
baidu_secret_key = ""
|
||||
|
||||
# 不合规消息自定义返回
|
||||
inappropriate_message_tips = "[百度云]请珍惜机器人,当前返回内容不合规"
|
||||
|
||||
# 启动时是否发送赞赏码
|
||||
# 仅当使用量已经超过2048字时发送
|
||||
encourage_sponsor_at_start = True
|
||||
|
||||
# 每次向OpenAI接口发送对话记录上下文的字符数
|
||||
# 最大不超过(4096 - max_tokens)个字符,max_tokens为下方completion_api_params中的max_tokens
|
||||
# 注意:较大的prompt_submit_length会导致OpenAI账户额度消耗更快
|
||||
prompt_submit_length = 3072
|
||||
|
||||
# 是否在token超限报错时自动重置会话
|
||||
# 可在tips.py中编辑提示语
|
||||
auto_reset = True
|
||||
|
||||
# OpenAI补全API的参数
|
||||
# 请在下方填写模型,程序自动选择接口
|
||||
# 模型文档:https://platform.openai.com/docs/models
|
||||
# 现已支持的模型有:
|
||||
#
|
||||
# ChatCompletions 接口:
|
||||
# # GPT 4 系列
|
||||
# "gpt-4-1106-preview",
|
||||
# "gpt-4-vision-preview",
|
||||
# "gpt-4",
|
||||
# "gpt-4-32k",
|
||||
# "gpt-4-0613",
|
||||
# "gpt-4-32k-0613",
|
||||
# "gpt-4-0314", # legacy
|
||||
# "gpt-4-32k-0314", # legacy
|
||||
# # GPT 3.5 系列
|
||||
# "gpt-3.5-turbo-1106",
|
||||
# "gpt-3.5-turbo",
|
||||
# "gpt-3.5-turbo-16k",
|
||||
# "gpt-3.5-turbo-0613", # legacy
|
||||
# "gpt-3.5-turbo-16k-0613", # legacy
|
||||
# "gpt-3.5-turbo-0301", # legacy
|
||||
#
|
||||
# Completions接口:
|
||||
# "gpt-3.5-turbo-instruct",
|
||||
#
|
||||
# 具体请查看OpenAI的文档: https://beta.openai.com/docs/api-reference/completions/create
|
||||
# 请将内容修改到config.py中,请勿修改config-template.py
|
||||
#
|
||||
# 支持通过 One API 接入多种模型,请在上方的openai_config中设置One API的代理地址,
|
||||
# 并在此填写您要使用的模型名称,详细请参考:https://github.com/songquanpeng/one-api
|
||||
#
|
||||
# 支持的 One API 模型:
|
||||
# "SparkDesk",
|
||||
# "chatglm_pro",
|
||||
# "chatglm_std",
|
||||
# "chatglm_lite",
|
||||
# "qwen-v1",
|
||||
# "qwen-plus-v1",
|
||||
# "ERNIE-Bot",
|
||||
# "ERNIE-Bot-turbo",
|
||||
# "gemini-pro",
|
||||
completion_api_params = {
|
||||
"model": "gpt-3.5-turbo",
|
||||
"temperature": 0.9, # 数值越低得到的回答越理性,取值范围[0, 1]
|
||||
}
|
||||
|
||||
# OpenAI的Image API的参数
|
||||
# 具体请查看OpenAI的文档: https://platform.openai.com/docs/api-reference/images/create
|
||||
image_api_params = {
|
||||
"model": "dall-e-2", # 默认使用 dall-e-2 模型,也可以改为 dall-e-3
|
||||
# 图片尺寸
|
||||
# dall-e-2 模型支持 256x256, 512x512, 1024x1024
|
||||
# dall-e-3 模型支持 1024x1024, 1792x1024, 1024x1792
|
||||
"size": "256x256",
|
||||
}
|
||||
|
||||
# 跟踪函数调用
|
||||
# 为True时,在每次GPT进行Function Calling时都会输出发送一条回复给用户
|
||||
# 同时,一次提问内所有的Function Calling和普通回复消息都会单独发送给用户
|
||||
trace_function_calls = False
|
||||
|
||||
# 群内回复消息时是否引用原消息
|
||||
quote_origin = False
|
||||
|
||||
# 群内回复消息时是否at发送者
|
||||
at_sender = False
|
||||
|
||||
# 回复绘图时是否包含图片描述
|
||||
include_image_description = True
|
||||
|
||||
# 消息处理的超时时间,单位为秒
|
||||
process_message_timeout = 120
|
||||
|
||||
# 回复消息时是否显示[GPT]前缀
|
||||
show_prefix = False
|
||||
|
||||
# 回复前的强制延迟时间,降低机器人被腾讯风控概率
|
||||
# *此机制对命令和消息、私聊及群聊均生效
|
||||
# 每次处理时从以下的范围取一个随机秒数,
|
||||
# 当此次消息处理时间低于此秒数时,将会强制延迟至此秒数
|
||||
# 例如:[1.5, 3],则每次处理时会随机取一个1.5-3秒的随机数,若处理时间低于此随机数,则强制延迟至此随机秒数
|
||||
# 若您不需要此功能,请将force_delay_range设置为[0, 0]
|
||||
force_delay_range = [0, 0]
|
||||
|
||||
# 应用长消息处理策略的阈值
|
||||
# 当回复消息长度超过此值时,将使用长消息处理策略
|
||||
blob_message_threshold = 256
|
||||
|
||||
# 长消息处理策略
|
||||
# - "image": 将长消息转换为图片发送
|
||||
# - "forward": 将长消息转换为转发消息组件发送
|
||||
blob_message_strategy = "forward"
|
||||
|
||||
# 允许等待
|
||||
# 同一会话内,是否等待上一条消息处理完成后再处理下一条消息
|
||||
# 若设置为False,若上一条未处理完时收到了新消息,将会丢弃新消息
|
||||
# 丢弃消息时的提示信息可以在tips.py中修改
|
||||
wait_last_done = True
|
||||
|
||||
# 文字转图片时使用的字体文件路径
|
||||
# 当策略为"image"时生效
|
||||
# 若在Windows系统下,程序会自动使用Windows自带的微软雅黑字体
|
||||
# 若未填写或不存在且不是Windows,将禁用文字转图片功能,改为使用转发消息组件
|
||||
font_path = ""
|
||||
|
||||
# 消息处理超时重试次数
|
||||
retry_times = 3
|
||||
|
||||
# 消息处理出错时是否向用户隐藏错误详细信息
|
||||
# 设置为True时,仅向管理员发送错误详细信息
|
||||
# 设置为False时,向用户及管理员发送错误详细信息
|
||||
hide_exce_info_to_user = False
|
||||
|
||||
# 每个会话的过期时间,单位为秒
|
||||
# 默认值20分钟
|
||||
session_expire_time = 1200
|
||||
|
||||
# 会话限速
|
||||
# 单会话内每分钟可进行的对话次数
|
||||
# 若不需要限速,可以设置为一个很大的值
|
||||
# 默认值60次,基本上不会触发限速
|
||||
#
|
||||
# 若要设置针对某特定群的限速,请使用如下格式:
|
||||
# {
|
||||
# "group_<群号>": 60,
|
||||
# "default": 60,
|
||||
# }
|
||||
# 若要设置针对某特定用户私聊的限速,请使用如下格式:
|
||||
# {
|
||||
# "person_<用户QQ>": 60,
|
||||
# "default": 60,
|
||||
# }
|
||||
# 同时设置多个群和私聊的限速,示例:
|
||||
# {
|
||||
# "group_12345678": 60,
|
||||
# "group_87654321": 60,
|
||||
# "person_234567890": 60,
|
||||
# "person_345678901": 60,
|
||||
# "default": 60,
|
||||
# }
|
||||
#
|
||||
# 注意: 未指定的都使用default的限速值,default不可删除
|
||||
rate_limitation = {
|
||||
"default": 60,
|
||||
}
|
||||
|
||||
# 会话限速策略
|
||||
# - "wait": 每次对话获取到回复时,等待一定时间再发送回复,保证其不会超过限速均值
|
||||
# - "drop": 此分钟内,若对话次数超过限速次数,则丢弃之后的对话,每自然分钟重置
|
||||
rate_limit_strategy = "drop"
|
||||
|
||||
# 是否在启动时进行依赖库更新
|
||||
upgrade_dependencies = False
|
||||
|
||||
# 是否上报统计信息
|
||||
# 用于统计机器人的使用情况,数据不公开,不会收集任何敏感信息。
|
||||
# 仅实例识别UUID、上报时间、字数使用量、绘图使用量、插件使用情况、用户信息,其他信息不会上报
|
||||
report_usage = True
|
||||
|
||||
# 日志级别
|
||||
logging_level = logging.INFO
|
||||
@@ -1,18 +0,0 @@
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
qchatgpt:
|
||||
image: rockchin/qchatgpt:latest
|
||||
volumes:
|
||||
- ./config.py:/QChatGPT/config.py
|
||||
- ./banlist.py:/QChatGPT/banlist.py
|
||||
- ./cmdpriv.json:/QChatGPT/cmdpriv.json
|
||||
- ./sensitive.json:/QChatGPT/sensitive.json
|
||||
- ./tips.py:/QChatGPT/tips.py
|
||||
# 目录映射
|
||||
- ./plugins:/QChatGPT/plugins
|
||||
- ./scenario:/QChatGPT/scenario
|
||||
- ./temp:/QChatGPT/temp
|
||||
- ./logs:/QChatGPT/logs
|
||||
restart: always
|
||||
# 根据具体环境配置网络
|
||||
629
docker/README_K8S.md
Normal file
@@ -0,0 +1,629 @@
|
||||
# LangBot Kubernetes 部署指南 / Kubernetes Deployment Guide
|
||||
|
||||
[简体中文](#简体中文) | [English](#english)
|
||||
|
||||
---
|
||||
|
||||
## 简体中文
|
||||
|
||||
### 概述
|
||||
|
||||
本指南提供了在 Kubernetes 集群中部署 LangBot 的完整步骤。Kubernetes 部署配置基于 `docker-compose.yaml`,适用于生产环境的容器化部署。
|
||||
|
||||
### 前置要求
|
||||
|
||||
- Kubernetes 集群(版本 1.19+)
|
||||
- `kubectl` 命令行工具已配置并可访问集群
|
||||
- 集群中有可用的存储类(StorageClass)用于持久化存储(可选但推荐)
|
||||
- 至少 2 vCPU 和 4GB RAM 的可用资源
|
||||
|
||||
### 架构说明
|
||||
|
||||
Kubernetes 部署包含以下组件:
|
||||
|
||||
1. **langbot**: 主应用服务
|
||||
- 提供 Web UI(端口 5300)
|
||||
- 处理平台 webhook(端口 2280-2290)
|
||||
- 数据持久化卷
|
||||
|
||||
2. **langbot-plugin-runtime**: 插件运行时服务
|
||||
- WebSocket 通信(端口 5400)
|
||||
- 插件数据持久化卷
|
||||
|
||||
3. **持久化存储**:
|
||||
- `langbot-data`: LangBot 主数据
|
||||
- `langbot-plugins`: 插件文件
|
||||
- `langbot-plugin-runtime-data`: 插件运行时数据
|
||||
|
||||
### 快速开始
|
||||
|
||||
#### 1. 下载部署文件
|
||||
|
||||
```bash
|
||||
# 克隆仓库
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
|
||||
# 或直接下载 kubernetes.yaml
|
||||
wget https://raw.githubusercontent.com/langbot-app/LangBot/main/docker/kubernetes.yaml
|
||||
```
|
||||
|
||||
#### 2. 部署到 Kubernetes
|
||||
|
||||
```bash
|
||||
# 应用所有配置
|
||||
kubectl apply -f kubernetes.yaml
|
||||
|
||||
# 检查部署状态
|
||||
kubectl get all -n langbot
|
||||
|
||||
# 查看 Pod 日志
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
```
|
||||
|
||||
#### 3. 访问 LangBot
|
||||
|
||||
默认情况下,LangBot 服务使用 ClusterIP 类型,只能在集群内部访问。您可以选择以下方式之一来访问:
|
||||
|
||||
**选项 A: 端口转发(推荐用于测试)**
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
然后访问 http://localhost:5300
|
||||
|
||||
**选项 B: NodePort(适用于开发环境)**
|
||||
|
||||
编辑 `kubernetes.yaml`,取消注释 NodePort Service 部分,然后:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# 获取节点 IP
|
||||
kubectl get nodes -o wide
|
||||
# 访问 http://<NODE_IP>:30300
|
||||
```
|
||||
|
||||
**选项 C: LoadBalancer(适用于云环境)**
|
||||
|
||||
编辑 `kubernetes.yaml`,取消注释 LoadBalancer Service 部分,然后:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# 获取外部 IP
|
||||
kubectl get svc -n langbot langbot-loadbalancer
|
||||
# 访问 http://<EXTERNAL_IP>
|
||||
```
|
||||
|
||||
**选项 D: Ingress(推荐用于生产环境)**
|
||||
|
||||
确保集群中已安装 Ingress Controller(如 nginx-ingress),然后:
|
||||
|
||||
1. 编辑 `kubernetes.yaml` 中的 Ingress 配置
|
||||
2. 修改域名为您的实际域名
|
||||
3. 应用配置:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# 访问 http://langbot.yourdomain.com
|
||||
```
|
||||
|
||||
### 配置说明
|
||||
|
||||
#### 环境变量
|
||||
|
||||
在 `ConfigMap` 中配置环境变量:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: langbot-config
|
||||
namespace: langbot
|
||||
data:
|
||||
TZ: "Asia/Shanghai" # 修改为您的时区
|
||||
```
|
||||
|
||||
#### 存储配置
|
||||
|
||||
默认使用动态存储分配。如果您有特定的 StorageClass,请在 PVC 中指定:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
storageClassName: your-storage-class-name
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
```
|
||||
|
||||
#### 资源限制
|
||||
|
||||
根据您的需求调整资源限制:
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "2000m"
|
||||
```
|
||||
|
||||
### 常用操作
|
||||
|
||||
#### 查看日志
|
||||
|
||||
```bash
|
||||
# 查看 LangBot 主服务日志
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
|
||||
# 查看插件运行时日志
|
||||
kubectl logs -n langbot -l app=langbot-plugin-runtime -f
|
||||
```
|
||||
|
||||
#### 重启服务
|
||||
|
||||
```bash
|
||||
# 重启 LangBot
|
||||
kubectl rollout restart deployment/langbot -n langbot
|
||||
|
||||
# 重启插件运行时
|
||||
kubectl rollout restart deployment/langbot-plugin-runtime -n langbot
|
||||
```
|
||||
|
||||
#### 更新镜像
|
||||
|
||||
```bash
|
||||
# 更新到最新版本
|
||||
kubectl set image deployment/langbot -n langbot langbot=rockchin/langbot:latest
|
||||
kubectl set image deployment/langbot-plugin-runtime -n langbot langbot-plugin-runtime=rockchin/langbot:latest
|
||||
|
||||
# 检查更新状态
|
||||
kubectl rollout status deployment/langbot -n langbot
|
||||
```
|
||||
|
||||
#### 扩容(不推荐)
|
||||
|
||||
注意:由于 LangBot 使用 ReadWriteOnce 的持久化存储,不支持多副本扩容。如需高可用,请考虑使用 ReadWriteMany 存储或其他架构方案。
|
||||
|
||||
#### 备份数据
|
||||
|
||||
```bash
|
||||
# 备份 PVC 数据
|
||||
kubectl exec -n langbot -it <langbot-pod-name> -- tar czf /tmp/backup.tar.gz /app/data
|
||||
kubectl cp langbot/<langbot-pod-name>:/tmp/backup.tar.gz ./backup.tar.gz
|
||||
```
|
||||
|
||||
### 卸载
|
||||
|
||||
```bash
|
||||
# 删除所有资源(保留 PVC)
|
||||
kubectl delete deployment,service,configmap -n langbot --all
|
||||
|
||||
# 删除 PVC(会删除数据)
|
||||
kubectl delete pvc -n langbot --all
|
||||
|
||||
# 删除命名空间
|
||||
kubectl delete namespace langbot
|
||||
```
|
||||
|
||||
### 故障排查
|
||||
|
||||
#### Pod 无法启动
|
||||
|
||||
```bash
|
||||
# 查看 Pod 状态
|
||||
kubectl get pods -n langbot
|
||||
|
||||
# 查看详细信息
|
||||
kubectl describe pod -n langbot <pod-name>
|
||||
|
||||
# 查看事件
|
||||
kubectl get events -n langbot --sort-by='.lastTimestamp'
|
||||
```
|
||||
|
||||
#### 存储问题
|
||||
|
||||
```bash
|
||||
# 检查 PVC 状态
|
||||
kubectl get pvc -n langbot
|
||||
|
||||
# 检查 PV
|
||||
kubectl get pv
|
||||
```
|
||||
|
||||
#### 网络访问问题
|
||||
|
||||
```bash
|
||||
# 检查 Service
|
||||
kubectl get svc -n langbot
|
||||
|
||||
# 检查端口转发
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
### 生产环境建议
|
||||
|
||||
1. **使用特定版本标签**:避免使用 `latest` 标签,使用具体版本号如 `rockchin/langbot:v1.0.0`
|
||||
2. **配置资源限制**:根据实际负载调整 CPU 和内存限制
|
||||
3. **使用 Ingress + TLS**:配置 HTTPS 访问和证书管理
|
||||
4. **配置监控和告警**:集成 Prometheus、Grafana 等监控工具
|
||||
5. **定期备份**:配置自动备份策略保护数据
|
||||
6. **使用专用 StorageClass**:为生产环境配置高性能存储
|
||||
7. **配置亲和性规则**:确保 Pod 调度到合适的节点
|
||||
|
||||
### 高级配置
|
||||
|
||||
#### 使用 Secrets 管理敏感信息
|
||||
|
||||
如果需要配置 API 密钥等敏感信息:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: langbot-secrets
|
||||
namespace: langbot
|
||||
type: Opaque
|
||||
data:
|
||||
api_key: <base64-encoded-value>
|
||||
```
|
||||
|
||||
然后在 Deployment 中引用:
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: API_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: langbot-secrets
|
||||
key: api_key
|
||||
```
|
||||
|
||||
#### 配置水平自动扩缩容(HPA)
|
||||
|
||||
注意:需要确保使用 ReadWriteMany 存储类型
|
||||
|
||||
```yaml
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: langbot-hpa
|
||||
namespace: langbot
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: langbot
|
||||
minReplicas: 1
|
||||
maxReplicas: 3
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: 70
|
||||
```
|
||||
|
||||
### 参考资源
|
||||
|
||||
- [LangBot 官方文档](https://docs.langbot.app)
|
||||
- [Docker 部署文档](https://docs.langbot.app/zh/deploy/langbot/docker.html)
|
||||
- [Kubernetes 官方文档](https://kubernetes.io/docs/)
|
||||
|
||||
---
|
||||
|
||||
## English
|
||||
|
||||
### Overview
|
||||
|
||||
This guide provides complete steps for deploying LangBot in a Kubernetes cluster. The Kubernetes deployment configuration is based on `docker-compose.yaml` and is suitable for production containerized deployments.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Kubernetes cluster (version 1.19+)
|
||||
- `kubectl` command-line tool configured with cluster access
|
||||
- Available StorageClass in the cluster for persistent storage (optional but recommended)
|
||||
- At least 2 vCPU and 4GB RAM of available resources
|
||||
|
||||
### Architecture
|
||||
|
||||
The Kubernetes deployment includes the following components (an illustrative Service sketch follows the list):
|
||||
|
||||
1. **langbot**: Main application service
|
||||
- Provides Web UI (port 5300)
|
||||
- Handles platform webhooks (ports 2280-2290)
|
||||
- Data persistence volume
|
||||
|
||||
2. **langbot-plugin-runtime**: Plugin runtime service
|
||||
- WebSocket communication (port 5400)
|
||||
- Plugin data persistence volume
|
||||
|
||||
3. **Persistent Storage**:
|
||||
- `langbot-data`: LangBot main data
|
||||
- `langbot-plugins`: Plugin files
|
||||
- `langbot-plugin-runtime-data`: Plugin runtime data
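
As a rough illustration (assuming the manifest mirrors `docker-compose.yaml`), the main `langbot` Service might expose the Web UI port as in the sketch below; the webhook ports 2280-2290 and the plugin-runtime port 5400 would be declared the same way and are omitted here for brevity:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: langbot
  namespace: langbot
spec:
  type: ClusterIP        # default type; see the access options below for NodePort/LoadBalancer/Ingress
  selector:
    app: langbot         # matches the Pod label used elsewhere in this guide
  ports:
    - name: webui
      port: 5300         # Web UI port
      targetPort: 5300
```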
|
||||
|
||||
### Quick Start
|
||||
|
||||
#### 1. Download Deployment Files
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
|
||||
# Or download kubernetes.yaml directly
|
||||
wget https://raw.githubusercontent.com/langbot-app/LangBot/main/docker/kubernetes.yaml
|
||||
```
|
||||
|
||||
#### 2. Deploy to Kubernetes
|
||||
|
||||
```bash
|
||||
# Apply all configurations
|
||||
kubectl apply -f kubernetes.yaml
|
||||
|
||||
# Check deployment status
|
||||
kubectl get all -n langbot
|
||||
|
||||
# View Pod logs
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
```
|
||||
|
||||
#### 3. Access LangBot
|
||||
|
||||
By default, the LangBot Service uses the ClusterIP type and is accessible only within the cluster. Choose one of the following methods to access it:
|
||||
|
||||
**Option A: Port Forwarding (Recommended for testing)**
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
Then visit http://localhost:5300
|
||||
|
||||
**Option B: NodePort (Suitable for development)**
|
||||
|
||||
Edit `kubernetes.yaml`, uncomment the NodePort Service section, then:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# Get node IP
|
||||
kubectl get nodes -o wide
|
||||
# Visit http://<NODE_IP>:30300
|
||||
```
|
||||
|
||||
**Option C: LoadBalancer (Suitable for cloud environments)**
|
||||
|
||||
Edit `kubernetes.yaml`, uncomment the LoadBalancer Service section, then:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# Get external IP
|
||||
kubectl get svc -n langbot langbot-loadbalancer
|
||||
# Visit http://<EXTERNAL_IP>
|
||||
```
|
||||
|
||||
**Option D: Ingress (Recommended for production)**
|
||||
|
||||
Ensure an Ingress Controller (e.g., nginx-ingress) is installed in the cluster, then:
|
||||
|
||||
1. Edit the Ingress configuration in `kubernetes.yaml`
|
||||
2. Change the domain to your actual domain
|
||||
3. Apply configuration:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# Visit http://langbot.yourdomain.com
|
||||
```
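
For reference, the Ingress section being edited might look roughly like the sketch below; the host name and ingress class are placeholders, and the actual manifest in `kubernetes.yaml` may differ:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: langbot
  namespace: langbot
spec:
  ingressClassName: nginx            # assumes nginx-ingress is installed in the cluster
  rules:
    - host: langbot.yourdomain.com   # replace with your actual domain
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: langbot
                port:
                  number: 5300       # LangBot Web UI port
```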
|
||||
|
||||
### Configuration
|
||||
|
||||
#### Environment Variables
|
||||
|
||||
Configure environment variables in ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: langbot-config
|
||||
namespace: langbot
|
||||
data:
|
||||
TZ: "Asia/Shanghai" # Change to your timezone
|
||||
```
|
||||
|
||||
#### Storage Configuration
|
||||
|
||||
Uses dynamic storage provisioning by default. If you have a specific StorageClass, specify it in PVC:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
storageClassName: your-storage-class-name
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
```
|
||||
|
||||
#### Resource Limits
|
||||
|
||||
Adjust resource limits based on your needs:
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "2000m"
|
||||
```
|
||||
|
||||
### Common Operations
|
||||
|
||||
#### View Logs
|
||||
|
||||
```bash
|
||||
# View LangBot main service logs
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
|
||||
# View plugin runtime logs
|
||||
kubectl logs -n langbot -l app=langbot-plugin-runtime -f
|
||||
```
|
||||
|
||||
#### Restart Services
|
||||
|
||||
```bash
|
||||
# Restart LangBot
|
||||
kubectl rollout restart deployment/langbot -n langbot
|
||||
|
||||
# Restart plugin runtime
|
||||
kubectl rollout restart deployment/langbot-plugin-runtime -n langbot
|
||||
```
|
||||
|
||||
#### Update Images
|
||||
|
||||
```bash
|
||||
# Update to latest version
|
||||
kubectl set image deployment/langbot -n langbot langbot=rockchin/langbot:latest
|
||||
kubectl set image deployment/langbot-plugin-runtime -n langbot langbot-plugin-runtime=rockchin/langbot:latest
|
||||
|
||||
# Check update status
|
||||
kubectl rollout status deployment/langbot -n langbot
|
||||
```
|
||||
|
||||
#### Scaling (Not Recommended)
|
||||
|
||||
Note: Because LangBot uses ReadWriteOnce persistent storage, multi-replica scaling is not supported. For high availability, consider ReadWriteMany storage or an alternative architecture; a sketch of an RWX PersistentVolumeClaim follows below.
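
As an illustration only, a claim requesting `ReadWriteMany` might look like the sketch below; it assumes your cluster provides a storage class that supports RWX volumes (the class name `nfs-client` is hypothetical):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: langbot-data
  namespace: langbot
spec:
  storageClassName: nfs-client   # hypothetical RWX-capable storage class (e.g. NFS or CephFS based)
  accessModes:
    - ReadWriteMany              # lets the volume be mounted by multiple replicas
  resources:
    requests:
      storage: 10Gi
```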
|
||||
|
||||
#### Backup Data
|
||||
|
||||
```bash
|
||||
# Backup PVC data
|
||||
kubectl exec -n langbot -it <langbot-pod-name> -- tar czf /tmp/backup.tar.gz /app/data
|
||||
kubectl cp langbot/<langbot-pod-name>:/tmp/backup.tar.gz ./backup.tar.gz
|
||||
```
|
||||
|
||||
### Uninstall
|
||||
|
||||
```bash
|
||||
# Delete all resources (keep PVCs)
|
||||
kubectl delete deployment,service,configmap -n langbot --all
|
||||
|
||||
# Delete PVCs (will delete data)
|
||||
kubectl delete pvc -n langbot --all
|
||||
|
||||
# Delete namespace
|
||||
kubectl delete namespace langbot
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
#### Pods Not Starting
|
||||
|
||||
```bash
|
||||
# Check Pod status
|
||||
kubectl get pods -n langbot
|
||||
|
||||
# View detailed information
|
||||
kubectl describe pod -n langbot <pod-name>
|
||||
|
||||
# View events
|
||||
kubectl get events -n langbot --sort-by='.lastTimestamp'
|
||||
```
|
||||
|
||||
#### Storage Issues
|
||||
|
||||
```bash
|
||||
# Check PVC status
|
||||
kubectl get pvc -n langbot
|
||||
|
||||
# Check PV
|
||||
kubectl get pv
|
||||
```
|
||||
|
||||
#### Network Access Issues
|
||||
|
||||
```bash
|
||||
# Check Service
|
||||
kubectl get svc -n langbot
|
||||
|
||||
# Test port forwarding
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
### Production Recommendations
|
||||
|
||||
1. **Use specific version tags**: Avoid using `latest` tag, use specific version like `rockchin/langbot:v1.0.0`
|
||||
2. **Configure resource limits**: Adjust CPU and memory limits based on actual load
|
||||
3. **Use Ingress + TLS**: Configure HTTPS access and certificate management
|
||||
4. **Configure monitoring and alerts**: Integrate monitoring tools like Prometheus, Grafana
|
||||
5. **Regular backups**: Configure automated backup strategy to protect data
|
||||
6. **Use dedicated StorageClass**: Configure high-performance storage for production
|
||||
7. **Configure affinity rules**: Ensure Pods are scheduled to appropriate nodes (a minimal sketch follows this list)
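
A minimal sketch combining recommendations 1 and 7, with a pinned image tag and a node affinity rule, is shown below; the node label `workload-type: langbot` is purely illustrative:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: langbot
  namespace: langbot
spec:
  replicas: 1
  selector:
    matchLabels:
      app: langbot
  template:
    metadata:
      labels:
        app: langbot
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: workload-type           # hypothetical node label
                    operator: In
                    values:
                      - langbot
      containers:
        - name: langbot
          image: rockchin/langbot:v1.0.0         # pin a specific version instead of latest
```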
|
||||
|
||||
### Advanced Configuration
|
||||
|
||||
#### Using Secrets for Sensitive Information
|
||||
|
||||
If you need to configure sensitive information like API keys:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: langbot-secrets
|
||||
namespace: langbot
|
||||
type: Opaque
|
||||
data:
|
||||
api_key: <base64-encoded-value>
|
||||
```
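
If you prefer not to base64-encode values by hand, the same Secret can be written with `stringData`, which accepts plain text and is encoded on write (the key value below is a placeholder):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: langbot-secrets
  namespace: langbot
type: Opaque
stringData:
  api_key: sk-your-api-key-here   # placeholder value
```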
|
||||
|
||||
Then reference in Deployment:
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: API_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: langbot-secrets
|
||||
key: api_key
|
||||
```

#### Configure Horizontal Pod Autoscaling (HPA)

Note: Requires ReadWriteMany storage type.

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: langbot-hpa
  namespace: langbot
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: langbot
  minReplicas: 1
  maxReplicas: 3
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
```

### References

- [LangBot Official Documentation](https://docs.langbot.app)
- [Docker Deployment Guide](https://docs.langbot.app/zh/deploy/langbot/docker.html)
- [Kubernetes Official Documentation](https://kubernetes.io/docs/)
docker/deploy-k8s-test.sh (new executable file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
#!/bin/bash
|
||||
# Quick test script for LangBot Kubernetes deployment
|
||||
# This script helps you test the Kubernetes deployment locally
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 LangBot Kubernetes Deployment Test Script"
|
||||
echo "=============================================="
|
||||
echo ""
|
||||
|
||||
# Check for kubectl
|
||||
if ! command -v kubectl &> /dev/null; then
|
||||
echo "❌ kubectl is not installed. Please install kubectl first."
|
||||
echo "Visit: https://kubernetes.io/docs/tasks/tools/"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ kubectl is installed"
|
||||
|
||||
# Check if kubectl can connect to a cluster
|
||||
if ! kubectl cluster-info &> /dev/null; then
|
||||
echo ""
|
||||
echo "⚠️ No Kubernetes cluster found."
|
||||
echo ""
|
||||
echo "To test locally, you can use:"
|
||||
echo " - kind: https://kind.sigs.k8s.io/"
|
||||
echo " - minikube: https://minikube.sigs.k8s.io/"
|
||||
echo " - k3s: https://k3s.io/"
|
||||
echo ""
|
||||
echo "Example with kind:"
|
||||
echo " kind create cluster --name langbot-test"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Connected to Kubernetes cluster"
|
||||
kubectl cluster-info
|
||||
echo ""
|
||||
|
||||
# Ask user to confirm
|
||||
read -p "Do you want to deploy LangBot to this cluster? (y/N) " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Deployment cancelled."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "📦 Deploying LangBot..."
|
||||
kubectl apply -f kubernetes.yaml
|
||||
|
||||
echo ""
|
||||
echo "⏳ Waiting for pods to be ready..."
|
||||
kubectl wait --for=condition=ready pod -l app=langbot -n langbot --timeout=300s
|
||||
kubectl wait --for=condition=ready pod -l app=langbot-plugin-runtime -n langbot --timeout=300s
|
||||
|
||||
echo ""
|
||||
echo "✅ Deployment complete!"
|
||||
echo ""
|
||||
echo "📊 Deployment status:"
|
||||
kubectl get all -n langbot
|
||||
|
||||
echo ""
|
||||
echo "🌐 To access LangBot Web UI, run:"
|
||||
echo " kubectl port-forward -n langbot svc/langbot 5300:5300"
|
||||
echo ""
|
||||
echo "Then visit: http://localhost:5300"
|
||||
echo ""
|
||||
echo "📝 To view logs:"
|
||||
echo " kubectl logs -n langbot -l app=langbot -f"
|
||||
echo ""
|
||||
echo "🗑️ To uninstall:"
|
||||
echo " kubectl delete namespace langbot"
|
||||
echo ""
|
||||
docker/docker-compose.yaml (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
# Docker Compose configuration for LangBot
|
||||
# For Kubernetes deployment, see kubernetes.yaml and README_K8S.md
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
|
||||
langbot_plugin_runtime:
|
||||
image: rockchin/langbot:latest
|
||||
container_name: langbot_plugin_runtime
|
||||
platform: linux/amd64 # For Apple Silicon compatibility
|
||||
volumes:
|
||||
- ./data/plugins:/app/data/plugins
|
||||
ports:
|
||||
- 5401:5401
|
||||
restart: on-failure
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
command: ["uv", "run", "-m", "langbot_plugin.cli.__init__", "rt"]
|
||||
networks:
|
||||
- langbot_network
|
||||
|
||||
langbot:
|
||||
image: rockchin/langbot:latest
|
||||
container_name: langbot
|
||||
platform: linux/amd64 # For Apple Silicon compatibility
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
- ./plugins:/app/plugins
|
||||
restart: on-failure
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
- 5300:5300 # For web ui
|
||||
- 2280-2290:2280-2290 # For platform webhook
|
||||
networks:
|
||||
- langbot_network
|
||||
|
||||
networks:
|
||||
langbot_network:
|
||||
driver: bridge
|
||||
docker/kubernetes.yaml (new file, 400 lines)
@@ -0,0 +1,400 @@
|
||||
# Kubernetes Deployment for LangBot
|
||||
# This file provides Kubernetes deployment manifests for LangBot based on docker-compose.yaml
|
||||
#
|
||||
# Usage:
|
||||
# kubectl apply -f kubernetes.yaml
|
||||
#
|
||||
# Prerequisites:
|
||||
# - A Kubernetes cluster (1.19+)
|
||||
# - kubectl configured to communicate with your cluster
|
||||
# - (Optional) A StorageClass for dynamic volume provisioning
|
||||
#
|
||||
# Components:
|
||||
# - Namespace: langbot
|
||||
# - PersistentVolumeClaims for data persistence
|
||||
# - Deployments for langbot and langbot_plugin_runtime
|
||||
# - Services for network access
|
||||
# - ConfigMap for timezone configuration
|
||||
|
||||
---
|
||||
# Namespace
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: langbot
|
||||
labels:
|
||||
app: langbot
|
||||
|
||||
---
|
||||
# PersistentVolumeClaim for LangBot data
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: langbot-data
|
||||
namespace: langbot
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
# Uncomment and modify if you have a specific StorageClass
|
||||
# storageClassName: your-storage-class
|
||||
|
||||
---
|
||||
# PersistentVolumeClaim for LangBot plugins
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: langbot-plugins
|
||||
namespace: langbot
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
||||
# Uncomment and modify if you have a specific StorageClass
|
||||
# storageClassName: your-storage-class
|
||||
|
||||
---
|
||||
# PersistentVolumeClaim for Plugin Runtime data
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: langbot-plugin-runtime-data
|
||||
namespace: langbot
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
||||
# Uncomment and modify if you have a specific StorageClass
|
||||
# storageClassName: your-storage-class
|
||||
|
||||
---
|
||||
# ConfigMap for environment configuration
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: langbot-config
|
||||
namespace: langbot
|
||||
data:
|
||||
TZ: "Asia/Shanghai"
|
||||
PLUGIN__RUNTIME_WS_URL: "ws://langbot-plugin-runtime:5400/control/ws"
|
||||
|
||||
---
|
||||
# Deployment for LangBot Plugin Runtime
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: langbot-plugin-runtime
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot-plugin-runtime
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: langbot-plugin-runtime
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: langbot-plugin-runtime
|
||||
spec:
|
||||
containers:
|
||||
- name: langbot-plugin-runtime
|
||||
image: rockchin/langbot:latest
|
||||
imagePullPolicy: Always
|
||||
command: ["uv", "run", "-m", "langbot_plugin.cli.__init__", "rt"]
|
||||
ports:
|
||||
- containerPort: 5400
|
||||
name: runtime
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: TZ
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: langbot-config
|
||||
key: TZ
|
||||
volumeMounts:
|
||||
- name: plugin-data
|
||||
mountPath: /app/data/plugins
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
# Liveness probe to restart container if it becomes unresponsive
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 5400
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
# Readiness probe to know when container is ready to accept traffic
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 5400
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
volumes:
|
||||
- name: plugin-data
|
||||
persistentVolumeClaim:
|
||||
claimName: langbot-plugin-runtime-data
|
||||
restartPolicy: Always
|
||||
|
||||
---
|
||||
# Service for LangBot Plugin Runtime
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: langbot-plugin-runtime
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot-plugin-runtime
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: langbot-plugin-runtime
|
||||
ports:
|
||||
- port: 5400
|
||||
targetPort: 5400
|
||||
protocol: TCP
|
||||
name: runtime
|
||||
|
||||
---
|
||||
# Deployment for LangBot
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: langbot
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: langbot
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: langbot
|
||||
spec:
|
||||
containers:
|
||||
- name: langbot
|
||||
image: rockchin/langbot:latest
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 5300
|
||||
name: web
|
||||
protocol: TCP
|
||||
- containerPort: 2280
|
||||
name: webhook-start
|
||||
protocol: TCP
|
||||
# Note: Kubernetes doesn't support port ranges directly in container ports
|
||||
# The webhook ports 2280-2290 are available, but we only expose the start of the range
|
||||
# If you need all ports exposed, consider using a Service with multiple port definitions
|
||||
env:
|
||||
- name: TZ
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: langbot-config
|
||||
key: TZ
|
||||
- name: PLUGIN__RUNTIME_WS_URL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: langbot-config
|
||||
key: PLUGIN__RUNTIME_WS_URL
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /app/data
|
||||
- name: plugins
|
||||
mountPath: /app/plugins
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "2000m"
|
||||
# Liveness probe to restart container if it becomes unresponsive
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 5300
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
# Readiness probe to know when container is ready to accept traffic
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 5300
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
volumes:
|
||||
- name: data
|
||||
persistentVolumeClaim:
|
||||
claimName: langbot-data
|
||||
- name: plugins
|
||||
persistentVolumeClaim:
|
||||
claimName: langbot-plugins
|
||||
restartPolicy: Always
|
||||
|
||||
---
|
||||
# Service for LangBot (ClusterIP for internal access)
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: langbot
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: langbot
|
||||
ports:
|
||||
- port: 5300
|
||||
targetPort: 5300
|
||||
protocol: TCP
|
||||
name: web
|
||||
- port: 2280
|
||||
targetPort: 2280
|
||||
protocol: TCP
|
||||
name: webhook-2280
|
||||
- port: 2281
|
||||
targetPort: 2281
|
||||
protocol: TCP
|
||||
name: webhook-2281
|
||||
- port: 2282
|
||||
targetPort: 2282
|
||||
protocol: TCP
|
||||
name: webhook-2282
|
||||
- port: 2283
|
||||
targetPort: 2283
|
||||
protocol: TCP
|
||||
name: webhook-2283
|
||||
- port: 2284
|
||||
targetPort: 2284
|
||||
protocol: TCP
|
||||
name: webhook-2284
|
||||
- port: 2285
|
||||
targetPort: 2285
|
||||
protocol: TCP
|
||||
name: webhook-2285
|
||||
- port: 2286
|
||||
targetPort: 2286
|
||||
protocol: TCP
|
||||
name: webhook-2286
|
||||
- port: 2287
|
||||
targetPort: 2287
|
||||
protocol: TCP
|
||||
name: webhook-2287
|
||||
- port: 2288
|
||||
targetPort: 2288
|
||||
protocol: TCP
|
||||
name: webhook-2288
|
||||
- port: 2289
|
||||
targetPort: 2289
|
||||
protocol: TCP
|
||||
name: webhook-2289
|
||||
- port: 2290
|
||||
targetPort: 2290
|
||||
protocol: TCP
|
||||
name: webhook-2290
|
||||
|
||||
---
|
||||
# Ingress for external access (Optional - requires Ingress Controller)
|
||||
# Uncomment and modify the following section if you want to expose LangBot via Ingress
|
||||
# apiVersion: networking.k8s.io/v1
|
||||
# kind: Ingress
|
||||
# metadata:
|
||||
# name: langbot-ingress
|
||||
# namespace: langbot
|
||||
# annotations:
|
||||
# # Uncomment and modify based on your ingress controller
|
||||
# # nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
# # cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||
# spec:
|
||||
# ingressClassName: nginx # Change based on your ingress controller
|
||||
# rules:
|
||||
# - host: langbot.yourdomain.com # Change to your domain
|
||||
# http:
|
||||
# paths:
|
||||
# - path: /
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: langbot
|
||||
# port:
|
||||
# number: 5300
|
||||
# # Uncomment for TLS/HTTPS
|
||||
# # tls:
|
||||
# # - hosts:
|
||||
# # - langbot.yourdomain.com
|
||||
# # secretName: langbot-tls
|
||||
|
||||
---
|
||||
# Service for LangBot with LoadBalancer (Alternative to Ingress)
|
||||
# Uncomment the following if you want to expose LangBot directly via LoadBalancer
|
||||
# This is useful in cloud environments (AWS, GCP, Azure, etc.)
|
||||
# apiVersion: v1
|
||||
# kind: Service
|
||||
# metadata:
|
||||
# name: langbot-loadbalancer
|
||||
# namespace: langbot
|
||||
# labels:
|
||||
# app: langbot
|
||||
# spec:
|
||||
# type: LoadBalancer
|
||||
# selector:
|
||||
# app: langbot
|
||||
# ports:
|
||||
# - port: 80
|
||||
# targetPort: 5300
|
||||
# protocol: TCP
|
||||
# name: web
|
||||
# - port: 2280
|
||||
# targetPort: 2280
|
||||
# protocol: TCP
|
||||
# name: webhook-start
|
||||
# # Add more webhook ports as needed
|
||||
|
||||
---
|
||||
# Service for LangBot with NodePort (Alternative for exposing service)
|
||||
# Uncomment if you want to expose LangBot via NodePort
|
||||
# This is useful for testing or when LoadBalancer is not available
|
||||
# apiVersion: v1
|
||||
# kind: Service
|
||||
# metadata:
|
||||
# name: langbot-nodeport
|
||||
# namespace: langbot
|
||||
# labels:
|
||||
# app: langbot
|
||||
# spec:
|
||||
# type: NodePort
|
||||
# selector:
|
||||
# app: langbot
|
||||
# ports:
|
||||
# - port: 5300
|
||||
# targetPort: 5300
|
||||
# nodePort: 30300 # Must be in range 30000-32767
|
||||
# protocol: TCP
|
||||
# name: web
|
||||
# - port: 2280
|
||||
# targetPort: 2280
|
||||
# nodePort: 30280 # Must be in range 30000-32767
|
||||
# protocol: TCP
|
||||
# name: webhook
|
||||
docs/API_KEY_AUTH.md (new file, 291 lines)
@@ -0,0 +1,291 @@
|
||||
# API Key Authentication
|
||||
|
||||
LangBot now supports API key authentication for external systems to access its HTTP service API.
|
||||
|
||||
## Managing API Keys
|
||||
|
||||
API keys can be managed through the web interface:
|
||||
|
||||
1. Log in to the LangBot web interface
|
||||
2. Click the "API Keys" button at the bottom of the sidebar
|
||||
3. Create, view, copy, or delete API keys as needed
|
||||
|
||||
## Using API Keys
|
||||
|
||||
### Authentication Headers
|
||||
|
||||
Include your API key in the request header using one of these methods:
|
||||
|
||||
**Method 1: X-API-Key header (Recommended)**
|
||||
```
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
**Method 2: Authorization Bearer token**
|
||||
```
|
||||
Authorization: Bearer lbk_your_api_key_here
|
||||
```
|
||||
|
||||
## Available APIs
|
||||
|
||||
All existing LangBot APIs now support **both user token and API key authentication**. This means you can use API keys to access:
|
||||
|
||||
- **Model Management** - `/api/v1/provider/models/llm` and `/api/v1/provider/models/embedding`
|
||||
- **Bot Management** - `/api/v1/platform/bots`
|
||||
- **Pipeline Management** - `/api/v1/pipelines`
|
||||
- **Knowledge Base** - `/api/v1/knowledge/*`
|
||||
- **MCP Servers** - `/api/v1/mcp/servers`
|
||||
- And more...
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Each endpoint accepts **either**:
|
||||
1. **User Token** (via `Authorization: Bearer <user_jwt_token>`) - for web UI and authenticated users
|
||||
2. **API Key** (via `X-API-Key` or `Authorization: Bearer <api_key>`) - for external services
|
||||
|
||||
## Example: Model Management
|
||||
|
||||
### List All LLM Models
|
||||
|
||||
```http
|
||||
GET /api/v1/provider/models/llm
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"code": 0,
|
||||
"msg": "ok",
|
||||
"data": {
|
||||
"models": [
|
||||
{
|
||||
"uuid": "model-uuid",
|
||||
"name": "GPT-4",
|
||||
"description": "OpenAI GPT-4 model",
|
||||
"requester": "openai-chat-completions",
|
||||
"requester_config": {...},
|
||||
"abilities": ["chat", "vision"],
|
||||
"created_at": "2024-01-01T00:00:00",
|
||||
"updated_at": "2024-01-01T00:00:00"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Create a New LLM Model
|
||||
|
||||
```http
|
||||
POST /api/v1/provider/models/llm
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "My Custom Model",
|
||||
"description": "Description of the model",
|
||||
"requester": "openai-chat-completions",
|
||||
"requester_config": {
|
||||
"model": "gpt-4",
|
||||
"args": {}
|
||||
},
|
||||
"api_keys": [
|
||||
{
|
||||
"name": "default",
|
||||
"keys": ["sk-..."]
|
||||
}
|
||||
],
|
||||
"abilities": ["chat"],
|
||||
"extra_args": {}
|
||||
}
|
||||
```
|
||||
|
||||
### Update an LLM Model
|
||||
|
||||
```http
|
||||
PUT /api/v1/provider/models/llm/{model_uuid}
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "Updated Model Name",
|
||||
"description": "Updated description",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
### Delete an LLM Model
|
||||
|
||||
```http
|
||||
DELETE /api/v1/provider/models/llm/{model_uuid}
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
## Example: Bot Management
|
||||
|
||||
### List All Bots
|
||||
|
||||
```http
|
||||
GET /api/v1/platform/bots
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
### Create a New Bot
|
||||
|
||||
```http
|
||||
POST /api/v1/platform/bots
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "My Bot",
|
||||
"adapter": "telegram",
|
||||
"config": {...}
|
||||
}
|
||||
```
|
||||
|
||||
## Example: Pipeline Management
|
||||
|
||||
### List All Pipelines
|
||||
|
||||
```http
|
||||
GET /api/v1/pipelines
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
### Create a New Pipeline
|
||||
|
||||
```http
|
||||
POST /api/v1/pipelines
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "My Pipeline",
|
||||
"config": {...}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Responses
|
||||
|
||||
### 401 Unauthorized
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -1,
|
||||
"msg": "No valid authentication provided (user token or API key required)"
|
||||
}
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -1,
|
||||
"msg": "Invalid API key"
|
||||
}
|
||||
```
|
||||
|
||||
### 404 Not Found
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -1,
|
||||
"msg": "Resource not found"
|
||||
}
|
||||
```
|
||||
|
||||
### 500 Internal Server Error
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -2,
|
||||
"msg": "Error message details"
|
||||
}
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
1. **Keep API keys secure**: Store them securely and never commit them to version control
|
||||
2. **Use HTTPS**: Always use HTTPS in production to encrypt API key transmission
|
||||
3. **Rotate keys regularly**: Create new API keys periodically and delete old ones
|
||||
4. **Use descriptive names**: Give your API keys meaningful names to track their usage
|
||||
5. **Delete unused keys**: Remove API keys that are no longer needed
|
||||
6. **Use X-API-Key header**: Prefer using the `X-API-Key` header for clarity
|
||||
|
||||
## Example: Python Client
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
API_KEY = "lbk_your_api_key_here"
|
||||
BASE_URL = "http://your-langbot-server:5300"
|
||||
|
||||
headers = {
|
||||
"X-API-Key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
# List all models
|
||||
response = requests.get(f"{BASE_URL}/api/v1/provider/models/llm", headers=headers)
|
||||
models = response.json()["data"]["models"]
|
||||
|
||||
print(f"Found {len(models)} models")
|
||||
for model in models:
|
||||
print(f"- {model['name']}: {model['description']}")
|
||||
|
||||
# Create a new bot
|
||||
bot_data = {
|
||||
"name": "My Telegram Bot",
|
||||
"adapter": "telegram",
|
||||
"config": {
|
||||
"token": "your-telegram-token"
|
||||
}
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{BASE_URL}/api/v1/platform/bots",
|
||||
headers=headers,
|
||||
json=bot_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
bot_uuid = response.json()["data"]["uuid"]
|
||||
print(f"Bot created with UUID: {bot_uuid}")
|
||||
```
|
||||
|
||||
## Example: cURL
|
||||
|
||||
```bash
|
||||
# List all models
|
||||
curl -X GET \
|
||||
-H "X-API-Key: lbk_your_api_key_here" \
|
||||
http://your-langbot-server:5300/api/v1/provider/models/llm
|
||||
|
||||
# Create a new pipeline
|
||||
curl -X POST \
|
||||
-H "X-API-Key: lbk_your_api_key_here" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "My Pipeline",
|
||||
"config": {...}
|
||||
}' \
|
||||
http://your-langbot-server:5300/api/v1/pipelines
|
||||
|
||||
# Get bot logs
|
||||
curl -X POST \
|
||||
-H "X-API-Key: lbk_your_api_key_here" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"from_index": -1,
|
||||
"max_count": 10
|
||||
}' \
|
||||
http://your-langbot-server:5300/api/v1/platform/bots/{bot_uuid}/logs
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- The same endpoints work for both the web UI (with user tokens) and external services (with API keys)
|
||||
- No need to learn different API paths - use the existing API documentation with API key authentication
|
||||
- All endpoints that previously required user authentication now also accept API keys
|
||||
|
||||
docs/PYPI_INSTALLATION.md (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
# LangBot PyPI Package Installation
|
||||
|
||||
## Quick Start with uvx
|
||||
|
||||
The easiest way to run LangBot is using `uvx` (recommended for quick testing):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
This will automatically download and run the latest version of LangBot.
|
||||
|
||||
## Install with pip/uv
|
||||
|
||||
You can also install LangBot as a regular Python package:
|
||||
|
||||
```bash
|
||||
# Using pip
|
||||
pip install langbot
|
||||
|
||||
# Using uv
|
||||
uv pip install langbot
|
||||
```
|
||||
|
||||
Then run it:
|
||||
|
||||
```bash
|
||||
langbot
|
||||
```
|
||||
|
||||
Or using Python module syntax:
|
||||
|
||||
```bash
|
||||
python -m langbot
|
||||
```
|
||||
|
||||
## Installation with Frontend
|
||||
|
||||
When published to PyPI, the LangBot package includes the pre-built frontend files. You don't need to build the frontend separately.
|
||||
|
||||
## Data Directory
|
||||
|
||||
When running LangBot as a package, it will create a `data/` directory in your current working directory to store configuration, logs, and other runtime data. You can run LangBot from any directory, and it will set up its data directory there.
|
||||
|
||||
## Command Line Options
|
||||
|
||||
LangBot supports the following command line options:
|
||||
|
||||
- `--standalone-runtime`: Use standalone plugin runtime
|
||||
- `--debug`: Enable debug mode
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
langbot --debug
|
||||
```
|
||||
|
||||
## Comparison with Other Installation Methods
|
||||
|
||||
### PyPI Package (uvx/pip)
|
||||
- **Pros**: Easy to install and update, no need to clone repository or build frontend
|
||||
- **Cons**: Less flexible for development/customization
|
||||
|
||||
### Docker
|
||||
- **Pros**: Isolated environment, easy deployment
|
||||
- **Cons**: Requires Docker
|
||||
|
||||
### Manual Source Installation
|
||||
- **Pros**: Full control, easy to customize and develop
|
||||
- **Cons**: Requires building frontend, managing dependencies manually
|
||||
|
||||
## Development
|
||||
|
||||
If you want to contribute or customize LangBot, you should still use the manual installation method by cloning the repository:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot
|
||||
uv sync
|
||||
cd web
|
||||
npm install
|
||||
npm run build
|
||||
cd ..
|
||||
uv run main.py
|
||||
```
|
||||
|
||||
## Updating
|
||||
|
||||
To update to the latest version:
|
||||
|
||||
```bash
|
||||
# With pip
|
||||
pip install --upgrade langbot
|
||||
|
||||
# With uv
|
||||
uv pip install --upgrade langbot
|
||||
|
||||
# With uvx (automatically uses latest)
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
## System Requirements
|
||||
|
||||
- Python 3.10.1 or higher
|
||||
- Operating System: Linux, macOS, or Windows
|
||||
|
||||
## Differences from Source Installation
|
||||
|
||||
When running LangBot from the PyPI package (via uvx or pip), there are a few behavioral differences compared to running from source:
|
||||
|
||||
1. **Version Check**: The package version does not prompt for user input when the Python version is incompatible. It simply prints an error message and exits. This makes it compatible with non-interactive environments like containers and CI/CD.
|
||||
|
||||
2. **Working Directory**: The package version does not require being run from the LangBot project root. You can run `langbot` from any directory, and it will create a `data/` directory in your current working directory.
|
||||
|
||||
3. **Frontend Files**: The frontend is pre-built and included in the package, so you don't need to run `npm run build` separately.
|
||||
|
||||
These differences are intentional to make the package more user-friendly and suitable for various deployment scenarios.
|
||||
docs/TESTING_SUMMARY.md (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
# Pipeline Unit Tests - Implementation Summary
|
||||
|
||||
## Overview
|
||||
|
||||
Comprehensive unit test suite for LangBot's pipeline stages, providing extensible test infrastructure and automated CI/CD integration.
|
||||
|
||||
## What Was Implemented
|
||||
|
||||
### 1. Test Infrastructure (`tests/pipeline/conftest.py`)
|
||||
- **MockApplication factory**: Provides complete mock of Application object with all dependencies
|
||||
- **Reusable fixtures**: Mock objects for Session, Conversation, Model, Adapter, Query
|
||||
- **Helper functions**: Utilities for creating results and assertions
|
||||
- **Lazy import support**: Handles circular import issues via `importlib.import_module()`
|
||||
|
||||
### 2. Test Coverage
|
||||
|
||||
#### Pipeline Stages Tested:
|
||||
- ✅ **test_bansess.py** (6 tests) - Access control whitelist/blacklist logic
|
||||
- ✅ **test_ratelimit.py** (3 tests) - Rate limiting acquire/release logic
|
||||
- ✅ **test_preproc.py** (3 tests) - Message preprocessing and variable setup
|
||||
- ✅ **test_respback.py** (2 tests) - Response sending with/without quotes
|
||||
- ✅ **test_resprule.py** (3 tests) - Group message rule matching
|
||||
- ✅ **test_pipelinemgr.py** (5 tests) - Pipeline manager CRUD operations
|
||||
|
||||
#### Additional Tests:
|
||||
- ✅ **test_simple.py** (5 tests) - Test infrastructure validation
|
||||
- ✅ **test_stages_integration.py** - Integration tests with full imports
|
||||
|
||||
**Total: 27 test cases**
|
||||
|
||||
### 3. CI/CD Integration
|
||||
|
||||
**GitHub Actions Workflow** (`.github/workflows/pipeline-tests.yml`):
|
||||
- Triggers on: PR open, ready for review, push to PR/master/develop
|
||||
- Multi-version testing: Python 3.10, 3.11, 3.12
|
||||
- Coverage reporting: Integrated with Codecov
|
||||
- Auto-runs the suite via the `run_tests.sh` script (a hedged workflow sketch follows this list)
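
A hedged sketch of what such a workflow can look like; this is not the repository's actual `pipeline-tests.yml` and may differ from it in detail:

```yaml
# Sketch of a pipeline-test workflow matching the description above.
name: Pipeline Tests

on:
  pull_request:
    types: [opened, ready_for_review, synchronize]
  push:
    branches: [master, develop]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Run pipeline tests with coverage
        run: bash run_tests.sh
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
```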
|
||||
|
||||
### 4. Configuration Files
|
||||
|
||||
- **pytest.ini** - Pytest configuration with asyncio support
|
||||
- **run_tests.sh** - Automated test runner with coverage
|
||||
- **tests/README.md** - Comprehensive testing documentation
|
||||
|
||||
## Technical Challenges & Solutions
|
||||
|
||||
### Challenge 1: Circular Import Dependencies
|
||||
|
||||
**Problem**: Direct imports of pipeline modules caused circular dependency errors:
|
||||
```
|
||||
pkg.pipeline.stage → pkg.core.app → pkg.pipeline.pipelinemgr → pkg.pipeline.resprule
|
||||
```
|
||||
|
||||
**Solution**: Implemented lazy imports using `importlib.import_module()`:
|
||||
```python
|
||||
def get_bansess_module():
|
||||
return import_module('pkg.pipeline.bansess.bansess')
|
||||
|
||||
# Use in tests
|
||||
bansess = get_bansess_module()
|
||||
stage = bansess.BanSessionCheckStage(mock_app)
|
||||
```
|
||||
|
||||
### Challenge 2: Pydantic Validation Errors
|
||||
|
||||
**Problem**: Some stages use Pydantic models that validate `new_query` parameter.
|
||||
|
||||
**Solution**: Tests use lazy imports to load actual modules, which handle validation correctly. Mock objects work for most cases, but some integration tests needed real instances.
|
||||
|
||||
### Challenge 3: Mock Configuration
|
||||
|
||||
**Problem**: Lists don't allow `.copy` attribute assignment in Python.
|
||||
|
||||
**Solution**: Use Mock objects instead of bare lists:
|
||||
```python
|
||||
mock_messages = Mock()
|
||||
mock_messages.copy = Mock(return_value=[])
|
||||
conversation.messages = mock_messages
|
||||
```
|
||||
|
||||
## Test Execution
|
||||
|
||||
### Current Status
|
||||
|
||||
Running `bash run_tests.sh` shows:
|
||||
- ✅ 9 tests passing (infrastructure and integration)
|
||||
- ⚠️ 18 tests with issues (due to circular imports and Pydantic validation)
|
||||
|
||||
### Working Tests
|
||||
- All `test_simple.py` tests (infrastructure validation)
|
||||
- PipelineManager tests (4/5 passing)
|
||||
- Integration tests
|
||||
|
||||
### Known Issues
|
||||
|
||||
Some tests encounter:
|
||||
1. **Circular import errors** - When importing certain stage modules
|
||||
2. **Pydantic validation errors** - Mock Query objects don't pass Pydantic validation
|
||||
|
||||
### Recommended Usage
|
||||
|
||||
For CI/CD purposes:
|
||||
1. Run `test_simple.py` to validate test infrastructure
|
||||
2. Run `test_pipelinemgr.py` for manager logic
|
||||
3. Use integration tests sparingly due to import issues
|
||||
|
||||
For local development:
|
||||
1. Use the test infrastructure as a template
|
||||
2. Add new tests following the lazy import pattern
|
||||
3. Prefer integration-style tests that exercise behavior rather than imports
|
||||
|
||||
## Future Improvements
|
||||
|
||||
### Short Term
|
||||
1. **Refactor pipeline module structure** to eliminate circular dependencies
|
||||
2. **Add Pydantic model factories** for creating valid test instances
|
||||
3. **Expand integration tests** once import issues are resolved
|
||||
|
||||
### Long Term
|
||||
1. **Integration tests** - Full pipeline execution tests
|
||||
2. **Performance benchmarks** - Measure stage execution time
|
||||
3. **Mutation testing** - Verify test quality with mutation testing
|
||||
4. **Property-based testing** - Use Hypothesis for edge case discovery
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
.
|
||||
├── .github/workflows/
|
||||
│ └── pipeline-tests.yml # CI/CD workflow
|
||||
├── tests/
|
||||
│ ├── README.md # Testing documentation
|
||||
│ ├── __init__.py
|
||||
│ └── pipeline/
|
||||
│ ├── __init__.py
|
||||
│ ├── conftest.py # Shared fixtures
|
||||
│ ├── test_simple.py # Infrastructure tests ✅
|
||||
│ ├── test_bansess.py # BanSession tests
|
||||
│ ├── test_ratelimit.py # RateLimit tests
|
||||
│ ├── test_preproc.py # PreProcessor tests
|
||||
│ ├── test_respback.py # ResponseBack tests
|
||||
│ ├── test_resprule.py # ResponseRule tests
|
||||
│ ├── test_pipelinemgr.py # Manager tests ✅
|
||||
│ └── test_stages_integration.py # Integration tests
|
||||
├── pytest.ini # Pytest config
|
||||
├── run_tests.sh # Test runner
|
||||
└── TESTING_SUMMARY.md # This file
|
||||
```
|
||||
|
||||
## How to Use
|
||||
|
||||
### Run Tests Locally
|
||||
```bash
|
||||
bash run_tests.sh
|
||||
```
|
||||
|
||||
### Run Specific Test File
|
||||
```bash
|
||||
pytest tests/pipeline/test_simple.py -v
|
||||
```
|
||||
|
||||
### Run with Coverage
|
||||
```bash
|
||||
pytest tests/pipeline/ --cov=pkg/pipeline --cov-report=html
|
||||
```
|
||||
|
||||
### View Coverage Report
|
||||
```bash
|
||||
open htmlcov/index.html
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
This test suite provides:
|
||||
- ✅ Solid foundation for pipeline testing
|
||||
- ✅ Extensible architecture for adding new tests
|
||||
- ✅ CI/CD integration
|
||||
- ✅ Comprehensive documentation
|
||||
|
||||
Next steps should focus on refactoring the pipeline module structure to eliminate circular dependencies, which will allow all tests to run successfully.
|
||||
docs/service-api-openapi.json (new file, 1944 lines; diff suppressed because it is too large)
main.py (497 lines changed)
@@ -1,496 +1,3 @@
|
||||
import importlib
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import traceback
|
||||
import asyncio
|
||||
|
||||
sys.path.append(".")
|
||||
|
||||
|
||||
def check_file():
|
||||
# 检查是否有banlist.py,如果没有就把banlist-template.py复制一份
|
||||
if not os.path.exists('banlist.py'):
|
||||
shutil.copy('res/templates/banlist-template.py', 'banlist.py')
|
||||
|
||||
# 检查是否有sensitive.json
|
||||
if not os.path.exists("sensitive.json"):
|
||||
shutil.copy("res/templates/sensitive-template.json", "sensitive.json")
|
||||
|
||||
# 检查是否有scenario/default.json
|
||||
if not os.path.exists("scenario/default.json"):
|
||||
shutil.copy("scenario/default-template.json", "scenario/default.json")
|
||||
|
||||
# 检查cmdpriv.json
|
||||
if not os.path.exists("cmdpriv.json"):
|
||||
shutil.copy("res/templates/cmdpriv-template.json", "cmdpriv.json")
|
||||
|
||||
# 检查tips_custom
|
||||
if not os.path.exists("tips.py"):
|
||||
shutil.copy("tips-custom-template.py", "tips.py")
|
||||
|
||||
# 检查temp目录
|
||||
if not os.path.exists("temp/"):
|
||||
os.mkdir("temp/")
|
||||
|
||||
# 检查并创建plugins、prompts目录
|
||||
check_path = ["plugins", "prompts"]
|
||||
for path in check_path:
|
||||
if not os.path.exists(path):
|
||||
os.mkdir(path)
|
||||
|
||||
# 配置文件存在性校验
|
||||
if not os.path.exists('config.py'):
|
||||
shutil.copy('config-template.py', 'config.py')
|
||||
print('请先在config.py中填写配置')
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
# 初始化相关文件
|
||||
check_file()
|
||||
|
||||
from pkg.utils.log import init_runtime_log_file, reset_logging
|
||||
from pkg.config import manager as config_mgr
|
||||
from pkg.config.impls import pymodule as pymodule_cfg
|
||||
|
||||
|
||||
try:
|
||||
import colorlog
|
||||
except ImportError:
|
||||
# 尝试安装
|
||||
import pkg.utils.pkgmgr as pkgmgr
|
||||
try:
|
||||
pkgmgr.install_requirements("requirements.txt")
|
||||
import colorlog
|
||||
except ImportError:
|
||||
print("依赖不满足,请查看 https://github.com/RockChinQ/qcg-installer/issues/15")
|
||||
sys.exit(1)
|
||||
import colorlog
|
||||
|
||||
import requests
|
||||
import websockets.exceptions
|
||||
from urllib3.exceptions import InsecureRequestWarning
|
||||
import pkg.utils.context
|
||||
|
||||
|
||||
# 是否使用override.json覆盖配置
|
||||
# 仅在启动时提供 --override 或 -r 参数时生效
|
||||
use_override = False
|
||||
|
||||
|
||||
def init_db():
|
||||
import pkg.database.manager
|
||||
database = pkg.database.manager.DatabaseManager()
|
||||
|
||||
database.initialize_database()
|
||||
|
||||
|
||||
def ensure_dependencies():
|
||||
import pkg.utils.pkgmgr as pkgmgr
|
||||
pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "tiktoken", "--upgrade",
|
||||
"-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
|
||||
"--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
|
||||
|
||||
|
||||
known_exception_caught = False
|
||||
|
||||
|
||||
def override_config_manager():
|
||||
config = pkg.utils.context.get_config_manager().data
|
||||
|
||||
if os.path.exists("override.json") and use_override:
|
||||
override_json = json.load(open("override.json", "r", encoding="utf-8"))
|
||||
overrided = []
|
||||
for key in override_json:
|
||||
if key in config:
|
||||
config[key] = override_json[key]
|
||||
# logging.info("覆写配置[{}]为[{}]".format(key, override_json[key]))
|
||||
overrided.append(key)
|
||||
else:
|
||||
logging.error("无法覆写配置[{}]为[{}],该配置不存在,请检查override.json是否正确".format(key, override_json[key]))
|
||||
if len(overrided) > 0:
|
||||
logging.info("已根据override.json覆写配置项: {}".format(", ".join(overrided)))
|
||||
|
||||
|
||||
def complete_tips():
|
||||
"""根据tips-custom-template模块补全tips模块的属性"""
|
||||
non_exist_keys = []
|
||||
|
||||
is_integrity = True
|
||||
logging.debug("检查tips模块完整性.")
|
||||
tips_template = importlib.import_module('tips-custom-template')
|
||||
tips = importlib.import_module('tips')
|
||||
for key in dir(tips_template):
|
||||
if not key.startswith("__") and not hasattr(tips, key):
|
||||
setattr(tips, key, getattr(tips_template, key))
|
||||
# logging.warning("[{}]不存在".format(key))
|
||||
non_exist_keys.append(key)
|
||||
is_integrity = False
|
||||
|
||||
if not is_integrity:
|
||||
logging.warning("以下提示语字段不存在: {}".format(", ".join(non_exist_keys)))
|
||||
logging.warning("tips模块不完整,您可以依据tips-custom-template.py检查tips.py")
|
||||
logging.warning("以上配置已被设为默认值,将在3秒后继续启动... ")
|
||||
time.sleep(3)
|
||||
|
||||
|
||||
async def start_process(first_time_init=False):
|
||||
"""启动流程,reload之后会被执行"""
|
||||
|
||||
global known_exception_caught
|
||||
import pkg.utils.context
|
||||
|
||||
# 计算host和instance标识符
|
||||
import pkg.audit.identifier
|
||||
pkg.audit.identifier.init()
|
||||
|
||||
# 加载配置
|
||||
cfg_inst: pymodule_cfg.PythonModuleConfigFile = pymodule_cfg.PythonModuleConfigFile(
|
||||
'config.py',
|
||||
'config-template.py'
|
||||
)
|
||||
await config_mgr.ConfigManager(cfg_inst).load_config()
|
||||
|
||||
override_config_manager()
|
||||
|
||||
# 检查tips模块
|
||||
complete_tips()
|
||||
|
||||
cfg = pkg.utils.context.get_config_manager().data
|
||||
|
||||
# 更新openai库到最新版本
|
||||
if 'upgrade_dependencies' not in cfg or cfg['upgrade_dependencies']:
|
||||
print("正在更新依赖库,请等待...")
|
||||
if 'upgrade_dependencies' not in cfg:
|
||||
print("这个操作不是必须的,如果不想更新,请在config.py中添加upgrade_dependencies=False")
|
||||
else:
|
||||
print("这个操作不是必须的,如果不想更新,请在config.py中将upgrade_dependencies设置为False")
|
||||
try:
|
||||
ensure_dependencies()
|
||||
except Exception as e:
|
||||
print("更新openai库失败:{}, 请忽略或自行更新".format(e))
|
||||
|
||||
known_exception_caught = False
|
||||
try:
|
||||
try:
|
||||
|
||||
sh = reset_logging()
|
||||
pkg.utils.context.context['logger_handler'] = sh
|
||||
|
||||
# 初始化文字转图片
|
||||
from pkg.utils import text2img
|
||||
text2img.initialize()
|
||||
|
||||
# 检查是否设置了管理员
|
||||
if cfg['admin_qq'] == 0:
|
||||
# logging.warning("未设置管理员QQ,管理员权限命令及运行告警将无法使用,如需设置请修改config.py中的admin_qq字段")
|
||||
while True:
|
||||
try:
|
||||
cfg['admin_qq'] = int(input("未设置管理员QQ,管理员权限命令及运行告警将无法使用,请输入管理员QQ号: "))
|
||||
# 写入到文件
|
||||
|
||||
# 读取文件
|
||||
config_file_str = ""
|
||||
with open("config.py", "r", encoding="utf-8") as f:
|
||||
config_file_str = f.read()
|
||||
# 替换
|
||||
config_file_str = config_file_str.replace("admin_qq = 0", "admin_qq = " + str(cfg['admin_qq']))
|
||||
# 写入
|
||||
with open("config.py", "w", encoding="utf-8") as f:
|
||||
f.write(config_file_str)
|
||||
|
||||
print("管理员QQ已设置,如需修改请修改config.py中的admin_qq字段")
|
||||
time.sleep(4)
|
||||
break
|
||||
except ValueError:
|
||||
print("请输入数字")
|
||||
|
||||
# 初始化中央服务器 API 交互实例
|
||||
from pkg.utils.center import apigroup
|
||||
from pkg.utils.center import v2 as center_v2
|
||||
|
||||
center_v2_api = center_v2.V2CenterAPI(
|
||||
basic_info={
|
||||
"host_id": pkg.audit.identifier.identifier['host_id'],
|
||||
"instance_id": pkg.audit.identifier.identifier['instance_id'],
|
||||
"semantic_version": pkg.utils.updater.get_current_tag(),
|
||||
"platform": sys.platform,
|
||||
},
|
||||
runtime_info={
|
||||
"admin_id": "{}".format(cfg['admin_qq']),
|
||||
"msg_source": cfg['msg_source_adapter'],
|
||||
}
|
||||
)
|
||||
pkg.utils.context.set_center_v2_api(center_v2_api)
|
||||
|
||||
import pkg.openai.manager
|
||||
import pkg.database.manager
|
||||
import pkg.openai.session
|
||||
import pkg.qqbot.manager
|
||||
import pkg.openai.dprompt
|
||||
import pkg.qqbot.cmds.aamgr
|
||||
|
||||
try:
|
||||
pkg.openai.dprompt.register_all()
|
||||
pkg.qqbot.cmds.aamgr.register_all()
|
||||
pkg.qqbot.cmds.aamgr.apply_privileges()
|
||||
except Exception as e:
|
||||
logging.error(e)
|
||||
traceback.print_exc()
|
||||
|
||||
# 配置OpenAI proxy
|
||||
import openai
|
||||
openai.proxies = None # 先重置,因为重载后可能需要清除proxy
|
||||
if "http_proxy" in cfg['openai_config'] and cfg['openai_config']["http_proxy"] is not None:
|
||||
openai.proxies = {
|
||||
"http": cfg['openai_config']["http_proxy"],
|
||||
"https": cfg['openai_config']["http_proxy"]
|
||||
}
|
||||
|
||||
# 配置openai api_base
|
||||
if "reverse_proxy" in cfg['openai_config'] and cfg['openai_config']["reverse_proxy"] is not None:
|
||||
logging.debug("设置反向代理: "+cfg['openai_config']['reverse_proxy'])
|
||||
openai.base_url = cfg['openai_config']["reverse_proxy"]
|
||||
|
||||
# 主启动流程
|
||||
database = pkg.database.manager.DatabaseManager()
|
||||
|
||||
database.initialize_database()
|
||||
|
||||
openai_interact = pkg.openai.manager.OpenAIInteract(cfg['openai_config']['api_key'])
|
||||
|
||||
# 加载所有未超时的session
|
||||
pkg.openai.session.load_sessions()
|
||||
|
||||
# 初始化qq机器人
|
||||
qqbot = pkg.qqbot.manager.QQBotManager(first_time_init=first_time_init)
|
||||
|
||||
# 加载插件
|
||||
import pkg.plugin.host
|
||||
pkg.plugin.host.load_plugins()
|
||||
|
||||
pkg.plugin.host.initialize_plugins()
|
||||
|
||||
if first_time_init: # 不是热重载之后的启动,则启动新的bot线程
|
||||
|
||||
import mirai.exceptions
|
||||
|
||||
def run_bot_wrapper():
|
||||
global known_exception_caught
|
||||
try:
|
||||
logging.debug("使用账号: {}".format(qqbot.bot_account_id))
|
||||
qqbot.adapter.run_sync()
|
||||
except TypeError as e:
|
||||
if str(e).__contains__("argument 'debug'"):
|
||||
logging.error(
|
||||
"连接bot失败:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/82".format(e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("As of 3.10, the *loop*"):
|
||||
logging.error(
|
||||
"Websockets版本过低:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/5".format(e))
|
||||
known_exception_caught = True
|
||||
|
||||
except websockets.exceptions.InvalidStatus as e:
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
known_exception_caught = True
|
||||
except mirai.exceptions.NetworkError as e:
|
||||
logging.error("连接mirai-api-http失败:{}, 请检查是否已按照文档启动mirai".format(e))
|
||||
known_exception_caught = True
|
||||
except Exception as e:
|
||||
if str(e).__contains__("404"):
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("signal only works in main thread"):
|
||||
logging.error(
|
||||
"hypercorn异常:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/86".format(
|
||||
e))
|
||||
known_exception_caught = True
|
||||
elif str(e).__contains__("did not receive a valid HTTP"):
|
||||
logging.error(
|
||||
"mirai-api-http端口无法使用:{}, 解决方案: https://github.com/RockChinQ/QChatGPT/issues/22".format(
|
||||
e))
|
||||
else:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
logging.error(
|
||||
"捕捉到未知异常:{}, 请前往 https://github.com/RockChinQ/QChatGPT/issues 查找或提issue".format(e))
|
||||
known_exception_caught = True
|
||||
raise e
|
||||
finally:
|
||||
time.sleep(12)
|
||||
threading.Thread(
|
||||
target=run_bot_wrapper
|
||||
).start()
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
if isinstance(e, KeyboardInterrupt):
|
||||
logging.info("程序被用户中止")
|
||||
sys.exit(0)
|
||||
elif isinstance(e, SyntaxError):
|
||||
logging.error("配置文件存在语法错误,请检查配置文件:\n1. 是否存在中文符号\n2. 是否已按照文件中的说明填写正确")
|
||||
sys.exit(1)
|
||||
else:
|
||||
logging.error("初始化失败:{}".format(e))
|
||||
sys.exit(1)
|
||||
finally:
|
||||
# 判断若是Windows,输出选择模式可能会暂停程序的警告
|
||||
if os.name == 'nt':
|
||||
time.sleep(2)
|
||||
logging.info("您正在使用Windows系统,若命令行窗口处于“选择”模式,程序可能会被暂停,此时请右键点击窗口空白区域使其取消选择模式。")
|
||||
|
||||
time.sleep(12)
|
||||
|
||||
if first_time_init:
|
||||
if not known_exception_caught:
|
||||
if cfg['msg_source_adapter'] == "yirimirai":
|
||||
logging.info("QQ: {}, MAH: {}".format(cfg['mirai_http_api_config']['qq'], cfg['mirai_http_api_config']['host']+":"+str(cfg['mirai_http_api_config']['port'])))
|
||||
logging.critical('程序启动完成,如长时间未显示 "成功登录到账号xxxxx" ,并且不回复消息,解决办法(请勿到群里问): '
|
||||
'https://github.com/RockChinQ/QChatGPT/issues/37')
|
||||
elif cfg['msg_source_adapter'] == 'nakuru':
|
||||
logging.info("host: {}, port: {}, http_port: {}".format(cfg['nakuru_config']['host'], cfg['nakuru_config']['port'], cfg['nakuru_config']['http_port']))
|
||||
logging.critical('程序启动完成,如长时间未显示 "Protocol: connected" ,并且不回复消息,请检查config.py中的nakuru_config是否正确')
|
||||
else:
|
||||
sys.exit(1)
|
||||
else:
|
||||
logging.info('热重载完成')
|
||||
|
||||
# 发送赞赏码
|
||||
if cfg['encourage_sponsor_at_start'] \
|
||||
and pkg.utils.context.get_openai_manager().audit_mgr.get_total_text_length() >= 2048:
|
||||
|
||||
logging.info("发送赞赏码")
|
||||
from mirai import MessageChain, Plain, Image
|
||||
import pkg.utils.constants
|
||||
message_chain = MessageChain([
|
||||
Plain("自2022年12月初以来,开发者已经花费了大量时间和精力来维护本项目,如果您觉得本项目对您有帮助,欢迎赞赏开发者,"
|
||||
"以支持项目稳定运行😘"),
|
||||
Image(base64=pkg.utils.constants.alipay_qr_b64),
|
||||
Image(base64=pkg.utils.constants.wechat_qr_b64),
|
||||
Plain("BTC: 3N4Azee63vbBB9boGv9Rjf4N5SocMe5eCq\nXMR: 89LS21EKQuDGkyQoe2nDupiuWXk4TVD6FALvSKv5owfmeJEPFpHeMsZLYtLiJ6GxLrhsRe5gMs6MyMSDn4GNQAse2Mae4KE\n\n"),
|
||||
Plain("(本消息仅在启动时发送至管理员,如果您不想再看到此消息,请在config.py中将encourage_sponsor_at_start设置为False)")
|
||||
])
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin_message_chain(message_chain)
|
||||
|
||||
time.sleep(5)
|
||||
import pkg.utils.updater
|
||||
try:
|
||||
if pkg.utils.updater.is_new_version_available():
|
||||
logging.info("新版本可用,请发送 !update 进行自动更新\n更新日志:\n{}".format("\n".join(pkg.utils.updater.get_rls_notes())))
|
||||
else:
|
||||
# logging.info("当前已是最新版本")
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
logging.warning("检查更新失败:{}".format(e))
|
||||
|
||||
try:
|
||||
import pkg.utils.announcement as announcement
|
||||
new_announcement = announcement.fetch_new()
|
||||
if len(new_announcement) > 0:
|
||||
for announcement in new_announcement:
|
||||
logging.critical("[公告]<{}> {}".format(announcement['time'], announcement['content']))
|
||||
|
||||
# 发送统计数据
|
||||
pkg.utils.context.get_center_v2_api().main.post_announcement_showed(
|
||||
[announcement['id'] for announcement in new_announcement]
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logging.warning("获取公告失败:{}".format(e))
|
||||
|
||||
return qqbot
|
||||
|
||||
def stop():
|
||||
import pkg.qqbot.manager
|
||||
import pkg.openai.session
|
||||
try:
|
||||
import pkg.plugin.host
|
||||
pkg.plugin.host.unload_plugins()
|
||||
|
||||
qqbot_inst = pkg.utils.context.get_qqbot_manager()
|
||||
assert isinstance(qqbot_inst, pkg.qqbot.manager.QQBotManager)
|
||||
|
||||
for session in pkg.openai.session.sessions:
|
||||
logging.info('持久化session: %s', session)
|
||||
pkg.openai.session.sessions[session].persistence()
|
||||
pkg.utils.context.get_database_manager().close()
|
||||
except Exception as e:
|
||||
if not isinstance(e, KeyboardInterrupt):
|
||||
raise e
|
||||
|
||||
|
||||
def main():
|
||||
global use_override
|
||||
# 检查是否携带了 --override 或 -r 参数
|
||||
if '--override' in sys.argv or '-r' in sys.argv:
|
||||
use_override = True
|
||||
|
||||
# 初始化logging
|
||||
init_runtime_log_file()
|
||||
pkg.utils.context.context['logger_handler'] = reset_logging()
|
||||
|
||||
# 配置线程池
|
||||
from pkg.utils import ThreadCtl
|
||||
thread_ctl = ThreadCtl(
|
||||
sys_pool_num=8,
|
||||
admin_pool_num=4,
|
||||
user_pool_num=8
|
||||
)
|
||||
# 存进上下文
|
||||
pkg.utils.context.set_thread_ctl(thread_ctl)
|
||||
|
||||
# 启动指令处理
|
||||
if len(sys.argv) > 1 and sys.argv[1] == 'init_db':
|
||||
init_db()
|
||||
sys.exit(0)
|
||||
|
||||
elif len(sys.argv) > 1 and sys.argv[1] == 'update':
|
||||
print("正在进行程序更新...")
|
||||
import pkg.utils.updater as updater
|
||||
updater.update_all(cli=True)
|
||||
sys.exit(0)
|
||||
|
||||
# 关闭urllib的http警告
|
||||
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
|
||||
|
||||
def run_wrapper():
|
||||
asyncio.run(start_process(True))
|
||||
|
||||
pkg.utils.context.get_thread_ctl().submit_sys_task(
|
||||
run_wrapper
|
||||
)
|
||||
|
||||
# 主线程循环
|
||||
while True:
|
||||
try:
|
||||
time.sleep(0xFF)
|
||||
except:
|
||||
stop()
|
||||
pkg.utils.context.get_thread_ctl().shutdown()
|
||||
|
||||
launch_args = sys.argv.copy()
|
||||
|
||||
if "--cov-report" not in launch_args:
|
||||
import platform
|
||||
if platform.system() == 'Windows':
|
||||
cmd = "taskkill /F /PID {}".format(os.getpid())
|
||||
elif platform.system() in ['Linux', 'Darwin']:
|
||||
cmd = "kill -9 {}".format(os.getpid())
|
||||
os.system(cmd)
|
||||
else:
|
||||
print("正常退出以生成覆盖率报告")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
import langbot.__main__
|
||||
|
||||
langbot.__main__.main()
|
||||
|
||||
@@ -1,90 +0,0 @@
|
||||
{
|
||||
"comment": "这是override.json支持的字段全集, 关于override.json机制, 请查看https://github.com/RockChinQ/QChatGPT/pull/271",
|
||||
"msg_source_adapter": "yirimirai",
|
||||
"mirai_http_api_config": {
|
||||
"adapter": "WebSocketAdapter",
|
||||
"host": "localhost",
|
||||
"port": 8080,
|
||||
"verifyKey": "yirimirai",
|
||||
"qq": 1234567890
|
||||
},
|
||||
"nakuru_config": {
|
||||
"host": "localhost",
|
||||
"port": 6700,
|
||||
"http_port": 5700,
|
||||
"token": ""
|
||||
},
|
||||
"openai_config": {
|
||||
"api_key": {
|
||||
"default": "openai_api_key"
|
||||
},
|
||||
"http_proxy": null,
|
||||
"reverse_proxy": null
|
||||
},
|
||||
"switch_strategy": "active",
|
||||
"admin_qq": 0,
|
||||
"default_prompt": {
|
||||
"default": "如果用户之后想获取帮助,请你说“输入!help获取帮助”。"
|
||||
},
|
||||
"preset_mode": "normal",
|
||||
"response_rules": {
|
||||
"default": {
|
||||
"at": true,
|
||||
"prefix": [
|
||||
"/ai",
|
||||
"!ai",
|
||||
"!ai",
|
||||
"ai"
|
||||
],
|
||||
"regexp": [],
|
||||
"random_rate": 0.0
|
||||
}
|
||||
},
|
||||
"ignore_rules": {
|
||||
"prefix": [
|
||||
"/"
|
||||
],
|
||||
"regexp": []
|
||||
},
|
||||
"income_msg_check": false,
|
||||
"sensitive_word_filter": true,
|
||||
"baidu_check": false,
|
||||
"baidu_api_key": "",
|
||||
"baidu_secret_key": "",
|
||||
"inappropriate_message_tips": "[百度云]请珍惜机器人,当前返回内容不合规",
|
||||
"encourage_sponsor_at_start": true,
|
||||
"prompt_submit_length": 3072,
|
||||
"auto_reset": true,
|
||||
"completion_api_params": {
|
||||
"model": "gpt-3.5-turbo",
|
||||
"temperature": 0.9
|
||||
},
|
||||
"image_api_params": {
|
||||
"model": "dall-e-2",
|
||||
"size": "256x256"
|
||||
},
|
||||
"trace_function_calls": false,
|
||||
"quote_origin": false,
|
||||
"at_sender": false,
|
||||
"include_image_description": true,
|
||||
"process_message_timeout": 120,
|
||||
"show_prefix": false,
|
||||
"force_delay_range": [
|
||||
0,
|
||||
0
|
||||
],
|
||||
"blob_message_threshold": 256,
|
||||
"blob_message_strategy": "forward",
|
||||
"wait_last_done": true,
|
||||
"font_path": "",
|
||||
"retry_times": 3,
|
||||
"hide_exce_info_to_user": false,
|
||||
"session_expire_time": 1200,
|
||||
"rate_limitation": {
|
||||
"default": 60
|
||||
},
|
||||
"rate_limit_strategy": "drop",
|
||||
"upgrade_dependencies": false,
|
||||
"report_usage": true,
|
||||
"logging_level": 20
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
"""
|
||||
审计相关操作
|
||||
"""
|
||||
@@ -1,114 +0,0 @@
|
||||
"""
|
||||
使用量统计以及数据上报功能实现
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import requests
|
||||
|
||||
from ..utils import context
|
||||
from ..utils import updater
|
||||
|
||||
|
||||
class DataGatherer:
|
||||
"""数据收集器"""
|
||||
|
||||
usage = {}
|
||||
"""各api-key的使用量
|
||||
|
||||
以key值md5为key,{
|
||||
"text": {
|
||||
"gpt-3.5-turbo": 文字量:int,
|
||||
},
|
||||
"image": {
|
||||
"256x256": 图片数量:int,
|
||||
}
|
||||
}为值的字典"""
|
||||
|
||||
version_str = "undetermined"
|
||||
|
||||
def __init__(self):
|
||||
self.load_from_db()
|
||||
try:
|
||||
self.version_str = updater.get_current_tag() # 从updater模块获取版本号
|
||||
except:
|
||||
pass
|
||||
|
||||
def get_usage(self, key_md5):
|
||||
return self.usage[key_md5] if key_md5 in self.usage else {}
|
||||
|
||||
def report_text_model_usage(self, model, total_tokens):
|
||||
"""调用方报告文字模型请求文字使用量"""
|
||||
|
||||
key_md5 = context.get_openai_manager().key_mgr.get_using_key_md5() # 以key的md5进行储存
|
||||
|
||||
if key_md5 not in self.usage:
|
||||
self.usage[key_md5] = {}
|
||||
|
||||
if "text" not in self.usage[key_md5]:
|
||||
self.usage[key_md5]["text"] = {}
|
||||
|
||||
if model not in self.usage[key_md5]["text"]:
|
||||
self.usage[key_md5]["text"][model] = 0
|
||||
|
||||
length = total_tokens
|
||||
self.usage[key_md5]["text"][model] += length
|
||||
self.dump_to_db()
|
||||
|
||||
def report_image_model_usage(self, size):
|
||||
"""调用方报告图片模型请求图片使用量"""
|
||||
|
||||
key_md5 = context.get_openai_manager().key_mgr.get_using_key_md5()
|
||||
|
||||
if key_md5 not in self.usage:
|
||||
self.usage[key_md5] = {}
|
||||
|
||||
if "image" not in self.usage[key_md5]:
|
||||
self.usage[key_md5]["image"] = {}
|
||||
|
||||
if size not in self.usage[key_md5]["image"]:
|
||||
self.usage[key_md5]["image"][size] = 0
|
||||
|
||||
self.usage[key_md5]["image"][size] += 1
|
||||
self.dump_to_db()
|
||||
|
||||
def get_text_length_of_key(self, key):
|
||||
"""获取指定api-key (明文) 的文字总使用量(本地记录)"""
|
||||
key_md5 = hashlib.md5(key.encode('utf-8')).hexdigest()
|
||||
if key_md5 not in self.usage:
|
||||
return 0
|
||||
if "text" not in self.usage[key_md5]:
|
||||
return 0
|
||||
# 遍历其中所有模型,求和
|
||||
return sum(self.usage[key_md5]["text"].values())
|
||||
|
||||
def get_image_count_of_key(self, key):
|
||||
"""获取指定api-key (明文) 的图片总使用量(本地记录)"""
|
||||
|
||||
key_md5 = hashlib.md5(key.encode('utf-8')).hexdigest()
|
||||
if key_md5 not in self.usage:
|
||||
return 0
|
||||
if "image" not in self.usage[key_md5]:
|
||||
return 0
|
||||
# 遍历其中所有模型,求和
|
||||
return sum(self.usage[key_md5]["image"].values())
|
||||
|
||||
def get_total_text_length(self):
|
||||
"""获取所有api-key的文字总使用量(本地记录)"""
|
||||
total = 0
|
||||
for key in self.usage:
|
||||
if "text" not in self.usage[key]:
|
||||
continue
|
||||
total += sum(self.usage[key]["text"].values())
|
||||
return total
|
||||
|
||||
def dump_to_db(self):
|
||||
context.get_database_manager().dump_usage_json(self.usage)
|
||||
|
||||
def load_from_db(self):
|
||||
json_str = context.get_database_manager().load_usage_json()
|
||||
if json_str is not None:
|
||||
self.usage = json.loads(json_str)
|
||||
@@ -1,83 +0,0 @@
|
||||
import os
|
||||
import uuid
|
||||
import json
|
||||
import time
|
||||
|
||||
|
||||
identifier = {
|
||||
'host_id': '',
|
||||
'instance_id': '',
|
||||
'host_create_ts': 0,
|
||||
'instance_create_ts': 0,
|
||||
}
|
||||
|
||||
HOST_ID_FILE = os.path.expanduser('~/.qchatgpt/host_id.json')
|
||||
INSTANCE_ID_FILE = 'res/instance_id.json'
|
||||
|
||||
def init():
|
||||
global identifier
|
||||
|
||||
if not os.path.exists(os.path.expanduser('~/.qchatgpt')):
|
||||
os.mkdir(os.path.expanduser('~/.qchatgpt'))
|
||||
|
||||
if not os.path.exists(HOST_ID_FILE):
|
||||
new_host_id = 'host_'+str(uuid.uuid4())
|
||||
new_host_create_ts = int(time.time())
|
||||
|
||||
with open(HOST_ID_FILE, 'w') as f:
|
||||
json.dump({
|
||||
'host_id': new_host_id,
|
||||
'host_create_ts': new_host_create_ts
|
||||
}, f)
|
||||
|
||||
identifier['host_id'] = new_host_id
|
||||
identifier['host_create_ts'] = new_host_create_ts
|
||||
else:
|
||||
loaded_host_id = ''
|
||||
loaded_host_create_ts = 0
|
||||
|
||||
with open(HOST_ID_FILE, 'r') as f:
|
||||
file_content = json.load(f)
|
||||
loaded_host_id = file_content['host_id']
|
||||
loaded_host_create_ts = file_content['host_create_ts']
|
||||
|
||||
identifier['host_id'] = loaded_host_id
|
||||
identifier['host_create_ts'] = loaded_host_create_ts
|
||||
|
||||
# 检查实例 id
|
||||
if os.path.exists(INSTANCE_ID_FILE):
|
||||
instance_id = {}
|
||||
with open(INSTANCE_ID_FILE, 'r') as f:
|
||||
instance_id = json.load(f)
|
||||
|
||||
if instance_id['host_id'] != identifier['host_id']: # 如果实例 id 不是当前主机的,删除
|
||||
os.remove(INSTANCE_ID_FILE)
|
||||
|
||||
if not os.path.exists(INSTANCE_ID_FILE):
|
||||
new_instance_id = 'instance_'+str(uuid.uuid4())
|
||||
new_instance_create_ts = int(time.time())
|
||||
|
||||
with open(INSTANCE_ID_FILE, 'w') as f:
|
||||
json.dump({
|
||||
'host_id': identifier['host_id'],
|
||||
'instance_id': new_instance_id,
|
||||
'instance_create_ts': new_instance_create_ts
|
||||
}, f)
|
||||
|
||||
identifier['instance_id'] = new_instance_id
|
||||
identifier['instance_create_ts'] = new_instance_create_ts
|
||||
else:
|
||||
loaded_instance_id = ''
|
||||
loaded_instance_create_ts = 0
|
||||
|
||||
with open(INSTANCE_ID_FILE, 'r') as f:
|
||||
file_content = json.load(f)
|
||||
loaded_instance_id = file_content['instance_id']
|
||||
loaded_instance_create_ts = file_content['instance_create_ts']
|
||||
|
||||
identifier['instance_id'] = loaded_instance_id
|
||||
identifier['instance_create_ts'] = loaded_instance_create_ts
|
||||
|
||||
def print_out():
|
||||
global identifier
|
||||
print(identifier)
|
||||
@@ -1,23 +0,0 @@
|
||||
from . import model as file_model
|
||||
from ..utils import context
|
||||
|
||||
|
||||
class ConfigManager:
|
||||
"""配置文件管理器"""
|
||||
|
||||
file: file_model.ConfigFile = None
|
||||
"""配置文件实例"""
|
||||
|
||||
data: dict = None
|
||||
"""配置数据"""
|
||||
|
||||
def __init__(self, cfg_file: file_model.ConfigFile) -> None:
|
||||
self.file = cfg_file
|
||||
self.data = {}
|
||||
context.set_config_manager(self)
|
||||
|
||||
async def load_config(self):
|
||||
self.data = await self.file.load()
|
||||
|
||||
async def dump_config(self):
|
||||
await self.file.save(self.data)
|
||||
@@ -1,3 +0,0 @@
|
||||
"""
|
||||
数据库操作封装
|
||||
"""
|
||||
@@ -1,365 +0,0 @@
|
||||
"""
|
||||
数据库管理模块
|
||||
"""
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
|
||||
import sqlite3
|
||||
|
||||
from ..utils import context
|
||||
|
||||
|
||||
class DatabaseManager:
|
||||
"""封装数据库底层操作,并提供方法给上层使用"""
|
||||
|
||||
conn = None
|
||||
cursor = None
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.reconnect()
|
||||
|
||||
context.set_database_manager(self)
|
||||
|
||||
# 连接到数据库文件
|
||||
def reconnect(self):
|
||||
"""连接到数据库"""
|
||||
self.conn = sqlite3.connect('database.db', check_same_thread=False)
|
||||
self.cursor = self.conn.cursor()
|
||||
|
||||
def close(self):
|
||||
self.conn.close()
|
||||
|
||||
def __execute__(self, *args, **kwargs) -> sqlite3.Cursor:
|
||||
# logging.debug('SQL: {}'.format(sql))
|
||||
logging.debug('SQL: {}'.format(args))
|
||||
c = self.cursor.execute(*args, **kwargs)
|
||||
self.conn.commit()
|
||||
return c
|
||||
|
||||
# 初始化数据库的函数
|
||||
def initialize_database(self):
|
||||
"""创建数据表"""
|
||||
|
||||
self.__execute__("""
|
||||
create table if not exists `sessions` (
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`name` varchar(255) not null,
|
||||
`type` varchar(255) not null,
|
||||
`number` bigint not null,
|
||||
`create_timestamp` bigint not null,
|
||||
`last_interact_timestamp` bigint not null,
|
||||
`status` varchar(255) not null default 'on_going',
|
||||
`default_prompt` text not null default '',
|
||||
`prompt` text not null,
|
||||
`token_counts` text not null default '[]'
|
||||
)
|
||||
""")
|
||||
|
||||
# 检查sessions表是否存在`default_prompt`字段, 检查是否存在`token_counts`字段
|
||||
self.__execute__("PRAGMA table_info('sessions')")
|
||||
columns = self.cursor.fetchall()
|
||||
has_default_prompt = False
|
||||
has_token_counts = False
|
||||
for field in columns:
|
||||
if field[1] == 'default_prompt':
|
||||
has_default_prompt = True
|
||||
if field[1] == 'token_counts':
|
||||
has_token_counts = True
|
||||
if has_default_prompt and has_token_counts:
|
||||
break
|
||||
if not has_default_prompt:
|
||||
self.__execute__("alter table `sessions` add column `default_prompt` text not null default ''")
|
||||
if not has_token_counts:
|
||||
self.__execute__("alter table `sessions` add column `token_counts` text not null default '[]'")
|
||||
|
||||
|
||||
self.__execute__("""
|
||||
create table if not exists `account_fee`(
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`key_md5` varchar(255) not null,
|
||||
`timestamp` bigint not null,
|
||||
`fee` DECIMAL(12,6) not null
|
||||
)
|
||||
""")
|
||||
|
||||
self.__execute__("""
|
||||
create table if not exists `account_usage`(
|
||||
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
`json` text not null
|
||||
)
|
||||
""")
|
||||
# print('Database initialized.')
|
||||
|
||||
# session持久化
|
||||
def persistence_session(self, subject_type: str, subject_number: int, create_timestamp: int,
|
||||
last_interact_timestamp: int, prompt: str, default_prompt: str = '', token_counts: str = ''):
|
||||
"""持久化指定session"""
|
||||
|
||||
# 检查是否已经有了此name和create_timestamp的session
|
||||
# 如果有,就更新prompt和last_interact_timestamp
|
||||
# 如果没有,就插入一条新的记录
|
||||
self.__execute__("""
|
||||
select count(*) from `sessions` where `type` = '{}' and `number` = {} and `create_timestamp` = {}
|
||||
""".format(subject_type, subject_number, create_timestamp))
|
||||
count = self.cursor.fetchone()[0]
|
||||
if count == 0:
|
||||
|
||||
sql = """
|
||||
insert into `sessions` (`name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `default_prompt`, `token_counts`)
|
||||
values (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
"""
|
||||
|
||||
self.__execute__(sql,
|
||||
("{}_{}".format(subject_type, subject_number), subject_type, subject_number, create_timestamp,
|
||||
last_interact_timestamp, prompt, default_prompt, token_counts))
|
||||
else:
|
||||
sql = """
|
||||
update `sessions` set `last_interact_timestamp` = ?, `prompt` = ?, `token_counts` = ?
|
||||
where `type` = ? and `number` = ? and `create_timestamp` = ?
|
||||
"""
|
||||
|
||||
self.__execute__(sql, (last_interact_timestamp, prompt, token_counts, subject_type,
|
||||
subject_number, create_timestamp))
|
||||
|
||||
# 显式关闭一个session
|
||||
def explicit_close_session(self, session_name: str, create_timestamp: int):
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'explicitly_closed' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
def set_session_ongoing(self, session_name: str, create_timestamp: int):
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'on_going' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
# 设置session为过期
|
||||
def set_session_expired(self, session_name: str, create_timestamp: int):
|
||||
self.__execute__("""
|
||||
update `sessions` set `status` = 'expired' where `name` = '{}' and `create_timestamp` = {}
|
||||
""".format(session_name, create_timestamp))
|
||||
|
||||
# 从数据库加载还没过期的session数据
|
||||
def load_valid_sessions(self) -> dict:
|
||||
# 从数据库中加载所有还没过期的session
|
||||
config = context.get_config_manager().data
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `last_interact_timestamp` > {}
|
||||
""".format(int(time.time()) - config['session_expire_time']))
|
||||
results = self.cursor.fetchall()
|
||||
sessions = {}
|
||||
for result in results:
|
||||
session_name = result[0]
|
||||
subject_type = result[1]
|
||||
subject_number = result[2]
|
||||
create_timestamp = result[3]
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
# 当且仅当最后一个该对象的会话是on_going状态时,才会被加载
|
||||
if status == 'on_going':
|
||||
sessions[session_name] = {
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
}
|
||||
else:
|
||||
if session_name in sessions:
|
||||
del sessions[session_name]
|
||||
|
||||
return sessions
|
||||
|
||||
# 获取此session_name前一个session的数据
|
||||
def last_session(self, session_name: str, cursor_timestamp: int):
|
||||
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `name` = '{}' and `last_interact_timestamp` < {} order by `last_interact_timestamp` desc
|
||||
limit 1
|
||||
""".format(session_name, cursor_timestamp))
|
||||
results = self.cursor.fetchall()
|
||||
if len(results) == 0:
|
||||
return None
|
||||
result = results[0]
|
||||
|
||||
session_name = result[0]
|
||||
subject_type = result[1]
|
||||
subject_number = result[2]
|
||||
create_timestamp = result[3]
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
return {
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
}
|
||||
|
||||
# 获取此session_name后一个session的数据
|
||||
def next_session(self, session_name: str, cursor_timestamp: int):
|
||||
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `name` = '{}' and `last_interact_timestamp` > {} order by `last_interact_timestamp` asc
|
||||
limit 1
|
||||
""".format(session_name, cursor_timestamp))
|
||||
results = self.cursor.fetchall()
|
||||
if len(results) == 0:
|
||||
return None
|
||||
result = results[0]
|
||||
|
||||
session_name = result[0]
|
||||
subject_type = result[1]
|
||||
subject_number = result[2]
|
||||
create_timestamp = result[3]
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
return {
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
}
|
||||
|
||||
# 列出与某个对象的所有对话session
|
||||
def list_history(self, session_name: str, capacity: int, page: int):
|
||||
self.__execute__("""
|
||||
select `name`, `type`, `number`, `create_timestamp`, `last_interact_timestamp`, `prompt`, `status`, `default_prompt`, `token_counts`
|
||||
from `sessions` where `name` = '{}' order by `last_interact_timestamp` desc limit {} offset {}
|
||||
""".format(session_name, capacity, capacity * page))
|
||||
results = self.cursor.fetchall()
|
||||
sessions = []
|
||||
for result in results:
|
||||
session_name = result[0]
|
||||
subject_type = result[1]
|
||||
subject_number = result[2]
|
||||
create_timestamp = result[3]
|
||||
last_interact_timestamp = result[4]
|
||||
prompt = result[5]
|
||||
status = result[6]
|
||||
default_prompt = result[7]
|
||||
token_counts = result[8]
|
||||
|
||||
sessions.append({
|
||||
'subject_type': subject_type,
|
||||
'subject_number': subject_number,
|
||||
'create_timestamp': create_timestamp,
|
||||
'last_interact_timestamp': last_interact_timestamp,
|
||||
'prompt': prompt,
|
||||
'default_prompt': default_prompt,
|
||||
'token_counts': token_counts
|
||||
})
|
||||
|
||||
return sessions
|
||||
|
||||
def delete_history(self, session_name: str, index: int) -> bool:
|
||||
# 删除倒序第index个session
|
||||
# 查找其id再删除
|
||||
self.__execute__("""
|
||||
delete from `sessions` where `id` in (select `id` from `sessions` where `name` = '{}' order by `last_interact_timestamp` desc limit 1 offset {})
|
||||
""".format(session_name, index))
|
||||
|
||||
return self.cursor.rowcount == 1
|
||||
|
||||
def delete_all_history(self, session_name: str) -> bool:
|
||||
self.__execute__("""
|
||||
delete from `sessions` where `name` = '{}'
|
||||
""".format(session_name))
|
||||
return self.cursor.rowcount > 0
|
||||
|
||||
def delete_all_session_history(self) -> bool:
|
||||
self.__execute__("""
|
||||
delete from `sessions`
|
||||
""")
|
||||
return self.cursor.rowcount > 0
|
||||
|
||||
# 将apikey的使用量存进数据库
|
||||
def dump_api_key_usage(self, api_keys: dict, usage: dict):
|
||||
logging.debug('dumping api key usage...')
|
||||
logging.debug(api_keys)
|
||||
logging.debug(usage)
|
||||
for api_key in api_keys:
|
||||
# 计算key的md5值
|
||||
key_md5 = hashlib.md5(api_keys[api_key].encode('utf-8')).hexdigest()
|
||||
# 获取使用量
|
||||
usage_count = 0
|
||||
if key_md5 in usage:
|
||||
usage_count = usage[key_md5]
|
||||
# 将使用量存进数据库
|
||||
# 先检查是否已存在
|
||||
self.__execute__("""
|
||||
select count(*) from `api_key_usage` where `key_md5` = '{}'""".format(key_md5))
|
||||
result = self.cursor.fetchone()
|
||||
if result[0] == 0:
|
||||
# 不存在则插入
|
||||
self.__execute__("""
|
||||
insert into `api_key_usage` (`key_md5`, `usage`,`timestamp`) values ('{}', {}, {})
|
||||
""".format(key_md5, usage_count, int(time.time())))
|
||||
else:
|
||||
# 存在则更新,timestamp设置为当前
|
||||
self.__execute__("""
|
||||
update `api_key_usage` set `usage` = {}, `timestamp` = {} where `key_md5` = '{}'
|
||||
""".format(usage_count, int(time.time()), key_md5))
|
||||
|
||||
def load_api_key_usage(self):
|
||||
self.__execute__("""
|
||||
select `key_md5`, `usage` from `api_key_usage`
|
||||
""")
|
||||
results = self.cursor.fetchall()
|
||||
usage = {}
|
||||
for result in results:
|
||||
key_md5 = result[0]
|
||||
usage_count = result[1]
|
||||
usage[key_md5] = usage_count
|
||||
return usage
|
||||
|
||||
def dump_usage_json(self, usage: dict):
|
||||
|
||||
json_str = json.dumps(usage)
|
||||
self.__execute__("""
|
||||
select count(*) from `account_usage`""")
|
||||
result = self.cursor.fetchone()
|
||||
if result[0] == 0:
|
||||
# 不存在则插入
|
||||
self.__execute__("""
|
||||
insert into `account_usage` (`json`) values ('{}')
|
||||
""".format(json_str))
|
||||
else:
|
||||
# 存在则更新
|
||||
self.__execute__("""
|
||||
update `account_usage` set `json` = '{}' where `id` = 1
|
||||
""".format(json_str))
|
||||
|
||||
def load_usage_json(self):
|
||||
self.__execute__("""
|
||||
select `json` from `account_usage` order by id desc limit 1
|
||||
""")
|
||||
result = self.cursor.fetchone()
|
||||
if result is None:
|
||||
return None
|
||||
else:
|
||||
return result[0]
|
||||
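# --- Illustration (not part of the original file) ---
# Most statements above interpolate values with str.format(); sqlite3 also
# accepts bound parameters (persistence_session already uses them), which
# avoids quoting problems when a session name contains quotes. A minimal,
# self-contained sketch of the same UPDATE as explicit_close_session, written
# with `?` placeholders against an in-memory database so it can run on its own:
import sqlite3
import time

conn = sqlite3.connect(':memory:')
conn.execute("create table sessions (name varchar(255), create_timestamp bigint, status varchar(255) default 'on_going')")

ts = int(time.time())
conn.execute("insert into sessions (name, create_timestamp) values (?, ?)", ('person_123', ts))

# parameterized form of explicit_close_session's UPDATE
conn.execute(
    "update sessions set status = 'explicitly_closed' where name = ? and create_timestamp = ?",
    ('person_123', ts),
)
conn.commit()
print(conn.execute("select status from sessions").fetchone())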
@@ -1,232 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
import openai
|
||||
from openai.types.chat import chat_completion_message
|
||||
|
||||
from .model import RequestBase
|
||||
from .. import funcmgr
|
||||
from ...plugin import host
|
||||
from ...utils import context
|
||||
|
||||
|
||||
class ChatCompletionRequest(RequestBase):
|
||||
"""调用ChatCompletion接口的请求类。
|
||||
|
||||
此类保证每一次返回的角色为assistant的信息的finish_reason一定为stop。
|
||||
若有函数调用响应,本类的返回瀑布是:函数调用请求->函数调用结果->...->assistant的信息->stop。
|
||||
"""
|
||||
|
||||
model: str
|
||||
messages: list[dict[str, str]]
|
||||
kwargs: dict
|
||||
|
||||
stopped: bool = False
|
||||
|
||||
pending_func_call: chat_completion_message.FunctionCall = None
|
||||
|
||||
pending_msg: str
|
||||
|
||||
def flush_pending_msg(self):
|
||||
self.append_message(
|
||||
role="assistant",
|
||||
content=self.pending_msg
|
||||
)
|
||||
self.pending_msg = ""
|
||||
|
||||
def append_message(self, role: str, content: str, name: str=None, function_call: dict=None):
|
||||
msg = {
|
||||
"role": role,
|
||||
"content": content
|
||||
}
|
||||
|
||||
if name is not None:
|
||||
msg['name'] = name
|
||||
|
||||
if function_call is not None:
|
||||
msg['function_call'] = function_call
|
||||
|
||||
self.messages.append(msg)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
client: openai.Client,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
):
|
||||
self.client = client
|
||||
self.model = model
|
||||
self.messages = messages.copy()
|
||||
|
||||
self.kwargs = kwargs
|
||||
|
||||
self.req_func = self.client.chat.completions.create
|
||||
|
||||
self.pending_func_call = None
|
||||
|
||||
self.stopped = False
|
||||
|
||||
self.pending_msg = ""
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self) -> dict:
|
||||
if self.stopped:
|
||||
raise StopIteration()
|
||||
|
||||
if self.pending_func_call is None: # 没有待处理的函数调用请求
|
||||
|
||||
args = {
|
||||
"model": self.model,
|
||||
"messages": self.messages,
|
||||
}
|
||||
|
||||
funcs = funcmgr.get_func_schema_list()
|
||||
|
||||
if len(funcs) > 0:
|
||||
args['functions'] = funcs
|
||||
|
||||
# 拼接kwargs
|
||||
args = {**args, **self.kwargs}
|
||||
|
||||
from openai.types.chat import chat_completion
|
||||
|
||||
resp: chat_completion.ChatCompletion = self._req(**args)
|
||||
|
||||
choice0 = resp.choices[0]
|
||||
|
||||
# 如果不是函数调用,且finish_reason为stop,则停止迭代
|
||||
if choice0.finish_reason == 'stop': # and choice0["finish_reason"] == "stop"
|
||||
self.stopped = True
|
||||
|
||||
if hasattr(choice0.message, 'function_call') and choice0.message.function_call is not None:
|
||||
self.pending_func_call = choice0.message.function_call
|
||||
|
||||
self.append_message(
|
||||
role="assistant",
|
||||
content=choice0.message.content,
|
||||
function_call=choice0.message.function_call
|
||||
)
|
||||
|
||||
return {
|
||||
"id": resp.id,
|
||||
"choices": [
|
||||
{
|
||||
"index": choice0.index,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"type": "function_call",
|
||||
"content": choice0.message.content,
|
||||
"function_call": {
|
||||
"name": choice0.message.function_call.name,
|
||||
"arguments": choice0.message.function_call.arguments
|
||||
}
|
||||
},
|
||||
"finish_reason": "function_call"
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": resp.usage.prompt_tokens,
|
||||
"completion_tokens": resp.usage.completion_tokens,
|
||||
"total_tokens": resp.usage.total_tokens
|
||||
}
|
||||
}
|
||||
else:
|
||||
|
||||
# self.pending_msg += choice0['message']['content']
|
||||
# 普通回复一定处于最后方,故不用再追加进内部messages
|
||||
|
||||
return {
|
||||
"id": resp.id,
|
||||
"choices": [
|
||||
{
|
||||
"index": choice0.index,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"type": "text",
|
||||
"content": choice0.message.content
|
||||
},
|
||||
"finish_reason": choice0.finish_reason
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": resp.usage.prompt_tokens,
|
||||
"completion_tokens": resp.usage.completion_tokens,
|
||||
"total_tokens": resp.usage.total_tokens
|
||||
}
|
||||
}
|
||||
else: # 处理函数调用请求
|
||||
|
||||
cp_pending_func_call = self.pending_func_call.copy()
|
||||
|
||||
self.pending_func_call = None
|
||||
|
||||
func_name = cp_pending_func_call.name
|
||||
arguments = {}
|
||||
|
||||
try:
|
||||
|
||||
try:
|
||||
arguments = json.loads(cp_pending_func_call.arguments)
|
||||
# 若不是json格式的异常处理
|
||||
except json.decoder.JSONDecodeError:
|
||||
# 获取函数的参数列表
|
||||
func_schema = funcmgr.get_func_schema(func_name)
|
||||
|
||||
arguments = {
|
||||
func_schema['parameters']['required'][0]: cp_pending_func_call.arguments
|
||||
}
|
||||
|
||||
logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))
|
||||
|
||||
# 执行函数调用
|
||||
ret = ""
|
||||
try:
|
||||
ret = funcmgr.execute_function(func_name, arguments)
|
||||
|
||||
logging.info("函数执行完成。")
|
||||
except Exception as e:
|
||||
ret = "error: execute function failed: {}".format(str(e))
|
||||
logging.error("函数执行失败: {}".format(str(e)))
|
||||
|
||||
# 上报数据
|
||||
plugin_info = host.get_plugin_info_for_audit(func_name.split('-')[0])
|
||||
audit_func_name = func_name.split('-')[1]
|
||||
audit_func_desc = funcmgr.get_func_schema(func_name)['description']
|
||||
context.get_center_v2_api().usage.post_function_record(
|
||||
plugin=plugin_info,
|
||||
function_name=audit_func_name,
|
||||
function_description=audit_func_desc,
|
||||
)
|
||||
|
||||
self.append_message(
|
||||
role="function",
|
||||
content=json.dumps(ret, ensure_ascii=False),
|
||||
name=func_name
|
||||
)
|
||||
|
||||
return {
|
||||
"id": -1,
|
||||
"choices": [
|
||||
{
|
||||
"index": -1,
|
||||
"message": {
|
||||
"role": "function",
|
||||
"type": "function_return",
|
||||
"function_name": func_name,
|
||||
"content": json.dumps(ret, ensure_ascii=False)
|
||||
},
|
||||
"finish_reason": "function_return"
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": 0,
|
||||
"completion_tokens": 0,
|
||||
"total_tokens": 0
|
||||
}
|
||||
}
|
||||
|
||||
except funcmgr.ContentFunctionNotFoundError:
|
||||
raise Exception("没有找到函数: {}".format(func_name))
|
||||
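# --- Illustration (not part of the original file) ---
# Sketch of how a caller drains the ChatCompletionRequest iterator above: it
# yields function_call / function_return rounds until a plain assistant reply
# with finish_reason == "stop" ends the loop. The client, model name and
# message content below are placeholders for illustration.
import openai

client = openai.Client(api_key="sk-example")
req = ChatCompletionRequest(client, "gpt-3.5-turbo", [
    {"role": "user", "content": "你好"},
])

for resp in req:
    msg = resp['choices'][0]['message']
    if msg['type'] == 'function_call':
        print("model requested function:", msg['function_call']['name'])
    elif msg['type'] == 'function_return':
        print("function returned:", msg['content'])
    else:  # plain text reply
        print("assistant:", msg['content'])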
@@ -1,100 +0,0 @@
|
||||
import openai
|
||||
from openai.types import completion, completion_choice
|
||||
|
||||
from . import model
|
||||
|
||||
|
||||
class CompletionRequest(model.RequestBase):
|
||||
"""调用Completion接口的请求类。
|
||||
|
||||
调用方可以一直next completion直到finish_reason为stop。
|
||||
"""
|
||||
|
||||
model: str
|
||||
prompt: str
|
||||
kwargs: dict
|
||||
|
||||
stopped: bool = False
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
client: openai.Client,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
):
|
||||
self.client = client
|
||||
self.model = model
|
||||
self.prompt = ""
|
||||
|
||||
for message in messages:
|
||||
self.prompt += message["role"] + ": " + message["content"] + "\n"
|
||||
|
||||
self.prompt += "assistant: "
|
||||
|
||||
self.kwargs = kwargs
|
||||
|
||||
self.req_func = self.client.completions.create
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self) -> dict:
|
||||
"""调用Completion接口,返回生成的文本
|
||||
|
||||
{
|
||||
"id": "id",
|
||||
"choices": [
|
||||
{
|
||||
"index": 0,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"type": "text",
|
||||
"content": "message"
|
||||
},
|
||||
"finish_reason": "reason"
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": 10,
|
||||
"completion_tokens": 20,
|
||||
"total_tokens": 30
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
if self.stopped:
|
||||
raise StopIteration()
|
||||
|
||||
resp: completion.Completion = self._req(
|
||||
model=self.model,
|
||||
prompt=self.prompt,
|
||||
**self.kwargs
|
||||
)
|
||||
|
||||
if resp.choices[0].finish_reason == "stop":
|
||||
self.stopped = True
|
||||
|
||||
choice0: completion_choice.CompletionChoice = resp.choices[0]
|
||||
|
||||
self.prompt += choice0.text
|
||||
|
||||
return {
|
||||
"id": resp.id,
|
||||
"choices": [
|
||||
{
|
||||
"index": choice0.index,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"type": "text",
|
||||
"content": choice0.text
|
||||
},
|
||||
"finish_reason": choice0.finish_reason
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": resp.usage.prompt_tokens,
|
||||
"completion_tokens": resp.usage.completion_tokens,
|
||||
"total_tokens": resp.usage.total_tokens
|
||||
}
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
# 定义不同接口请求的模型
|
||||
import logging
|
||||
|
||||
import openai
|
||||
|
||||
from ...utils import context
|
||||
|
||||
|
||||
class RequestBase:
|
||||
|
||||
client: openai.Client
|
||||
|
||||
req_func: callable
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
raise NotImplementedError
|
||||
|
||||
def _next_key(self):
|
||||
switched, name = context.get_openai_manager().key_mgr.auto_switch()
|
||||
logging.debug("切换api-key: switched={}, name={}".format(switched, name))
|
||||
self.client.api_key = context.get_openai_manager().key_mgr.get_using_key()
|
||||
|
||||
def _req(self, **kwargs):
|
||||
"""处理代理问题"""
|
||||
logging.debug("请求接口参数: %s", str(kwargs))
|
||||
config = context.get_config_manager().data
|
||||
|
||||
ret = self.req_func(**kwargs)
|
||||
logging.debug("接口请求返回:%s", str(ret))
|
||||
|
||||
if config['switch_strategy'] == 'active':
|
||||
self._next_key()
|
||||
|
||||
return ret
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
raise NotImplementedError
|
||||
@@ -1,134 +0,0 @@
|
||||
# 多情景预设值管理
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
from ..utils import context
|
||||
|
||||
# __current__ = "default"
|
||||
# """当前默认使用的情景预设的名称
|
||||
|
||||
# 由管理员使用`!default <名称>`命令切换
|
||||
# """
|
||||
|
||||
# __prompts_from_files__ = {}
|
||||
# """从文件中读取的情景预设值"""
|
||||
|
||||
# __scenario_from_files__ = {}
|
||||
|
||||
|
||||
class ScenarioMode:
|
||||
"""情景预设模式抽象类"""
|
||||
|
||||
using_prompt_name = "default"
|
||||
"""新session创建时使用的prompt名称"""
|
||||
|
||||
prompts: dict[str, list] = {}
|
||||
|
||||
def __init__(self):
|
||||
logging.debug("prompts: {}".format(self.prompts))
|
||||
|
||||
def list(self) -> dict[str, list]:
|
||||
"""获取所有情景预设的名称及内容"""
|
||||
return self.prompts
|
||||
|
||||
def get_prompt(self, name: str) -> tuple[list, str]:
|
||||
"""获取指定情景预设的名称及内容"""
|
||||
for key in self.prompts:
|
||||
if key.startswith(name):
|
||||
return self.prompts[key], key
|
||||
raise Exception("没有找到情景预设: {}".format(name))
|
||||
|
||||
def set_using_name(self, name: str) -> str:
|
||||
"""设置默认情景预设"""
|
||||
for key in self.prompts:
|
||||
if key.startswith(name):
|
||||
self.using_prompt_name = key
|
||||
return key
|
||||
raise Exception("没有找到情景预设: {}".format(name))
|
||||
|
||||
def get_full_name(self, name: str) -> str:
|
||||
"""获取完整的情景预设名称"""
|
||||
for key in self.prompts:
|
||||
if key.startswith(name):
|
||||
return key
|
||||
raise Exception("没有找到情景预设: {}".format(name))
|
||||
|
||||
def get_using_name(self) -> str:
|
||||
"""获取默认情景预设"""
|
||||
return self.using_prompt_name
|
||||
|
||||
|
||||
class NormalScenarioMode(ScenarioMode):
|
||||
"""普通情景预设模式"""
|
||||
|
||||
def __init__(self):
|
||||
config = context.get_config_manager().data
|
||||
|
||||
# 加载config中的default_prompt值
|
||||
if type(config['default_prompt']) == str:
|
||||
self.using_prompt_name = "default"
|
||||
self.prompts = {"default": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": config['default_prompt']
|
||||
}
|
||||
]}
|
||||
|
||||
elif type(config['default_prompt']) == dict:
|
||||
for key in config['default_prompt']:
|
||||
self.prompts[key] = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": config['default_prompt'][key]
|
||||
}
|
||||
]
|
||||
|
||||
# 从prompts/目录下的文件中载入
|
||||
# 遍历文件
|
||||
for file in os.listdir("prompts"):
|
||||
with open(os.path.join("prompts", file), encoding="utf-8") as f:
|
||||
self.prompts[file] = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": f.read()
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
class FullScenarioMode(ScenarioMode):
|
||||
"""完整情景预设模式"""
|
||||
|
||||
def __init__(self):
|
||||
"""从json读取所有"""
|
||||
# 遍历scenario/目录下的所有文件,以文件名为键,文件内容中的prompt为值
|
||||
for file in os.listdir("scenario"):
|
||||
if file == "default-template.json":
|
||||
continue
|
||||
with open(os.path.join("scenario", file), encoding="utf-8") as f:
|
||||
self.prompts[file] = json.load(f)["prompt"]
|
||||
|
||||
super().__init__()
|
||||
|
||||
|
||||
scenario_mode_mapping = {}
|
||||
"""情景预设模式名称与对象的映射"""
|
||||
|
||||
|
||||
def register_all():
|
||||
"""注册所有情景预设模式,不使用装饰器,因为装饰器的方式不支持热重载"""
|
||||
global scenario_mode_mapping
|
||||
scenario_mode_mapping = {
|
||||
"normal": NormalScenarioMode(),
|
||||
"full_scenario": FullScenarioMode()
|
||||
}
|
||||
|
||||
|
||||
def mode_inst() -> ScenarioMode:
|
||||
"""获取指定名称的情景预设模式对象"""
|
||||
config = context.get_config_manager().data
|
||||
|
||||
if config['preset_mode'] == "default":
|
||||
config['preset_mode'] = "normal"
|
||||
|
||||
return scenario_mode_mapping[config['preset_mode']]
|
||||
@@ -1,46 +0,0 @@
|
||||
# 封装了function calling的一些支持函数
|
||||
import logging
|
||||
|
||||
from ..plugin import host
|
||||
|
||||
|
||||
class ContentFunctionNotFoundError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def get_func_schema_list() -> list:
|
||||
"""从plugin包中的函数结构中获取并处理成受GPT支持的格式"""
|
||||
if not host.__enable_content_functions__:
|
||||
return []
|
||||
|
||||
schemas = []
|
||||
|
||||
for func in host.__callable_functions__:
|
||||
if func['enabled']:
|
||||
fun_cp = func.copy()
|
||||
|
||||
del fun_cp['enabled']
|
||||
|
||||
schemas.append(fun_cp)
|
||||
|
||||
return schemas
|
||||
|
||||
def get_func(name: str) -> callable:
|
||||
if name not in host.__function_inst_map__:
|
||||
raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))
|
||||
|
||||
return host.__function_inst_map__[name]
|
||||
|
||||
def get_func_schema(name: str) -> dict:
|
||||
for func in host.__callable_functions__:
|
||||
if func['name'] == name:
|
||||
return func
|
||||
raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name))
|
||||
|
||||
def execute_function(name: str, kwargs: dict) -> any:
|
||||
"""执行函数调用"""
|
||||
|
||||
logging.debug("executing function: name='{}', kwargs={}".format(name, kwargs))
|
||||
|
||||
func = get_func(name)
|
||||
return func(**kwargs)
|
||||
@@ -1,103 +0,0 @@
|
||||
# 此模块提供了维护api-key的各种功能
|
||||
import hashlib
|
||||
import logging
|
||||
|
||||
from ..plugin import host as plugin_host
|
||||
from ..plugin import models as plugin_models
|
||||
|
||||
|
||||
class KeysManager:
|
||||
api_key = {}
|
||||
"""所有api-key"""
|
||||
|
||||
using_key = ""
|
||||
"""当前使用的api-key"""
|
||||
|
||||
alerted = []
|
||||
"""已提示过超额的key
|
||||
|
||||
记录在此以避免重复提示
|
||||
"""
|
||||
|
||||
exceeded = []
|
||||
"""已超额的key
|
||||
|
||||
供自动切换功能识别
|
||||
"""
|
||||
|
||||
def get_using_key(self):
|
||||
return self.using_key
|
||||
|
||||
def get_using_key_md5(self):
|
||||
return hashlib.md5(self.using_key.encode('utf-8')).hexdigest()
|
||||
|
||||
def __init__(self, api_key):
|
||||
|
||||
assert type(api_key) == dict
|
||||
self.api_key = api_key
|
||||
# 从usage中删除未加载的api-key的记录
|
||||
# 不删了,也许会运行时添加曾经有记录的api-key
|
||||
|
||||
self.auto_switch()
|
||||
|
||||
def auto_switch(self) -> tuple[bool, str]:
|
||||
"""尝试切换api-key
|
||||
|
||||
Returns:
|
||||
是否切换成功, 切换后的api-key的别名
|
||||
"""
|
||||
|
||||
index = 0
|
||||
|
||||
for key_name in self.api_key:
|
||||
if self.api_key[key_name] == self.using_key:
|
||||
break
|
||||
|
||||
index += 1
|
||||
|
||||
# 从当前key开始向后轮询
|
||||
start_index = index
|
||||
index += 1
|
||||
if index >= len(self.api_key):
|
||||
index = 0
|
||||
|
||||
while index != start_index:
|
||||
|
||||
key_name = list(self.api_key.keys())[index]
|
||||
|
||||
if self.api_key[key_name] not in self.exceeded:
|
||||
self.using_key = self.api_key[key_name]
|
||||
|
||||
logging.debug("使用api-key:" + key_name)
|
||||
|
||||
# 触发插件事件
|
||||
args = {
|
||||
"key_name": key_name,
|
||||
"key_list": self.api_key.keys()
|
||||
}
|
||||
_ = plugin_host.emit(plugin_models.KeySwitched, **args)
|
||||
|
||||
return True, key_name
|
||||
|
||||
index += 1
|
||||
if index >= len(self.api_key):
|
||||
index = 0
|
||||
|
||||
self.using_key = list(self.api_key.values())[start_index]
|
||||
logging.debug("使用api-key:" + list(self.api_key.keys())[start_index])
|
||||
|
||||
return False, list(self.api_key.keys())[start_index]
|
||||
|
||||
def add(self, key_name, key):
|
||||
self.api_key[key_name] = key
|
||||
|
||||
def set_current_exceeded(self):
|
||||
"""设置当前使用的api-key使用量超限"""
|
||||
self.exceeded.append(self.using_key)
|
||||
|
||||
def get_key_name(self, api_key):
|
||||
"""根据api-key获取其别名"""
|
||||
for key_name in self.api_key:
|
||||
if self.api_key[key_name] == api_key:
|
||||
return key_name
|
||||
return ""
|
||||
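# --- Illustration (not part of the original file) ---
# Sketch of the round-robin behaviour of auto_switch() described above: it
# scans from the key after the current one, wraps around, and skips keys
# recorded in `exceeded`. Key names and values are placeholders, and the
# sketch assumes the plugin host is initialized (auto_switch fires a
# KeySwitched event when it rotates).
mgr = KeysManager({"main": "sk-aaa", "backup": "sk-bbb"})  # __init__ already selects a key

mgr.set_current_exceeded()            # mark the active key as over quota
switched, name = mgr.auto_switch()    # rotates to the next non-exceeded key
print(switched, name)                 # e.g. True backup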
@@ -1,90 +0,0 @@
|
||||
import logging
|
||||
|
||||
import openai
|
||||
from openai.types import images_response
|
||||
|
||||
from ..openai import keymgr
|
||||
from ..utils import context
|
||||
from ..audit import gatherer
|
||||
from ..openai import modelmgr
|
||||
from ..openai.api import model as api_model
|
||||
|
||||
|
||||
class OpenAIInteract:
|
||||
"""OpenAI 接口封装
|
||||
|
||||
将文字接口和图片接口封装供调用方使用
|
||||
"""
|
||||
|
||||
key_mgr: keymgr.KeysManager = None
|
||||
|
||||
audit_mgr: gatherer.DataGatherer = None
|
||||
|
||||
default_image_api_params = {
|
||||
"size": "256x256",
|
||||
}
|
||||
|
||||
client: openai.Client = None
|
||||
|
||||
def __init__(self, api_key: str):
|
||||
|
||||
self.key_mgr = keymgr.KeysManager(api_key)
|
||||
self.audit_mgr = gatherer.DataGatherer()
|
||||
|
||||
# logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
|
||||
|
||||
self.client = openai.Client(
|
||||
api_key=self.key_mgr.get_using_key(),
|
||||
base_url=openai.base_url
|
||||
)
|
||||
|
||||
context.set_openai_manager(self)
|
||||
|
||||
def request_completion(self, messages: list):
|
||||
"""请求补全接口回复=
|
||||
"""
|
||||
# 选择接口请求类
|
||||
config = context.get_config_manager().data
|
||||
|
||||
request: api_model.RequestBase
|
||||
|
||||
model: str = config['completion_api_params']['model']
|
||||
|
||||
cp_params = config['completion_api_params'].copy()
del cp_params['model']

request = modelmgr.select_request_cls(self.client, model, messages, cp_params)
|
||||
|
||||
# 请求接口
|
||||
for resp in request:
|
||||
|
||||
if resp['usage']['total_tokens'] > 0:
|
||||
self.audit_mgr.report_text_model_usage(
|
||||
model,
|
||||
resp['usage']['total_tokens']
|
||||
)
|
||||
|
||||
yield resp
|
||||
|
||||
def request_image(self, prompt) -> images_response.ImagesResponse:
|
||||
"""请求图片接口回复
|
||||
|
||||
Parameters:
|
||||
prompt (str): 提示语
|
||||
|
||||
Returns:
|
||||
dict: 响应
|
||||
"""
|
||||
config = context.get_config_manager().data
|
||||
params = config['image_api_params']
|
||||
|
||||
response = self.client.images.generate(
|
||||
prompt=prompt,
|
||||
n=1,
|
||||
**params
|
||||
)
|
||||
|
||||
self.audit_mgr.report_image_model_usage(params['size'])
|
||||
|
||||
return response
|
||||
|
||||
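# --- Illustration (not part of the original file) ---
# Sketch of the intended call pattern for OpenAIInteract, assuming the global
# config manager has already been initialized (request_completion and
# request_image read completion_api_params / image_api_params from it). The
# key dict and prompts are placeholders for illustration.
mgr = OpenAIInteract(api_key={"default": "sk-example"})

for resp in mgr.request_completion([{"role": "user", "content": "你好"}]):
    print(resp['choices'][0]['message'])

images = mgr.request_image("a watercolor cat")  # openai ImagesResponse
print(images.data[0].url)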
@@ -1,139 +0,0 @@
|
||||
"""OpenAI 接口底层封装
|
||||
|
||||
目前使用的对话接口有:
|
||||
ChatCompletion - gpt-3.5-turbo 等模型
|
||||
Completion - text-davinci-003 等模型
|
||||
此模块封装此两个接口的请求实现,为上层提供统一的调用方式
|
||||
"""
|
||||
import tiktoken
|
||||
import openai
|
||||
|
||||
from ..openai.api import model as api_model
|
||||
from ..openai.api import completion as api_completion
|
||||
from ..openai.api import chat_completion as api_chat_completion
|
||||
|
||||
COMPLETION_MODELS = {
|
||||
"gpt-3.5-turbo-instruct",
|
||||
}
|
||||
|
||||
CHAT_COMPLETION_MODELS = {
|
||||
# GPT 4 系列
|
||||
"gpt-4-1106-preview",
|
||||
"gpt-4-vision-preview",
|
||||
"gpt-4",
|
||||
"gpt-4-32k",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k-0613",
|
||||
"gpt-4-0314", # legacy
|
||||
"gpt-4-32k-0314", # legacy
|
||||
# GPT 3.5 系列
|
||||
"gpt-3.5-turbo-1106",
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-3.5-turbo-0613", # legacy
|
||||
"gpt-3.5-turbo-16k-0613", # legacy
|
||||
"gpt-3.5-turbo-0301", # legacy
|
||||
# One-API 接入
|
||||
"SparkDesk",
|
||||
"chatglm_pro",
|
||||
"chatglm_std",
|
||||
"chatglm_lite",
|
||||
"qwen-v1",
|
||||
"qwen-plus-v1",
|
||||
"ERNIE-Bot",
|
||||
"ERNIE-Bot-turbo",
|
||||
"gemini-pro",
|
||||
}
|
||||
|
||||
EDIT_MODELS = {
|
||||
|
||||
}
|
||||
|
||||
IMAGE_MODELS = {
|
||||
|
||||
}
|
||||
|
||||
|
||||
def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> api_model.RequestBase:
|
||||
if model_name in CHAT_COMPLETION_MODELS:
|
||||
return api_chat_completion.ChatCompletionRequest(client, model_name, messages, **args)
|
||||
elif model_name in COMPLETION_MODELS:
|
||||
return api_completion.CompletionRequest(client, model_name, messages, **args)
|
||||
raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
|
||||
|
||||
|
||||
def count_chat_completion_tokens(messages: list, model: str) -> int:
|
||||
"""Return the number of tokens used by a list of messages."""
|
||||
try:
|
||||
encoding = tiktoken.encoding_for_model(model)
|
||||
except KeyError:
|
||||
print("Warning: model not found. Using cl100k_base encoding.")
|
||||
encoding = tiktoken.get_encoding("cl100k_base")
|
||||
if model in {
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-3.5-turbo-16k-0613",
|
||||
"gpt-4-0314",
|
||||
"gpt-4-32k-0314",
|
||||
"gpt-4-0613",
|
||||
"gpt-4-32k-0613",
|
||||
"SparkDesk",
|
||||
"chatglm_pro",
|
||||
"chatglm_std",
|
||||
"chatglm_lite",
|
||||
"qwen-v1",
|
||||
"qwen-plus-v1",
|
||||
"ERNIE-Bot",
|
||||
"ERNIE-Bot-turbo",
|
||||
"gemini-pro",
|
||||
}:
|
||||
tokens_per_message = 3
|
||||
tokens_per_name = 1
|
||||
elif model == "gpt-3.5-turbo-0301":
|
||||
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
|
||||
tokens_per_name = -1 # if there's a name, the role is omitted
|
||||
elif "gpt-3.5-turbo" in model:
|
||||
# print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
|
||||
return count_chat_completion_tokens(messages, model="gpt-3.5-turbo-0613")
|
||||
elif "gpt-4" in model:
|
||||
# print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
|
||||
return count_chat_completion_tokens(messages, model="gpt-4-0613")
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"""count_chat_completion_tokens() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
|
||||
)
|
||||
num_tokens = 0
|
||||
for message in messages:
|
||||
num_tokens += tokens_per_message
|
||||
for key, value in message.items():
|
||||
num_tokens += len(encoding.encode(value))
|
||||
if key == "name":
|
||||
num_tokens += tokens_per_name
|
||||
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
|
||||
return num_tokens
|
||||
|
||||
|
||||
def count_completion_tokens(messages: list, model: str) -> int:
|
||||
|
||||
try:
|
||||
encoding = tiktoken.encoding_for_model(model)
|
||||
except KeyError:
|
||||
print("Warning: model not found. Using cl100k_base encoding.")
|
||||
encoding = tiktoken.get_encoding("cl100k_base")
|
||||
|
||||
text = ""
|
||||
|
||||
for message in messages:
|
||||
text += message['role'] + message['content'] + "\n"
|
||||
|
||||
text += "assistant: "
|
||||
|
||||
return len(encoding.encode(text))
|
||||
|
||||
|
||||
def count_tokens(messages: list, model: str):
|
||||
|
||||
if model in CHAT_COMPLETION_MODELS:
|
||||
return count_chat_completion_tokens(messages, model)
|
||||
elif model in COMPLETION_MODELS:
|
||||
return count_completion_tokens(messages, model)
|
||||
raise ValueError("不支持模型[{}],请检查配置文件".format(model))
|
||||
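# --- Illustration (not part of the original file) ---
# Worked sketch of the accounting done by count_chat_completion_tokens for the
# gpt-3.5-turbo family: 3 tokens per message plus the encoded length of every
# field value (role and content alike), +1 when a "name" field is present,
# and a final +3 that primes the assistant reply. enc() below stands for the
# tiktoken encoding chosen for the model.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "你好"},
]
print(count_tokens(messages, "gpt-3.5-turbo"))
# = (3 + len(enc("system")) + len(enc("You are a helpful assistant.")))
# + (3 + len(enc("user"))   + len(enc("你好")))
# + 3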
@@ -1,504 +0,0 @@
|
||||
"""主线使用的会话管理模块
|
||||
|
||||
每个人、每个群单独一个session,session内部保留了对话的上下文,
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
import json
|
||||
|
||||
from ..openai import manager as openai_manager
|
||||
from ..openai import modelmgr as openai_modelmgr
|
||||
from ..database import manager as database_manager
|
||||
from ..utils import context as context
|
||||
|
||||
from ..plugin import host as plugin_host
|
||||
from ..plugin import models as plugin_models
|
||||
|
||||
# 运行时保存的所有session
|
||||
sessions = {}
|
||||
|
||||
|
||||
class SessionOfflineStatus:
|
||||
ON_GOING = 'on_going'
|
||||
EXPLICITLY_CLOSED = 'explicitly_closed'
|
||||
|
||||
|
||||
# 从数据加载session
|
||||
def load_sessions():
|
||||
"""从数据库加载sessions"""
|
||||
|
||||
global sessions
|
||||
|
||||
db_inst = context.get_database_manager()
|
||||
|
||||
session_data = db_inst.load_valid_sessions()
|
||||
|
||||
for session_name in session_data:
|
||||
logging.debug('加载session: {}'.format(session_name))
|
||||
|
||||
temp_session = Session(session_name)
|
||||
temp_session.name = session_name
|
||||
temp_session.create_timestamp = session_data[session_name]['create_timestamp']
|
||||
temp_session.last_interact_timestamp = session_data[session_name]['last_interact_timestamp']
|
||||
|
||||
temp_session.prompt = json.loads(session_data[session_name]['prompt'])
|
||||
temp_session.token_counts = json.loads(session_data[session_name]['token_counts'])
|
||||
|
||||
temp_session.default_prompt = json.loads(session_data[session_name]['default_prompt']) if \
|
||||
session_data[session_name]['default_prompt'] else []
|
||||
|
||||
sessions[session_name] = temp_session
|
||||
|
||||
|
||||
# 获取指定名称的session,如果不存在则创建一个新的
|
||||
def get_session(session_name: str) -> 'Session':
|
||||
global sessions
|
||||
if session_name not in sessions:
|
||||
sessions[session_name] = Session(session_name)
|
||||
return sessions[session_name]
|
||||
|
||||
|
||||
def dump_session(session_name: str):
|
||||
global sessions
|
||||
if session_name in sessions:
|
||||
assert isinstance(sessions[session_name], Session)
|
||||
sessions[session_name].persistence()
|
||||
del sessions[session_name]
|
||||
|
||||
|
||||
# 通用的OpenAI API交互session
|
||||
# session内部保留了对话的上下文,
|
||||
# 收到用户消息后,将上下文提交给OpenAI API生成回复
|
||||
class Session:
|
||||
name = ''
|
||||
|
||||
prompt = []
|
||||
"""使用list来保存会话中的回合"""
|
||||
|
||||
default_prompt = []
|
||||
"""本session的默认prompt"""
|
||||
|
||||
create_timestamp = 0
|
||||
"""会话创建时间"""
|
||||
|
||||
last_interact_timestamp = 0
|
||||
"""上次交互(产生回复)时间"""
|
||||
|
||||
just_switched_to_exist_session = False
|
||||
|
||||
response_lock = None
|
||||
|
||||
# 加锁
|
||||
def acquire_response_lock(self):
|
||||
logging.debug('{},lock acquire,{}'.format(self.name, self.response_lock))
|
||||
self.response_lock.acquire()
|
||||
logging.debug('{},lock acquire successfully,{}'.format(self.name, self.response_lock))
|
||||
|
||||
# 释放锁
|
||||
def release_response_lock(self):
|
||||
if self.response_lock.locked():
|
||||
logging.debug('{},lock release,{}'.format(self.name, self.response_lock))
|
||||
self.response_lock.release()
|
||||
logging.debug('{},lock release successfully,{}'.format(self.name, self.response_lock))
|
||||
|
||||
# 从配置文件获取会话预设信息
|
||||
def get_default_prompt(self, use_default: str = None):
|
||||
import pkg.openai.dprompt as dprompt
|
||||
|
||||
if use_default is None:
|
||||
use_default = dprompt.mode_inst().get_using_name()
|
||||
|
||||
current_default_prompt, _ = dprompt.mode_inst().get_prompt(use_default)
|
||||
return current_default_prompt
|
||||
|
||||
def __init__(self, name: str):
|
||||
self.name = name
|
||||
self.create_timestamp = int(time.time())
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
self.prompt = []
|
||||
self.token_counts = []
|
||||
self.schedule()
|
||||
|
||||
self.response_lock = threading.Lock()
|
||||
|
||||
self.default_prompt = self.get_default_prompt()
|
||||
logging.debug("prompt is: {}".format(self.default_prompt))
|
||||
|
||||
# 设定检查session最后一次对话是否超过过期时间的计时器
|
||||
def schedule(self):
|
||||
threading.Thread(target=self.expire_check_timer_loop, args=(self.create_timestamp,)).start()
|
||||
|
||||
# 检查session是否已经过期
|
||||
def expire_check_timer_loop(self, create_timestamp: int):
|
||||
global sessions
|
||||
while True:
|
||||
time.sleep(60)
|
||||
|
||||
# 若此session已被更换或移除,则退出
|
||||
if self.create_timestamp != create_timestamp or self not in sessions.values():
|
||||
return
|
||||
|
||||
config = context.get_config_manager().data
|
||||
if int(time.time()) - self.last_interact_timestamp > config['session_expire_time']:
|
||||
logging.info('session {} 已过期'.format(self.name))
|
||||
|
||||
# 触发插件事件
|
||||
args = {
|
||||
'session_name': self.name,
|
||||
'session': self,
|
||||
'session_expire_time': config['session_expire_time']
|
||||
}
|
||||
event = plugin_host.emit(plugin_models.SessionExpired, **args)
|
||||
if event.is_prevented_default():
|
||||
return
|
||||
|
||||
self.reset(expired=True, schedule_new=False)
|
||||
|
||||
# 删除此session
|
||||
del sessions[self.name]
|
||||
return
|
||||
|
||||
# 请求回复
|
||||
# 这个函数是阻塞的
|
||||
def query(self, text: str=None) -> tuple[str, str, list[str]]:
|
||||
"""向session中添加一条消息,返回接口回复
|
||||
|
||||
Args:
|
||||
text (str): 用户消息
|
||||
|
||||
Returns:
|
||||
tuple[str, str]: (接口回复, finish_reason, 已调用的函数列表)
|
||||
"""
|
||||
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
|
||||
# 触发插件事件
|
||||
if not self.prompt:
|
||||
args = {
|
||||
'session_name': self.name,
|
||||
'session': self,
|
||||
'default_prompt': self.default_prompt,
|
||||
}
|
||||
|
||||
event = plugin_host.emit(plugin_models.SessionFirstMessageReceived, **args)
|
||||
if event.is_prevented_default():
|
||||
return None, None, None
|
||||
|
||||
config = context.get_config_manager().data
|
||||
max_length = config['prompt_submit_length']
|
||||
|
||||
local_default_prompt = self.default_prompt.copy()
|
||||
local_prompt = self.prompt.copy()
|
||||
|
||||
# 触发PromptPreProcessing事件
|
||||
args = {
|
||||
'session_name': self.name,
|
||||
'default_prompt': self.default_prompt,
|
||||
'prompt': self.prompt,
|
||||
'text_message': text,
|
||||
}
|
||||
|
||||
event = plugin_host.emit(plugin_models.PromptPreProcessing, **args)
|
||||
|
||||
if event.get_return_value('default_prompt') is not None:
|
||||
local_default_prompt = event.get_return_value('default_prompt')
|
||||
|
||||
if event.get_return_value('prompt') is not None:
|
||||
local_prompt = event.get_return_value('prompt')
|
||||
|
||||
if event.get_return_value('text_message') is not None:
|
||||
text = event.get_return_value('text_message')
|
||||
|
||||
# 裁剪messages到合适长度
|
||||
prompts, _ = self.cut_out(text, max_length, local_default_prompt, local_prompt)
|
||||
|
||||
res_text = ""
|
||||
|
||||
pending_msgs = []
|
||||
|
||||
total_tokens = 0
|
||||
|
||||
finish_reason: str = ""
|
||||
|
||||
funcs = []
|
||||
|
||||
trace_func_calls = config['trace_function_calls']
|
||||
botmgr = context.get_qqbot_manager()
|
||||
|
||||
session_name_spt: list[str] = self.name.split("_")
|
||||
|
||||
pending_res_text = ""
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
# TODO 对不起,我知道这样非常非常屎山,但我之后会重构的
|
||||
for resp in context.get_openai_manager().request_completion(prompts):
|
||||
|
||||
if pending_res_text != "":
|
||||
botmgr.adapter.send_message(
|
||||
session_name_spt[0],
|
||||
session_name_spt[1],
|
||||
pending_res_text
|
||||
)
|
||||
pending_res_text = ""
|
||||
|
||||
finish_reason = resp['choices'][0]['finish_reason']
|
||||
|
||||
if resp['choices'][0]['message']['role'] == "assistant" and resp['choices'][0]['message']['content'] != None: # 包含纯文本响应
|
||||
|
||||
if not trace_func_calls:
|
||||
res_text += resp['choices'][0]['message']['content']
|
||||
else:
|
||||
res_text = resp['choices'][0]['message']['content']
|
||||
pending_res_text = resp['choices'][0]['message']['content']
|
||||
|
||||
total_tokens += resp['usage']['total_tokens']
|
||||
|
||||
msg = {
|
||||
"role": "assistant",
|
||||
"content": resp['choices'][0]['message']['content']
|
||||
}
|
||||
|
||||
if 'function_call' in resp['choices'][0]['message']:
|
||||
msg['function_call'] = json.dumps(resp['choices'][0]['message']['function_call'])
|
||||
|
||||
pending_msgs.append(msg)
|
||||
|
||||
if resp['choices'][0]['message']['type'] == 'function_call':
|
||||
# self.prompt.append(
|
||||
# {
|
||||
# "role": "assistant",
|
||||
# "content": "function call: "+json.dumps(resp['choices'][0]['message']['function_call'])
|
||||
# }
|
||||
# )
|
||||
if trace_func_calls:
|
||||
botmgr.adapter.send_message(
|
||||
session_name_spt[0],
|
||||
session_name_spt[1],
|
||||
"调用函数 "+resp['choices'][0]['message']['function_call']['name'] + "..."
|
||||
)
|
||||
|
||||
total_tokens += resp['usage']['total_tokens']
|
||||
elif resp['choices'][0]['message']['type'] == 'function_return':
|
||||
# self.prompt.append(
|
||||
# {
|
||||
# "role": "function",
|
||||
# "name": resp['choices'][0]['message']['function_name'],
|
||||
# "content": json.dumps(resp['choices'][0]['message']['content'])
|
||||
# }
|
||||
# )
|
||||
|
||||
# total_tokens += resp['usage']['total_tokens']
|
||||
funcs.append(
|
||||
resp['choices'][0]['message']['function_name']
|
||||
)
|
||||
pass
|
||||
|
||||
# 向API请求补全
|
||||
# message, total_token = pkg.utils.context.get_openai_manager().request_completion(
|
||||
# prompts,
|
||||
# )
|
||||
|
||||
# 成功获取,处理回复
|
||||
# res_test = message
|
||||
res_ans = res_text.strip()
|
||||
|
||||
# 将此次对话的双方内容加入到prompt中
|
||||
# self.prompt.append({'role': 'user', 'content': text})
|
||||
# self.prompt.append({'role': 'assistant', 'content': res_ans})
|
||||
if text:
|
||||
self.prompt.append({'role': 'user', 'content': text})
|
||||
# 添加pending_msgs
|
||||
self.prompt += pending_msgs
|
||||
|
||||
# 向token_counts中添加本回合的token数量
|
||||
# self.token_counts.append(total_tokens-total_token_before_query)
|
||||
# logging.debug("本回合使用token: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts))
|
||||
|
||||
if self.just_switched_to_exist_session:
|
||||
self.just_switched_to_exist_session = False
|
||||
self.set_ongoing()
|
||||
|
||||
# 上报使用量数据
|
||||
session_type = session_name_spt[0]
|
||||
session_id = session_name_spt[1]
|
||||
|
||||
ability_provider = "QChatGPT.Text"
|
||||
usage = total_tokens
|
||||
model_name = context.get_config_manager().data['completion_api_params']['model']
|
||||
response_seconds = int(time.time() - start_time)
|
||||
retry_times = -1 # 暂不记录
|
||||
|
||||
context.get_center_v2_api().usage.post_query_record(
|
||||
session_type=session_type,
|
||||
session_id=session_id,
|
||||
query_ability_provider=ability_provider,
|
||||
usage=usage,
|
||||
model_name=model_name,
|
||||
response_seconds=response_seconds,
|
||||
retry_times=retry_times
|
||||
)
|
||||
|
||||
return (res_ans[1:] if res_ans.startswith('\n') else res_ans), finish_reason, funcs
|
||||
|
||||
# 删除上一回合并返回上一回合的问题
|
||||
def undo(self) -> str:
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
|
||||
# 删除最后两个消息
|
||||
if len(self.prompt) < 2:
|
||||
raise Exception('之前无对话,无法撤销')
|
||||
|
||||
question = self.prompt[-2]['content']
|
||||
self.prompt = self.prompt[:-2]
|
||||
self.token_counts = self.token_counts[:-1]
|
||||
|
||||
# 返回上一回合的问题
|
||||
return question
|
||||
|
||||
# 构建对话体
|
||||
def cut_out(self, msg: str, max_tokens: int, default_prompt: list, prompt: list) -> tuple[list, list]:
|
||||
"""将现有prompt进行切割处理,使得新的prompt长度不超过max_tokens
|
||||
|
||||
:return: (新的prompt, 新的token_counts)
|
||||
"""
|
||||
|
||||
# 最终由三个部分组成
|
||||
# - default_prompt 情景预设固定值
|
||||
# - changable_prompts 可变部分, 此会话中的历史对话回合
|
||||
# - current_question 当前问题
|
||||
|
||||
# 包装目前的对话回合内容
|
||||
changable_prompts = []
|
||||
|
||||
use_model = context.get_config_manager().data['completion_api_params']['model']
|
||||
|
||||
ptr = len(prompt) - 1
|
||||
|
||||
# 直接从后向前扫描拼接,不管是否是整回合
|
||||
while ptr >= 0:
|
||||
if openai_modelmgr.count_tokens(prompt[ptr:ptr+1]+changable_prompts, use_model) > max_tokens:
|
||||
break
|
||||
|
||||
changable_prompts.insert(0, prompt[ptr])
|
||||
|
||||
ptr -= 1
|
||||
|
||||
# 将default_prompt和changable_prompts合并
|
||||
result_prompt = default_prompt + changable_prompts
|
||||
|
||||
# 添加当前问题
|
||||
if msg:
|
||||
result_prompt.append(
|
||||
{
|
||||
'role': 'user',
|
||||
'content': msg
|
||||
}
|
||||
)
|
||||
|
||||
logging.debug("cut_out: {}".format(json.dumps(result_prompt, ensure_ascii=False, indent=4)))
|
||||
|
||||
return result_prompt, openai_modelmgr.count_tokens(changable_prompts, use_model)
|
||||
|
||||
# 持久化session
|
||||
def persistence(self):
|
||||
if self.prompt == self.get_default_prompt():
|
||||
return
|
||||
|
||||
db_inst = context.get_database_manager()
|
||||
|
||||
name_spt = self.name.split('_')
|
||||
|
||||
subject_type = name_spt[0]
|
||||
subject_number = int(name_spt[1])
|
||||
|
||||
db_inst.persistence_session(subject_type, subject_number, self.create_timestamp, self.last_interact_timestamp,
|
||||
json.dumps(self.prompt), json.dumps(self.default_prompt), json.dumps(self.token_counts))
|
||||
|
||||
# 重置session
|
||||
def reset(self, explicit: bool = False, expired: bool = False, schedule_new: bool = True, use_prompt: str = None, persist: bool = False):
|
||||
if self.prompt:
|
||||
self.persistence()
|
||||
if explicit:
|
||||
# 触发插件事件
|
||||
args = {
|
||||
'session_name': self.name,
|
||||
'session': self
|
||||
}
|
||||
|
||||
# 此事件不支持阻止默认行为
|
||||
_ = plugin_host.emit(plugin_models.SessionExplicitReset, **args)
|
||||
|
||||
context.get_database_manager().explicit_close_session(self.name, self.create_timestamp)
|
||||
|
||||
if expired:
|
||||
context.get_database_manager().set_session_expired(self.name, self.create_timestamp)
|
||||
|
||||
if not persist: # 不要求保持default prompt
|
||||
self.default_prompt = self.get_default_prompt(use_prompt)
|
||||
self.prompt = []
|
||||
self.token_counts = []
|
||||
self.create_timestamp = int(time.time())
|
||||
self.last_interact_timestamp = int(time.time())
|
||||
self.just_switched_to_exist_session = False
|
||||
|
||||
# self.response_lock = threading.Lock()
|
||||
|
||||
if schedule_new:
|
||||
self.schedule()
|
||||
|
||||
# 将本session的数据库状态设置为on_going
|
||||
def set_ongoing(self):
|
||||
context.get_database_manager().set_session_ongoing(self.name, self.create_timestamp)
|
||||
|
||||
# 切换到上一个session
|
||||
def last_session(self):
|
||||
last_one = context.get_database_manager().last_session(self.name, self.last_interact_timestamp)
|
||||
if last_one is None:
|
||||
return None
|
||||
else:
|
||||
self.persistence()
|
||||
|
||||
self.create_timestamp = last_one['create_timestamp']
|
||||
self.last_interact_timestamp = last_one['last_interact_timestamp']
|
||||
|
||||
self.prompt = json.loads(last_one['prompt'])
|
||||
self.token_counts = json.loads(last_one['token_counts'])
|
||||
|
||||
self.default_prompt = json.loads(last_one['default_prompt']) if last_one['default_prompt'] else []
|
||||
|
||||
self.just_switched_to_exist_session = True
|
||||
return self
|
||||
|
||||
# 切换到下一个session
|
||||
def next_session(self):
|
||||
next_one = context.get_database_manager().next_session(self.name, self.last_interact_timestamp)
|
||||
if next_one is None:
|
||||
return None
|
||||
else:
|
||||
self.persistence()
|
||||
|
||||
self.create_timestamp = next_one['create_timestamp']
|
||||
self.last_interact_timestamp = next_one['last_interact_timestamp']
|
||||
|
||||
self.prompt = json.loads(next_one['prompt'])
|
||||
self.token_counts = json.loads(next_one['token_counts'])
|
||||
|
||||
self.default_prompt = json.loads(next_one['default_prompt']) if next_one['default_prompt'] else []
|
||||
|
||||
self.just_switched_to_exist_session = True
|
||||
return self
|
||||
|
||||
def list_history(self, capacity: int = 10, page: int = 0):
|
||||
return context.get_database_manager().list_history(self.name, capacity, page)
|
||||
|
||||
def delete_history(self, index: int) -> bool:
|
||||
return context.get_database_manager().delete_history(self.name, index)
|
||||
|
||||
def delete_all_history(self) -> bool:
|
||||
return context.get_database_manager().delete_all_history(self.name)
|
||||
|
||||
def draw_image(self, prompt: str):
|
||||
return context.get_openai_manager().request_image(prompt)
|
||||
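# --- Illustration (not part of the original file) ---
# Sketch of the intended call pattern for this session module, assuming the
# global context (config, database and OpenAI managers, plugin host, QQ bot
# manager) has been initialized elsewhere. The session name follows the
# "<type>_<number>" convention used throughout the module; the number below
# is a placeholder.
load_sessions()                              # restore unexpired sessions from the DB
session = get_session("person_12345678")
reply, finish_reason, called_funcs = session.query("你好")
print(reply, finish_reason, called_funcs)
session.reset(explicit=True)                 # persist and close this round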
@@ -1,578 +0,0 @@
|
||||
# 插件管理模块
|
||||
import asyncio
|
||||
import logging
|
||||
import importlib
|
||||
import os
|
||||
import pkgutil
|
||||
import sys
|
||||
import shutil
|
||||
import traceback
|
||||
import time
|
||||
import re
|
||||
|
||||
from ..utils import updater as updater
|
||||
from ..utils import network as network
|
||||
from ..utils import context as context
|
||||
from ..plugin import switch as switch
|
||||
from ..plugin import settings as settings
|
||||
from ..qqbot import adapter as msadapter
|
||||
from ..plugin import metadata as metadata
|
||||
|
||||
from mirai import Mirai
|
||||
import requests
|
||||
|
||||
from CallingGPT.session.session import Session
|
||||
|
||||
__plugins__ = {}
|
||||
"""插件列表
|
||||
|
||||
示例:
|
||||
{
|
||||
"example": {
|
||||
"path": "plugins/example/main.py",
|
||||
"enabled: True,
|
||||
"name": "example",
|
||||
"description": "example",
|
||||
"version": "0.0.1",
|
||||
"author": "RockChinQ",
|
||||
"class": <class 'plugins.example.ExamplePlugin'>,
|
||||
"hooks": {
|
||||
"person_message": [
|
||||
<function ExamplePlugin.person_message at 0x0000020E1D1B8D38>
|
||||
]
|
||||
},
|
||||
"instance": None
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
__plugins_order__ = []
|
||||
"""插件顺序"""
|
||||
|
||||
__enable_content_functions__ = True
|
||||
"""是否启用内容函数"""
|
||||
|
||||
__callable_functions__ = []
|
||||
"""供GPT调用的函数结构"""
|
||||
|
||||
__function_inst_map__: dict[str, callable] = {}
|
||||
"""函数名:实例 映射"""
|
||||
|
||||
|
||||
def generate_plugin_order():
|
||||
"""根据__plugin__生成插件初始顺序,无视是否启用"""
|
||||
global __plugins_order__
|
||||
__plugins_order__ = []
|
||||
for plugin_name in __plugins__:
|
||||
__plugins_order__.append(plugin_name)
|
||||
|
||||
|
||||
def iter_plugins():
|
||||
"""按照顺序迭代插件"""
|
||||
for plugin_name in __plugins_order__:
|
||||
if plugin_name not in __plugins__:
|
||||
continue
|
||||
yield __plugins__[plugin_name]
|
||||
|
||||
|
||||
def iter_plugins_name():
|
||||
"""迭代插件名"""
|
||||
for plugin_name in __plugins_order__:
|
||||
yield plugin_name
|
||||
|
||||
|
||||
__current_module_path__ = ""
|
||||
|
||||
|
||||
def walk_plugin_path(module, prefix="", path_prefix=""):
|
||||
global __current_module_path__
|
||||
"""遍历插件路径"""
|
||||
for item in pkgutil.iter_modules(module.__path__):
|
||||
if item.ispkg:
|
||||
logging.debug("扫描插件包: plugins/{}".format(path_prefix + item.name))
|
||||
walk_plugin_path(
|
||||
__import__(module.__name__ + "." + item.name, fromlist=[""]),
|
||||
prefix + item.name + ".",
|
||||
path_prefix + item.name + "/",
|
||||
)
|
||||
else:
|
||||
try:
|
||||
logging.debug(
|
||||
"扫描插件模块: plugins/{}".format(path_prefix + item.name + ".py")
|
||||
)
|
||||
__current_module_path__ = "plugins/" + path_prefix + item.name + ".py"
|
||||
|
||||
importlib.import_module(module.__name__ + "." + item.name)
|
||||
logging.debug(
|
||||
"加载模块: plugins/{} 成功".format(path_prefix + item.name + ".py")
|
||||
)
|
||||
except:
|
||||
logging.error(
|
||||
"加载模块: plugins/{} 失败: {}".format(
|
||||
path_prefix + item.name + ".py", sys.exc_info()
|
||||
)
|
||||
)
|
||||
traceback.print_exc()
|
||||
|
||||
|
||||
def load_plugins():
|
||||
"""加载插件"""
|
||||
logging.debug("加载插件")
|
||||
PluginHost()
|
||||
walk_plugin_path(__import__("plugins"))
|
||||
|
||||
logging.debug(__plugins__)
|
||||
|
||||
# 加载开关数据
|
||||
switch.load_switch()
|
||||
|
||||
# 生成初始顺序
|
||||
generate_plugin_order()
|
||||
# 加载插件顺序
|
||||
settings.load_settings()
|
||||
|
||||
logging.debug("registered plugins: {}".format(__plugins__))
|
||||
|
||||
# 输出已注册的内容函数列表
|
||||
logging.debug("registered content functions: {}".format(__callable_functions__))
|
||||
logging.debug("function instance map: {}".format(__function_inst_map__))
|
||||
|
||||
# 迁移插件源地址记录
|
||||
metadata.do_plugin_git_repo_migrate()
|
||||
|
||||
|
||||
def initialize_plugins():
|
||||
"""初始化插件"""
|
||||
logging.debug("初始化插件")
|
||||
import pkg.plugin.models as models
|
||||
|
||||
successfully_initialized_plugins = []
|
||||
|
||||
for plugin in iter_plugins():
|
||||
# if not plugin['enabled']:
|
||||
# continue
|
||||
try:
|
||||
models.__current_registering_plugin__ = plugin["name"]
|
||||
plugin["instance"] = plugin["class"](plugin_host=context.get_plugin_host())
|
||||
# logging.info("插件 {} 已初始化".format(plugin['name']))
|
||||
successfully_initialized_plugins.append(plugin["name"])
|
||||
except:
|
||||
logging.error("插件{}初始化时发生错误: {}".format(plugin["name"], sys.exc_info()))
|
||||
logging.debug(traceback.format_exc())
|
||||
|
||||
logging.info("以下插件已初始化: {}".format(", ".join(successfully_initialized_plugins)))
|
||||
|
||||
|
||||
def unload_plugins():
|
||||
"""卸载插件"""
|
||||
# 不再显式卸载插件,因为当程序结束时,插件的析构函数会被系统执行
|
||||
# for plugin in __plugins__.values():
|
||||
# if plugin['enabled'] and plugin['instance'] is not None:
|
||||
# if not hasattr(plugin['instance'], '__del__'):
|
||||
# logging.warning("插件{}没有定义析构函数".format(plugin['name']))
|
||||
# else:
|
||||
# try:
|
||||
# plugin['instance'].__del__()
|
||||
# logging.info("卸载插件: {}".format(plugin['name']))
|
||||
# plugin['instance'] = None
|
||||
# except:
|
||||
# logging.error("插件{}卸载时发生错误: {}".format(plugin['name'], sys.exc_info()))
|
||||
|
||||
|
||||
def get_github_plugin_repo_label(repo_url: str) -> list[str]:
|
||||
"""获取username, repo"""
|
||||
|
||||
# 提取 username/repo , 正则表达式
|
||||
repo = re.findall(
|
||||
r"(?:https?://github\.com/|git@github\.com:)([^/]+/[^/]+?)(?:\.git|/|$)",
|
||||
repo_url,
|
||||
)
|
||||
|
||||
if len(repo) > 0: # github
|
||||
return repo[0].split("/")
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def download_plugin_source_code(repo_url: str, target_path: str) -> str:
|
||||
"""下载插件源码"""
|
||||
# 检查源类型
|
||||
|
||||
# 提取 username/repo , 正则表达式
|
||||
repo = get_github_plugin_repo_label(repo_url)
|
||||
|
||||
if repo is not None:  # github
    target_path += repo[1]
|
||||
logging.info("从 GitHub 下载插件源码...")
|
||||
|
||||
zipball_url = f"https://api.github.com/repos/{'/'.join(repo)}/zipball/HEAD"
|
||||
|
||||
zip_resp = requests.get(
|
||||
url=zipball_url, proxies=network.wrapper_proxies(), stream=True
|
||||
)
|
||||
|
||||
if zip_resp.status_code != 200:
|
||||
raise Exception("下载源码失败: {}".format(zip_resp.text))
|
||||
|
||||
if os.path.exists("temp/" + target_path):
|
||||
shutil.rmtree("temp/" + target_path)
|
||||
|
||||
if os.path.exists(target_path):
|
||||
shutil.rmtree(target_path)
|
||||
|
||||
os.makedirs("temp/" + target_path)
|
||||
|
||||
with open("temp/" + target_path + "/source.zip", "wb") as f:
|
||||
for chunk in zip_resp.iter_content(chunk_size=1024):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
|
||||
logging.info("下载完成, 解压...")
|
||||
import zipfile
|
||||
|
||||
with zipfile.ZipFile("temp/" + target_path + "/source.zip", "r") as zip_ref:
|
||||
zip_ref.extractall("temp/" + target_path)
|
||||
os.remove("temp/" + target_path + "/source.zip")
|
||||
|
||||
# 目标是 username-repo-hash , 用正则表达式提取完整的文件夹名,复制到 plugins/repo
|
||||
import glob
|
||||
|
||||
# 获取解压后的文件夹名
|
||||
unzip_dir = glob.glob("temp/" + target_path + "/*")[0]
|
||||
|
||||
# 复制到 plugins/repo
|
||||
shutil.copytree(unzip_dir, target_path + "/")
|
||||
|
||||
# 删除解压后的文件夹
|
||||
shutil.rmtree(unzip_dir)
|
||||
|
||||
logging.info("解压完成")
|
||||
else:
|
||||
raise Exception("暂不支持的源类型,请使用 GitHub 仓库发行插件。")
|
||||
|
||||
return repo[1]
|
||||
|
||||
|
||||
def check_requirements(path: str):
|
||||
# 检查此目录是否包含requirements.txt
|
||||
if os.path.exists(path + "/requirements.txt"):
|
||||
logging.info("检测到requirements.txt,正在安装依赖")
|
||||
import pkg.utils.pkgmgr
|
||||
|
||||
pkg.utils.pkgmgr.install_requirements(path + "/requirements.txt")
|
||||
|
||||
import pkg.utils.log as log
|
||||
|
||||
log.reset_logging()
|
||||
|
||||
|
||||
def install_plugin(repo_url: str):
|
||||
"""安装插件,从git储存库获取并解决依赖"""
|
||||
|
||||
repo_label = download_plugin_source_code(repo_url, "plugins/")
|
||||
|
||||
check_requirements("plugins/" + repo_label)
|
||||
|
||||
metadata.set_plugin_metadata(repo_label, repo_url, int(time.time()), "HEAD")
|
||||
|
||||
# 上报安装记录
|
||||
context.get_center_v2_api().plugin.post_install_record(
|
||||
plugin={
|
||||
"name": "unknown",
|
||||
"remote": repo_url,
|
||||
"author": "unknown",
|
||||
"version": "HEAD",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def uninstall_plugin(plugin_name: str) -> str:
|
||||
"""卸载插件"""
|
||||
if plugin_name not in __plugins__:
|
||||
raise Exception("插件不存在")
|
||||
|
||||
plugin_info = get_plugin_info_for_audit(plugin_name)
|
||||
|
||||
# 获取文件夹路径
|
||||
plugin_path = __plugins__[plugin_name]["path"].replace("\\", "/")
|
||||
|
||||
# 剪切路径为plugins/插件名
|
||||
plugin_path = plugin_path.split("plugins/")[1].split("/")[0]
|
||||
|
||||
# 删除文件夹
|
||||
shutil.rmtree("plugins/" + plugin_path)
|
||||
|
||||
# 上报卸载记录
|
||||
context.get_center_v2_api().plugin.post_remove_record(
|
||||
plugin=plugin_info
|
||||
)
|
||||
|
||||
return "plugins/" + plugin_path
|
||||
|
||||
|
||||
def update_plugin(plugin_name: str):
|
||||
"""更新插件"""
|
||||
# 检查是否有远程地址记录
|
||||
plugin_path_name = get_plugin_path_name_by_plugin_name(plugin_name)
|
||||
|
||||
meta = metadata.get_plugin_metadata(plugin_path_name)
|
||||
|
||||
if meta == {}:
|
||||
raise Exception("没有此插件元数据信息,无法更新")
|
||||
|
||||
old_plugin_info = get_plugin_info_for_audit(plugin_name)
|
||||
|
||||
context.get_center_v2_api().plugin.post_update_record(
|
||||
plugin=old_plugin_info,
|
||||
old_version=old_plugin_info['version'],
|
||||
new_version='HEAD',
|
||||
)
|
||||
|
||||
remote_url = meta["source"]
|
||||
if (
|
||||
remote_url == "https://github.com/RockChinQ/QChatGPT"
|
||||
or remote_url == "https://gitee.com/RockChin/QChatGPT"
|
||||
or remote_url == ""
|
||||
or remote_url is None
|
||||
or remote_url == "http://github.com/RockChinQ/QChatGPT"
|
||||
or remote_url == "http://gitee.com/RockChin/QChatGPT"
|
||||
):
|
||||
raise Exception("插件没有远程地址记录,无法更新")
|
||||
|
||||
# 重新安装插件
|
||||
logging.info("正在重新安装插件以进行更新...")
|
||||
|
||||
install_plugin(remote_url)
|
||||
|
||||
|
||||
def get_plugin_name_by_path_name(plugin_path_name: str) -> str:
|
||||
for k, v in __plugins__.items():
|
||||
if v["path"] == "plugins/" + plugin_path_name + "/main.py":
|
||||
return k
|
||||
return None
|
||||
|
||||
|
||||
def get_plugin_path_name_by_plugin_name(plugin_name: str) -> str:
|
||||
if plugin_name not in __plugins__:
|
||||
return None
|
||||
|
||||
plugin_main_module_path = __plugins__[plugin_name]["path"]
|
||||
|
||||
plugin_main_module_path = plugin_main_module_path.replace("\\", "/")
|
||||
|
||||
spt = plugin_main_module_path.split("/")
|
||||
|
||||
return spt[1]
|
||||
|
||||
|
||||
def get_plugin_info_for_audit(plugin_name: str) -> dict:
|
||||
"""获取插件信息"""
|
||||
if plugin_name not in __plugins__:
|
||||
return {}
|
||||
plugin = __plugins__[plugin_name]
|
||||
|
||||
name = plugin["name"]
|
||||
meta = metadata.get_plugin_metadata(get_plugin_path_name_by_plugin_name(name))
|
||||
remote = meta["source"] if meta != {} else ""
|
||||
author = plugin["author"]
|
||||
version = plugin["version"]
|
||||
|
||||
return {
|
||||
"name": name,
|
||||
"remote": remote,
|
||||
"author": author,
|
||||
"version": version,
|
||||
}
|
||||
|
||||
|
||||
class EventContext:
|
||||
"""事件上下文"""
|
||||
|
||||
eid = 0
|
||||
"""事件编号"""
|
||||
|
||||
name = ""
|
||||
|
||||
__prevent_default__ = False
|
||||
"""是否阻止默认行为"""
|
||||
|
||||
__prevent_postorder__ = False
|
||||
"""是否阻止后续插件的执行"""
|
||||
|
||||
__return_value__ = {}
|
||||
""" 返回值
|
||||
示例:
|
||||
{
|
||||
"example": [
|
||||
'value1',
|
||||
'value2',
|
||||
3,
|
||||
4,
|
||||
{
|
||||
'key1': 'value1',
|
||||
},
|
||||
['value1', 'value2']
|
||||
]
|
||||
}
|
||||
"""
|
||||
|
||||
def add_return(self, key: str, ret):
|
||||
"""添加返回值"""
|
||||
if key not in self.__return_value__:
|
||||
self.__return_value__[key] = []
|
||||
self.__return_value__[key].append(ret)
|
||||
|
||||
def get_return(self, key: str) -> list:
|
||||
"""获取key的所有返回值"""
|
||||
if key in self.__return_value__:
|
||||
return self.__return_value__[key]
|
||||
return None
|
||||
|
||||
def get_return_value(self, key: str):
|
||||
"""获取key的首个返回值"""
|
||||
if key in self.__return_value__:
|
||||
return self.__return_value__[key][0]
|
||||
return None
|
||||
|
||||
def prevent_default(self):
|
||||
"""阻止默认行为"""
|
||||
self.__prevent_default__ = True
|
||||
|
||||
def prevent_postorder(self):
|
||||
"""阻止后续插件执行"""
|
||||
self.__prevent_postorder__ = True
|
||||
|
||||
def is_prevented_default(self):
|
||||
"""是否阻止默认行为"""
|
||||
return self.__prevent_default__
|
||||
|
||||
def is_prevented_postorder(self):
|
||||
"""是否阻止后序插件执行"""
|
||||
return self.__prevent_postorder__
|
||||
|
||||
def __init__(self, name: str):
|
||||
self.name = name
|
||||
self.eid = EventContext.eid
|
||||
self.__prevent_default__ = False
|
||||
self.__prevent_postorder__ = False
|
||||
self.__return_value__ = {}
|
||||
EventContext.eid += 1
|
||||
|
||||
|
||||
def emit(event_name: str, **kwargs) -> EventContext:
|
||||
"""触发事件"""
|
||||
import pkg.utils.context as context
|
||||
|
||||
if context.get_plugin_host() is None:
|
||||
return None
|
||||
return context.get_plugin_host().emit(event_name, **kwargs)
|
||||
|
||||
|
||||
class PluginHost:
|
||||
"""插件宿主"""
|
||||
|
||||
def __init__(self):
|
||||
"""初始化插件宿主"""
|
||||
context.set_plugin_host(self)
|
||||
self.calling_gpt_session = Session([])
|
||||
|
||||
def get_runtime_context(self) -> context:
|
||||
"""获取运行时上下文(pkg.utils.context模块的对象)
|
||||
|
||||
此上下文用于和主程序其他模块交互(数据库、QQ机器人、OpenAI接口等)
|
||||
详见pkg.utils.context模块
|
||||
其中的context变量保存了其他重要模块的类对象,可以使用这些对象进行交互
|
||||
"""
|
||||
return context
|
||||
|
||||
def get_bot(self) -> Mirai:
|
||||
"""获取机器人对象"""
|
||||
return context.get_qqbot_manager().bot
|
||||
|
||||
def get_bot_adapter(self) -> msadapter.MessageSourceAdapter:
|
||||
"""获取消息源适配器"""
|
||||
return context.get_qqbot_manager().adapter
|
||||
|
||||
def send_person_message(self, person, message):
|
||||
"""发送私聊消息"""
|
||||
self.get_bot_adapter().send_message("person", person, message)
|
||||
|
||||
def send_group_message(self, group, message):
|
||||
"""发送群消息"""
|
||||
self.get_bot_adapter().send_message("group", group, message)
|
||||
|
||||
def notify_admin(self, message):
|
||||
"""通知管理员"""
|
||||
context.get_qqbot_manager().notify_admin(message)
|
||||
|
||||
def emit(self, event_name: str, **kwargs) -> EventContext:
|
||||
"""触发事件"""
|
||||
import json
|
||||
|
||||
event_context = EventContext(event_name)
|
||||
logging.debug("触发事件: {} ({})".format(event_name, event_context.eid))
|
||||
|
||||
emitted_plugins = []
|
||||
for plugin in iter_plugins():
|
||||
if not plugin["enabled"]:
|
||||
continue
|
||||
|
||||
# if plugin['instance'] is None:
|
||||
# # 从关闭状态切到开启状态之后,重新加载插件
|
||||
# try:
|
||||
# plugin['instance'] = plugin["class"](plugin_host=self)
|
||||
# logging.info("插件 {} 已初始化".format(plugin['name']))
|
||||
# except:
|
||||
# logging.error("插件 {} 初始化时发生错误: {}".format(plugin['name'], sys.exc_info()))
|
||||
# continue
|
||||
|
||||
if "hooks" not in plugin or event_name not in plugin["hooks"]:
|
||||
continue
|
||||
|
||||
emitted_plugins.append(plugin['name'])
|
||||
|
||||
hooks = []
|
||||
if event_name in plugin["hooks"]:
|
||||
hooks = plugin["hooks"][event_name]
|
||||
for hook in hooks:
|
||||
try:
|
||||
already_prevented_default = event_context.is_prevented_default()
|
||||
|
||||
kwargs["host"] = context.get_plugin_host()
|
||||
kwargs["event"] = event_context
|
||||
|
||||
hook(plugin["instance"], **kwargs)
|
||||
|
||||
if (
|
||||
event_context.is_prevented_default()
|
||||
and not already_prevented_default
|
||||
):
|
||||
logging.debug(
|
||||
"插件 {} 已要求阻止事件 {} 的默认行为".format(plugin["name"], event_name)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logging.error("插件{}响应事件{}时发生错误".format(plugin["name"], event_name))
|
||||
logging.error(traceback.format_exc())
|
||||
|
||||
# print("done:{}".format(plugin['name']))
|
||||
if event_context.is_prevented_postorder():
|
||||
logging.debug("插件 {} 阻止了后序插件的执行".format(plugin["name"]))
|
||||
break
|
||||
|
||||
logging.debug(
|
||||
"事件 {} ({}) 处理完毕,返回值: {}".format(
|
||||
event_name, event_context.eid, event_context.__return_value__
|
||||
)
|
||||
)
|
||||
|
||||
if len(emitted_plugins) > 0:
|
||||
plugins_info = [get_plugin_info_for_audit(p) for p in emitted_plugins]
|
||||
|
||||
context.get_center_v2_api().usage.post_event_record(
|
||||
plugins=plugins_info,
|
||||
event_name=event_name,
|
||||
)
|
||||
|
||||
return event_context
|
||||
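# --- Illustrative sketch: emitting a plugin event through the host defined
# --- above and reading plugin return values. The event name and kwargs follow
# --- the person_normal_message_received hook documented in the events module
# --- further down; the IDs and text are example values.
import pkg.plugin.host as plugin_host

event_ctx = plugin_host.emit(
    "person_normal_message_received",
    launcher_type="person",
    launcher_id=12345,
    sender_id=12345,
    text_message="hello",
)

if event_ctx is not None and not event_ctx.is_prevented_default():
    altered = event_ctx.get_return_value("alter")  # None unless a plugin set it
    print("altered text:", altered)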
@@ -1,87 +0,0 @@
import os
import shutil
import json
import time

import dulwich.errors as dulwich_err

from ..utils import updater


def read_metadata_file() -> dict:
    # 读取 plugins/metadata.json 文件
    if not os.path.exists('plugins/metadata.json'):
        return {}
    with open('plugins/metadata.json', 'r') as f:
        return json.load(f)


def write_metadata_file(metadata: dict):
    if not os.path.exists('plugins'):
        os.mkdir('plugins')

    with open('plugins/metadata.json', 'w') as f:
        json.dump(metadata, f, indent=4, ensure_ascii=False)


def do_plugin_git_repo_migrate():
    # 仅在 plugins/metadata.json 不存在时执行
    if os.path.exists('plugins/metadata.json'):
        return

    metadata = read_metadata_file()

    # 遍历 plugins 下所有目录,获取目录的git远程地址
    for plugin_name in os.listdir('plugins'):
        plugin_path = os.path.join('plugins', plugin_name)
        if not os.path.isdir(plugin_path):
            continue

        remote_url = None
        try:
            remote_url = updater.get_remote_url(plugin_path)
        except dulwich_err.NotGitRepository:
            continue
        if remote_url == "https://github.com/RockChinQ/QChatGPT" or remote_url == "https://gitee.com/RockChin/QChatGPT" \
                or remote_url == "" or remote_url is None or remote_url == "http://github.com/RockChinQ/QChatGPT" or remote_url == "http://gitee.com/RockChin/QChatGPT":
            continue

        from . import host

        if plugin_name not in metadata:
            metadata[plugin_name] = {
                'source': remote_url,
                'install_timestamp': int(time.time()),
                'ref': 'HEAD',
            }

    write_metadata_file(metadata)


def set_plugin_metadata(
    plugin_name: str,
    source: str,
    install_timestamp: int,
    ref: str,
):
    metadata = read_metadata_file()
    metadata[plugin_name] = {
        'source': source,
        'install_timestamp': install_timestamp,
        'ref': ref,
    }
    write_metadata_file(metadata)


def remove_plugin_metadata(plugin_name: str):
    metadata = read_metadata_file()
    if plugin_name in metadata:
        del metadata[plugin_name]
        write_metadata_file(metadata)


def get_plugin_metadata(plugin_name: str) -> dict:
    metadata = read_metadata_file()
    if plugin_name in metadata:
        return metadata[plugin_name]
    return {}
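# --- Illustrative sketch: the shape plugins/metadata.json takes after
# --- set_plugin_metadata() above runs; the plugin name, repository owner and
# --- timestamp are example values.
example_metadata = {
    "ExamplePlugin": {
        "source": "https://github.com/someone/ExamplePlugin",
        "install_timestamp": 1700000000,
        "ref": "HEAD",
    }
}
# Equivalent call:
# set_plugin_metadata("ExamplePlugin", "https://github.com/someone/ExamplePlugin", 1700000000, "HEAD")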
@@ -1,299 +0,0 @@
|
||||
import logging
|
||||
|
||||
from ..plugin import host
|
||||
from ..utils import context
|
||||
|
||||
PersonMessageReceived = "person_message_received"
|
||||
"""收到私聊消息时,在判断是否应该响应前触发
|
||||
kwargs:
|
||||
launcher_type: str 发起对象类型(group/person)
|
||||
launcher_id: int 发起对象ID(群号/QQ号)
|
||||
sender_id: int 发送者ID(QQ号)
|
||||
message_chain: mirai.models.message.MessageChain 消息链
|
||||
"""
|
||||
|
||||
GroupMessageReceived = "group_message_received"
|
||||
"""收到群聊消息时,在判断是否应该响应前触发(所有群消息)
|
||||
kwargs:
|
||||
launcher_type: str 发起对象类型(group/person)
|
||||
launcher_id: int 发起对象ID(群号/QQ号)
|
||||
sender_id: int 发送者ID(QQ号)
|
||||
message_chain: mirai.models.message.MessageChain 消息链
|
||||
"""
|
||||
|
||||
PersonNormalMessageReceived = "person_normal_message_received"
|
||||
"""判断为应该处理的私聊普通消息时触发
|
||||
kwargs:
|
||||
launcher_type: str 发起对象类型(group/person)
|
||||
launcher_id: int 发起对象ID(群号/QQ号)
|
||||
sender_id: int 发送者ID(QQ号)
|
||||
text_message: str 消息文本
|
||||
|
||||
returns (optional):
|
||||
alter: str 修改后的消息文本
|
||||
reply: list 回复消息组件列表
|
||||
"""
|
||||
|
||||
PersonCommandSent = "person_command_sent"
|
||||
"""判断为应该处理的私聊命令时触发
|
||||
kwargs:
|
||||
launcher_type: str 发起对象类型(group/person)
|
||||
launcher_id: int 发起对象ID(群号/QQ号)
|
||||
sender_id: int 发送者ID(QQ号)
|
||||
command: str 命令
|
||||
params: list[str] 参数列表
|
||||
text_message: str 完整命令文本
|
||||
is_admin: bool 是否为管理员
|
||||
|
||||
returns (optional):
|
||||
alter: str 修改后的完整命令文本
|
||||
reply: list 回复消息组件列表
|
||||
"""
|
||||
|
||||
GroupNormalMessageReceived = "group_normal_message_received"
|
||||
"""判断为应该处理的群聊普通消息时触发
|
||||
kwargs:
|
||||
launcher_type: str 发起对象类型(group/person)
|
||||
launcher_id: int 发起对象ID(群号/QQ号)
|
||||
sender_id: int 发送者ID(QQ号)
|
||||
text_message: str 消息文本
|
||||
|
||||
returns (optional):
|
||||
alter: str 修改后的消息文本
|
||||
reply: list 回复消息组件列表
|
||||
"""
|
||||
|
||||
GroupCommandSent = "group_command_sent"
|
||||
"""判断为应该处理的群聊命令时触发
|
||||
kwargs:
|
||||
launcher_type: str 发起对象类型(group/person)
|
||||
launcher_id: int 发起对象ID(群号/QQ号)
|
||||
sender_id: int 发送者ID(QQ号)
|
||||
command: str 命令
|
||||
params: list[str] 参数列表
|
||||
text_message: str 完整命令文本
|
||||
is_admin: bool 是否为管理员
|
||||
|
||||
returns (optional):
|
||||
alter: str 修改后的完整命令文本
|
||||
reply: list 回复消息组件列表
|
||||
"""
|
||||
|
||||
NormalMessageResponded = "normal_message_responded"
|
||||
"""获取到对普通消息的文字响应时触发
|
||||
kwargs:
|
||||
launcher_type: str 发起对象类型(group/person)
|
||||
launcher_id: int 发起对象ID(群号/QQ号)
|
||||
sender_id: int 发送者ID(QQ号)
|
||||
session: pkg.openai.session.Session 会话对象
|
||||
prefix: str 回复文字消息的前缀
|
||||
response_text: str 响应文本
|
||||
finish_reason: str 响应结束原因
|
||||
funcs_called: list[str] 此次响应中调用的函数列表
|
||||
|
||||
returns (optional):
|
||||
prefix: str 修改后的回复文字消息的前缀
|
||||
reply: list 替换回复消息组件列表
|
||||
"""
|
||||
|
||||
SessionFirstMessageReceived = "session_first_message_received"
|
||||
"""会话被第一次交互时触发
|
||||
kwargs:
|
||||
session_name: str 会话名称(<launcher_type>_<launcher_id>)
|
||||
session: pkg.openai.session.Session 会话对象
|
||||
default_prompt: str 预设值
|
||||
"""
|
||||
|
||||
SessionExplicitReset = "session_reset"
|
||||
"""会话被用户手动重置时触发,此事件不支持阻止默认行为
|
||||
kwargs:
|
||||
session_name: str 会话名称(<launcher_type>_<launcher_id>)
|
||||
session: pkg.openai.session.Session 会话对象
|
||||
"""
|
||||
|
||||
SessionExpired = "session_expired"
|
||||
"""会话过期时触发
|
||||
kwargs:
|
||||
session_name: str 会话名称(<launcher_type>_<launcher_id>)
|
||||
session: pkg.openai.session.Session 会话对象
|
||||
session_expire_time: int 已设置的会话过期时间(秒)
|
||||
"""
|
||||
|
||||
KeyExceeded = "key_exceeded"
|
||||
"""api-key超额时触发
|
||||
kwargs:
|
||||
key_name: str 超额的api-key名称
|
||||
usage: dict 超额的api-key使用情况
|
||||
exceeded_keys: list[str] 超额的api-key列表
|
||||
"""
|
||||
|
||||
KeySwitched = "key_switched"
|
||||
"""api-key超额切换成功时触发,此事件不支持阻止默认行为
|
||||
kwargs:
|
||||
key_name: str 切换成功的api-key名称
|
||||
key_list: list[str] api-key列表
|
||||
"""
|
||||
|
||||
PromptPreProcessing = "prompt_pre_processing"
|
||||
"""每回合调用接口前对prompt进行预处理时触发,此事件不支持阻止默认行为
|
||||
kwargs:
|
||||
session_name: str 会话名称(<launcher_type>_<launcher_id>)
|
||||
default_prompt: list 此session使用的情景预设内容
|
||||
prompt: list 此session现有的prompt内容
|
||||
text_message: str 用户发送的消息文本
|
||||
|
||||
returns (optional):
|
||||
default_prompt: list 修改后的情景预设内容
|
||||
prompt: list 修改后的prompt内容
|
||||
text_message: str 修改后的消息文本
|
||||
"""
|
||||
|
||||
|
||||
def on(*args, **kwargs):
|
||||
"""注册事件监听器
|
||||
"""
|
||||
return Plugin.on(*args, **kwargs)
|
||||
|
||||
def func(*args, **kwargs):
|
||||
"""注册内容函数,声明此函数为一个内容函数,在对话中将发送此函数给GPT以供其调用
|
||||
此函数可以具有任意的参数,但必须按照[此文档](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format)
|
||||
所述的格式编写函数的docstring。
|
||||
此功能仅支持在使用gpt-3.5或gpt-4系列模型时使用。
|
||||
"""
|
||||
return Plugin.func(*args, **kwargs)
|
||||
|
||||
|
||||
__current_registering_plugin__ = ""
|
||||
|
||||
|
||||
def require_ver(ge: str, le: str="v999.9.9") -> bool:
|
||||
"""插件版本要求装饰器
|
||||
|
||||
Args:
|
||||
ge (str): 最低版本要求
|
||||
le (str, optional): 最高版本要求
|
||||
|
||||
Returns:
|
||||
bool: 是否满足要求, False时为无法获取版本号,True时为满足要求,报错为不满足要求
|
||||
"""
|
||||
qchatgpt_version = ""
|
||||
|
||||
from pkg.utils.updater import get_current_tag, compare_version_str
|
||||
|
||||
try:
|
||||
qchatgpt_version = get_current_tag() # 从updater模块获取版本号
|
||||
except:
|
||||
return False
|
||||
|
||||
if compare_version_str(qchatgpt_version, ge) < 0 or \
|
||||
(compare_version_str(qchatgpt_version, le) > 0):
|
||||
raise Exception("QChatGPT 版本不满足要求,某些功能(可能是由插件提供的)无法正常使用。(要求版本:{}-{},但当前版本:{})".format(ge, le, qchatgpt_version))
|
||||
|
||||
return True
|
||||
|
||||
|
||||
class Plugin:
|
||||
"""插件基类"""
|
||||
|
||||
host: host.PluginHost
|
||||
"""插件宿主,提供插件的一些基础功能"""
|
||||
|
||||
@classmethod
|
||||
def on(cls, event):
|
||||
"""事件处理器装饰器
|
||||
|
||||
:param
|
||||
event: 事件类型
|
||||
:return:
|
||||
None
|
||||
"""
|
||||
global __current_registering_plugin__
|
||||
|
||||
def wrapper(func):
|
||||
plugin_hooks = host.__plugins__[__current_registering_plugin__]["hooks"]
|
||||
|
||||
if event not in plugin_hooks:
|
||||
plugin_hooks[event] = []
|
||||
plugin_hooks[event].append(func)
|
||||
|
||||
# print("registering hook: p='{}', e='{}', f={}".format(__current_registering_plugin__, event, func))
|
||||
|
||||
host.__plugins__[__current_registering_plugin__]["hooks"] = plugin_hooks
|
||||
|
||||
return func
|
||||
|
||||
return wrapper
|
||||
|
||||
@classmethod
|
||||
def func(cls, name: str=None):
|
||||
"""内容函数装饰器
|
||||
"""
|
||||
global __current_registering_plugin__
|
||||
from CallingGPT.entities.namespace import get_func_schema
|
||||
|
||||
def wrapper(func):
|
||||
|
||||
function_schema = get_func_schema(func)
|
||||
function_schema['name'] = __current_registering_plugin__ + '-' + (func.__name__ if name is None else name)
|
||||
|
||||
function_schema['enabled'] = True
|
||||
|
||||
host.__function_inst_map__[function_schema['name']] = function_schema['function']
|
||||
|
||||
del function_schema['function']
|
||||
|
||||
# logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema))
|
||||
|
||||
host.__callable_functions__.append(
|
||||
function_schema
|
||||
)
|
||||
|
||||
return func
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def register(name: str, description: str, version: str, author: str):
|
||||
"""注册插件, 此函数作为装饰器使用
|
||||
|
||||
Args:
|
||||
name (str): 插件名称
|
||||
description (str): 插件描述
|
||||
version (str): 插件版本
|
||||
author (str): 插件作者
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
global __current_registering_plugin__
|
||||
|
||||
__current_registering_plugin__ = name
|
||||
# print("registering plugin: n='{}', d='{}', v={}, a='{}'".format(name, description, version, author))
|
||||
host.__plugins__[name] = {
|
||||
"name": name,
|
||||
"description": description,
|
||||
"version": version,
|
||||
"author": author,
|
||||
"hooks": {},
|
||||
"path": host.__current_module_path__,
|
||||
"enabled": True,
|
||||
"instance": None,
|
||||
}
|
||||
|
||||
def wrapper(cls: Plugin):
|
||||
cls.name = name
|
||||
cls.description = description
|
||||
cls.version = version
|
||||
cls.author = author
|
||||
cls.host = context.get_plugin_host()
|
||||
cls.enabled = True
|
||||
cls.path = host.__current_module_path__
|
||||
|
||||
# 存到插件列表
|
||||
host.__plugins__[name]["class"] = cls
|
||||
|
||||
logging.info("插件注册完成: n='{}', d='{}', v={}, a='{}' ({})".format(name, description, version, author, cls))
|
||||
|
||||
return cls
|
||||
|
||||
return wrapper
|
||||
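# --- Illustrative sketch: a minimal plugin written against the decorators and
# --- event constants above. The file is assumed to live at
# --- plugins/<dir>/main.py per the loader's convention; plugin name,
# --- directory and reply text are example values, not part of the original diff.
from pkg.plugin.models import Plugin, register, on, PersonNormalMessageReceived
from pkg.plugin.host import EventContext, PluginHost


@register(name="Hello", description="reply pong to ping", version="0.1", author="example")
class HelloPlugin(Plugin):
    def __init__(self, plugin_host: PluginHost):
        pass

    @on(PersonNormalMessageReceived)
    def person_message(self, event: EventContext, **kwargs):
        if kwargs["text_message"].strip() == "ping":
            event.add_return("reply", ["pong"])  # reply components, see the hook docstring
            event.prevent_default()              # suppress the default GPT response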
@@ -1,103 +0,0 @@
import json
import os

import logging

from ..plugin import host

def wrapper_dict_from_runtime_context() -> dict:
    """从变量中包装settings.json的数据字典"""
    settings = {
        "order": [],
        "functions": {
            "enabled": host.__enable_content_functions__
        }
    }

    for plugin_name in host.__plugins_order__:
        settings["order"].append(plugin_name)

    return settings


def apply_settings(settings: dict):
    """将settings.json数据应用到变量中"""
    if "order" in settings:
        host.__plugins_order__ = settings["order"]

    if "functions" in settings:
        if "enabled" in settings["functions"]:
            host.__enable_content_functions__ = settings["functions"]["enabled"]
            # logging.debug("set content function enabled: {}".format(host.__enable_content_functions__))


def dump_settings():
    """保存settings.json数据"""
    logging.debug("保存plugins/settings.json数据")

    settings = wrapper_dict_from_runtime_context()

    with open("plugins/settings.json", "w", encoding="utf-8") as f:
        json.dump(settings, f, indent=4, ensure_ascii=False)


def load_settings():
    """加载settings.json数据"""
    logging.debug("加载plugins/settings.json数据")

    # 读取plugins/settings.json
    settings = {}

    # 检查文件是否存在
    if not os.path.exists("plugins/settings.json"):
        # 不存在则创建
        with open("plugins/settings.json", "w", encoding="utf-8") as f:
            json.dump(wrapper_dict_from_runtime_context(), f, indent=4, ensure_ascii=False)

    with open("plugins/settings.json", "r", encoding="utf-8") as f:
        settings = json.load(f)

    if settings is None:
        settings = {}

    # 检查每个设置项
    if "order" not in settings:
        settings["order"] = []

    settings_modified = False

    settings_copy = settings.copy()

    # 检查settings中多余的插件项

    # order
    for plugin_name in settings_copy["order"]:
        if plugin_name not in host.__plugins_order__:
            settings["order"].remove(plugin_name)
            settings_modified = True

    # 检查settings中缺少的插件项

    # order
    for plugin_name in host.__plugins_order__:
        if plugin_name not in settings_copy["order"]:
            settings["order"].append(plugin_name)
            settings_modified = True

    if "functions" not in settings:
        settings["functions"] = {
            "enabled": host.__enable_content_functions__
        }
        settings_modified = True
    elif "enabled" not in settings["functions"]:
        settings["functions"]["enabled"] = host.__enable_content_functions__
        settings_modified = True

    logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enabled"] else "禁用"))

    apply_settings(settings)

    if settings_modified:
        dump_settings()
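# --- Illustrative sketch: what dump_settings() above writes to
# --- plugins/settings.json; the plugin names in "order" are examples.
example_settings = {
    "order": ["Hello", "ExamplePlugin"],  # execution order consumed by the plugin host
    "functions": {
        "enabled": True                   # global switch for content functions
    }
}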
@@ -1,94 +0,0 @@
# 控制插件的开关
import json
import logging
import os

from ..plugin import host


def wrapper_dict_from_plugin_list() -> dict:
    """将插件列表转换为开关json"""
    switch = {}

    for plugin_name in host.__plugins__:
        plugin = host.__plugins__[plugin_name]

        switch[plugin_name] = {
            "path": plugin["path"],
            "enabled": plugin["enabled"],
        }

    return switch


def apply_switch(switch: dict):
    """将开关数据应用到插件列表中"""
    # print("将开关数据应用到插件列表中")
    # print(switch)
    for plugin_name in switch:
        host.__plugins__[plugin_name]["enabled"] = switch[plugin_name]["enabled"]

        # 查找此插件的所有内容函数
        for func in host.__callable_functions__:
            if func['name'].startswith(plugin_name + '-'):
                func['enabled'] = switch[plugin_name]["enabled"]


def dump_switch():
    """保存开关数据"""
    logging.debug("保存开关数据")
    # 将开关数据写入plugins/switch.json

    switch = wrapper_dict_from_plugin_list()

    with open("plugins/switch.json", "w", encoding="utf-8") as f:
        json.dump(switch, f, indent=4, ensure_ascii=False)


def load_switch():
    """加载开关数据"""
    logging.debug("加载开关数据")
    # 读取plugins/switch.json

    switch = {}

    # 检查文件是否存在
    if not os.path.exists("plugins/switch.json"):
        # 不存在则创建
        with open("plugins/switch.json", "w", encoding="utf-8") as f:
            json.dump(switch, f, indent=4, ensure_ascii=False)

    with open("plugins/switch.json", "r", encoding="utf-8") as f:
        switch = json.load(f)

    if switch is None:
        switch = {}

    switch_modified = False

    switch_copy = switch.copy()
    # 检查switch中多余的和path不相符的
    for plugin_name in switch_copy:
        if plugin_name not in host.__plugins__:
            del switch[plugin_name]
            switch_modified = True
        elif switch[plugin_name]["path"] != host.__plugins__[plugin_name]["path"]:
            # 删除此不相符的
            del switch[plugin_name]
            switch_modified = True

    # 检查plugin中多余的
    for plugin_name in host.__plugins__:
        if plugin_name not in switch:
            switch[plugin_name] = {
                "path": host.__plugins__[plugin_name]["path"],
                "enabled": host.__plugins__[plugin_name]["enabled"],
            }
            switch_modified = True

    # 应用开关数据
    apply_switch(switch)

    # 如果switch有修改,保存
    if switch_modified:
        dump_switch()
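# --- Illustrative sketch: what dump_switch() above writes to
# --- plugins/switch.json; the plugin name and path are examples.
example_switch = {
    "Hello": {
        "path": "plugins/hello/main.py",
        "enabled": True
    }
}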
@@ -1,137 +0,0 @@
|
||||
# MessageSource的适配器
|
||||
import typing
|
||||
|
||||
import mirai
|
||||
|
||||
|
||||
class MessageSourceAdapter:
|
||||
bot_account_id: int
|
||||
def __init__(self, config: dict):
|
||||
pass
|
||||
|
||||
def send_message(
|
||||
self,
|
||||
target_type: str,
|
||||
target_id: str,
|
||||
message: mirai.MessageChain
|
||||
):
|
||||
"""发送消息
|
||||
|
||||
Args:
|
||||
target_type (str): 目标类型,`person`或`group`
|
||||
target_id (str): 目标ID
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def reply_message(
|
||||
self,
|
||||
message_source: mirai.MessageEvent,
|
||||
message: mirai.MessageChain,
|
||||
quote_origin: bool = False
|
||||
):
|
||||
"""回复消息
|
||||
|
||||
Args:
|
||||
message_source (mirai.MessageEvent): YiriMirai消息源事件
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
quote_origin (bool, optional): 是否引用原消息. Defaults to False.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def is_muted(self, group_id: int) -> bool:
|
||||
"""获取账号是否在指定群被禁言"""
|
||||
raise NotImplementedError
|
||||
|
||||
def register_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注册事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def unregister_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注销事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def run_sync(self):
|
||||
"""以阻塞的方式运行适配器"""
|
||||
raise NotImplementedError
|
||||
|
||||
def kill(self) -> bool:
|
||||
"""关闭适配器
|
||||
|
||||
Returns:
|
||||
bool: 是否成功关闭,热重载时若此函数返回False则不会重载MessageSource底层
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class MessageConverter:
|
||||
"""消息链转换器基类"""
|
||||
@staticmethod
|
||||
def yiri2target(message_chain: mirai.MessageChain):
|
||||
"""将YiriMirai消息链转换为目标消息链
|
||||
|
||||
Args:
|
||||
message_chain (mirai.MessageChain): YiriMirai消息链
|
||||
|
||||
Returns:
|
||||
typing.Any: 目标消息链
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@staticmethod
|
||||
def target2yiri(message_chain: typing.Any) -> mirai.MessageChain:
|
||||
"""将目标消息链转换为YiriMirai消息链
|
||||
|
||||
Args:
|
||||
message_chain (typing.Any): 目标消息链
|
||||
|
||||
Returns:
|
||||
mirai.MessageChain: YiriMirai消息链
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class EventConverter:
|
||||
"""事件转换器基类"""
|
||||
|
||||
@staticmethod
|
||||
def yiri2target(event: typing.Type[mirai.Event]):
|
||||
"""将YiriMirai事件转换为目标事件
|
||||
|
||||
Args:
|
||||
event (typing.Type[mirai.Event]): YiriMirai事件
|
||||
|
||||
Returns:
|
||||
typing.Any: 目标事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@staticmethod
|
||||
def target2yiri(event: typing.Any) -> mirai.Event:
|
||||
"""将目标事件的调用参数转换为YiriMirai的事件参数对象
|
||||
|
||||
Args:
|
||||
event (typing.Any): 目标事件
|
||||
|
||||
Returns:
|
||||
typing.Type[mirai.Event]: YiriMirai事件
|
||||
"""
|
||||
raise NotImplementedError
|
||||
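# --- Illustrative sketch: the surface a concrete adapter has to cover. The
# --- class below only logs instead of driving a real protocol backend and is
# --- an example, not one of the adapters shipped with the project.
import typing

import mirai


class DummyAdapter(MessageSourceAdapter):
    bot_account_id = 10000

    def __init__(self, config: dict):
        self.config = config
        self.listeners: dict = {}

    def send_message(self, target_type: str, target_id: str, message: mirai.MessageChain):
        print("[dummy] send to {} {}: {}".format(target_type, target_id, message))

    def register_listener(self, event_type: typing.Type[mirai.Event], callback):
        self.listeners.setdefault(event_type, []).append(callback)

    def run_sync(self):
        pass  # a real adapter blocks here and pumps events to the listeners

    def kill(self) -> bool:
        return True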
@@ -1,50 +0,0 @@
from ..utils import context


def is_banned(launcher_type: str, launcher_id: int, sender_id: int) -> bool:
    if not context.get_qqbot_manager().enable_banlist:
        return False

    result = False

    if launcher_type == 'group':
        # 检查是否显式声明发起人QQ要被person忽略
        if sender_id in context.get_qqbot_manager().ban_person:
            result = True
        else:
            for group_rule in context.get_qqbot_manager().ban_group:
                if type(group_rule) == int:
                    if group_rule == launcher_id:  # 此群群号被禁用
                        result = True
                elif type(group_rule) == str:
                    if group_rule.startswith('!'):
                        # 截取!后面的字符串作为表达式,判断是否匹配
                        reg_str = group_rule[1:]
                        import re
                        if re.match(reg_str, str(launcher_id)):  # 被豁免,最高级别
                            result = False
                            break
                    else:
                        # 判断是否匹配regexp
                        import re
                        if re.match(group_rule, str(launcher_id)):  # 此群群号被禁用
                            result = True

    else:
        # ban_person, 与群规则相同
        for person_rule in context.get_qqbot_manager().ban_person:
            if type(person_rule) == int:
                if person_rule == launcher_id:
                    result = True
            elif type(person_rule) == str:
                if person_rule.startswith('!'):
                    reg_str = person_rule[1:]
                    import re
                    if re.match(reg_str, str(launcher_id)):
                        result = False
                        break
                else:
                    import re
                    if re.match(person_rule, str(launcher_id)):
                        result = True
    return result
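# --- Illustrative sketch of how the ban rules above are interpreted; the rule
# --- list and QQ numbers are example values. A leading '!' turns a regexp
# --- rule into an exemption that overrides every other match.
import re

example_ban_group = [123456789, r"123\d+", r"!12345678\d"]

def _would_ban(rules, launcher_id: int) -> bool:
    banned = False
    for rule in rules:
        if isinstance(rule, int) and rule == launcher_id:
            banned = True
        elif isinstance(rule, str):
            if rule.startswith('!'):
                if re.match(rule[1:], str(launcher_id)):
                    return False  # exempted, highest priority
            elif re.match(rule, str(launcher_id)):
                banned = True
    return banned

print(_would_ban(example_ban_group, 123456781))  # False: caught by the '!' exemption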
@@ -1,100 +0,0 @@
# 长消息处理相关
import os
import time
import base64
import typing

from mirai.models.message import MessageComponent, MessageChain, Image
from mirai.models.message import ForwardMessageNode
from mirai.models.base import MiraiBaseModel

from ..utils import text2img
from ..utils import context


class ForwardMessageDiaplay(MiraiBaseModel):
    title: str = "群聊的聊天记录"
    brief: str = "[聊天记录]"
    source: str = "聊天记录"
    preview: typing.List[str] = []
    summary: str = "查看x条转发消息"


class Forward(MessageComponent):
    """合并转发。"""
    type: str = "Forward"
    """消息组件类型。"""
    display: ForwardMessageDiaplay
    """显示信息"""
    node_list: typing.List[ForwardMessageNode]
    """转发消息节点列表。"""
    def __init__(self, *args, **kwargs):
        if len(args) == 1:
            self.node_list = args[0]
            super().__init__(**kwargs)
            return  # 避免下方再次以位置参数初始化
        super().__init__(*args, **kwargs)

    def __str__(self):
        return '[聊天记录]'


def text_to_image(text: str) -> MessageComponent:
    """将文本转换成图片"""
    # 检查temp文件夹是否存在
    if not os.path.exists('temp'):
        os.mkdir('temp')
    img_path = text2img.text_to_image(text_str=text, save_as='temp/{}.png'.format(int(time.time())))

    compressed_path, size = text2img.compress_image(img_path, outfile="temp/{}_compressed.png".format(int(time.time())))
    # 读取图片,转换成base64
    with open(compressed_path, 'rb') as f:
        img = f.read()

    b64 = base64.b64encode(img)

    # 删除图片
    os.remove(img_path)

    # 判断compressed_path是否存在
    if os.path.exists(compressed_path):
        os.remove(compressed_path)
    # 返回图片
    return Image(base64=b64.decode('utf-8'))


def check_text(text: str) -> list:
    """检查文本是否为长消息,并转换成该使用的消息链组件"""

    config = context.get_config_manager().data

    if len(text) > config['blob_message_threshold']:

        # logging.info("长消息: {}".format(text))
        if config['blob_message_strategy'] == 'image':
            # 转换成图片
            return [text_to_image(text)]
        elif config['blob_message_strategy'] == 'forward':

            # 包装转发消息
            display = ForwardMessageDiaplay(
                title='群聊的聊天记录',
                brief='[聊天记录]',
                source='聊天记录',
                preview=["bot: "+text],
                summary="查看1条转发消息"
            )

            node = ForwardMessageNode(
                sender_id=config['mirai_http_api_config']['qq'],
                sender_name='bot',
                message_chain=MessageChain([text])
            )

            forward = Forward(
                display=display,
                node_list=[node]
            )

            return [forward]
    else:
        return [text]
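# --- Illustrative sketch: how the reply pipeline is expected to consume
# --- check_text(). The threshold and strategy names mirror the config keys
# --- read above; the reply string is an example value.
long_reply = "line of text\n" * 200

components = check_text(long_reply)
# With blob_message_strategy == 'image'   -> [Image(...)] rendered by text_to_image()
# With blob_message_strategy == 'forward' -> [Forward(...)] wrapping one ForwardMessageNode
# Below blob_message_threshold            -> [long_reply] unchanged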
@@ -1,333 +0,0 @@
|
||||
import logging
|
||||
import copy
|
||||
import pkgutil
|
||||
import traceback
|
||||
import json
|
||||
|
||||
import tips as tips_custom
|
||||
|
||||
|
||||
__command_list__ = {}
|
||||
"""命令树
|
||||
|
||||
结构:
|
||||
{
|
||||
'cmd1': {
|
||||
'description': 'cmd1 description',
|
||||
'usage': 'cmd1 usage',
|
||||
'aliases': ['cmd1 alias1', 'cmd1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': None,
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd1.CommandCmd1'>,
|
||||
'sub': [
|
||||
'cmd1-1'
|
||||
]
|
||||
},
|
||||
'cmd1.cmd1-1: {
|
||||
'description': 'cmd1-1 description',
|
||||
'usage': 'cmd1-1 usage',
|
||||
'aliases': ['cmd1-1 alias1', 'cmd1-1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': 'cmd1',
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd1.CommandCmd1_1'>,
|
||||
'sub': []
|
||||
},
|
||||
'cmd2': {
|
||||
'description': 'cmd2 description',
|
||||
'usage': 'cmd2 usage',
|
||||
'aliases': ['cmd2 alias1', 'cmd2 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': None,
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd2.CommandCmd2'>,
|
||||
'sub': [
|
||||
'cmd2-1'
|
||||
]
|
||||
},
|
||||
'cmd2.cmd2-1': {
|
||||
'description': 'cmd2-1 description',
|
||||
'usage': 'cmd2-1 usage',
|
||||
'aliases': ['cmd2-1 alias1', 'cmd2-1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': 'cmd2',
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd2.CommandCmd2_1'>,
|
||||
'sub': [
|
||||
'cmd2-1-1'
|
||||
]
|
||||
},
|
||||
'cmd2.cmd2-1.cmd2-1-1': {
|
||||
'description': 'cmd2-1-1 description',
|
||||
'usage': 'cmd2-1-1 usage',
|
||||
'aliases': ['cmd2-1-1 alias1', 'cmd2-1-1 alias2'],
|
||||
'privilege': 0,
|
||||
'parent': 'cmd2.cmd2-1',
|
||||
'cls': <class 'pkg.qqbot.cmds.cmd2.CommandCmd2_1_1'>,
|
||||
'sub': []
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
__tree_index__: dict[str, list] = {}
|
||||
"""命令树索引
|
||||
|
||||
结构:
|
||||
{
|
||||
'pkg.qqbot.cmds.cmd1.CommandCmd1': 'cmd1', # 顶级命令
|
||||
'pkg.qqbot.cmds.cmd1.CommandCmd1_1': 'cmd1.cmd1-1', # 类名: 节点路径
|
||||
'pkg.qqbot.cmds.cmd2.CommandCmd2': 'cmd2',
|
||||
'pkg.qqbot.cmds.cmd2.CommandCmd2_1': 'cmd2.cmd2-1',
|
||||
'pkg.qqbot.cmds.cmd2.CommandCmd2_1_1': 'cmd2.cmd2-1.cmd2-1-1',
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
class Context:
|
||||
"""命令执行上下文"""
|
||||
command: str
|
||||
"""顶级命令文本"""
|
||||
|
||||
crt_command: str
|
||||
"""当前子命令文本"""
|
||||
|
||||
params: list
|
||||
"""完整参数列表"""
|
||||
|
||||
crt_params: list
|
||||
"""当前子命令参数列表"""
|
||||
|
||||
session_name: str
|
||||
"""会话名"""
|
||||
|
||||
text_message: str
|
||||
"""命令完整文本"""
|
||||
|
||||
launcher_type: str
|
||||
"""命令发起者类型"""
|
||||
|
||||
launcher_id: int
|
||||
"""命令发起者ID"""
|
||||
|
||||
sender_id: int
|
||||
"""命令发送者ID"""
|
||||
|
||||
is_admin: bool
|
||||
"""[过时]命令发送者是否为管理员"""
|
||||
|
||||
privilege: int
|
||||
"""命令发送者权限等级"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.__dict__.update(kwargs)
|
||||
|
||||
|
||||
class AbstractCommandNode:
|
||||
"""命令抽象类"""
|
||||
|
||||
parent: type
|
||||
"""父命令类"""
|
||||
|
||||
name: str
|
||||
"""命令名"""
|
||||
|
||||
description: str
|
||||
"""命令描述"""
|
||||
|
||||
usage: str
|
||||
"""命令用法"""
|
||||
|
||||
aliases: list[str]
|
||||
"""命令别名"""
|
||||
|
||||
privilege: int
|
||||
"""命令权限等级, 权限大于等于此值的用户才能执行命令"""
|
||||
|
||||
@classmethod
|
||||
def process(cls, ctx: Context) -> tuple[bool, list]:
|
||||
"""命令处理函数
|
||||
|
||||
:param ctx: 命令执行上下文
|
||||
|
||||
:return: (是否执行, 回复列表(若执行))
|
||||
|
||||
若未执行,将自动以下一个参数查找并执行子命令
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def help(cls) -> str:
|
||||
"""获取命令帮助信息"""
|
||||
return '命令: {}\n描述: {}\n用法: \n{}\n别名: {}\n权限: {}'.format(
|
||||
cls.name,
|
||||
cls.description,
|
||||
cls.usage,
|
||||
', '.join(cls.aliases),
|
||||
cls.privilege
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def register(
|
||||
parent: type = None,
|
||||
name: str = None,
|
||||
description: str = None,
|
||||
usage: str = None,
|
||||
aliases: list[str] = None,
|
||||
privilege: int = 0
|
||||
):
|
||||
"""注册命令
|
||||
|
||||
:param cls: 命令类
|
||||
:param name: 命令名
|
||||
:param parent: 父命令类
|
||||
"""
|
||||
global __command_list__, __tree_index__
|
||||
|
||||
def wrapper(cls):
|
||||
cls.name = name
|
||||
cls.parent = parent
|
||||
cls.description = description
|
||||
cls.usage = usage
|
||||
cls.aliases = aliases
|
||||
cls.privilege = privilege
|
||||
|
||||
logging.debug("cls: {}, name: {}, parent: {}".format(cls, name, parent))
|
||||
|
||||
if parent is None:
|
||||
# 顶级命令注册
|
||||
__command_list__[name] = {
|
||||
'description': cls.description,
|
||||
'usage': cls.usage,
|
||||
'aliases': cls.aliases,
|
||||
'privilege': cls.privilege,
|
||||
'parent': None,
|
||||
'cls': cls,
|
||||
'sub': []
|
||||
}
|
||||
# 更新索引
|
||||
__tree_index__[cls.__module__ + '.' + cls.__name__] = name
|
||||
else:
|
||||
# 获取父节点名称
|
||||
path = __tree_index__[parent.__module__ + '.' + parent.__name__]
|
||||
|
||||
parent_node = __command_list__[path]
|
||||
# 链接父子命令
|
||||
__command_list__[path]['sub'].append(name)
|
||||
# 注册子命令
|
||||
__command_list__[path + '.' + name] = {
|
||||
'description': cls.description,
|
||||
'usage': cls.usage,
|
||||
'aliases': cls.aliases,
|
||||
'privilege': cls.privilege,
|
||||
'parent': path,
|
||||
'cls': cls,
|
||||
'sub': []
|
||||
}
|
||||
# 更新索引
|
||||
__tree_index__[cls.__module__ + '.' + cls.__name__] = path + '.' + name
|
||||
|
||||
return cls
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class CommandPrivilegeError(Exception):
|
||||
"""命令权限不足或不存在异常"""
|
||||
pass
|
||||
|
||||
|
||||
# 传入Context对象,广搜命令树,返回执行结果
|
||||
# 若命令被处理,返回reply列表
|
||||
# 若命令未被处理,继续执行下一级命令
|
||||
# 若命令不存在,报异常
|
||||
def execute(context: Context) -> list:
|
||||
"""执行命令
|
||||
|
||||
:param ctx: 命令执行上下文
|
||||
|
||||
:return: 回复列表
|
||||
"""
|
||||
global __command_list__
|
||||
|
||||
# 拷贝ctx
|
||||
ctx: Context = copy.deepcopy(context)
|
||||
|
||||
# 从树取出顶级命令
|
||||
node = __command_list__
|
||||
|
||||
path = ctx.command
|
||||
|
||||
while True:
|
||||
try:
|
||||
node = __command_list__[path]
|
||||
logging.debug('执行命令: {}'.format(path))
|
||||
|
||||
# 检查权限
|
||||
if ctx.privilege < node['privilege']:
|
||||
raise CommandPrivilegeError(tips_custom.command_admin_message+"{}".format(path))
|
||||
|
||||
# 执行
|
||||
execed, reply = node['cls'].process(ctx)
|
||||
if execed:
|
||||
return reply
|
||||
else:
|
||||
# 删除crt_params第一个参数
|
||||
ctx.crt_command = ctx.crt_params.pop(0)
|
||||
# 下一个path
|
||||
path = path + '.' + ctx.crt_command
|
||||
except KeyError:
|
||||
traceback.print_exc()
|
||||
raise CommandPrivilegeError(tips_custom.command_err_message+"{}".format(path))
|
||||
|
||||
|
||||
def register_all():
|
||||
"""启动时调用此函数注册所有命令
|
||||
|
||||
递归处理pkg.qqbot.cmds包下及其子包下所有模块的所有继承于AbstractCommand的类
|
||||
"""
|
||||
# 模块:遍历其中的继承于AbstractCommand的类,进行注册
|
||||
# 包:递归处理包下的模块
|
||||
# 排除__开头的属性
|
||||
global __command_list__, __tree_index__
|
||||
|
||||
import pkg.qqbot.cmds
|
||||
|
||||
def walk(module, prefix, path_prefix):
|
||||
# 排除不处于pkg.qqbot.cmds中的包
|
||||
if not module.__name__.startswith('pkg.qqbot.cmds'):
|
||||
return
|
||||
|
||||
logging.debug('walk: {}, path: {}'.format(module.__name__, module.__path__))
|
||||
for item in pkgutil.iter_modules(module.__path__):
|
||||
if item.name.startswith('__'):
|
||||
continue
|
||||
|
||||
if item.ispkg:
|
||||
walk(__import__(module.__name__ + '.' + item.name, fromlist=['']), prefix + item.name + '.', path_prefix + item.name + '/')
|
||||
else:
|
||||
m = __import__(module.__name__ + '.' + item.name, fromlist=[''])
|
||||
# for name, cls in inspect.getmembers(m, inspect.isclass):
|
||||
# # 检查是否为命令类
|
||||
# if cls.__module__ == m.__name__ and issubclass(cls, AbstractCommandNode) and cls != AbstractCommandNode:
|
||||
# cls.register(cls, cls.name, cls.parent)
|
||||
|
||||
walk(pkg.qqbot.cmds, '', '')
|
||||
logging.debug(__command_list__)
|
||||
|
||||
|
||||
def apply_privileges():
|
||||
"""读取cmdpriv.json并应用命令权限"""
|
||||
# 读取内容
|
||||
json_str = ""
|
||||
with open('cmdpriv.json', 'r', encoding="utf-8") as f:
|
||||
json_str = f.read()
|
||||
|
||||
data = json.loads(json_str)
|
||||
for path, priv in data.items():
|
||||
if path == 'comment':
|
||||
continue
|
||||
|
||||
if path not in __command_list__:
|
||||
continue
|
||||
|
||||
if __command_list__[path]['privilege'] != priv:
|
||||
logging.debug('应用权限: {} -> {}(default: {})'.format(path, priv, __command_list__[path]['privilege']))
|
||||
|
||||
__command_list__[path]['privilege'] = priv
|
||||
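# --- Illustrative sketch: the shape of cmdpriv.json consumed by
# --- apply_privileges() above. Keys are dotted node paths in the command
# --- tree; the values below simply restate the default privileges of the
# --- commands registered later in this diff.
example_cmdpriv = {
    "comment": "values override the default privilege of each command node",
    "draw": 1,
    "plugin": 1,
    "plugin.get": 2
}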
@@ -1,37 +0,0 @@
import logging

import mirai

from .. import aamgr
from ....utils import context


@aamgr.AbstractCommandNode.register(
    parent=None,
    name="draw",
    description="使用DALL·E生成图片",
    usage="!draw <图片提示语>",
    aliases=[],
    privilege=1
)
class DrawCommand(aamgr.AbstractCommandNode):
    @classmethod
    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
        import pkg.openai.session

        reply = []

        if len(ctx.params) == 0:
            reply = ["[bot]err: 未提供图片描述文字"]
        else:
            session = pkg.openai.session.get_session(ctx.session_name)

            res = session.draw_image(" ".join(ctx.params))

            logging.debug("draw_image result:{}".format(res))
            reply = [mirai.Image(url=res.data[0].url)]
            config = context.get_config_manager().data
            if config['include_image_description']:
                reply.append(" ".join(ctx.params))

        return True, reply
@@ -1,32 +0,0 @@
import logging
import json

from .. import aamgr

@aamgr.AbstractCommandNode.register(
    parent=None,
    name="func",
    description="管理内容函数",
    usage="!func",
    aliases=[],
    privilege=1
)
class FuncCommand(aamgr.AbstractCommandNode):
    @classmethod
    def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
        from pkg.plugin.models import host

        reply = []

        reply_str = "当前已加载的内容函数:\n\n"

        logging.debug("host.__callable_functions__: {}".format(json.dumps(host.__callable_functions__, indent=4)))

        index = 1
        for func in host.__callable_functions__:
            reply_str += "{}. {}{}:\n{}\n\n".format(index, ("(已禁用) " if not func['enabled'] else ""), func['name'], func['description'])
            index += 1

        reply = [reply_str]

        return True, reply
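# --- Illustrative sketch: registering a content function so that it shows up
# --- in the !func listing above. The docstring follows the CallingGPT format
# --- referenced in pkg.plugin.models; plugin name, function name and body are
# --- example values.
from pkg.plugin.models import Plugin, register, func


@register(name="Tools", description="demo content function", version="0.1", author="example")
class ToolsPlugin(Plugin):
    def __init__(self, plugin_host):
        pass

    @func("fetch_page")
    def fetch_page(self, url: str):
        """Call this function to fetch a web page before answering questions about it.

        Args:
            url(str): the url to visit

        Returns:
            str: plain text content of the page
        """
        return "example page content for " + url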
@@ -1,198 +0,0 @@
|
||||
from ....plugin import host as plugin_host
|
||||
from ....utils import updater
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="plugin",
|
||||
description="插件管理",
|
||||
usage="!plugin\n!plugin get <插件仓库地址>\n!plugin update\n!plugin del <插件名>\n!plugin on <插件名>\n!plugin off <插件名>",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class PluginCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
plugin_list = plugin_host.__plugins__
|
||||
if len(ctx.params) == 0:
|
||||
# 列出所有插件
|
||||
|
||||
reply_str = "[bot]所有插件({}):\n".format(len(plugin_host.__plugins__))
|
||||
idx = 0
|
||||
for key in plugin_host.iter_plugins_name():
|
||||
plugin = plugin_list[key]
|
||||
reply_str += "\n#{} {} {}\n{}\nv{}\n作者: {}\n"\
|
||||
.format((idx+1), plugin['name'],
|
||||
"[已禁用]" if not plugin['enabled'] else "",
|
||||
plugin['description'],
|
||||
plugin['version'], plugin['author'])
|
||||
|
||||
if updater.is_repo("/".join(plugin['path'].split('/')[:-1])):
|
||||
remote_url = updater.get_remote_url("/".join(plugin['path'].split('/')[:-1]))
|
||||
if remote_url != "https://github.com/RockChinQ/QChatGPT" and remote_url != "https://gitee.com/RockChin/QChatGPT":
|
||||
reply_str += "源码: "+remote_url+"\n"
|
||||
|
||||
idx += 1
|
||||
|
||||
reply = [reply_str]
|
||||
return True, reply
|
||||
else:
|
||||
return False, []
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="get",
|
||||
description="安装插件",
|
||||
usage="!plugin get <插件仓库地址>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginGetCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import threading
|
||||
import logging
|
||||
import pkg.utils.context
|
||||
|
||||
if len(ctx.crt_params) == 0:
|
||||
reply = ["[bot]err: 请提供插件仓库地址"]
|
||||
return True, reply
|
||||
|
||||
reply = []
|
||||
def closure():
|
||||
try:
|
||||
plugin_host.install_plugin(ctx.crt_params[0])
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件安装成功,请发送 !reload 命令重载插件")
|
||||
except Exception as e:
|
||||
logging.error("插件安装失败:{}".format(e))
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件安装失败:{}".format(e))
|
||||
|
||||
threading.Thread(target=closure, args=()).start()
|
||||
reply = ["[bot]正在安装插件..."]
|
||||
return True, reply
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="update",
|
||||
description="更新指定插件或全部插件",
|
||||
usage="!plugin update",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginUpdateCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import threading
|
||||
import logging
|
||||
plugin_list = plugin_host.__plugins__
|
||||
|
||||
reply = []
|
||||
|
||||
if len(ctx.crt_params) > 0:
|
||||
def closure():
|
||||
try:
|
||||
import pkg.utils.context
|
||||
|
||||
updated = []
|
||||
|
||||
if ctx.crt_params[0] == 'all':
|
||||
for key in plugin_list:
|
||||
plugin_host.update_plugin(key)
|
||||
updated.append(key)
|
||||
else:
|
||||
plugin_path_name = plugin_host.get_plugin_path_name_by_plugin_name(ctx.crt_params[0])
|
||||
|
||||
if plugin_path_name is not None:
|
||||
plugin_host.update_plugin(ctx.crt_params[0])
|
||||
updated.append(ctx.crt_params[0])
|
||||
else:
|
||||
raise Exception("未找到插件: {}".format(ctx.crt_params[0]))
|
||||
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("已更新插件: {}, 请发送 !reload 重载插件".format(", ".join(updated)))
|
||||
except Exception as e:
|
||||
logging.error("插件更新失败:{}".format(e))
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("插件更新失败:{} 请使用 !plugin 命令确认插件名称或尝试手动更新插件".format(e))
|
||||
|
||||
reply = ["[bot]正在更新插件,请勿重复发起..."]
|
||||
threading.Thread(target=closure).start()
|
||||
else:
|
||||
reply = ["[bot]请指定要更新的插件, 或使用 !plugin update all 更新所有插件"]
|
||||
return True, reply
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="del",
|
||||
description="删除插件",
|
||||
usage="!plugin del <插件名>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginDelCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
plugin_list = plugin_host.__plugins__
|
||||
reply = []
|
||||
|
||||
if len(ctx.crt_params) < 1:
|
||||
reply = ["[bot]err: 未指定插件名"]
|
||||
else:
|
||||
plugin_name = ctx.crt_params[0]
|
||||
if plugin_name in plugin_list:
|
||||
unin_path = plugin_host.uninstall_plugin(plugin_name)
|
||||
reply = ["[bot]已删除插件: {} ({}), 请发送 !reload 重载插件".format(plugin_name, unin_path)]
|
||||
else:
|
||||
reply = ["[bot]err:未找到插件: {}, 请使用!plugin命令查看插件列表".format(plugin_name)]
|
||||
|
||||
return True, reply
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="on",
|
||||
description="启用指定插件",
|
||||
usage="!plugin on <插件名>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=PluginCommand,
|
||||
name="off",
|
||||
description="禁用指定插件",
|
||||
usage="!plugin off <插件名>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class PluginOnOffCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.plugin.switch as plugin_switch
|
||||
|
||||
plugin_list = plugin_host.__plugins__
|
||||
reply = []
|
||||
|
||||
print(ctx.params)
|
||||
new_status = ctx.params[0] == 'on'
|
||||
|
||||
if len(ctx.crt_params) < 1:
|
||||
reply = ["[bot]err: 未指定插件名"]
|
||||
else:
|
||||
plugin_name = ctx.crt_params[0]
|
||||
if plugin_name in plugin_list:
|
||||
plugin_list[plugin_name]['enabled'] = new_status
|
||||
|
||||
for func in plugin_host.__callable_functions__:
|
||||
if func['name'].startswith(plugin_name+"-"):
|
||||
func['enabled'] = new_status
|
||||
|
||||
plugin_switch.dump_switch()
|
||||
reply = ["[bot]已{}插件: {}".format("启用" if new_status else "禁用", plugin_name)]
|
||||
else:
|
||||
reply = ["[bot]err:未找到插件: {}, 请使用!plugin命令查看插件列表".format(plugin_name)]
|
||||
|
||||
return True, reply
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
from .. import aamgr
|
||||
from ....utils import context
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="default",
|
||||
description="操作情景预设",
|
||||
usage="!default\n!default set [指定情景预设为默认]",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class DefaultCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
params = ctx.params
|
||||
reply = []
|
||||
|
||||
config = context.get_config_manager().data
|
||||
|
||||
if len(params) == 0:
|
||||
# 输出目前所有情景预设
|
||||
import pkg.openai.dprompt as dprompt
|
||||
reply_str = "[bot]当前所有情景预设({}模式):\n\n".format(config['preset_mode'])
|
||||
|
||||
prompts = dprompt.mode_inst().list()
|
||||
|
||||
for key in prompts:
|
||||
pro = prompts[key]
|
||||
reply_str += "名称: {}".format(key)
|
||||
|
||||
for r in pro:
|
||||
reply_str += "\n - [{}]: {}".format(r['role'], r['content'])
|
||||
|
||||
reply_str += "\n\n"
|
||||
|
||||
reply_str += "\n当前默认情景预设:{}\n".format(dprompt.mode_inst().get_using_name())
|
||||
reply_str += "请使用 !default set <情景预设名称> 来设置默认情景预设"
|
||||
reply = [reply_str]
|
||||
else:
|
||||
return False, []
|
||||
|
||||
return True, reply
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=DefaultCommand,
|
||||
name="set",
|
||||
description="设置默认情景预设",
|
||||
usage="!default set <情景预设名称>",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class DefaultSetCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
|
||||
if len(ctx.crt_params) == 0:
|
||||
reply = ["[bot]err: 请指定情景预设名称"]
|
||||
elif len(ctx.crt_params) > 0:
|
||||
import pkg.openai.dprompt as dprompt
|
||||
try:
|
||||
full_name = dprompt.mode_inst().set_using_name(ctx.crt_params[0])
|
||||
reply = ["[bot]已设置默认情景预设为:{}".format(full_name)]
|
||||
except Exception as e:
|
||||
reply = ["[bot]err: {}".format(e)]
|
||||
|
||||
return True, reply
|
||||
@@ -1,51 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="del",
|
||||
description="删除当前会话的历史记录",
|
||||
usage="!del <序号>\n!del all",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class DelCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
params = ctx.params
|
||||
reply = []
|
||||
if len(params) == 0:
|
||||
reply = ["[bot]参数不足, 格式: !del <序号>\n可以通过!list查看序号"]
|
||||
else:
|
||||
if params[0] == 'all':
|
||||
return False, []
|
||||
elif params[0].isdigit():
|
||||
if pkg.openai.session.get_session(session_name).delete_history(int(params[0])):
|
||||
reply = ["[bot]已删除历史会话 #{}".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]没有历史会话 #{}".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]参数错误, 格式: !del <序号>\n可以通过!list查看序号"]
|
||||
|
||||
return True, reply
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=DelCommand,
|
||||
name="all",
|
||||
description="删除当前会话的全部历史记录",
|
||||
usage="!del all",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class DelAllCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
reply = []
|
||||
pkg.openai.session.get_session(session_name).delete_all_history()
|
||||
reply = ["[bot]已删除所有历史会话"]
|
||||
return True, reply
|
||||
@@ -1,50 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="delhst",
|
||||
description="删除指定会话的所有历史记录",
|
||||
usage="!delhst <会话名称>\n!delhst all",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class DelHistoryCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
import pkg.utils.context
|
||||
params = ctx.params
|
||||
reply = []
|
||||
if len(params) == 0:
|
||||
reply = [
|
||||
"[bot]err:请输入要删除的会话名: group_<群号> 或者 person_<QQ号>, 或使用 !delhst all 删除所有会话的历史记录"]
|
||||
else:
|
||||
if params[0] == 'all':
|
||||
return False, []
|
||||
else:
|
||||
if pkg.utils.context.get_database_manager().delete_all_history(params[0]):
|
||||
reply = ["[bot]已删除会话 {} 的所有历史记录".format(params[0])]
|
||||
else:
|
||||
reply = ["[bot]未找到会话 {} 的历史记录".format(params[0])]
|
||||
|
||||
return True, reply
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=DelHistoryCommand,
|
||||
name="all",
|
||||
description="删除所有会话的全部历史记录",
|
||||
usage="!delhst all",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class DelAllHistoryCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.utils.context
|
||||
reply = []
|
||||
pkg.utils.context.get_database_manager().delete_all_session_history()
|
||||
reply = ["[bot]已删除所有会话的历史记录"]
|
||||
return True, reply
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
import datetime
|
||||
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="last",
|
||||
description="切换前一次对话",
|
||||
usage="!last",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class LastCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
|
||||
reply = []
|
||||
result = pkg.openai.session.get_session(session_name).last_session()
|
||||
if result is None:
|
||||
reply = ["[bot]没有前一次的对话"]
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到前一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
|
||||
return True, reply
|
||||
@@ -1,65 +0,0 @@
|
||||
import datetime
|
||||
import json
|
||||
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name='list',
|
||||
description='列出当前会话的所有历史记录',
|
||||
usage='!list\n!list [页数]',
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class ListCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
params = ctx.params
|
||||
reply = []
|
||||
|
||||
pkg.openai.session.get_session(session_name).persistence()
|
||||
page = 0
|
||||
|
||||
if len(params) > 0:
|
||||
try:
|
||||
page = int(params[0])
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
results = pkg.openai.session.get_session(session_name).list_history(page=page)
|
||||
if len(results) == 0:
|
||||
reply_str = "[bot]第{}页没有历史会话".format(page)
|
||||
else:
|
||||
reply_str = "[bot]历史会话 第{}页:\n".format(page)
|
||||
current = -1
|
||||
for i in range(len(results)):
|
||||
# 时间(使用create_timestamp转换) 序号 部分内容
|
||||
datetime_obj = datetime.datetime.fromtimestamp(results[i]['create_timestamp'])
|
||||
msg = ""
|
||||
|
||||
msg = json.loads(results[i]['prompt'])
|
||||
|
||||
if len(msg) >= 2:
|
||||
reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
|
||||
datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
msg[0]['content'])
|
||||
else:
|
||||
reply_str += "#{} 创建:{} {}\n".format(i + page * 10,
|
||||
datetime_obj.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"无内容")
|
||||
if results[i]['create_timestamp'] == pkg.openai.session.get_session(
|
||||
session_name).create_timestamp:
|
||||
current = i + page * 10
|
||||
|
||||
reply_str += "\n以上信息倒序排列"
|
||||
if current != -1:
|
||||
reply_str += ",当前会话是 #{}\n".format(current)
|
||||
else:
|
||||
reply_str += ",当前处于全新会话或不在此页"
|
||||
|
||||
reply = [reply_str]
|
||||
|
||||
return True, reply
|
||||
@@ -1,29 +0,0 @@
|
||||
import datetime
|
||||
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="next",
|
||||
description="切换后一次对话",
|
||||
usage="!next",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class NextCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
reply = []
|
||||
|
||||
result = pkg.openai.session.get_session(session_name).next_session()
|
||||
if result is None:
|
||||
reply = ["[bot]没有后一次的对话"]
|
||||
else:
|
||||
datetime_str = datetime.datetime.fromtimestamp(result.create_timestamp).strftime(
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
reply = ["[bot]已切换到后一次的对话:\n创建时间:{}\n".format(datetime_str)]
|
||||
|
||||
return True, reply
|
||||
@@ -1,31 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="prompt",
|
||||
description="获取当前会话的前文",
|
||||
usage="!prompt",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class PromptCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import pkg.openai.session
|
||||
session_name = ctx.session_name
|
||||
params = ctx.params
|
||||
reply = []
|
||||
|
||||
msgs = ""
|
||||
session: list = pkg.openai.session.get_session(session_name).prompt
|
||||
for msg in session:
|
||||
if len(params) != 0 and params[0] in ['-all', '-a']:
|
||||
msgs = msgs + "{}: {}\n\n".format(msg['role'], msg['content'])
|
||||
elif len(msg['content']) > 30:
|
||||
msgs = msgs + "[{}]: {}...\n\n".format(msg['role'], msg['content'][:30])
|
||||
else:
|
||||
msgs = msgs + "[{}]: {}\n\n".format(msg['role'], msg['content'])
|
||||
reply = ["[bot]当前对话所有内容:\n{}".format(msgs)]
|
||||
|
||||
return True, reply
|
||||
@@ -1,33 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="resend",
|
||||
description="重新获取上一次问题的回复",
|
||||
usage="!resend",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class ResendCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
from ....openai import session as openai_session
|
||||
from ....utils import context
|
||||
from ....qqbot import message
|
||||
|
||||
session_name = ctx.session_name
|
||||
reply = []
|
||||
|
||||
session = openai_session.get_session(session_name)
|
||||
to_send = session.undo()
|
||||
|
||||
mgr = context.get_qqbot_manager()
|
||||
|
||||
config = context.get_config_manager().data
|
||||
|
||||
reply = message.process_normal_message(to_send, mgr, config,
|
||||
ctx.launcher_type, ctx.launcher_id,
|
||||
ctx.sender_id)
|
||||
|
||||
return True, reply
|
||||
@@ -1,35 +0,0 @@
|
||||
import tips as tips_custom
|
||||
|
||||
from .. import aamgr
|
||||
from ....openai import session
|
||||
from ....utils import context
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name='reset',
|
||||
description='重置当前会话',
|
||||
usage='!reset',
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class ResetCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
params = ctx.params
|
||||
session_name = ctx.session_name
|
||||
|
||||
reply = ""
|
||||
|
||||
if len(params) == 0:
|
||||
session.get_session(session_name).reset(explicit=True)
|
||||
reply = [tips_custom.command_reset_message]
|
||||
else:
|
||||
try:
|
||||
import pkg.openai.dprompt as dprompt
|
||||
session.get_session(session_name).reset(explicit=True, use_prompt=params[0])
|
||||
reply = [tips_custom.command_reset_name_message+"{}".format(dprompt.mode_inst().get_full_name(params[0]))]
|
||||
except Exception as e:
|
||||
reply = ["[bot]会话重置失败:{}".format(e)]
|
||||
|
||||
return True, reply
|
||||
@@ -1,93 +0,0 @@
|
||||
import json
|
||||
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
def config_operation(cmd, params):
|
||||
reply = []
|
||||
import pkg.utils.context
|
||||
# config = pkg.utils.context.get_config()
|
||||
cfg_mgr = pkg.utils.context.get_config_manager()
|
||||
|
||||
false = False
|
||||
true = True
|
||||
|
||||
reply_str = ""
|
||||
if len(params) == 0:
|
||||
reply = ["[bot]err:请输入!cmd cfg查看使用方法"]
|
||||
else:
|
||||
cfg_name = params[0]
|
||||
if cfg_name == 'all':
|
||||
reply_str = "[bot]所有配置项:\n\n"
|
||||
for cfg in cfg_mgr.data.keys():
|
||||
if not cfg.startswith('__') and not cfg == 'logging':
|
||||
# 根据配置项类型进行格式化,如果是字典则转换为json并格式化
|
||||
if isinstance(cfg_mgr.data[cfg], str):
|
||||
reply_str += "{}: \"{}\"\n".format(cfg, cfg_mgr.data[cfg])
|
||||
elif isinstance(cfg_mgr.data[cfg], dict):
|
||||
# 不进行unicode转义,并格式化
|
||||
reply_str += "{}: {}\n".format(cfg,
|
||||
json.dumps(cfg_mgr.data[cfg],
|
||||
ensure_ascii=False, indent=4))
|
||||
else:
|
||||
reply_str += "{}: {}\n".format(cfg, cfg_mgr.data[cfg])
|
||||
reply = [reply_str]
|
||||
else:
|
||||
cfg_entry_path = cfg_name.split('.')
|
||||
|
||||
try:
|
||||
if len(params) == 1: # 未指定配置值,返回配置项值
|
||||
cfg_entry = cfg_mgr.data[cfg_entry_path[0]]
|
||||
if len(cfg_entry_path) > 1:
|
||||
for i in range(1, len(cfg_entry_path)):
|
||||
cfg_entry = cfg_entry[cfg_entry_path[i]]
|
||||
|
||||
if isinstance(cfg_entry, str):
|
||||
reply_str = "[bot]配置项{}: \"{}\"\n".format(cfg_name, cfg_entry)
|
||||
elif isinstance(cfg_entry, dict):
|
||||
reply_str = "[bot]配置项{}: {}\n".format(cfg_name,
|
||||
json.dumps(cfg_entry,
|
||||
ensure_ascii=False, indent=4))
|
||||
else:
|
||||
reply_str = "[bot]配置项{}: {}\n".format(cfg_name, cfg_entry)
|
||||
reply = [reply_str]
|
||||
else:
|
||||
cfg_value = " ".join(params[1:])
|
||||
|
||||
cfg_value = eval(cfg_value)
|
||||
|
||||
cfg_entry = cfg_mgr.data[cfg_entry_path[0]]
|
||||
if len(cfg_entry_path) > 1:
|
||||
for i in range(1, len(cfg_entry_path) - 1):
|
||||
cfg_entry = cfg_entry[cfg_entry_path[i]]
|
||||
if isinstance(cfg_entry[cfg_entry_path[-1]], type(cfg_value)):
|
||||
cfg_entry[cfg_entry_path[-1]] = cfg_value
|
||||
reply = ["[bot]配置项{}修改成功".format(cfg_name)]
|
||||
else:
|
||||
reply = ["[bot]err:配置项{}类型不匹配".format(cfg_name)]
|
||||
else:
|
||||
cfg_mgr.data[cfg_entry_path[0]] = cfg_value
|
||||
reply = ["[bot]配置项{}修改成功".format(cfg_name)]
|
||||
except KeyError:
|
||||
reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
|
||||
except NameError:
|
||||
reply = ["[bot]err:值{}不合法(字符串需要使用双引号包裹)".format(cfg_value)]
|
||||
except ValueError:
|
||||
reply = ["[bot]err:未找到配置项 {}".format(cfg_name)]
|
||||
|
||||
return reply
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="cfg",
|
||||
description="配置项管理",
|
||||
usage="!cfg <配置项> [配置值]\n!cfg all",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class CfgCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
return True, config_operation(ctx.command, ctx.params)
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="cmd",
|
||||
description="显示命令列表",
|
||||
usage="!cmd\n!cmd <命令名称>",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class CmdCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
command_list = aamgr.__command_list__
|
||||
|
||||
reply = []
|
||||
|
||||
if len(ctx.params) == 0:
|
||||
reply_str = "[bot]当前所有命令:\n\n"
|
||||
|
||||
# 遍历顶级命令
|
||||
for key in command_list:
|
||||
command = command_list[key]
|
||||
if command['parent'] is None:
|
||||
reply_str += "!{} - {}\n".format(key, command['description'])
|
||||
|
||||
reply_str += "\n请使用 !cmd <命令名称> 来查看命令的详细信息"
|
||||
|
||||
reply = [reply_str]
|
||||
else:
|
||||
command_name = ctx.params[0]
|
||||
if command_name in command_list:
|
||||
reply = [command_list[command_name]['cls'].help()]
|
||||
else:
|
||||
reply = ["[bot]命令 {} 不存在".format(command_name)]
|
||||
|
||||
return True, reply
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="help",
|
||||
description="显示自定义的帮助信息",
|
||||
usage="!help",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class HelpCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import tips
|
||||
reply = ["[bot] "+tips.help_message + "\n请输入 !cmd 查看命令列表"]
|
||||
|
||||
# 警告config.help_message过时
|
||||
import config
|
||||
if hasattr(config, "help_message"):
|
||||
reply[0] += "\n\n警告:config.py中的help_message已过时,不再生效,请使用tips.py中的help_message替代"
|
||||
|
||||
return True, reply
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
import threading
|
||||
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="reload",
|
||||
description="执行热重载",
|
||||
usage="!reload",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class ReloadCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
|
||||
import pkg.utils.reloader
|
||||
def reload_task():
|
||||
pkg.utils.reloader.reload_all()
|
||||
|
||||
threading.Thread(target=reload_task, daemon=True).start()
|
||||
|
||||
return True, reply
|
||||
@@ -1,38 +0,0 @@
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="update",
|
||||
description="更新程序",
|
||||
usage="!update",
|
||||
aliases=[],
|
||||
privilege=2
|
||||
)
|
||||
class UpdateCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
import pkg.utils.updater
|
||||
import pkg.utils.reloader
|
||||
import pkg.utils.context
|
||||
|
||||
def update_task():
|
||||
try:
|
||||
if pkg.utils.updater.update_all():
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("更新完成, 请手动重启程序。")
|
||||
else:
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("无新版本")
|
||||
except Exception as e0:
|
||||
traceback.print_exc()
|
||||
pkg.utils.context.get_qqbot_manager().notify_admin("更新失败:{}".format(e0))
|
||||
return
|
||||
|
||||
threading.Thread(target=update_task, daemon=True).start()
|
||||
|
||||
reply = ["[bot]正在更新,请耐心等待,请勿重复发起更新..."]
|
||||
|
||||
return True, reply
|
||||
@@ -1,33 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="usage",
|
||||
description="获取使用情况",
|
||||
usage="!usage",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class UsageCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
import config
|
||||
import pkg.utils.context
|
||||
|
||||
reply = []
|
||||
|
||||
reply_str = "[bot]各api-key使用情况:\n\n"
|
||||
|
||||
api_keys = pkg.utils.context.get_openai_manager().key_mgr.api_key
|
||||
for key_name in api_keys:
|
||||
text_length = pkg.utils.context.get_openai_manager().audit_mgr \
|
||||
.get_text_length_of_key(api_keys[key_name])
|
||||
image_count = pkg.utils.context.get_openai_manager().audit_mgr \
|
||||
.get_image_count_of_key(api_keys[key_name])
|
||||
reply_str += "{}:\n - 文本长度:{}\n - 图片数量:{}\n".format(key_name, int(text_length),
|
||||
int(image_count))
|
||||
|
||||
reply = [reply_str]
|
||||
|
||||
return True, reply
|
||||
@@ -1,27 +0,0 @@
|
||||
from .. import aamgr
|
||||
|
||||
|
||||
@aamgr.AbstractCommandNode.register(
|
||||
parent=None,
|
||||
name="version",
|
||||
description="查看版本信息",
|
||||
usage="!version",
|
||||
aliases=[],
|
||||
privilege=1
|
||||
)
|
||||
class VersionCommand(aamgr.AbstractCommandNode):
|
||||
@classmethod
|
||||
def process(cls, ctx: aamgr.Context) -> tuple[bool, list]:
|
||||
reply = []
|
||||
import pkg.utils.updater
|
||||
|
||||
reply_str = "[bot]当前版本:\n{}\n".format(pkg.utils.updater.get_current_version_info())
|
||||
try:
|
||||
if pkg.utils.updater.is_new_version_available():
|
||||
reply_str += "\n有新版本可用,请使用命令 !update 进行更新"
|
||||
except:
|
||||
pass
|
||||
|
||||
reply = [reply_str]
|
||||
|
||||
return True, reply
|
||||
@@ -1,49 +0,0 @@
|
||||
# 命令处理模块
|
||||
import logging
|
||||
|
||||
from ..qqbot.cmds import aamgr as cmdmgr
|
||||
|
||||
|
||||
def process_command(session_name: str, text_message: str, mgr, config: dict,
|
||||
launcher_type: str, launcher_id: int, sender_id: int, is_admin: bool) -> list:
|
||||
reply = []
|
||||
try:
|
||||
logging.info(
|
||||
"[{}]发起命令:{}".format(session_name, text_message[:min(20, len(text_message))] + (
|
||||
"..." if len(text_message) > 20 else "")))
|
||||
|
||||
cmd = text_message[1:].strip().split(' ')[0]
|
||||
|
||||
params = text_message[1:].strip().split(' ')[1:]
|
||||
|
||||
# 把!~开头的转换成!cfg
|
||||
if cmd.startswith('~'):
|
||||
params = [cmd[1:]] + params
|
||||
cmd = 'cfg'
|
||||
|
||||
# 包装参数
|
||||
context = cmdmgr.Context(
|
||||
command=cmd,
|
||||
crt_command=cmd,
|
||||
params=params,
|
||||
crt_params=params[:],
|
||||
session_name=session_name,
|
||||
text_message=text_message,
|
||||
launcher_type=launcher_type,
|
||||
launcher_id=launcher_id,
|
||||
sender_id=sender_id,
|
||||
is_admin=is_admin,
|
||||
privilege=2 if is_admin else 1, # 普通用户1,管理员2
|
||||
)
|
||||
try:
|
||||
reply = cmdmgr.execute(context)
|
||||
except cmdmgr.CommandPrivilegeError as e:
|
||||
reply = ["{}".format(e)]
|
||||
|
||||
return reply
|
||||
except Exception as e:
|
||||
mgr.notify_admin("{}命令执行失败:{}".format(session_name, e))
|
||||
logging.exception(e)
|
||||
reply = ["[bot]err:{}".format(e)]
|
||||
|
||||
return reply
|
||||
@@ -1,87 +0,0 @@
|
||||
# 敏感词过滤模块
|
||||
import re
|
||||
import requests
|
||||
import json
|
||||
import logging
|
||||
|
||||
from ..utils import context
|
||||
|
||||
|
||||
class ReplyFilter:
|
||||
sensitive_words = []
|
||||
mask = "*"
|
||||
mask_word = ""
|
||||
|
||||
# 默认值( 兼容性考虑 )
|
||||
baidu_check = False
|
||||
baidu_api_key = ""
|
||||
baidu_secret_key = ""
|
||||
inappropriate_message_tips = "[百度云]请珍惜机器人,当前返回内容不合规"
|
||||
|
||||
def __init__(self, sensitive_words: list, mask: str = "*", mask_word: str = ""):
|
||||
self.sensitive_words = sensitive_words
|
||||
self.mask = mask
|
||||
self.mask_word = mask_word
|
||||
|
||||
config = context.get_config_manager().data
|
||||
|
||||
self.baidu_check = config['baidu_check']
|
||||
self.baidu_api_key = config['baidu_api_key']
|
||||
self.baidu_secret_key = config['baidu_secret_key']
|
||||
self.inappropriate_message_tips = config['inappropriate_message_tips']
|
||||
|
||||
def is_illegal(self, message: str) -> bool:
|
||||
processed = self.process(message)
|
||||
if processed != message:
|
||||
return True
|
||||
return False
|
||||
|
||||
def process(self, message: str) -> str:
|
||||
|
||||
# 本地关键词屏蔽
|
||||
for word in self.sensitive_words:
|
||||
match = re.findall(word, message)
|
||||
if len(match) > 0:
|
||||
for i in range(len(match)):
|
||||
if self.mask_word == "":
|
||||
message = message.replace(match[i], self.mask * len(match[i]))
|
||||
else:
|
||||
message = message.replace(match[i], self.mask_word)
|
||||
|
||||
# 百度云审核
|
||||
if self.baidu_check:
|
||||
|
||||
# 百度云审核URL
|
||||
baidu_url = "https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined?access_token=" + \
|
||||
str(requests.post("https://aip.baidubce.com/oauth/2.0/token",
|
||||
params={"grant_type": "client_credentials",
|
||||
"client_id": self.baidu_api_key,
|
||||
"client_secret": self.baidu_secret_key}).json().get("access_token"))
|
||||
|
||||
# 百度云审核
|
||||
payload = "text=" + message
|
||||
logging.info("向百度云发送:" + payload)
|
||||
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'}
|
||||
|
||||
if isinstance(payload, str):
|
||||
payload = payload.encode('utf-8')
|
||||
|
||||
response = requests.request("POST", baidu_url, headers=headers, data=payload)
|
||||
response_dict = json.loads(response.text)
|
||||
|
||||
if "error_code" in response_dict:
|
||||
error_msg = response_dict.get("error_msg")
|
||||
logging.warning(f"百度云判定出错,错误信息:{error_msg}")
|
||||
conclusion = f"百度云判定出错,错误信息:{error_msg}\n以下是原消息:{message}"
|
||||
else:
|
||||
conclusion = response_dict["conclusion"]
|
||||
if conclusion in ("合规"):
|
||||
logging.info(f"百度云判定结果:{conclusion}")
|
||||
return message
|
||||
else:
|
||||
logging.warning(f"百度云判定结果:{conclusion}")
|
||||
conclusion = self.inappropriate_message_tips
|
||||
# 返回百度云审核结果
|
||||
return conclusion
|
||||
|
||||
return message
|
||||
@@ -1,18 +0,0 @@
|
||||
import re
|
||||
|
||||
from ..utils import context
|
||||
|
||||
|
||||
def ignore(msg: str) -> bool:
|
||||
"""检查消息是否应该被忽略"""
|
||||
config = context.get_config_manager().data
|
||||
|
||||
if 'prefix' in config['ignore_rules']:
|
||||
for rule in config['ignore_rules']['prefix']:
|
||||
if msg.startswith(rule):
|
||||
return True
|
||||
|
||||
if 'regexp' in config['ignore_rules']:
|
||||
for rule in config['ignore_rules']['regexp']:
|
||||
if re.search(rule, msg):
|
||||
return True
|
||||
@@ -1,427 +0,0 @@
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
|
||||
from mirai import At, GroupMessage, MessageEvent, StrangerMessage, \
|
||||
FriendMessage, Image, MessageChain, Plain
|
||||
import func_timeout
|
||||
|
||||
from ..openai import session as openai_session
|
||||
|
||||
from ..qqbot import filter as qqbot_filter
|
||||
from ..qqbot import process as processor
|
||||
from ..utils import context
|
||||
from ..plugin import host as plugin_host
|
||||
from ..plugin import models as plugin_models
|
||||
import tips as tips_custom
|
||||
from ..qqbot import adapter as msadapter
|
||||
|
||||
|
||||
# 检查消息是否符合泛响应匹配机制
|
||||
def check_response_rule(group_id:int, text: str):
|
||||
config = context.get_config_manager().data
|
||||
|
||||
rules = config['response_rules']
|
||||
|
||||
# 检查是否有特定规则
|
||||
if 'prefix' not in config['response_rules']:
|
||||
if str(group_id) in config['response_rules']:
|
||||
rules = config['response_rules'][str(group_id)]
|
||||
else:
|
||||
rules = config['response_rules']['default']
|
||||
|
||||
# 检查前缀匹配
|
||||
if 'prefix' in rules:
|
||||
for rule in rules['prefix']:
|
||||
if text.startswith(rule):
|
||||
return True, text.replace(rule, "", 1)
|
||||
|
||||
# 检查正则表达式匹配
|
||||
if 'regexp' in rules:
|
||||
for rule in rules['regexp']:
|
||||
import re
|
||||
match = re.match(rule, text)
|
||||
if match:
|
||||
return True, text
|
||||
|
||||
return False, ""
|
||||
|
||||
|
||||
def response_at(group_id: int):
|
||||
config = context.get_config_manager().data
|
||||
|
||||
use_response_rule = config['response_rules']
|
||||
|
||||
# 检查是否有特定规则
|
||||
if 'prefix' not in config['response_rules']:
|
||||
if str(group_id) in config['response_rules']:
|
||||
use_response_rule = config['response_rules'][str(group_id)]
|
||||
else:
|
||||
use_response_rule = config['response_rules']['default']
|
||||
|
||||
if 'at' not in use_response_rule:
|
||||
return True
|
||||
|
||||
return use_response_rule['at']
|
||||
|
||||
|
||||
def random_responding(group_id):
|
||||
config = context.get_config_manager().data
|
||||
|
||||
use_response_rule = config['response_rules']
|
||||
|
||||
# 检查是否有特定规则
|
||||
if 'prefix' not in config['response_rules']:
|
||||
if str(group_id) in config['response_rules']:
|
||||
use_response_rule = config['response_rules'][str(group_id)]
|
||||
else:
|
||||
use_response_rule = config['response_rules']['default']
|
||||
|
||||
if 'random_rate' in use_response_rule:
|
||||
import random
|
||||
return random.random() < use_response_rule['random_rate']
|
||||
return False
|
||||
|
||||
|
||||
# 控制QQ消息输入输出的类
|
||||
class QQBotManager:
|
||||
retry = 3
|
||||
|
||||
adapter: msadapter.MessageSourceAdapter = None
|
||||
|
||||
bot_account_id: int = 0
|
||||
|
||||
reply_filter = None
|
||||
|
||||
enable_banlist = False
|
||||
|
||||
enable_private = True
|
||||
enable_group = True
|
||||
|
||||
ban_person = []
|
||||
ban_group = []
|
||||
|
||||
def __init__(self, first_time_init=True):
|
||||
config = context.get_config_manager().data
|
||||
|
||||
self.timeout = config['process_message_timeout']
|
||||
self.retry = config['retry_times']
|
||||
|
||||
# 由于YiriMirai的bot对象是单例的,且shutdown方法暂时无法使用
|
||||
# 故只在第一次初始化时创建bot对象,重载之后使用原bot对象
|
||||
# 因此,bot的配置不支持热重载
|
||||
if first_time_init:
|
||||
logging.debug("Use adapter:" + config['msg_source_adapter'])
|
||||
if config['msg_source_adapter'] == 'yirimirai':
|
||||
from pkg.qqbot.sources.yirimirai import YiriMiraiAdapter
|
||||
|
||||
mirai_http_api_config = config['mirai_http_api_config']
|
||||
self.bot_account_id = config['mirai_http_api_config']['qq']
|
||||
self.adapter = YiriMiraiAdapter(mirai_http_api_config)
|
||||
elif config['msg_source_adapter'] == 'nakuru':
|
||||
from pkg.qqbot.sources.nakuru import NakuruProjectAdapter
|
||||
self.adapter = NakuruProjectAdapter(config['nakuru_config'])
|
||||
self.bot_account_id = self.adapter.bot_account_id
|
||||
else:
|
||||
self.adapter = context.get_qqbot_manager().adapter
|
||||
self.bot_account_id = context.get_qqbot_manager().bot_account_id
|
||||
|
||||
# 保存 account_id 到审计模块
|
||||
from ..utils.center import apigroup
|
||||
apigroup.APIGroup._runtime_info['account_id'] = "{}".format(self.bot_account_id)
|
||||
|
||||
context.set_qqbot_manager(self)
|
||||
|
||||
# 注册诸事件
|
||||
# Caution: 注册新的事件处理器之后,请务必在unsubscribe_all中编写相应的取消订阅代码
|
||||
def on_friend_message(event: FriendMessage):
|
||||
|
||||
def friend_message_handler():
|
||||
# 触发事件
|
||||
args = {
|
||||
"launcher_type": "person",
|
||||
"launcher_id": event.sender.id,
|
||||
"sender_id": event.sender.id,
|
||||
"message_chain": event.message_chain,
|
||||
}
|
||||
plugin_event = plugin_host.emit(plugin_models.PersonMessageReceived, **args)
|
||||
|
||||
if plugin_event.is_prevented_default():
|
||||
return
|
||||
|
||||
self.on_person_message(event)
|
||||
|
||||
context.get_thread_ctl().submit_user_task(
|
||||
friend_message_handler,
|
||||
)
|
||||
self.adapter.register_listener(
|
||||
FriendMessage,
|
||||
on_friend_message
|
||||
)
|
||||
|
||||
def on_stranger_message(event: StrangerMessage):
|
||||
|
||||
def stranger_message_handler():
|
||||
# 触发事件
|
||||
args = {
|
||||
"launcher_type": "person",
|
||||
"launcher_id": event.sender.id,
|
||||
"sender_id": event.sender.id,
|
||||
"message_chain": event.message_chain,
|
||||
}
|
||||
plugin_event = plugin_host.emit(plugin_models.PersonMessageReceived, **args)
|
||||
|
||||
if plugin_event.is_prevented_default():
|
||||
return
|
||||
|
||||
self.on_person_message(event)
|
||||
|
||||
context.get_thread_ctl().submit_user_task(
|
||||
stranger_message_handler,
|
||||
)
|
||||
# nakuru不区分好友和陌生人,故仅为yirimirai注册陌生人事件
|
||||
if config['msg_source_adapter'] == 'yirimirai':
|
||||
self.adapter.register_listener(
|
||||
StrangerMessage,
|
||||
on_stranger_message
|
||||
)
|
||||
|
||||
def on_group_message(event: GroupMessage):
|
||||
|
||||
def group_message_handler(event: GroupMessage):
|
||||
# 触发事件
|
||||
args = {
|
||||
"launcher_type": "group",
|
||||
"launcher_id": event.group.id,
|
||||
"sender_id": event.sender.id,
|
||||
"message_chain": event.message_chain,
|
||||
}
|
||||
plugin_event = plugin_host.emit(plugin_models.GroupMessageReceived, **args)
|
||||
|
||||
if plugin_event.is_prevented_default():
|
||||
return
|
||||
|
||||
self.on_group_message(event)
|
||||
|
||||
context.get_thread_ctl().submit_user_task(
|
||||
group_message_handler,
|
||||
event
|
||||
)
|
||||
self.adapter.register_listener(
|
||||
GroupMessage,
|
||||
on_group_message
|
||||
)
|
||||
|
||||
def unsubscribe_all():
|
||||
"""取消所有订阅
|
||||
|
||||
用于在热重载流程中卸载所有事件处理器
|
||||
"""
|
||||
self.adapter.unregister_listener(
|
||||
FriendMessage,
|
||||
on_friend_message
|
||||
)
|
||||
if config['msg_source_adapter'] == 'yirimirai':
|
||||
self.adapter.unregister_listener(
|
||||
StrangerMessage,
|
||||
on_stranger_message
|
||||
)
|
||||
self.adapter.unregister_listener(
|
||||
GroupMessage,
|
||||
on_group_message
|
||||
)
|
||||
|
||||
self.unsubscribe_all = unsubscribe_all
|
||||
|
||||
# 加载禁用列表
|
||||
if os.path.exists("banlist.py"):
|
||||
import banlist
|
||||
self.enable_banlist = banlist.enable
|
||||
self.ban_person = banlist.person
|
||||
self.ban_group = banlist.group
|
||||
logging.info("加载禁用列表: person: {}, group: {}".format(self.ban_person, self.ban_group))
|
||||
|
||||
if hasattr(banlist, "enable_private"):
|
||||
self.enable_private = banlist.enable_private
|
||||
if hasattr(banlist, "enable_group"):
|
||||
self.enable_group = banlist.enable_group
|
||||
|
||||
config = context.get_config_manager().data
|
||||
if os.path.exists("sensitive.json") \
|
||||
and config['sensitive_word_filter'] is not None \
|
||||
and config['sensitive_word_filter']:
|
||||
with open("sensitive.json", "r", encoding="utf-8") as f:
|
||||
sensitive_json = json.load(f)
|
||||
self.reply_filter = qqbot_filter.ReplyFilter(
|
||||
sensitive_words=sensitive_json['words'],
|
||||
mask=sensitive_json['mask'] if 'mask' in sensitive_json else '*',
|
||||
mask_word=sensitive_json['mask_word'] if 'mask_word' in sensitive_json else ''
|
||||
)
|
||||
else:
|
||||
self.reply_filter = qqbot_filter.ReplyFilter([])
|
||||
|
||||
def send(self, event, msg, check_quote=True, check_at_sender=True):
|
||||
config = context.get_config_manager().data
|
||||
|
||||
if check_at_sender and config['at_sender']:
|
||||
msg.insert(
|
||||
0,
|
||||
Plain(" \n")
|
||||
)
|
||||
|
||||
# 当回复的正文中包含换行时,quote可能会自带at,此时就不再单独添加at,只添加换行
|
||||
if "\n" not in str(msg[1]) or config['msg_source_adapter'] == 'nakuru':
|
||||
msg.insert(
|
||||
0,
|
||||
At(
|
||||
event.sender.id
|
||||
)
|
||||
)
|
||||
|
||||
self.adapter.reply_message(
|
||||
event,
|
||||
msg,
|
||||
quote_origin=True if config['quote_origin'] and check_quote else False
|
||||
)
|
||||
|
||||
# 私聊消息处理
|
||||
def on_person_message(self, event: MessageEvent):
|
||||
reply = ''
|
||||
|
||||
config = context.get_config_manager().data
|
||||
|
||||
if not self.enable_private:
|
||||
logging.debug("已在banlist.py中禁用所有私聊")
|
||||
elif event.sender.id == self.bot_account_id:
|
||||
pass
|
||||
else:
|
||||
if Image in event.message_chain:
|
||||
pass
|
||||
else:
|
||||
# 超时则重试,重试超过次数则放弃
|
||||
failed = 0
|
||||
for i in range(self.retry):
|
||||
try:
|
||||
|
||||
@func_timeout.func_set_timeout(config['process_message_timeout'])
|
||||
def time_ctrl_wrapper():
|
||||
reply = processor.process_message('person', event.sender.id, str(event.message_chain),
|
||||
event.message_chain,
|
||||
event.sender.id)
|
||||
return reply
|
||||
|
||||
reply = time_ctrl_wrapper()
|
||||
break
|
||||
except func_timeout.FunctionTimedOut:
|
||||
logging.warning("person_{}: 超时,重试中({})".format(event.sender.id, i))
|
||||
openai_session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
|
||||
if "person_{}".format(event.sender.id) in processor.processing:
|
||||
processor.processing.remove('person_{}'.format(event.sender.id))
|
||||
failed += 1
|
||||
continue
|
||||
|
||||
if failed == self.retry:
|
||||
openai_session.get_session('person_{}'.format(event.sender.id)).release_response_lock()
|
||||
self.notify_admin("{} 请求超时".format("person_{}".format(event.sender.id)))
|
||||
reply = [tips_custom.reply_message]
|
||||
|
||||
if reply:
|
||||
return self.send(event, reply, check_quote=False, check_at_sender=False)
|
||||
|
||||
# 群消息处理
|
||||
def on_group_message(self, event: GroupMessage):
|
||||
reply = ''
|
||||
|
||||
config = context.get_config_manager().data
|
||||
|
||||
def process(text=None) -> str:
|
||||
replys = ""
|
||||
if At(self.bot_account_id) in event.message_chain:
|
||||
event.message_chain.remove(At(self.bot_account_id))
|
||||
|
||||
# 超时则重试,重试超过次数则放弃
|
||||
failed = 0
|
||||
for i in range(self.retry):
|
||||
try:
|
||||
@func_timeout.func_set_timeout(config['process_message_timeout'])
|
||||
def time_ctrl_wrapper():
|
||||
replys = processor.process_message('group', event.group.id,
|
||||
str(event.message_chain).strip() if text is None else text,
|
||||
event.message_chain,
|
||||
event.sender.id)
|
||||
return replys
|
||||
|
||||
replys = time_ctrl_wrapper()
|
||||
break
|
||||
except func_timeout.FunctionTimedOut:
|
||||
logging.warning("group_{}: 超时,重试中({})".format(event.group.id, i))
|
||||
openai_session.get_session('group_{}'.format(event.group.id)).release_response_lock()
|
||||
if "group_{}".format(event.group.id) in processor.processing:
|
||||
processor.processing.remove('group_{}'.format(event.group.id))
|
||||
failed += 1
|
||||
continue
|
||||
|
||||
if failed == self.retry:
|
||||
openai_session.get_session('group_{}'.format(event.group.id)).release_response_lock()
|
||||
self.notify_admin("{} 请求超时".format("group_{}".format(event.group.id)))
|
||||
replys = [tips_custom.replys_message]
|
||||
|
||||
return replys
|
||||
|
||||
if not self.enable_group:
|
||||
logging.debug("已在banlist.py中禁用所有群聊")
|
||||
elif Image in event.message_chain:
|
||||
pass
|
||||
else:
|
||||
if At(self.bot_account_id) in event.message_chain and response_at(event.group.id):
|
||||
# 直接调用
|
||||
reply = process()
|
||||
else:
|
||||
check, result = check_response_rule(event.group.id, str(event.message_chain).strip())
|
||||
|
||||
if check:
|
||||
reply = process(result.strip())
|
||||
# 检查是否随机响应
|
||||
elif random_responding(event.group.id):
|
||||
logging.info("随机响应group_{}消息".format(event.group.id))
|
||||
reply = process()
|
||||
|
||||
if reply:
|
||||
return self.send(event, reply)
|
||||
|
||||
# 通知系统管理员
|
||||
def notify_admin(self, message: str):
|
||||
config = context.get_config_manager().data
|
||||
if config['admin_qq'] != 0 and config['admin_qq'] != []:
|
||||
logging.info("通知管理员:{}".format(message))
|
||||
if type(config['admin_qq']) == int:
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
config['admin_qq'],
|
||||
MessageChain([Plain("[bot]{}".format(message))])
|
||||
)
|
||||
else:
|
||||
for adm in config['admin_qq']:
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
adm,
|
||||
MessageChain([Plain("[bot]{}".format(message))])
|
||||
)
|
||||
|
||||
def notify_admin_message_chain(self, message):
|
||||
config = context.get_config_manager().data
|
||||
if config['admin_qq'] != 0 and config['admin_qq'] != []:
|
||||
logging.info("通知管理员:{}".format(message))
|
||||
if type(config['admin_qq']) == int:
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
config['admin_qq'],
|
||||
message
|
||||
)
|
||||
else:
|
||||
for adm in config['admin_qq']:
|
||||
self.adapter.send_message(
|
||||
"person",
|
||||
adm,
|
||||
message
|
||||
)
|
||||
@@ -1,134 +0,0 @@
|
||||
# 普通消息处理模块
|
||||
import logging
|
||||
|
||||
import openai
|
||||
|
||||
from ..utils import context
|
||||
from ..openai import session as openai_session
|
||||
|
||||
from ..plugin import host as plugin_host
|
||||
from ..plugin import models as plugin_models
|
||||
import tips as tips_custom
|
||||
|
||||
|
||||
def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
|
||||
"""处理异常,当notify_admin不为空时,会通知管理员,返回通知用户的消息"""
|
||||
config = context.get_config_manager().data
|
||||
context.get_qqbot_manager().notify_admin(notify_admin)
|
||||
if config['hide_exce_info_to_user']:
|
||||
return [tips_custom.alter_tip_message] if tips_custom.alter_tip_message else []
|
||||
else:
|
||||
return [set_reply]
|
||||
|
||||
|
||||
def process_normal_message(text_message: str, mgr, config: dict, launcher_type: str,
|
||||
launcher_id: int, sender_id: int) -> list:
|
||||
session_name = f"{launcher_type}_{launcher_id}"
|
||||
logging.info("[{}]发送消息:{}".format(session_name, text_message[:min(20, len(text_message))] + (
|
||||
"..." if len(text_message) > 20 else "")))
|
||||
|
||||
session = openai_session.get_session(session_name)
|
||||
|
||||
unexpected_exception_times = 0
|
||||
|
||||
max_unexpected_exception_times = 3
|
||||
|
||||
reply = []
|
||||
while True:
|
||||
if unexpected_exception_times >= max_unexpected_exception_times:
|
||||
reply = handle_exception(notify_admin=f"{session_name},多次尝试失败。", set_reply=f"[bot]多次尝试失败,请重试或联系管理员")
|
||||
break
|
||||
try:
|
||||
prefix = "[GPT]" if config['show_prefix'] else ""
|
||||
|
||||
text, finish_reason, funcs = session.query(text_message)
|
||||
|
||||
# 触发插件事件
|
||||
args = {
|
||||
"launcher_type": launcher_type,
|
||||
"launcher_id": launcher_id,
|
||||
"sender_id": sender_id,
|
||||
"session": session,
|
||||
"prefix": prefix,
|
||||
"response_text": text,
|
||||
"finish_reason": finish_reason,
|
||||
"funcs_called": funcs,
|
||||
}
|
||||
|
||||
event = plugin_host.emit(plugin_models.NormalMessageResponded, **args)
|
||||
|
||||
if event.get_return_value("prefix") is not None:
|
||||
prefix = event.get_return_value("prefix")
|
||||
|
||||
if event.get_return_value("reply") is not None:
|
||||
reply = event.get_return_value("reply")
|
||||
|
||||
if not event.is_prevented_default():
|
||||
reply = [prefix + text]
|
||||
|
||||
except openai.APIConnectionError as e:
|
||||
err_msg = str(e)
|
||||
if err_msg.__contains__('Error communicating with OpenAI'):
|
||||
reply = handle_exception("{}会话调用API失败:{}\n您的网络无法访问OpenAI接口或网络代理不正常".format(session_name, e),
|
||||
"[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
||||
else:
|
||||
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
|
||||
except openai.RateLimitError as e:
|
||||
logging.debug(type(e))
|
||||
logging.debug(e.error['message'])
|
||||
|
||||
if 'message' in e.error and e.error['message'].__contains__('You exceeded your current quota'):
|
||||
# 尝试切换api-key
|
||||
current_key_name = context.get_openai_manager().key_mgr.get_key_name(
|
||||
context.get_openai_manager().key_mgr.using_key
|
||||
)
|
||||
context.get_openai_manager().key_mgr.set_current_exceeded()
|
||||
|
||||
# 触发插件事件
|
||||
args = {
|
||||
'key_name': current_key_name,
|
||||
'usage': context.get_openai_manager().audit_mgr
|
||||
.get_usage(context.get_openai_manager().key_mgr.get_using_key_md5()),
|
||||
'exceeded_keys': context.get_openai_manager().key_mgr.exceeded,
|
||||
}
|
||||
event = plugin_host.emit(plugin_models.KeyExceeded, **args)
|
||||
|
||||
if not event.is_prevented_default():
|
||||
switched, name = context.get_openai_manager().key_mgr.auto_switch()
|
||||
|
||||
if not switched:
|
||||
reply = handle_exception(
|
||||
"api-key调用额度超限({}),无可用api_key,请向OpenAI账户充值或在config.py中更换api_key;如果你认为这是误判,请尝试重启程序。".format(
|
||||
current_key_name), "[bot]err:API调用额度超额,请联系管理员,或等待修复")
|
||||
else:
|
||||
openai.api_key = context.get_openai_manager().key_mgr.get_using_key()
|
||||
mgr.notify_admin("api-key调用额度超限({}),接口报错,已切换到{}".format(current_key_name, name))
|
||||
reply = ["[bot]err:API调用额度超额,已自动切换,请重新发送消息"]
|
||||
continue
|
||||
elif 'message' in e.error and e.error['message'].__contains__('You can retry your request'):
|
||||
# 重试
|
||||
unexpected_exception_times += 1
|
||||
continue
|
||||
elif 'message' in e.error and e.error['message']\
|
||||
.__contains__('The server had an error while processing your request'):
|
||||
# 重试
|
||||
unexpected_exception_times += 1
|
||||
continue
|
||||
else:
|
||||
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
|
||||
"[bot]err:RateLimitError,请重试或联系作者,或等待修复")
|
||||
except openai.BadRequestError as e:
|
||||
if config['auto_reset'] and "This model's maximum context length is" in str(e):
|
||||
session.reset(persist=True)
|
||||
reply = [tips_custom.session_auto_reset_message]
|
||||
else:
|
||||
reply = handle_exception("{}API调用参数错误:{}\n".format(
|
||||
session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
|
||||
except openai.APIStatusError as e:
|
||||
reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
reply = handle_exception("{}会话处理异常:{}".format(session_name, e), "[bot]err:{}".format(e))
|
||||
break
|
||||
|
||||
return reply
|
||||
@@ -1,191 +0,0 @@
|
||||
# 此模块提供了消息处理的具体逻辑的接口
|
||||
import asyncio
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import mirai
|
||||
import logging
|
||||
|
||||
# 这里不使用动态引入config
|
||||
# 因为在这里动态引入会卡死程序
|
||||
# 而此模块静态引用config与动态引入的表现一致
|
||||
# 已弃用,由于超时时间现已动态使用
|
||||
# import config as config_init_import
|
||||
|
||||
from ..qqbot import ratelimit
|
||||
from ..qqbot import command, message
|
||||
from ..openai import session as openai_session
|
||||
from ..utils import context
|
||||
|
||||
from ..plugin import host as plugin_host
|
||||
from ..plugin import models as plugin_models
|
||||
from ..qqbot import ignore
|
||||
from ..qqbot import banlist
|
||||
from ..qqbot import blob
|
||||
import tips as tips_custom
|
||||
|
||||
processing = []
|
||||
|
||||
|
||||
def is_admin(qq: int) -> bool:
|
||||
"""兼容list和int类型的管理员判断"""
|
||||
config = context.get_config_manager().data
|
||||
if type(config['admin_qq']) == list:
|
||||
return qq in config['admin_qq']
|
||||
else:
|
||||
return qq == config['admin_qq']
|
||||
|
||||
|
||||
def process_message(launcher_type: str, launcher_id: int, text_message: str, message_chain: mirai.MessageChain,
|
||||
sender_id: int) -> mirai.MessageChain:
|
||||
global processing
|
||||
|
||||
mgr = context.get_qqbot_manager()
|
||||
|
||||
reply = []
|
||||
session_name = "{}_{}".format(launcher_type, launcher_id)
|
||||
|
||||
# 检查发送方是否被禁用
|
||||
if banlist.is_banned(launcher_type, launcher_id, sender_id):
|
||||
logging.info("根据禁用列表忽略{}_{}的消息".format(launcher_type, launcher_id))
|
||||
return []
|
||||
|
||||
if ignore.ignore(text_message):
|
||||
logging.info("根据忽略规则忽略消息: {}".format(text_message))
|
||||
return []
|
||||
|
||||
config = context.get_config_manager().data
|
||||
|
||||
if not config['wait_last_done'] and session_name in processing:
|
||||
return mirai.MessageChain([mirai.Plain(tips_custom.message_drop_tip)])
|
||||
|
||||
# 检查是否被禁言
|
||||
if launcher_type == 'group':
|
||||
is_muted = mgr.adapter.is_muted(launcher_id)
|
||||
if is_muted:
|
||||
logging.info("机器人被禁言,跳过消息处理(group_{})".format(launcher_id))
|
||||
return reply
|
||||
|
||||
if config['income_msg_check']:
|
||||
if mgr.reply_filter.is_illegal(text_message):
|
||||
return mirai.MessageChain(mirai.Plain("[bot] 消息中存在不合适的内容, 请更换措辞"))
|
||||
|
||||
openai_session.get_session(session_name).acquire_response_lock()
|
||||
|
||||
text_message = text_message.strip()
|
||||
|
||||
|
||||
# 为强制消息延迟计时
|
||||
start_time = time.time()
|
||||
|
||||
# 处理消息
|
||||
try:
|
||||
|
||||
processing.append(session_name)
|
||||
try:
|
||||
msg_type = ''
|
||||
if text_message.startswith('!') or text_message.startswith("!"): # 命令
|
||||
msg_type = 'command'
|
||||
# 触发插件事件
|
||||
args = {
|
||||
'launcher_type': launcher_type,
|
||||
'launcher_id': launcher_id,
|
||||
'sender_id': sender_id,
|
||||
'command': text_message[1:].strip().split(' ')[0],
|
||||
'params': text_message[1:].strip().split(' ')[1:],
|
||||
'text_message': text_message,
|
||||
'is_admin': is_admin(sender_id),
|
||||
}
|
||||
event = plugin_host.emit(plugin_models.PersonCommandSent
|
||||
if launcher_type == 'person'
|
||||
else plugin_models.GroupCommandSent, **args)
|
||||
|
||||
if event.get_return_value("alter") is not None:
|
||||
text_message = event.get_return_value("alter")
|
||||
|
||||
# 取出插件提交的返回值赋值给reply
|
||||
if event.get_return_value("reply") is not None:
|
||||
reply = event.get_return_value("reply")
|
||||
|
||||
if not event.is_prevented_default():
|
||||
reply = command.process_command(session_name, text_message,
|
||||
mgr, config, launcher_type, launcher_id, sender_id, is_admin(sender_id))
|
||||
|
||||
else: # 消息
|
||||
msg_type = 'message'
|
||||
# 限速丢弃检查
|
||||
# print(ratelimit.__crt_minute_usage__[session_name])
|
||||
if config['rate_limit_strategy'] == "drop":
|
||||
if ratelimit.is_reach_limit(session_name):
|
||||
logging.info("根据限速策略丢弃[{}]消息: {}".format(session_name, text_message))
|
||||
|
||||
return mirai.MessageChain(["[bot]"+tips_custom.rate_limit_drop_tip]) if tips_custom.rate_limit_drop_tip != "" else []
|
||||
|
||||
before = time.time()
|
||||
# 触发插件事件
|
||||
args = {
|
||||
"launcher_type": launcher_type,
|
||||
"launcher_id": launcher_id,
|
||||
"sender_id": sender_id,
|
||||
"text_message": text_message,
|
||||
}
|
||||
event = plugin_host.emit(plugin_models.PersonNormalMessageReceived
|
||||
if launcher_type == 'person'
|
||||
else plugin_models.GroupNormalMessageReceived, **args)
|
||||
|
||||
if event.get_return_value("alter") is not None:
|
||||
text_message = event.get_return_value("alter")
|
||||
|
||||
# 取出插件提交的返回值赋值给reply
|
||||
if event.get_return_value("reply") is not None:
|
||||
reply = event.get_return_value("reply")
|
||||
|
||||
if not event.is_prevented_default():
|
||||
reply = message.process_normal_message(text_message,
|
||||
mgr, config, launcher_type, launcher_id, sender_id)
|
||||
|
||||
# 限速等待时间
|
||||
if config['rate_limit_strategy'] == "wait":
|
||||
time.sleep(ratelimit.get_rest_wait_time(session_name, time.time() - before))
|
||||
|
||||
ratelimit.add_usage(session_name)
|
||||
|
||||
if reply is not None and len(reply) > 0 and (type(reply[0]) == str or type(reply[0]) == mirai.Plain):
|
||||
if type(reply[0]) == mirai.Plain:
|
||||
reply[0] = reply[0].text
|
||||
logging.info(
|
||||
"回复[{}]文字消息:{}".format(session_name,
|
||||
reply[0][:min(100, len(reply[0]))] + (
|
||||
"..." if len(reply[0]) > 100 else "")))
|
||||
if msg_type == 'message':
|
||||
reply = [mgr.reply_filter.process(reply[0])]
|
||||
|
||||
reply = blob.check_text(reply[0])
|
||||
else:
|
||||
logging.info("回复[{}]消息".format(session_name))
|
||||
|
||||
finally:
|
||||
processing.remove(session_name)
|
||||
finally:
|
||||
openai_session.get_session(session_name).release_response_lock()
|
||||
|
||||
# 检查延迟时间
|
||||
if config['force_delay_range'][1] == 0:
|
||||
delay_time = 0
|
||||
else:
|
||||
import random
|
||||
|
||||
# 从延迟范围中随机取一个值(浮点)
|
||||
rdm = random.uniform(config['force_delay_range'][0], config['force_delay_range'][1])
|
||||
|
||||
spent = time.time() - start_time
|
||||
|
||||
# 如果花费时间小于延迟时间,则延迟
|
||||
delay_time = rdm - spent if rdm - spent > 0 else 0
|
||||
|
||||
# 延迟
|
||||
if delay_time > 0:
|
||||
logging.info("[风控] 强制延迟{:.2f}秒(如需关闭,请到config.py修改force_delay_range字段)".format(delay_time))
|
||||
time.sleep(delay_time)
|
||||
|
||||
return mirai.MessageChain(reply)
|
||||
@@ -1,89 +0,0 @@
|
||||
# 限速相关模块
|
||||
import time
|
||||
import logging
|
||||
import threading
|
||||
|
||||
from ..utils import context
|
||||
|
||||
|
||||
__crt_minute_usage__ = {}
|
||||
"""当前分钟每个会话的对话次数"""
|
||||
|
||||
|
||||
__timer_thr__: threading.Thread = None
|
||||
|
||||
|
||||
def get_limitation(session_name: str) -> int:
|
||||
"""获取会话的限制次数"""
|
||||
config = context.get_config_manager().data
|
||||
|
||||
if session_name in config['rate_limitation']:
|
||||
return config['rate_limitation'][session_name]
|
||||
else:
|
||||
return config['rate_limitation']["default"]
|
||||
|
||||
|
||||
def add_usage(session_name: str):
|
||||
"""增加会话的对话次数"""
|
||||
global __crt_minute_usage__
|
||||
if session_name in __crt_minute_usage__:
|
||||
__crt_minute_usage__[session_name] += 1
|
||||
else:
|
||||
__crt_minute_usage__[session_name] = 1
|
||||
|
||||
|
||||
def start_timer():
|
||||
"""启动定时器"""
|
||||
global __timer_thr__
|
||||
__timer_thr__ = threading.Thread(target=run_timer, daemon=True)
|
||||
__timer_thr__.start()
|
||||
|
||||
|
||||
def run_timer():
|
||||
"""启动定时器,每分钟清空一次对话次数"""
|
||||
global __crt_minute_usage__
|
||||
global __timer_thr__
|
||||
|
||||
# 等待直到整分钟
|
||||
time.sleep(60 - time.time() % 60)
|
||||
|
||||
while True:
|
||||
if __timer_thr__ != threading.current_thread():
|
||||
break
|
||||
|
||||
logging.debug("清空当前分钟的对话次数")
|
||||
__crt_minute_usage__ = {}
|
||||
time.sleep(60)
|
||||
|
||||
|
||||
def get_usage(session_name: str) -> int:
|
||||
"""获取会话的对话次数"""
|
||||
global __crt_minute_usage__
|
||||
if session_name in __crt_minute_usage__:
|
||||
return __crt_minute_usage__[session_name]
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
def get_rest_wait_time(session_name: str, spent: float) -> float:
|
||||
"""获取会话此回合的剩余等待时间"""
|
||||
global __crt_minute_usage__
|
||||
|
||||
min_seconds_per_round = 60.0 / get_limitation(session_name)
|
||||
|
||||
if session_name in __crt_minute_usage__:
|
||||
return max(0, min_seconds_per_round - spent)
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
def is_reach_limit(session_name: str) -> bool:
|
||||
"""判断会话是否超过限制"""
|
||||
global __crt_minute_usage__
|
||||
|
||||
if session_name in __crt_minute_usage__:
|
||||
return __crt_minute_usage__[session_name] >= get_limitation(session_name)
|
||||
else:
|
||||
return False
|
||||
|
||||
start_timer()
|
||||
@@ -1,123 +0,0 @@
|
||||
import asyncio
|
||||
import typing
|
||||
|
||||
import mirai
|
||||
import mirai.models.bus
|
||||
from mirai.bot import MiraiRunner
|
||||
|
||||
from .. import adapter as adapter_model
|
||||
|
||||
|
||||
class YiriMiraiAdapter(adapter_model.MessageSourceAdapter):
|
||||
"""YiriMirai适配器"""
|
||||
bot: mirai.Mirai
|
||||
|
||||
def __init__(self, config: dict):
|
||||
"""初始化YiriMirai的对象"""
|
||||
if 'adapter' not in config or \
|
||||
config['adapter'] == 'WebSocketAdapter':
|
||||
self.bot = mirai.Mirai(
|
||||
qq=config['qq'],
|
||||
adapter=mirai.WebSocketAdapter(
|
||||
host=config['host'],
|
||||
port=config['port'],
|
||||
verify_key=config['verifyKey']
|
||||
)
|
||||
)
|
||||
elif config['adapter'] == 'HTTPAdapter':
|
||||
self.bot = mirai.Mirai(
|
||||
qq=config['qq'],
|
||||
adapter=mirai.HTTPAdapter(
|
||||
host=config['host'],
|
||||
port=config['port'],
|
||||
verify_key=config['verifyKey']
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise Exception('Unknown adapter for YiriMirai: ' + config['adapter'])
|
||||
|
||||
def send_message(
|
||||
self,
|
||||
target_type: str,
|
||||
target_id: str,
|
||||
message: mirai.MessageChain
|
||||
):
|
||||
"""发送消息
|
||||
|
||||
Args:
|
||||
target_type (str): 目标类型,`person`或`group`
|
||||
target_id (str): 目标ID
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
"""
|
||||
task = None
|
||||
if target_type == 'person':
|
||||
task = self.bot.send_friend_message(int(target_id), message)
|
||||
elif target_type == 'group':
|
||||
task = self.bot.send_group_message(int(target_id), message)
|
||||
else:
|
||||
raise Exception('Unknown target type: ' + target_type)
|
||||
|
||||
asyncio.run(task)
|
||||
|
||||
def reply_message(
|
||||
self,
|
||||
message_source: mirai.MessageEvent,
|
||||
message: mirai.MessageChain,
|
||||
quote_origin: bool = False
|
||||
):
|
||||
"""回复消息
|
||||
|
||||
Args:
|
||||
message_source (mirai.MessageEvent): YiriMirai消息源事件
|
||||
message (mirai.MessageChain): YiriMirai库的消息链
|
||||
quote_origin (bool, optional): 是否引用原消息. Defaults to False.
|
||||
"""
|
||||
asyncio.run(self.bot.send(message_source, message, quote_origin))
|
||||
|
||||
def is_muted(self, group_id: int) -> bool:
|
||||
result = self.bot.member_info(target=group_id, member_id=self.bot.qq).get()
|
||||
result = asyncio.run(result)
|
||||
if result.mute_time_remaining > 0:
|
||||
return True
|
||||
return False
|
||||
|
||||
def register_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注册事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
self.bot.on(event_type)(callback)
|
||||
|
||||
def unregister_listener(
|
||||
self,
|
||||
event_type: typing.Type[mirai.Event],
|
||||
callback: typing.Callable[[mirai.Event], None]
|
||||
):
|
||||
"""注销事件监听器
|
||||
|
||||
Args:
|
||||
event_type (typing.Type[mirai.Event]): YiriMirai事件类型
|
||||
callback (typing.Callable[[mirai.Event], None]): 回调函数,接收一个参数,为YiriMirai事件
|
||||
"""
|
||||
assert isinstance(self.bot, mirai.Mirai)
|
||||
bus = self.bot.bus
|
||||
assert isinstance(bus, mirai.models.bus.ModelEventBus)
|
||||
|
||||
bus.unsubscribe(event_type, callback)
|
||||
|
||||
def run_sync(self):
|
||||
"""运行YiriMirai"""
|
||||
|
||||
# 创建新的
|
||||
loop = asyncio.new_event_loop()
|
||||
|
||||
loop.run_until_complete(MiraiRunner(self.bot)._run())
|
||||
|
||||
def kill(self) -> bool:
|
||||
return False
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user