mirror of
https://github.com/langbot-app/LangBot.git
synced 2025-11-25 03:15:06 +08:00
Compare commits
544 Commits
v4.3.0.bet
...
442c93193c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
442c93193c | ||
|
|
dbc09f46f4 | ||
|
|
cf43f09aff | ||
|
|
c3c51b0fbf | ||
|
|
2a87419fb2 | ||
|
|
8a42daa63f | ||
|
|
d91d98c9d4 | ||
|
|
2e82f2b2d1 | ||
|
|
9855b6d5bc | ||
|
|
403a721b94 | ||
|
|
f459c7017a | ||
|
|
c27ccb8475 | ||
|
|
abb2f7ae05 | ||
|
|
80606ed32c | ||
|
|
bc7c5fa864 | ||
|
|
ed0ea68037 | ||
|
|
6ac4dbc011 | ||
|
|
e642ffa5b3 | ||
|
|
6a24c951e0 | ||
|
|
58369480e2 | ||
|
|
43553e2c7d | ||
|
|
268ac8855a | ||
|
|
0f10cc62ec | ||
|
|
2ef47ebfb1 | ||
|
|
99f649c6b7 | ||
|
|
f25ac78538 | ||
|
|
cef24d8c4b | ||
|
|
7a10dfdac1 | ||
|
|
02892e57bb | ||
|
|
524c56a12b | ||
|
|
0e0d7cc7b8 | ||
|
|
1f877e2b8e | ||
|
|
8cd50fbdb4 | ||
|
|
42421d171e | ||
|
|
32215e9a3f | ||
|
|
dd1c7ffc39 | ||
|
|
b59bf62da5 | ||
|
|
f4c32f7b30 | ||
|
|
8844a5304d | ||
|
|
922ddd47f4 | ||
|
|
8c8702c6c9 | ||
|
|
70147fcf5e | ||
|
|
b3ee16e876 | ||
|
|
8d7976190d | ||
|
|
3edae3e678 | ||
|
|
81e411c558 | ||
|
|
dd2254203c | ||
|
|
f8658e2d77 | ||
|
|
021c3bbb94 | ||
|
|
0a64a96f65 | ||
|
|
48576dc46d | ||
|
|
12de0343b4 | ||
|
|
fcd34a9ff3 | ||
|
|
0dcf904d81 | ||
|
|
4fe92d8ece | ||
|
|
c893ffc177 | ||
|
|
a076ce5756 | ||
|
|
ad64e89a86 | ||
|
|
af82227dff | ||
|
|
8f2b177145 | ||
|
|
9a997fbcb0 | ||
|
|
17070471f7 | ||
|
|
cb48221ed3 | ||
|
|
68eb0290e0 | ||
|
|
913b9a24c4 | ||
|
|
61bc6a1dc2 | ||
|
|
4a84bf2355 | ||
|
|
2c2a89d9db | ||
|
|
c91e2f0efe | ||
|
|
411d082d2a | ||
|
|
d4e08a1765 | ||
|
|
b529d07479 | ||
|
|
d44df75e5c | ||
|
|
b74e07b608 | ||
|
|
ceb38d91b4 | ||
|
|
4a868afecd | ||
|
|
1cb9560663 | ||
|
|
8f878673ae | ||
|
|
74a5e37892 | ||
|
|
76a69ecc7e | ||
|
|
f06e3d3efa | ||
|
|
973e7bae42 | ||
|
|
94aa175c1a | ||
|
|
a0dec39905 | ||
|
|
777b766fff | ||
|
|
1adaa93034 | ||
|
|
9853eccd89 | ||
|
|
7699ba3cae | ||
|
|
9ac8b1a6fd | ||
|
|
f476c4724d | ||
|
|
3d12632c9f | ||
|
|
350e59fa6b | ||
|
|
b3d5b3fc8f | ||
|
|
4a02c531b2 | ||
|
|
2dd2abedde | ||
|
|
0d59c04151 | ||
|
|
08e0ede655 | ||
|
|
bcf89ca434 | ||
|
|
5e2f677d0b | ||
|
|
4df372052d | ||
|
|
2c5a0a00ba | ||
|
|
f3295b0fdd | ||
|
|
431d515c26 | ||
|
|
d9e6198992 | ||
|
|
3951cbf266 | ||
|
|
c47c4994ae | ||
|
|
a6072c2abb | ||
|
|
360422f25e | ||
|
|
f135c946bd | ||
|
|
750cc24900 | ||
|
|
46062bf4b9 | ||
|
|
869b2176a7 | ||
|
|
7138c101e3 | ||
|
|
04e26225cd | ||
|
|
f9f2de570f | ||
|
|
1dd598c7be | ||
|
|
c0f04e4f20 | ||
|
|
d3279b9823 | ||
|
|
2ad1f97e12 | ||
|
|
1046f3c2aa | ||
|
|
1afecf01e4 | ||
|
|
3ee7736361 | ||
|
|
0666778fea | ||
|
|
8df90558ab | ||
|
|
c1c03f11b4 | ||
|
|
da9afcd0ad | ||
|
|
bc1fbfa190 | ||
|
|
f3199dda20 | ||
|
|
4d0a28a1a7 | ||
|
|
76831579ad | ||
|
|
c2d752f9e9 | ||
|
|
4c0917556f | ||
|
|
e17b0cf5c5 | ||
|
|
f2647316a5 | ||
|
|
78cc157657 | ||
|
|
f576f990de | ||
|
|
254feb6a3a | ||
|
|
4c5139e9ff | ||
|
|
a055e37d3a | ||
|
|
bef5d6627b | ||
|
|
69767ebdb4 | ||
|
|
53ecd0933e | ||
|
|
d32f783392 | ||
|
|
4d3610cdf7 | ||
|
|
166eebabff | ||
|
|
9f2f1cd577 | ||
|
|
d86b884cab | ||
|
|
8345edd9f7 | ||
|
|
e3821b3f09 | ||
|
|
72ca62eae4 | ||
|
|
075091ed06 | ||
|
|
d0a3dee083 | ||
|
|
6ba9b6973d | ||
|
|
345eccf04c | ||
|
|
127a38b15c | ||
|
|
760db38c11 | ||
|
|
e4729337c8 | ||
|
|
7be226d3fa | ||
|
|
68372a4b7a | ||
|
|
d65f862c36 | ||
|
|
5fa75330cf | ||
|
|
547e3d098e | ||
|
|
0f39a31648 | ||
|
|
f1ddddfe00 | ||
|
|
4e61302156 | ||
|
|
9e3cf418ba | ||
|
|
3e29ec7892 | ||
|
|
f452742cd2 | ||
|
|
b560432b0b | ||
|
|
99e5478ced | ||
|
|
09dba91a37 | ||
|
|
18ec4adac9 | ||
|
|
8bedaa468a | ||
|
|
0ab366fcac | ||
|
|
d664039e54 | ||
|
|
6535ba4f72 | ||
|
|
3b181cff93 | ||
|
|
d1274366a0 | ||
|
|
35a4b0f55f | ||
|
|
399ebd36d7 | ||
|
|
a3552893aa | ||
|
|
b6cdf18c1a | ||
|
|
bd4c7f634d | ||
|
|
160ca540ab | ||
|
|
74c3a77ed1 | ||
|
|
0b527868bc | ||
|
|
0f35458cf7 | ||
|
|
70ad92ca16 | ||
|
|
c0d56aa905 | ||
|
|
ed869f7e81 | ||
|
|
ea42579374 | ||
|
|
72d701df3e | ||
|
|
1191b34fd4 | ||
|
|
ca3d3b2a66 | ||
|
|
2891708060 | ||
|
|
3f59bfac5c | ||
|
|
ee24582dd3 | ||
|
|
0ffb4d5792 | ||
|
|
5a6206f148 | ||
|
|
b1014313d6 | ||
|
|
fcc2f6a195 | ||
|
|
c8ffc79077 | ||
|
|
1a13a41168 | ||
|
|
bf279049c0 | ||
|
|
05cc58f2d7 | ||
|
|
d887881ea0 | ||
|
|
8bb2f3e745 | ||
|
|
e7e6eeda61 | ||
|
|
b6ff2be4df | ||
|
|
a2ea185602 | ||
|
|
5d60dbf3f9 | ||
|
|
66e252a59f | ||
|
|
8050ea1ffb | ||
|
|
04ab48de8e | ||
|
|
521a941792 | ||
|
|
6741850081 | ||
|
|
32f6d8b253 | ||
|
|
80a6b421e8 | ||
|
|
dc454b24ec | ||
|
|
0dce884519 | ||
|
|
d70196e799 | ||
|
|
2c6f127f47 | ||
|
|
72ec4b77d6 | ||
|
|
8b935175bd | ||
|
|
eae9980f5e | ||
|
|
6a7e88ffd6 | ||
|
|
e2071d9486 | ||
|
|
0b0a0c07a0 | ||
|
|
d7b354b9b4 | ||
|
|
78d36af96b | ||
|
|
6355140cd8 | ||
|
|
c224c32d03 | ||
|
|
826ceab5b8 | ||
|
|
a327182cb2 | ||
|
|
a9beb66aef | ||
|
|
ab6cf6c938 | ||
|
|
fc1e85ff16 | ||
|
|
6f98feaaf1 | ||
|
|
345c8b113f | ||
|
|
a95c422de9 | ||
|
|
93319ec2a8 | ||
|
|
e0d5469ae2 | ||
|
|
1f9f330cef | ||
|
|
f74502c711 | ||
|
|
11acd99c10 | ||
|
|
589f61931a | ||
|
|
caab1c2831 | ||
|
|
e701ceeeba | ||
|
|
2194b2975c | ||
|
|
89b25b8985 | ||
|
|
40f1af4434 | ||
|
|
91959527a4 | ||
|
|
46b4482a7d | ||
|
|
d7fc5283f7 | ||
|
|
4bdd8a021c | ||
|
|
c0ccdaf91a | ||
|
|
d9fa1cbb06 | ||
|
|
8858f432b5 | ||
|
|
e7fe41810e | ||
|
|
8f5ec48522 | ||
|
|
56183867a7 | ||
|
|
ea6ce2f552 | ||
|
|
55df728471 | ||
|
|
8a370a260e | ||
|
|
64764c412b | ||
|
|
f2d5c21712 | ||
|
|
6113c42014 | ||
|
|
fd9d1c4acc | ||
|
|
118ebddae6 | ||
|
|
2742144e12 | ||
|
|
83ff64698b | ||
|
|
b5e22c6db8 | ||
|
|
87ecb4e519 | ||
|
|
df524b8a7a | ||
|
|
8a7df423ab | ||
|
|
cafd623c92 | ||
|
|
4df11ef064 | ||
|
|
aa7c08ee00 | ||
|
|
b98de29b07 | ||
|
|
c7c2eb4518 | ||
|
|
37fa318258 | ||
|
|
ff7bebb782 | ||
|
|
30bb26f898 | ||
|
|
9c1f4e1690 | ||
|
|
865ee2ca01 | ||
|
|
c2264080bd | ||
|
|
67b622d5a6 | ||
|
|
a534c02d75 | ||
|
|
da890d3074 | ||
|
|
3049aa7a96 | ||
|
|
e66f674968 | ||
|
|
dd0e0abdc4 | ||
|
|
13f6396eb4 | ||
|
|
7bbaa4fcad | ||
|
|
e931d5eb88 | ||
|
|
4bbfa2f1d7 | ||
|
|
dd30d08c68 | ||
|
|
8ccda10045 | ||
|
|
46fbfbefea | ||
|
|
8f863cf530 | ||
|
|
2351193c51 | ||
|
|
8c87a47f5a | ||
|
|
b8b9a37825 | ||
|
|
13dd6fcee3 | ||
|
|
29f0075bd8 | ||
|
|
8a96ffbcc0 | ||
|
|
67f68d8101 | ||
|
|
ad59d92cef | ||
|
|
85f97860c5 | ||
|
|
8fd21e76f2 | ||
|
|
cc83ddbe21 | ||
|
|
99fcde1586 | ||
|
|
eab08dfbf3 | ||
|
|
dbf0200cca | ||
|
|
ac44f35299 | ||
|
|
d6a5fdd911 | ||
|
|
4668db716a | ||
|
|
f7cd6b76f2 | ||
|
|
b6d47187f5 | ||
|
|
051fffd41e | ||
|
|
c5480078b3 | ||
|
|
e744e9c4ef | ||
|
|
9f22b8b585 | ||
|
|
27cee0a4e1 | ||
|
|
6d35fc408c | ||
|
|
0607a0fa5c | ||
|
|
ed57d2fafa | ||
|
|
39ef92676b | ||
|
|
7301476228 | ||
|
|
457cc3eecd | ||
|
|
a381069bcc | ||
|
|
146c38e64c | ||
|
|
763c41729e | ||
|
|
0021efebd7 | ||
|
|
5f18a1b13a | ||
|
|
0124448479 | ||
|
|
e76bc80e51 | ||
|
|
a27560e804 | ||
|
|
46452de7b5 | ||
|
|
2aef139577 | ||
|
|
03b11481ed | ||
|
|
8c5cb71812 | ||
|
|
7c59bc1ce5 | ||
|
|
eede354d3b | ||
|
|
eb7b5dcc25 | ||
|
|
47e9ce96fc | ||
|
|
4e95bc542c | ||
|
|
e4f321ea7a | ||
|
|
246eb71b75 | ||
|
|
261f50b8ec | ||
|
|
9736d0708a | ||
|
|
02dbe80d2f | ||
|
|
0f239ace17 | ||
|
|
3a82ae8da5 | ||
|
|
c33c9eaab0 | ||
|
|
87f626f3cc | ||
|
|
e88302f1b4 | ||
|
|
5597dffaeb | ||
|
|
7f25d61531 | ||
|
|
15e524c6e6 | ||
|
|
4a1d033ee9 | ||
|
|
8adc88a8c0 | ||
|
|
a62b38eda7 | ||
|
|
fcef784180 | ||
|
|
c3ed4ef6a1 | ||
|
|
b9f768af25 | ||
|
|
47ff883fc7 | ||
|
|
68906c43ff | ||
|
|
c6deed4e6e | ||
|
|
b45cc59322 | ||
|
|
c33a96823b | ||
|
|
d3ab16761d | ||
|
|
70f23f24b0 | ||
|
|
00a8410c94 | ||
|
|
2a17e89a99 | ||
|
|
8fe0992c15 | ||
|
|
a9776b7b53 | ||
|
|
074d359c8e | ||
|
|
7728b4262b | ||
|
|
4905b5a738 | ||
|
|
43a259a1ae | ||
|
|
cffe493db0 | ||
|
|
0042629bf0 | ||
|
|
a7d638cc9a | ||
|
|
f84a79bf74 | ||
|
|
f5a0cb9175 | ||
|
|
f9a5507029 | ||
|
|
5ce32d2f04 | ||
|
|
4908996cac | ||
|
|
ee545a163f | ||
|
|
6e0e5802cc | ||
|
|
0d53843230 | ||
|
|
b65670cd1a | ||
|
|
ba4b5255a2 | ||
|
|
d60af2b451 | ||
|
|
44ac8b2b63 | ||
|
|
b70001c579 | ||
|
|
4a8f5516f6 | ||
|
|
48d11540ae | ||
|
|
84129e3339 | ||
|
|
377d455ec1 | ||
|
|
52280d7a05 | ||
|
|
0ce81a2df2 | ||
|
|
d9a2bb9a06 | ||
|
|
cb88da7f02 | ||
|
|
5560a4f52d | ||
|
|
e4d951b174 | ||
|
|
6e08bf71c9 | ||
|
|
daaf4b54ef | ||
|
|
3291266f5d | ||
|
|
307f6acd8c | ||
|
|
f1ac9c77e6 | ||
|
|
b434a4e3d7 | ||
|
|
2f209cd59f | ||
|
|
0f585fd5ef | ||
|
|
a152dece9a | ||
|
|
d3b31f7027 | ||
|
|
c00f05fca4 | ||
|
|
92c3a86356 | ||
|
|
341fdc409d | ||
|
|
ebd542f592 | ||
|
|
194b2d9814 | ||
|
|
7aed5cf1ed | ||
|
|
abc88c4979 | ||
|
|
3fa38f71f1 | ||
|
|
d651d956d6 | ||
|
|
6754666845 | ||
|
|
08e6f46b19 | ||
|
|
8f8c8ff367 | ||
|
|
63ec2a8c34 | ||
|
|
f58c8497c3 | ||
|
|
1497fdae56 | ||
|
|
10a3cb40e1 | ||
|
|
dd1ec15a39 | ||
|
|
ea51cec57e | ||
|
|
28ce986a8c | ||
|
|
489b145606 | ||
|
|
5e92bffaa6 | ||
|
|
277d1b0e30 | ||
|
|
13f4ed8d2c | ||
|
|
91cb5ca36c | ||
|
|
c34d54a6cb | ||
|
|
2d1737da1f | ||
|
|
adb0bf2473 | ||
|
|
a1b8b9d47b | ||
|
|
8df14bf9d9 | ||
|
|
c98d265a1e | ||
|
|
4e6782a6b7 | ||
|
|
5541e9e6d0 | ||
|
|
878ab0ef6b | ||
|
|
b61bd36b14 | ||
|
|
bb672d8f46 | ||
|
|
ba1a26543b | ||
|
|
cb868ee7b2 | ||
|
|
5dd5cb12ad | ||
|
|
2dfa83ff22 | ||
|
|
27bb4e1253 | ||
|
|
45afdbdfbb | ||
|
|
11e52a3ade | ||
|
|
4cbbe9e000 | ||
|
|
e986a0acaf | ||
|
|
333ec346ef | ||
|
|
2f2db4d445 | ||
|
|
e31883547d | ||
|
|
88c0066b06 | ||
|
|
fdc79b8d77 | ||
|
|
f244795e57 | ||
|
|
5a2aa19d0f | ||
|
|
f731115805 | ||
|
|
67bc065ccd | ||
|
|
d15df3338f | ||
|
|
c74cf38e9f | ||
|
|
81eb92646f | ||
|
|
019a9317e9 | ||
|
|
0e68a922bd | ||
|
|
4e1d81c9f8 | ||
|
|
199164fc4b | ||
|
|
c9c26213df | ||
|
|
b7c57104c4 | ||
|
|
0be08d8882 | ||
|
|
858cfd8d5a | ||
|
|
cbe297dc59 | ||
|
|
de76fed25a | ||
|
|
301509b1db | ||
|
|
a10e61735d | ||
|
|
1ef0193028 | ||
|
|
1e85d02ae4 | ||
|
|
d78a329aa9 | ||
|
|
bfdf238db5 | ||
|
|
234b61e2f8 | ||
|
|
9f43097361 | ||
|
|
f395cac893 | ||
|
|
fe122281fd | ||
|
|
6d788cadbc | ||
|
|
a79a22a74d | ||
|
|
2ed3b68790 | ||
|
|
bd9331ce62 | ||
|
|
14c161b733 | ||
|
|
815cdf8b4a | ||
|
|
7d5503dab2 | ||
|
|
9ba1ad5bd3 | ||
|
|
367d04d0f0 | ||
|
|
75c3ddde19 | ||
|
|
c6e77e42be | ||
|
|
4d0a39eb65 | ||
|
|
ac03a2dceb | ||
|
|
56248c350f | ||
|
|
244aaf6e20 | ||
|
|
cd25340826 | ||
|
|
ebd8e014c6 | ||
|
|
a0b7d759ac | ||
|
|
09884d3152 | ||
|
|
bef0d73e83 | ||
|
|
8d28ace252 | ||
|
|
39c062f73e | ||
|
|
0e5c9e19e1 | ||
|
|
01f2ef5694 | ||
|
|
c5b62b6ba3 | ||
|
|
bbf583ddb5 | ||
|
|
22ef1a399e | ||
|
|
0733f8878f | ||
|
|
f36a61dbb2 | ||
|
|
6d8936bd74 | ||
|
|
d2b93b3296 | ||
|
|
552fee9bac | ||
|
|
34fe8b324d | ||
|
|
c4671fbf1c | ||
|
|
4bcc06c955 | ||
|
|
348f6d9eaa | ||
|
|
157ffdc34c | ||
|
|
c81d5a1a49 | ||
|
|
a01706d163 | ||
|
|
a8d03c98dc | ||
|
|
68cdd163d3 | ||
|
|
4005a8a3e2 | ||
|
|
3f0153ea4d | ||
|
|
60b50a35f1 | ||
|
|
abd02f04af | ||
|
|
542409d48d | ||
|
|
3c6e858c35 | ||
|
|
8670ae82a3 | ||
|
|
14411a8af6 | ||
|
|
48c9d66ab8 | ||
|
|
0eac9135c0 |
13
.github/pull_request_template.md
vendored
13
.github/pull_request_template.md
vendored
@@ -2,6 +2,17 @@
|
||||
|
||||
> 请在此部分填写你实现/解决/优化的内容:
|
||||
> Summary of what you implemented/solved/optimized:
|
||||
>
|
||||
|
||||
### 更改前后对比截图 / Screenshots
|
||||
|
||||
> 请在此部分粘贴更改前后对比截图(可以是界面截图、控制台输出、对话截图等):
|
||||
> Please paste the screenshots of changes before and after here (can be interface screenshots, console output, conversation screenshots, etc.):
|
||||
>
|
||||
> 修改前 / Before:
|
||||
>
|
||||
> 修改后 / After:
|
||||
>
|
||||
|
||||
## 检查清单 / Checklist
|
||||
|
||||
@@ -9,7 +20,7 @@
|
||||
|
||||
*请在方括号间写`x`以打勾 / Please tick the box with `x`*
|
||||
|
||||
- [ ] 阅读仓库[贡献指引](https://github.com/RockChinQ/LangBot/blob/master/CONTRIBUTING.md)了吗? / Have you read the [contribution guide](https://github.com/RockChinQ/LangBot/blob/master/CONTRIBUTING.md)?
|
||||
- [ ] 阅读仓库[贡献指引](https://github.com/langbot-app/LangBot/blob/master/CONTRIBUTING.md)了吗? / Have you read the [contribution guide](https://github.com/langbot-app/LangBot/blob/master/CONTRIBUTING.md)?
|
||||
- [ ] 与项目所有者沟通过了吗? / Have you communicated with the project maintainer?
|
||||
- [ ] 我确定已自行测试所作的更改,确保功能符合预期。 / I have tested the changes and ensured they work as expected.
|
||||
|
||||
|
||||
11
.github/workflows/build-docker-image.yml
vendored
11
.github/workflows/build-docker-image.yml
vendored
@@ -1,10 +1,9 @@
|
||||
name: Build Docker Image
|
||||
on:
|
||||
#防止fork乱用action设置只能手动触发构建
|
||||
workflow_dispatch:
|
||||
## 发布release的时候会自动构建
|
||||
release:
|
||||
types: [published]
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
publish-docker-image:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -41,5 +40,9 @@ jobs:
|
||||
run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Create Buildx
|
||||
run: docker buildx create --name mybuilder --use
|
||||
- name: Build # image name: rockchin/langbot:<VERSION>
|
||||
run: docker buildx build --platform linux/arm64,linux/amd64 -t rockchin/langbot:${{ steps.check_version.outputs.version }} -t rockchin/langbot:latest . --push
|
||||
- name: Build for Release # only release, exclude pre-release
|
||||
if: ${{ github.event.release.prerelease == false }}
|
||||
run: docker buildx build --platform linux/amd64 -t rockchin/langbot:${{ steps.check_version.outputs.version }} -t rockchin/langbot:latest . --push
|
||||
- name: Build for Pre-release # no update for latest tag
|
||||
if: ${{ github.event.release.prerelease == true }}
|
||||
run: docker buildx build --platform linux/amd64 -t rockchin/langbot:${{ steps.check_version.outputs.version }} . --push
|
||||
46
.github/workflows/publish-to-pypi.yml
vendored
Normal file
46
.github/workflows/publish-to-pypi.yml
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
name: Build and Publish to PyPI
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
build-and-publish:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write # Required for trusted publishing to PyPI
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '22'
|
||||
|
||||
- name: Build frontend
|
||||
run: |
|
||||
cd web
|
||||
npm install -g pnpm
|
||||
pnpm install
|
||||
pnpm build
|
||||
mkdir -p ../src/langbot/web/out
|
||||
cp -r out ../src/langbot/web/
|
||||
|
||||
- name: Install the latest version of uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "latest"
|
||||
|
||||
- name: Build package
|
||||
run: |
|
||||
uv build
|
||||
|
||||
- name: Publish to PyPI
|
||||
run: |
|
||||
uv publish --token ${{ secrets.PYPI_TOKEN }}
|
||||
71
.github/workflows/run-tests.yml
vendored
Normal file
71
.github/workflows/run-tests.yml
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
name: Unit Tests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, ready_for_review, synchronize]
|
||||
paths:
|
||||
- 'pkg/**'
|
||||
- 'tests/**'
|
||||
- '.github/workflows/run-tests.yml'
|
||||
- 'pyproject.toml'
|
||||
- 'run_tests.sh'
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- develop
|
||||
paths:
|
||||
- 'pkg/**'
|
||||
- 'tests/**'
|
||||
- '.github/workflows/run-tests.yml'
|
||||
- 'pyproject.toml'
|
||||
- 'run_tests.sh'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Run Unit Tests
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['3.10', '3.11', '3.12']
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install uv
|
||||
run: |
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv sync --dev
|
||||
|
||||
- name: Run unit tests
|
||||
run: |
|
||||
bash run_tests.sh
|
||||
|
||||
- name: Upload coverage to Codecov
|
||||
if: matrix.python-version == '3.12'
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
files: ./coverage.xml
|
||||
flags: unit-tests
|
||||
name: unit-tests-coverage
|
||||
fail_ci_if_error: false
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
- name: Test Summary
|
||||
if: always()
|
||||
run: |
|
||||
echo "## Unit Tests Results" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "Python Version: ${{ matrix.python-version }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "Test Status: ${{ job.status }}" >> $GITHUB_STEP_SUMMARY
|
||||
108
.github/workflows/test-dev-image.yaml
vendored
Normal file
108
.github/workflows/test-dev-image.yaml
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
name: Test Dev Image
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["Build Dev Image"]
|
||||
types:
|
||||
- completed
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
test-dev-image:
|
||||
runs-on: ubuntu-latest
|
||||
# Only run if the build workflow succeeded
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' }}
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Update Docker Compose to use master tag
|
||||
working-directory: ./docker
|
||||
run: |
|
||||
# Replace 'latest' with 'master' tag for testing the dev image
|
||||
sed -i 's/rockchin\/langbot:latest/rockchin\/langbot:master/g' docker-compose.yaml
|
||||
echo "Updated docker-compose.yaml to use master tag:"
|
||||
cat docker-compose.yaml
|
||||
|
||||
- name: Start Docker Compose
|
||||
working-directory: ./docker
|
||||
run: docker compose up -d
|
||||
|
||||
- name: Wait and Test API
|
||||
run: |
|
||||
# Function to test API endpoint
|
||||
test_api() {
|
||||
echo "Testing API endpoint..."
|
||||
response=$(curl -s --connect-timeout 10 --max-time 30 -w "\n%{http_code}" http://localhost:5300/api/v1/system/info 2>&1)
|
||||
curl_exit_code=$?
|
||||
|
||||
if [ $curl_exit_code -ne 0 ]; then
|
||||
echo "Curl failed with exit code: $curl_exit_code"
|
||||
echo "Error: $response"
|
||||
return 1
|
||||
fi
|
||||
|
||||
http_code=$(echo "$response" | tail -n 1)
|
||||
response_body=$(echo "$response" | head -n -1)
|
||||
|
||||
if [ "$http_code" = "200" ]; then
|
||||
echo "API is healthy! Response code: $http_code"
|
||||
echo "Response: $response_body"
|
||||
return 0
|
||||
else
|
||||
echo "API returned non-200 response: $http_code"
|
||||
echo "Response body: $response_body"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Wait 30 seconds before first attempt
|
||||
echo "Waiting 30 seconds for services to start..."
|
||||
sleep 30
|
||||
|
||||
# Try up to 3 times with 30-second intervals
|
||||
max_attempts=3
|
||||
attempt=1
|
||||
|
||||
while [ $attempt -le $max_attempts ]; do
|
||||
echo "Attempt $attempt of $max_attempts"
|
||||
|
||||
if test_api; then
|
||||
echo "Success! API is responding correctly."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ $attempt -lt $max_attempts ]; then
|
||||
echo "Retrying in 30 seconds..."
|
||||
sleep 30
|
||||
fi
|
||||
|
||||
attempt=$((attempt + 1))
|
||||
done
|
||||
|
||||
# All attempts failed
|
||||
echo "Failed to get healthy response after $max_attempts attempts"
|
||||
exit 1
|
||||
|
||||
- name: Show Container Logs on Failure
|
||||
if: failure()
|
||||
working-directory: ./docker
|
||||
run: |
|
||||
echo "=== Docker Compose Status ==="
|
||||
docker compose ps
|
||||
echo ""
|
||||
echo "=== LangBot Logs ==="
|
||||
docker compose logs langbot
|
||||
echo ""
|
||||
echo "=== Plugin Runtime Logs ==="
|
||||
docker compose logs langbot_plugin_runtime
|
||||
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
working-directory: ./docker
|
||||
run: docker compose down
|
||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -22,7 +22,7 @@ tips.py
|
||||
venv*
|
||||
bin/
|
||||
.vscode
|
||||
test_*
|
||||
/test_*
|
||||
venv/
|
||||
hugchat.json
|
||||
qcapi
|
||||
@@ -42,4 +42,14 @@ botpy.log*
|
||||
test.py
|
||||
/web_ui
|
||||
.venv/
|
||||
uv.lock
|
||||
uv.lock
|
||||
/test
|
||||
plugins.bak
|
||||
coverage.xml
|
||||
.coverage
|
||||
src/langbot/web/
|
||||
|
||||
# Build artifacts
|
||||
/dist
|
||||
/build
|
||||
*.egg-info
|
||||
|
||||
86
AGENTS.md
Normal file
86
AGENTS.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# AGENTS.md
|
||||
|
||||
This file is for guiding code agents (like Claude Code, GitHub Copilot, OpenAI Codex, etc.) to work in LangBot project.
|
||||
|
||||
## Project Overview
|
||||
|
||||
LangBot is an open-source LLM-native instant messaging bot development platform, aiming to provide an out-of-the-box IM robot development experience, with Agent, RAG, MCP and other LLM application functions, supporting global instant messaging platforms, and providing rich API interfaces, supporting custom development.
|
||||
|
||||
LangBot has a comprehensive frontend; all operations can be performed through it. The project is split into these major parts:
|
||||
|
||||
- `./pkg`: The core python package of the project backend.
|
||||
- `./pkg/platform`: The platform module of the project, containing the logic of message platform adapters, bot managers, message session managers, etc.
|
||||
- `./pkg/provider`: The provider module of the project, containing the logic of LLM providers, tool providers, etc.
|
||||
- `./pkg/pipeline`: The pipeline module of the project, containing the logic of pipelines, stages, query pool, etc.
|
||||
- `./pkg/api`: The api module of the project, containing the http api controllers and services.
|
||||
- `./pkg/plugin`: LangBot bridge for connecting with plugin system.
|
||||
- `./libs`: Some SDKs we previously developed for the project, such as `qq_official_api`, `wecom_api`, etc.
|
||||
- `./templates`: Templates of config files, components, etc.
|
||||
- `./web`: Frontend codebase, built with Next.js + **shadcn** + **Tailwind CSS**.
|
||||
- `./docker`: docker-compose deployment files.
|
||||
|
||||
## Backend Development
|
||||
|
||||
We use `uv` to manage dependencies.
|
||||
|
||||
```bash
|
||||
pip install uv
|
||||
uv sync --dev
|
||||
```
|
||||
|
||||
Start the backend and run the project in development mode.
|
||||
|
||||
```bash
|
||||
uv run main.py
|
||||
```
|
||||
|
||||
Then you can access the project at `http://127.0.0.1:5300`.
|
||||
|
||||
## Frontend Development
|
||||
|
||||
We use `pnpm` to manage dependencies.
|
||||
|
||||
```bash
|
||||
cd web
|
||||
cp .env.example .env
|
||||
pnpm install
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
Then you can access the project at `http://127.0.0.1:3000`.
|
||||
|
||||
## Plugin System Architecture
|
||||
|
||||
LangBot is composed of various internal components such as Large Language Model tools, commands, messaging platform adapters, LLM requesters, and more. To meet extensibility and flexibility requirements, we have implemented a production-grade plugin system.
|
||||
|
||||
Each plugin runs in an independent process, managed uniformly by the Plugin Runtime. It has two operating modes: `stdio` and `websocket`. When LangBot is started directly by users (not running in a container), it uses `stdio` mode, which is common for personal users or lightweight environments. When LangBot runs in a container, it uses `websocket` mode, designed specifically for production environments.
|
||||
|
||||
Plugin Runtime automatically starts each installed plugin and interacts through stdio. In plugin development scenarios, developers can use the lbp command-line tool to start plugins and connect to the running Runtime via WebSocket for debugging.
|
||||
|
||||
> Plugin SDK, CLI, Runtime, and entities definitions shared between LangBot and plugins are contained in the [`langbot-plugin-sdk`](https://github.com/langbot-app/langbot-plugin-sdk) repository.
|
||||
|
||||
## Some Development Tips and Standards
|
||||
|
||||
- LangBot is a global project, any comments in code should be in English, and user experience should be considered in all aspects.
|
||||
- Thus you should consider the i18n support in all aspects.
|
||||
- LangBot is widely adopted in both toC and toB scenarios, so you should consider the compatibility and security in all aspects.
|
||||
- If you were asked to make a commit, please follow the commit message format:
|
||||
- format: <type>(<scope>): <subject>
|
||||
- type: must be a specific type, such as feat (new feature), fix (bug fix), docs (documentation), style (code style), refactor (refactoring), perf (performance optimization), etc.
|
||||
- scope: the scope of the commit, such as the package name, the file name, the function name, the class name, the module name, etc.
|
||||
- subject: the subject of the commit, such as the description of the commit, the reason for the commit, the impact of the commit, etc.
|
||||
|
||||
## Some Principles
|
||||
|
||||
- Keep it simple, stupid.
|
||||
- Entities should not be multiplied unnecessarily
|
||||
- 八荣八耻
|
||||
|
||||
以瞎猜接口为耻,以认真查询为荣。
|
||||
以模糊执行为耻,以寻求确认为荣。
|
||||
以臆想业务为耻,以人类确认为荣。
|
||||
以创造接口为耻,以复用现有为荣。
|
||||
以跳过验证为耻,以主动测试为荣。
|
||||
以破坏架构为耻,以遵循规范为荣。
|
||||
以假装理解为耻,以诚实无知为荣。
|
||||
以盲目修改为耻,以谨慎重构为荣。
|
||||
862
LICENSE
862
LICENSE
@@ -1,661 +1,201 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
103
README.md
103
README.md
@@ -1,51 +1,51 @@
|
||||
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social.png" alt="LangBot"/>
|
||||
<img src="https://docs.langbot.app/social_zh.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
<a href="https://trendshift.io/repositories/12901" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12901" alt="RockChinQ%2FLangBot | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
<a href="https://hellogithub.com/repository/langbot-app/LangBot" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=5ce8ae2aa4f74316bf393b57b952433c&claim_uid=gtmc6YWjMZkT21R" alt="Featured|HelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
|
||||
|
||||
[English](README_EN.md) / 简体中文 / [繁體中文](README_TW.md) / [日本語](README_JP.md) / (PR for your language)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://qm.qq.com/q/JLi38whHum)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
[](https://gitcode.com/RockChinQ/LangBot)
|
||||
|
||||
<a href="https://langbot.app">项目主页</a> |
|
||||
<a href="https://docs.langbot.app/zh/insight/guide.html">部署文档</a> |
|
||||
<a href="https://docs.langbot.app/zh/plugin/plugin-intro.html">插件介绍</a> |
|
||||
<a href="https://github.com/RockChinQ/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">提交插件</a>
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">提交插件</a>
|
||||
|
||||
<div align="center">
|
||||
😎高稳定、🧩支持扩展、🦄多模态 - 大模型原生即时通信机器人平台🤖
|
||||
</div>
|
||||
|
||||
<br/>
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://qm.qq.com/q/JLi38whHum)
|
||||
[](https://deepwiki.com/RockChinQ/LangBot)
|
||||
[](https://github.com/RockChinQ/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
[](https://gitcode.com/RockChinQ/LangBot)
|
||||
|
||||
简体中文 / [English](README_EN.md) / [日本語](README_JP.md) / (PR for your language)
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
## ✨ 特性
|
||||
|
||||
- 💬 大模型对话、Agent:支持多种大模型,适配群聊和私聊;具有多轮对话、工具调用、多模态能力,并深度适配 [Dify](https://dify.ai)。目前支持 QQ、QQ频道、企业微信、个人微信、飞书、Discord、Telegram 等平台。
|
||||
- 🛠️ 高稳定性、功能完备:原生支持访问控制、限速、敏感词过滤等机制;配置简单,支持多种部署方式。支持多流水线配置,不同机器人用于不同应用场景。
|
||||
- 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;适配 Anthropic [MCP 协议](https://modelcontextprotocol.io/);目前已有数百个插件。
|
||||
- 😻 Web 管理面板:支持通过浏览器管理 LangBot 实例,不再需要手动编写配置文件。
|
||||
LangBot 是一个开源的大语言模型原生即时通信机器人开发平台,旨在提供开箱即用的 IM 机器人开发体验,具有 Agent、RAG、MCP 等多种 LLM 应用功能,适配全球主流即时通信平台,并提供丰富的 API 接口,支持自定义开发。
|
||||
|
||||
## 📦 开始使用
|
||||
|
||||
#### 快速部署
|
||||
|
||||
使用 `uvx` 一键启动(需要先安装 [uv](https://docs.astral.sh/uv/getting-started/installation/)):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
访问 http://localhost:5300 即可开始使用。
|
||||
|
||||
#### Docker Compose 部署
|
||||
|
||||
```bash
|
||||
git clone https://github.com/RockChinQ/LangBot
|
||||
cd LangBot
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
@@ -71,23 +71,29 @@ docker compose up -d
|
||||
|
||||
直接使用发行版运行,查看文档[手动部署](https://docs.langbot.app/zh/deploy/langbot/manual.html)。
|
||||
|
||||
## 📸 效果展示
|
||||
#### Kubernetes 部署
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/bot-page.png" width="450px"/>
|
||||
参考 [Kubernetes 部署](./docker/README_K8S.md) 文档。
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/create-model.png" width="450px"/>
|
||||
## 😎 保持更新
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/edit-pipeline.png" width="450px"/>
|
||||
点击仓库右上角 Star 和 Watch 按钮,获取最新动态。
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/plugin-market.png" width="450px"/>
|
||||

|
||||
|
||||
<img alt="回复效果(带有联网插件)" src="https://docs.langbot.app/QChatGPT-0516.png" width="500px"/>
|
||||
## ✨ 特性
|
||||
|
||||
- WebUI Demo: https://demo.langbot.dev/
|
||||
- 登录信息:邮箱:`demo@langbot.app` 密码:`langbot123456`
|
||||
- 注意:仅展示webui效果,公开环境,请不要在其中填入您的任何敏感信息。
|
||||
- 💬 大模型对话、Agent:支持多种大模型,适配群聊和私聊;具有多轮对话、工具调用、多模态、流式输出能力,自带 RAG(知识库)实现,并深度适配 [Dify](https://dify.ai)。
|
||||
- 🤖 多平台支持:目前支持 QQ、QQ频道、企业微信、个人微信、飞书、Discord、Telegram 等平台。
|
||||
- 🛠️ 高稳定性、功能完备:原生支持访问控制、限速、敏感词过滤等机制;配置简单,支持多种部署方式。支持多流水线配置,不同机器人用于不同应用场景。
|
||||
- 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;适配 Anthropic [MCP 协议](https://modelcontextprotocol.io/);目前已有数百个插件。
|
||||
- 😻 Web 管理面板:支持通过浏览器管理 LangBot 实例,不再需要手动编写配置文件。
|
||||
|
||||
## 🔌 组件兼容性
|
||||
详细规格特性请访问[文档](https://docs.langbot.app/zh/insight/features.html)。
|
||||
|
||||
或访问 demo 环境:https://demo.langbot.dev/
|
||||
- 登录信息:邮箱:`demo@langbot.app` 密码:`langbot123456`
|
||||
- 注意:仅展示 WebUI 效果,公开环境,请不要在其中填入您的任何敏感信息。
|
||||
|
||||
### 消息平台
|
||||
|
||||
@@ -97,6 +103,7 @@ docker compose up -d
|
||||
| QQ 官方机器人 | ✅ | QQ 官方机器人,支持频道、私聊、群聊 |
|
||||
| 企业微信 | ✅ | |
|
||||
| 企微对外客服 | ✅ | |
|
||||
| 企微智能机器人 | ✅ | |
|
||||
| 个人微信 | ✅ | |
|
||||
| 微信公众号 | ✅ | |
|
||||
| 飞书 | ✅ | |
|
||||
@@ -104,10 +111,7 @@ docker compose up -d
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | 🚧 | |
|
||||
| WhatsApp | 🚧 | |
|
||||
|
||||
🚧: 正在开发中
|
||||
| LINE | ✅ | |
|
||||
|
||||
### 大模型能力
|
||||
|
||||
@@ -119,18 +123,23 @@ docker compose up -d
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [智谱AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [胜算云](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | 全球大模型都可调用(友情推荐) |
|
||||
| [优云智算](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型和 GPU 资源平台 |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型和 GPU 资源平台 |
|
||||
| [302 AI](https://share.302.ai/SuTG99) | ✅ | 大模型聚合平台 |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | 大模型聚合平台,专注全球大模型接入 |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | 大模型聚合平台 |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOps 平台 |
|
||||
| [Ollama](https://ollama.com/) | ✅ | 本地大模型运行平台 |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | 本地大模型运行平台 |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | 大模型接口聚合平台 |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | 大模型聚合平台 |
|
||||
| [小马算力](https://www.tokenpony.cn/453z1) | ✅ | 大模型聚合平台 |
|
||||
| [阿里云百炼](https://bailian.console.aliyun.com/) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | 大模型聚合平台 |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | 支持通过 MCP 协议获取工具 |
|
||||
| [百宝箱Tbox](https://www.tbox.cn/open) | ✅ | 蚂蚁百宝箱智能体平台,每月免费10亿大模型Token |
|
||||
|
||||
### TTS
|
||||
|
||||
@@ -148,14 +157,14 @@ docker compose up -d
|
||||
|
||||
## 😘 社区贡献
|
||||
|
||||
感谢以下[代码贡献者](https://github.com/RockChinQ/LangBot/graphs/contributors)和社区里其他成员对 LangBot 的贡献:
|
||||
感谢以下[代码贡献者](https://github.com/langbot-app/LangBot/graphs/contributors)和社区里其他成员对 LangBot 的贡献:
|
||||
|
||||
<a href="https://github.com/RockChinQ/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=RockChinQ/LangBot" />
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
|
||||
## 😎 保持更新
|
||||
<!--
|
||||
## For Code Agents
|
||||
|
||||
点击仓库右上角 Star 和 Watch 按钮,获取最新动态。
|
||||
|
||||

|
||||
If you're a code agent based on LLM, you should read [AGENTS.md](AGENTS.md) to understand the project structure and development process before you start any development.
|
||||
-->
|
||||
|
||||
101
README_EN.md
101
README_EN.md
@@ -1,49 +1,45 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social.png" alt="LangBot"/>
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
<a href="https://trendshift.io/repositories/12901" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12901" alt="RockChinQ%2FLangBot | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
English / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / (PR for your language)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">Home</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">Deployment</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">Plugin</a> |
|
||||
<a href="https://github.com/RockChinQ/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">Submit Plugin</a>
|
||||
|
||||
<div align="center">
|
||||
😎High Stability, 🧩Extension Supported, 🦄Multi-modal - LLM Native Instant Messaging Bot Platform🤖
|
||||
</div>
|
||||
|
||||
<br/>
|
||||
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/RockChinQ/LangBot)
|
||||
[](https://github.com/RockChinQ/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
[简体中文](README.md) / English / [日本語](README_JP.md) / (PR for your language)
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">Submit Plugin</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
## ✨ Features
|
||||
|
||||
- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapt to group chats and private chats; Supports multi-round conversations, tool calls, and multi-modal capabilities. Deeply integrates with [Dify](https://dify.ai). Currently supports QQ, QQ Channel, WeCom, personal WeChat, Lark, DingTalk, Discord, Telegram, etc.
|
||||
- 🛠️ High Stability, Feature-rich: Native access control, rate limiting, sensitive word filtering, etc. mechanisms; Easy to use, supports multiple deployment methods. Supports multiple pipeline configurations, different bots can be used for different scenarios.
|
||||
- 🧩 Plugin Extension, Active Community: Support event-driven, component extension, etc. plugin mechanisms; Integrate Anthropic [MCP protocol](https://modelcontextprotocol.io/); Currently has hundreds of plugins.
|
||||
- 😻 [New] Web UI: Support management LangBot instance through the browser. No need to manually write configuration files.
|
||||
LangBot is an open-source LLM native instant messaging robot development platform, aiming to provide out-of-the-box IM robot development experience, with Agent, RAG, MCP and other LLM application functions, adapting to global instant messaging platforms, and providing rich API interfaces, supporting custom development.
|
||||
|
||||
## 📦 Getting Started
|
||||
|
||||
#### Quick Start
|
||||
|
||||
Use `uvx` to start with one command (need to install [uv](https://docs.astral.sh/uv/getting-started/installation/)):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
Visit http://localhost:5300 to start using it.
|
||||
|
||||
#### Docker Compose Deployment
|
||||
|
||||
```bash
|
||||
git clone https://github.com/RockChinQ/LangBot
|
||||
cd LangBot
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
@@ -69,42 +65,46 @@ Community contributed Zeabur template.
|
||||
|
||||
Directly use the released version to run, see the [Manual Deployment](https://docs.langbot.app/en/deploy/langbot/manual.html) documentation.
|
||||
|
||||
## 📸 Demo
|
||||
#### Kubernetes Deployment
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/bot-page.png" width="400px"/>
|
||||
Refer to the [Kubernetes Deployment](./docker/README_K8S.md) documentation.
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/create-model.png" width="400px"/>
|
||||
## 😎 Stay Ahead
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/edit-pipeline.png" width="400px"/>
|
||||
Click the Star and Watch button in the upper right corner of the repository to get the latest updates.
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/plugin-market.png" width="400px"/>
|
||||

|
||||
|
||||
<img alt="Reply Effect (with Internet Plugin)" src="https://docs.langbot.app/QChatGPT-0516.png" width="500px"/>
|
||||
## ✨ Features
|
||||
|
||||
- WebUI Demo: https://demo.langbot.dev/
|
||||
- Login information: Email: `demo@langbot.app` Password: `langbot123456`
|
||||
- Note: Only the WebUI effect is shown, please do not fill in any sensitive information in the public environment.
|
||||
- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapts to group chats and private chats; Supports multi-round conversations, tool calls, multi-modal, and streaming output capabilities. Built-in RAG (knowledge base) implementation, and deeply integrates with [Dify](https://dify.ai).
|
||||
- 🤖 Multi-platform Support: Currently supports QQ, QQ Channel, WeCom, personal WeChat, Lark, DingTalk, Discord, Telegram, etc.
|
||||
- 🛠️ High Stability, Feature-rich: Native access control, rate limiting, sensitive word filtering, etc. mechanisms; Easy to use, supports multiple deployment methods. Supports multiple pipeline configurations, different bots can be used for different scenarios.
|
||||
- 🧩 Plugin Extension, Active Community: Support event-driven, component extension, etc. plugin mechanisms; Integrate Anthropic [MCP protocol](https://modelcontextprotocol.io/); Currently has hundreds of plugins.
|
||||
- 😻 Web UI: Supports managing LangBot instances through the browser. No need to manually write configuration files.
|
||||
|
||||
## 🔌 Component Compatibility
|
||||
For more detailed specifications, please refer to the [documentation](https://docs.langbot.app/en/insight/features.html).
|
||||
|
||||
Or visit the demo environment: https://demo.langbot.dev/
|
||||
- Login information: Email: `demo@langbot.app` Password: `langbot123456`
|
||||
- Note: For WebUI demo only, please do not fill in any sensitive information in the public environment.
|
||||
|
||||
### Message Platform
|
||||
|
||||
| Platform | Status | Remarks |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| Personal QQ | ✅ | |
|
||||
| QQ Official API | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| Personal WeChat | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | 🚧 | |
|
||||
| WhatsApp | 🚧 | |
|
||||
|
||||
🚧: In development
|
||||
|
||||
### LLMs
|
||||
|
||||
@@ -116,9 +116,12 @@ Directly use the released version to run, see the [Manual Deployment](https://do
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | LLM and GPU resource platform |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOps platform |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | LLM and GPU resource platform |
|
||||
| [302 AI](https://share.302.ai/SuTG99) | ✅ | LLM gateway (MaaS) |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | LLM aggregation platform, dedicated to global LLMs |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | LLM and GPU resource platform |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | LLM gateway (MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Ollama](https://ollama.com/) | ✅ | Local LLM running platform |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | Local LLM running platform |
|
||||
@@ -131,14 +134,8 @@ Directly use the released version to run, see the [Manual Deployment](https://do
|
||||
|
||||
## 🤝 Community Contribution
|
||||
|
||||
Thank you for the following [code contributors](https://github.com/RockChinQ/LangBot/graphs/contributors) and other members in the community for their contributions to LangBot:
|
||||
Thank you for the following [code contributors](https://github.com/langbot-app/LangBot/graphs/contributors) and other members in the community for their contributions to LangBot:
|
||||
|
||||
<a href="https://github.com/RockChinQ/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=RockChinQ/LangBot" />
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
|
||||
## 😎 Stay Ahead
|
||||
|
||||
Click the Star and Watch button in the upper right corner of the repository to get the latest updates.
|
||||
|
||||

|
||||
102
README_JP.md
102
README_JP.md
@@ -1,48 +1,45 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social.png" alt="LangBot"/>
|
||||
<img src="https://docs.langbot.app/social_en.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center">
|
||||
|
||||
<a href="https://trendshift.io/repositories/12901" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12901" alt="RockChinQ%2FLangBot | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / 日本語 / (PR for your language)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
<a href="https://langbot.app">ホーム</a> |
|
||||
<a href="https://docs.langbot.app/en/insight/guide.html">デプロイ</a> |
|
||||
<a href="https://docs.langbot.app/en/plugin/plugin-intro.html">プラグイン</a> |
|
||||
<a href="https://github.com/RockChinQ/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">プラグインの提出</a>
|
||||
|
||||
<div align="center">
|
||||
😎高い安定性、🧩拡張サポート、🦄マルチモーダル - LLMネイティブインスタントメッセージングボットプラットフォーム🤖
|
||||
</div>
|
||||
|
||||
<br/>
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://deepwiki.com/RockChinQ/LangBot)
|
||||
[](https://github.com/RockChinQ/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
|
||||
[简体中文](README_CN.md) / [English](README.md) / [日本語](README_JP.md) / (PR for your language)
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">プラグインの提出</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
## ✨ 機能
|
||||
|
||||
- 💬 LLM / エージェントとのチャット: 複数のLLMをサポートし、グループチャットとプライベートチャットに対応。マルチラウンドの会話、ツールの呼び出し、マルチモーダル機能をサポート。 [Dify](https://dify.ai) と深く統合。現在、QQ、QQ チャンネル、WeCom、個人 WeChat、Lark、DingTalk、Discord、Telegram など、複数のプラットフォームをサポートしています。
|
||||
- 🛠️ 高い安定性、豊富な機能: ネイティブのアクセス制御、レート制限、敏感な単語のフィルタリングなどのメカニズムをサポート。使いやすく、複数のデプロイ方法をサポート。複数のパイプライン設定をサポートし、異なるボットを異なる用途に使用できます。
|
||||
- 🧩 プラグイン拡張、活発なコミュニティ: イベント駆動、コンポーネント拡張などのプラグインメカニズムをサポート。適配 Anthropic [MCP プロトコル](https://modelcontextprotocol.io/);豊富なエコシステム、現在数百のプラグインが存在。
|
||||
- 😻 Web UI: ブラウザを通じてLangBotインスタンスを管理することをサポート。
|
||||
LangBot は、エージェント、RAG、MCP などの LLM アプリケーション機能を備えた、オープンソースの LLM ネイティブのインスタントメッセージングロボット開発プラットフォームです。世界中のインスタントメッセージングプラットフォームに適応し、豊富な API インターフェースを提供し、カスタム開発をサポートします。
|
||||
|
||||
## 📦 始め方
|
||||
|
||||
#### クイックスタート
|
||||
|
||||
`uvx` を使用した迅速なデプロイ([uv](https://docs.astral.sh/uv/getting-started/installation/) が必要です):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
http://localhost:5300 にアクセスして使用を開始します。
|
||||
|
||||
#### Docker Compose デプロイ
|
||||
|
||||
```bash
|
||||
git clone https://github.com/RockChinQ/LangBot
|
||||
cd LangBot
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
@@ -50,7 +47,7 @@ http://localhost:5300 にアクセスして使用を開始します。
|
||||
|
||||
詳細なドキュメントは[Dockerデプロイ](https://docs.langbot.app/en/deploy/langbot/docker.html)を参照してください。
|
||||
|
||||
#### BTPanelでのワンクリックデプロイ
|
||||
#### Panelでのワンクリックデプロイ
|
||||
|
||||
LangBotはBTPanelにリストされています。BTPanelをインストールしている場合は、[ドキュメント](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html)に従って使用できます。
|
||||
|
||||
@@ -68,42 +65,46 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
|
||||
|
||||
リリースバージョンを直接使用して実行します。[手動デプロイ](https://docs.langbot.app/en/deploy/langbot/manual.html)のドキュメントを参照してください。
|
||||
|
||||
## 📸 デモ
|
||||
#### Kubernetes デプロイ
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/bot-page.png" width="400px"/>
|
||||
[Kubernetes デプロイ](./docker/README_K8S.md) ドキュメントを参照してください。
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/create-model.png" width="400px"/>
|
||||
## 😎 最新情報を入手
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/edit-pipeline.png" width="400px"/>
|
||||
リポジトリの右上にある Star と Watch ボタンをクリックして、最新の更新を取得してください。
|
||||
|
||||
<img alt="bots" src="https://docs.langbot.app/webui/plugin-market.png" width="400px"/>
|
||||

|
||||
|
||||
<img alt="返信効果(インターネットプラグイン付き)" src="https://docs.langbot.app/QChatGPT-0516.png" width="500px"/>
|
||||
## ✨ 機能
|
||||
|
||||
- WebUIデモ: https://demo.langbot.dev/
|
||||
- ログイン情報: メール: `demo@langbot.app` パスワード: `langbot123456`
|
||||
- 注意: WebUIの効果のみを示しています。公開環境では、機密情報を入力しないでください。
|
||||
- 💬 LLM / エージェントとのチャット: 複数のLLMをサポートし、グループチャットとプライベートチャットに対応。マルチラウンドの会話、ツールの呼び出し、マルチモーダル、ストリーミング出力機能をサポート、RAG(知識ベース)を組み込み、[Dify](https://dify.ai) と深く統合。
|
||||
- 🤖 多プラットフォーム対応: 現在、QQ、QQ チャンネル、WeCom、個人 WeChat、Lark、DingTalk、Discord、Telegram など、複数のプラットフォームをサポートしています。
|
||||
- 🛠️ 高い安定性、豊富な機能: ネイティブのアクセス制御、レート制限、敏感な単語のフィルタリングなどのメカニズムをサポート。使いやすく、複数のデプロイ方法をサポート。複数のパイプライン設定をサポートし、異なるボットを異なる用途に使用できます。
|
||||
- 🧩 プラグイン拡張、活発なコミュニティ: イベント駆動、コンポーネント拡張などのプラグインメカニズムをサポート。適配 Anthropic [MCP プロトコル](https://modelcontextprotocol.io/);豊富なエコシステム、現在数百のプラグインが存在。
|
||||
- 😻 Web UI: ブラウザを通じてLangBotインスタンスを管理することをサポート。
|
||||
|
||||
## 🔌 コンポーネントの互換性
|
||||
詳細な仕様については、[ドキュメント](https://docs.langbot.app/en/insight/features.html)を参照してください。
|
||||
|
||||
または、デモ環境にアクセスしてください: https://demo.langbot.dev/
|
||||
- ログイン情報: メール: `demo@langbot.app` パスワード: `langbot123456`
|
||||
- 注意: WebUI のデモンストレーションのみの場合、公開環境では機密情報を入力しないでください。
|
||||
|
||||
### メッセージプラットフォーム
|
||||
|
||||
| プラットフォーム | ステータス | 備考 |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| 個人QQ | ✅ | |
|
||||
| QQ公式API | ✅ | |
|
||||
| WeCom | ✅ | |
|
||||
| WeComCS | ✅ | |
|
||||
| WeCom AI Bot | ✅ | |
|
||||
| 個人WeChat | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | 🚧 | |
|
||||
| WhatsApp | 🚧 | |
|
||||
|
||||
🚧: 開発中
|
||||
|
||||
### LLMs
|
||||
|
||||
@@ -115,8 +116,11 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型とGPUリソースプラットフォーム |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型とGPUリソースプラットフォーム |
|
||||
| [302 AI](https://share.302.ai/SuTG99) | ✅ | LLMゲートウェイ(MaaS) |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | LLMゲートウェイ(MaaS) |
|
||||
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | LLMとGPUリソースプラットフォーム |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | LLMゲートウェイ(MaaS) |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOpsプラットフォーム |
|
||||
| [Ollama](https://ollama.com/) | ✅ | ローカルLLM実行プラットフォーム |
|
||||
@@ -130,14 +134,8 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
|
||||
|
||||
## 🤝 コミュニティ貢献
|
||||
|
||||
LangBot への貢献に対して、以下の [コード貢献者](https://github.com/RockChinQ/LangBot/graphs/contributors) とコミュニティの他のメンバーに感謝します。
|
||||
LangBot への貢献に対して、以下の [コード貢献者](https://github.com/langbot-app/LangBot/graphs/contributors) とコミュニティの他のメンバーに感謝します。
|
||||
|
||||
<a href="https://github.com/RockChinQ/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=RockChinQ/LangBot" />
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
|
||||
## 😎 最新情報を入手
|
||||
|
||||
リポジトリの右上にある Star と Watch ボタンをクリックして、最新の更新を取得してください。
|
||||
|
||||

|
||||
157
README_TW.md
Normal file
157
README_TW.md
Normal file
@@ -0,0 +1,157 @@
|
||||
<p align="center">
|
||||
<a href="https://langbot.app">
|
||||
<img src="https://docs.langbot.app/social_zh.png" alt="LangBot"/>
|
||||
</a>
|
||||
|
||||
<div align="center"><a href="https://hellogithub.com/repository/langbot-app/LangBot" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=5ce8ae2aa4f74316bf393b57b952433c&claim_uid=gtmc6YWjMZkT21R" alt="Featured|HelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
|
||||
|
||||
[English](README_EN.md) / [简体中文](README.md) / 繁體中文 / [日本語](README_JP.md) / (PR for your language)
|
||||
|
||||
[](https://discord.gg/wdNEHETs87)
|
||||
[](https://qm.qq.com/q/JLi38whHum)
|
||||
[](https://deepwiki.com/langbot-app/LangBot)
|
||||
[](https://github.com/langbot-app/LangBot/releases/latest)
|
||||
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
|
||||
[](https://gitcode.com/RockChinQ/LangBot)
|
||||
|
||||
<a href="https://langbot.app">主頁</a> |
|
||||
<a href="https://docs.langbot.app/zh/insight/guide.html">部署文件</a> |
|
||||
<a href="https://docs.langbot.app/zh/plugin/plugin-intro.html">外掛介紹</a> |
|
||||
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">提交外掛</a>
|
||||
|
||||
</div>
|
||||
|
||||
</p>
|
||||
|
||||
LangBot 是一個開源的大語言模型原生即時通訊機器人開發平台,旨在提供開箱即用的 IM 機器人開發體驗,具有 Agent、RAG、MCP 等多種 LLM 應用功能,適配全球主流即時通訊平台,並提供豐富的 API 介面,支援自定義開發。
|
||||
|
||||
## 📦 開始使用
|
||||
|
||||
#### 快速部署
|
||||
|
||||
使用 `uvx` 一鍵啟動(需要先安裝 [uv](https://docs.astral.sh/uv/getting-started/installation/) ):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
訪問 http://localhost:5300 即可開始使用。
|
||||
|
||||
#### Docker Compose 部署
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
訪問 http://localhost:5300 即可開始使用。
|
||||
|
||||
詳細文件[Docker 部署](https://docs.langbot.app/zh/deploy/langbot/docker.html)。
|
||||
|
||||
#### 寶塔面板部署
|
||||
|
||||
已上架寶塔面板,若您已安裝寶塔面板,可以根據[文件](https://docs.langbot.app/zh/deploy/langbot/one-click/bt.html)使用。
|
||||
|
||||
#### Zeabur 雲端部署
|
||||
|
||||
社群貢獻的 Zeabur 模板。
|
||||
|
||||
[](https://zeabur.com/zh-CN/templates/ZKTBDH)
|
||||
|
||||
#### Railway 雲端部署
|
||||
|
||||
[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
|
||||
|
||||
#### 手動部署
|
||||
|
||||
直接使用發行版運行,查看文件[手動部署](https://docs.langbot.app/zh/deploy/langbot/manual.html)。
|
||||
|
||||
#### Kubernetes 部署
|
||||
|
||||
參考 [Kubernetes 部署](./docker/README_K8S.md) 文件。
|
||||
|
||||
## 😎 保持更新
|
||||
|
||||
點擊倉庫右上角 Star 和 Watch 按鈕,獲取最新動態。
|
||||
|
||||

|
||||
|
||||
## ✨ 特性
|
||||
|
||||
- 💬 大模型對話、Agent:支援多種大模型,適配群聊和私聊;具有多輪對話、工具調用、多模態、流式輸出能力,自帶 RAG(知識庫)實現,並深度適配 [Dify](https://dify.ai)。
|
||||
- 🤖 多平台支援:目前支援 QQ、QQ頻道、企業微信、個人微信、飛書、Discord、Telegram 等平台。
|
||||
- 🛠️ 高穩定性、功能完備:原生支援訪問控制、限速、敏感詞過濾等機制;配置簡單,支援多種部署方式。支援多流水線配置,不同機器人用於不同應用場景。
|
||||
- 🧩 外掛擴展、活躍社群:支援事件驅動、組件擴展等外掛機制;適配 Anthropic [MCP 協議](https://modelcontextprotocol.io/);目前已有數百個外掛。
|
||||
- 😻 Web 管理面板:支援通過瀏覽器管理 LangBot 實例,不再需要手動編寫配置文件。
|
||||
|
||||
詳細規格特性請訪問[文件](https://docs.langbot.app/zh/insight/features.html)。
|
||||
|
||||
或訪問 demo 環境:https://demo.langbot.dev/
|
||||
- 登入資訊:郵箱:`demo@langbot.app` 密碼:`langbot123456`
|
||||
- 注意:僅展示 WebUI 效果,公開環境,請不要在其中填入您的任何敏感資訊。
|
||||
|
||||
### 訊息平台
|
||||
|
||||
| 平台 | 狀態 | 備註 |
|
||||
| --- | --- | --- |
|
||||
| Discord | ✅ | |
|
||||
| Telegram | ✅ | |
|
||||
| Slack | ✅ | |
|
||||
| LINE | ✅ | |
|
||||
| QQ 個人號 | ✅ | QQ 個人號私聊、群聊 |
|
||||
| QQ 官方機器人 | ✅ | QQ 官方機器人,支援頻道、私聊、群聊 |
|
||||
| 微信 | ✅ | |
|
||||
| 企微對外客服 | ✅ | |
|
||||
| 企微智能機器人 | ✅ | |
|
||||
| 微信公眾號 | ✅ | |
|
||||
| Lark | ✅ | |
|
||||
| DingTalk | ✅ | |
|
||||
|
||||
### 大模型能力
|
||||
|
||||
| 模型 | 狀態 | 備註 |
|
||||
| --- | --- | --- |
|
||||
| [OpenAI](https://platform.openai.com/) | ✅ | 可接入任何 OpenAI 介面格式模型 |
|
||||
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
|
||||
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
|
||||
| [Anthropic](https://www.anthropic.com/) | ✅ | |
|
||||
| [xAI](https://x.ai/) | ✅ | |
|
||||
| [智譜AI](https://open.bigmodel.cn/) | ✅ | |
|
||||
| [勝算雲](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | 大模型和 GPU 資源平台 |
|
||||
| [優雲智算](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型和 GPU 資源平台 |
|
||||
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型和 GPU 資源平台 |
|
||||
| [接口 AI](https://jiekou.ai/) | ✅ | 大模型聚合平台,專注全球大模型接入 |
|
||||
| [302.AI](https://share.302.ai/SuTG99) | ✅ | 大模型聚合平台 |
|
||||
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
|
||||
| [Dify](https://dify.ai) | ✅ | LLMOps 平台 |
|
||||
| [Ollama](https://ollama.com/) | ✅ | 本地大模型運行平台 |
|
||||
| [LMStudio](https://lmstudio.ai/) | ✅ | 本地大模型運行平台 |
|
||||
| [GiteeAI](https://ai.gitee.com/) | ✅ | 大模型介面聚合平台 |
|
||||
| [SiliconFlow](https://siliconflow.cn/) | ✅ | 大模型聚合平台 |
|
||||
| [阿里雲百煉](https://bailian.console.aliyun.com/) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | 大模型聚合平台, LLMOps 平台 |
|
||||
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | 大模型聚合平台 |
|
||||
| [MCP](https://modelcontextprotocol.io/) | ✅ | 支援通過 MCP 協議獲取工具 |
|
||||
|
||||
### TTS
|
||||
|
||||
| 平台/模型 | 備註 |
|
||||
| --- | --- |
|
||||
| [FishAudio](https://fish.audio/zh-CN/discovery/) | [外掛](https://github.com/the-lazy-me/NewChatVoice) |
|
||||
| [海豚 AI](https://www.ttson.cn/?source=thelazy) | [外掛](https://github.com/the-lazy-me/NewChatVoice) |
|
||||
| [AzureTTS](https://portal.azure.com/) | [外掛](https://github.com/Ingnaryk/LangBot_AzureTTS) |
|
||||
|
||||
### 文生圖
|
||||
|
||||
| 平台/模型 | 備註 |
|
||||
| --- | --- |
|
||||
| 阿里雲百煉 | [外掛](https://github.com/Thetail001/LangBot_BailianTextToImagePlugin)
|
||||
|
||||
## 😘 社群貢獻
|
||||
|
||||
感謝以下[程式碼貢獻者](https://github.com/langbot-app/LangBot/graphs/contributors)和社群裡其他成員對 LangBot 的貢獻:
|
||||
|
||||
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
|
||||
</a>
|
||||
4
codecov.yml
Normal file
4
codecov.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
coverage:
|
||||
status:
|
||||
project: off
|
||||
patch: off
|
||||
@@ -1,16 +0,0 @@
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
langbot:
|
||||
image: rockchin/langbot:latest
|
||||
container_name: langbot
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
- ./plugins:/app/plugins
|
||||
restart: on-failure
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
- 5300:5300 # 供 WebUI 使用
|
||||
- 2280-2290:2280-2290 # 供消息平台适配器反向连接
|
||||
# 根据具体环境配置网络
|
||||
629
docker/README_K8S.md
Normal file
629
docker/README_K8S.md
Normal file
@@ -0,0 +1,629 @@
|
||||
# LangBot Kubernetes 部署指南 / Kubernetes Deployment Guide
|
||||
|
||||
[简体中文](#简体中文) | [English](#english)
|
||||
|
||||
---
|
||||
|
||||
## 简体中文
|
||||
|
||||
### 概述
|
||||
|
||||
本指南提供了在 Kubernetes 集群中部署 LangBot 的完整步骤。Kubernetes 部署配置基于 `docker-compose.yaml`,适用于生产环境的容器化部署。
|
||||
|
||||
### 前置要求
|
||||
|
||||
- Kubernetes 集群(版本 1.19+)
|
||||
- `kubectl` 命令行工具已配置并可访问集群
|
||||
- 集群中有可用的存储类(StorageClass)用于持久化存储(可选但推荐)
|
||||
- 至少 2 vCPU 和 4GB RAM 的可用资源
|
||||
|
||||
### 架构说明
|
||||
|
||||
Kubernetes 部署包含以下组件:
|
||||
|
||||
1. **langbot**: 主应用服务
|
||||
- 提供 Web UI(端口 5300)
|
||||
- 处理平台 webhook(端口 2280-2290)
|
||||
- 数据持久化卷
|
||||
|
||||
2. **langbot-plugin-runtime**: 插件运行时服务
|
||||
- WebSocket 通信(端口 5400)
|
||||
- 插件数据持久化卷
|
||||
|
||||
3. **持久化存储**:
|
||||
- `langbot-data`: LangBot 主数据
|
||||
- `langbot-plugins`: 插件文件
|
||||
- `langbot-plugin-runtime-data`: 插件运行时数据
|
||||
|
||||
### 快速开始
|
||||
|
||||
#### 1. 下载部署文件
|
||||
|
||||
```bash
|
||||
# 克隆仓库
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
|
||||
# 或直接下载 kubernetes.yaml
|
||||
wget https://raw.githubusercontent.com/langbot-app/LangBot/main/docker/kubernetes.yaml
|
||||
```
|
||||
|
||||
#### 2. 部署到 Kubernetes
|
||||
|
||||
```bash
|
||||
# 应用所有配置
|
||||
kubectl apply -f kubernetes.yaml
|
||||
|
||||
# 检查部署状态
|
||||
kubectl get all -n langbot
|
||||
|
||||
# 查看 Pod 日志
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
```
|
||||
|
||||
#### 3. 访问 LangBot
|
||||
|
||||
默认情况下,LangBot 服务使用 ClusterIP 类型,只能在集群内部访问。您可以选择以下方式之一来访问:
|
||||
|
||||
**选项 A: 端口转发(推荐用于测试)**
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
然后访问 http://localhost:5300
|
||||
|
||||
**选项 B: NodePort(适用于开发环境)**
|
||||
|
||||
编辑 `kubernetes.yaml`,取消注释 NodePort Service 部分,然后:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# 获取节点 IP
|
||||
kubectl get nodes -o wide
|
||||
# 访问 http://<NODE_IP>:30300
|
||||
```
|
||||
|
||||
**选项 C: LoadBalancer(适用于云环境)**
|
||||
|
||||
编辑 `kubernetes.yaml`,取消注释 LoadBalancer Service 部分,然后:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# 获取外部 IP
|
||||
kubectl get svc -n langbot langbot-loadbalancer
|
||||
# 访问 http://<EXTERNAL_IP>
|
||||
```
|
||||
|
||||
**选项 D: Ingress(推荐用于生产环境)**
|
||||
|
||||
确保集群中已安装 Ingress Controller(如 nginx-ingress),然后:
|
||||
|
||||
1. 编辑 `kubernetes.yaml` 中的 Ingress 配置
|
||||
2. 修改域名为您的实际域名
|
||||
3. 应用配置:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# 访问 http://langbot.yourdomain.com
|
||||
```
|
||||
|
||||
### 配置说明
|
||||
|
||||
#### 环境变量
|
||||
|
||||
在 `ConfigMap` 中配置环境变量:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: langbot-config
|
||||
namespace: langbot
|
||||
data:
|
||||
TZ: "Asia/Shanghai" # 修改为您的时区
|
||||
```
|
||||
|
||||
#### 存储配置
|
||||
|
||||
默认使用动态存储分配。如果您有特定的 StorageClass,请在 PVC 中指定:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
storageClassName: your-storage-class-name
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
```
|
||||
|
||||
#### 资源限制
|
||||
|
||||
根据您的需求调整资源限制:
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "2000m"
|
||||
```
|
||||
|
||||
### 常用操作
|
||||
|
||||
#### 查看日志
|
||||
|
||||
```bash
|
||||
# 查看 LangBot 主服务日志
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
|
||||
# 查看插件运行时日志
|
||||
kubectl logs -n langbot -l app=langbot-plugin-runtime -f
|
||||
```
|
||||
|
||||
#### 重启服务
|
||||
|
||||
```bash
|
||||
# 重启 LangBot
|
||||
kubectl rollout restart deployment/langbot -n langbot
|
||||
|
||||
# 重启插件运行时
|
||||
kubectl rollout restart deployment/langbot-plugin-runtime -n langbot
|
||||
```
|
||||
|
||||
#### 更新镜像
|
||||
|
||||
```bash
|
||||
# 更新到最新版本
|
||||
kubectl set image deployment/langbot -n langbot langbot=rockchin/langbot:latest
|
||||
kubectl set image deployment/langbot-plugin-runtime -n langbot langbot-plugin-runtime=rockchin/langbot:latest
|
||||
|
||||
# 检查更新状态
|
||||
kubectl rollout status deployment/langbot -n langbot
|
||||
```
|
||||
|
||||
#### 扩容(不推荐)
|
||||
|
||||
注意:由于 LangBot 使用 ReadWriteOnce 的持久化存储,不支持多副本扩容。如需高可用,请考虑使用 ReadWriteMany 存储或其他架构方案。
|
||||
|
||||
#### 备份数据
|
||||
|
||||
```bash
|
||||
# 备份 PVC 数据
|
||||
kubectl exec -n langbot -it <langbot-pod-name> -- tar czf /tmp/backup.tar.gz /app/data
|
||||
kubectl cp langbot/<langbot-pod-name>:/tmp/backup.tar.gz ./backup.tar.gz
|
||||
```
|
||||
|
||||
### 卸载
|
||||
|
||||
```bash
|
||||
# 删除所有资源(保留 PVC)
|
||||
kubectl delete deployment,service,configmap -n langbot --all
|
||||
|
||||
# 删除 PVC(会删除数据)
|
||||
kubectl delete pvc -n langbot --all
|
||||
|
||||
# 删除命名空间
|
||||
kubectl delete namespace langbot
|
||||
```
|
||||
|
||||
### 故障排查
|
||||
|
||||
#### Pod 无法启动
|
||||
|
||||
```bash
|
||||
# 查看 Pod 状态
|
||||
kubectl get pods -n langbot
|
||||
|
||||
# 查看详细信息
|
||||
kubectl describe pod -n langbot <pod-name>
|
||||
|
||||
# 查看事件
|
||||
kubectl get events -n langbot --sort-by='.lastTimestamp'
|
||||
```
|
||||
|
||||
#### 存储问题
|
||||
|
||||
```bash
|
||||
# 检查 PVC 状态
|
||||
kubectl get pvc -n langbot
|
||||
|
||||
# 检查 PV
|
||||
kubectl get pv
|
||||
```
|
||||
|
||||
#### 网络访问问题
|
||||
|
||||
```bash
|
||||
# 检查 Service
|
||||
kubectl get svc -n langbot
|
||||
|
||||
# 检查端口转发
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
### 生产环境建议
|
||||
|
||||
1. **使用特定版本标签**:避免使用 `latest` 标签,使用具体版本号如 `rockchin/langbot:v1.0.0`
|
||||
2. **配置资源限制**:根据实际负载调整 CPU 和内存限制
|
||||
3. **使用 Ingress + TLS**:配置 HTTPS 访问和证书管理
|
||||
4. **配置监控和告警**:集成 Prometheus、Grafana 等监控工具
|
||||
5. **定期备份**:配置自动备份策略保护数据
|
||||
6. **使用专用 StorageClass**:为生产环境配置高性能存储
|
||||
7. **配置亲和性规则**:确保 Pod 调度到合适的节点
|
||||
|
||||
### 高级配置
|
||||
|
||||
#### 使用 Secrets 管理敏感信息
|
||||
|
||||
如果需要配置 API 密钥等敏感信息:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: langbot-secrets
|
||||
namespace: langbot
|
||||
type: Opaque
|
||||
data:
|
||||
api_key: <base64-encoded-value>
|
||||
```
|
||||
|
||||
然后在 Deployment 中引用:
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: API_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: langbot-secrets
|
||||
key: api_key
|
||||
```
|
||||
|
||||
#### 配置水平自动扩缩容(HPA)
|
||||
|
||||
注意:需要确保使用 ReadWriteMany 存储类型
|
||||
|
||||
```yaml
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: langbot-hpa
|
||||
namespace: langbot
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: langbot
|
||||
minReplicas: 1
|
||||
maxReplicas: 3
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: 70
|
||||
```
|
||||
|
||||
### 参考资源
|
||||
|
||||
- [LangBot 官方文档](https://docs.langbot.app)
|
||||
- [Docker 部署文档](https://docs.langbot.app/zh/deploy/langbot/docker.html)
|
||||
- [Kubernetes 官方文档](https://kubernetes.io/docs/)
|
||||
|
||||
---
|
||||
|
||||
## English
|
||||
|
||||
### Overview
|
||||
|
||||
This guide provides complete steps for deploying LangBot in a Kubernetes cluster. The Kubernetes deployment configuration is based on `docker-compose.yaml` and is suitable for production containerized deployments.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Kubernetes cluster (version 1.19+)
|
||||
- `kubectl` command-line tool configured with cluster access
|
||||
- Available StorageClass in the cluster for persistent storage (optional but recommended)
|
||||
- At least 2 vCPU and 4GB RAM of available resources
|
||||
|
||||
### Architecture
|
||||
|
||||
The Kubernetes deployment includes the following components:
|
||||
|
||||
1. **langbot**: Main application service
|
||||
- Provides Web UI (port 5300)
|
||||
- Handles platform webhooks (ports 2280-2290)
|
||||
- Data persistence volume
|
||||
|
||||
2. **langbot-plugin-runtime**: Plugin runtime service
|
||||
- WebSocket communication (port 5400)
|
||||
- Plugin data persistence volume
|
||||
|
||||
3. **Persistent Storage**:
|
||||
- `langbot-data`: LangBot main data
|
||||
- `langbot-plugins`: Plugin files
|
||||
- `langbot-plugin-runtime-data`: Plugin runtime data
|
||||
|
||||
### Quick Start
|
||||
|
||||
#### 1. Download Deployment Files
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot/docker
|
||||
|
||||
# Or download kubernetes.yaml directly
|
||||
wget https://raw.githubusercontent.com/langbot-app/LangBot/main/docker/kubernetes.yaml
|
||||
```
|
||||
|
||||
#### 2. Deploy to Kubernetes
|
||||
|
||||
```bash
|
||||
# Apply all configurations
|
||||
kubectl apply -f kubernetes.yaml
|
||||
|
||||
# Check deployment status
|
||||
kubectl get all -n langbot
|
||||
|
||||
# View Pod logs
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
```
|
||||
|
||||
#### 3. Access LangBot
|
||||
|
||||
By default, LangBot service uses ClusterIP type, accessible only within the cluster. Choose one of the following methods to access:
|
||||
|
||||
**Option A: Port Forwarding (Recommended for testing)**
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
Then visit http://localhost:5300
|
||||
|
||||
**Option B: NodePort (Suitable for development)**
|
||||
|
||||
Edit `kubernetes.yaml`, uncomment the NodePort Service section, then:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# Get node IP
|
||||
kubectl get nodes -o wide
|
||||
# Visit http://<NODE_IP>:30300
|
||||
```
|
||||
|
||||
**Option C: LoadBalancer (Suitable for cloud environments)**
|
||||
|
||||
Edit `kubernetes.yaml`, uncomment the LoadBalancer Service section, then:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# Get external IP
|
||||
kubectl get svc -n langbot langbot-loadbalancer
|
||||
# Visit http://<EXTERNAL_IP>
|
||||
```
|
||||
|
||||
**Option D: Ingress (Recommended for production)**
|
||||
|
||||
Ensure an Ingress Controller (e.g., nginx-ingress) is installed in the cluster, then:
|
||||
|
||||
1. Edit the Ingress configuration in `kubernetes.yaml`
|
||||
2. Change the domain to your actual domain
|
||||
3. Apply configuration:
|
||||
|
||||
```bash
|
||||
kubectl apply -f kubernetes.yaml
|
||||
# Visit http://langbot.yourdomain.com
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
#### Environment Variables
|
||||
|
||||
Configure environment variables in ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: langbot-config
|
||||
namespace: langbot
|
||||
data:
|
||||
TZ: "Asia/Shanghai" # Change to your timezone
|
||||
```
|
||||
|
||||
#### Storage Configuration
|
||||
|
||||
Uses dynamic storage provisioning by default. If you have a specific StorageClass, specify it in PVC:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
storageClassName: your-storage-class-name
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
```
|
||||
|
||||
#### Resource Limits
|
||||
|
||||
Adjust resource limits based on your needs:
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "2000m"
|
||||
```
|
||||
|
||||
### Common Operations
|
||||
|
||||
#### View Logs
|
||||
|
||||
```bash
|
||||
# View LangBot main service logs
|
||||
kubectl logs -n langbot -l app=langbot -f
|
||||
|
||||
# View plugin runtime logs
|
||||
kubectl logs -n langbot -l app=langbot-plugin-runtime -f
|
||||
```
|
||||
|
||||
#### Restart Services
|
||||
|
||||
```bash
|
||||
# Restart LangBot
|
||||
kubectl rollout restart deployment/langbot -n langbot
|
||||
|
||||
# Restart plugin runtime
|
||||
kubectl rollout restart deployment/langbot-plugin-runtime -n langbot
|
||||
```
|
||||
|
||||
#### Update Images
|
||||
|
||||
```bash
|
||||
# Update to latest version
|
||||
kubectl set image deployment/langbot -n langbot langbot=rockchin/langbot:latest
|
||||
kubectl set image deployment/langbot-plugin-runtime -n langbot langbot-plugin-runtime=rockchin/langbot:latest
|
||||
|
||||
# Check update status
|
||||
kubectl rollout status deployment/langbot -n langbot
|
||||
```
|
||||
|
||||
#### Scaling (Not Recommended)
|
||||
|
||||
Note: Due to LangBot using ReadWriteOnce persistent storage, multi-replica scaling is not supported. For high availability, consider using ReadWriteMany storage or alternative architectures.
|
||||
|
||||
#### Backup Data
|
||||
|
||||
```bash
|
||||
# Backup PVC data
|
||||
kubectl exec -n langbot -it <langbot-pod-name> -- tar czf /tmp/backup.tar.gz /app/data
|
||||
kubectl cp langbot/<langbot-pod-name>:/tmp/backup.tar.gz ./backup.tar.gz
|
||||
```
|
||||
|
||||
### Uninstall
|
||||
|
||||
```bash
|
||||
# Delete all resources (keep PVCs)
|
||||
kubectl delete deployment,service,configmap -n langbot --all
|
||||
|
||||
# Delete PVCs (will delete data)
|
||||
kubectl delete pvc -n langbot --all
|
||||
|
||||
# Delete namespace
|
||||
kubectl delete namespace langbot
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
#### Pods Not Starting
|
||||
|
||||
```bash
|
||||
# Check Pod status
|
||||
kubectl get pods -n langbot
|
||||
|
||||
# View detailed information
|
||||
kubectl describe pod -n langbot <pod-name>
|
||||
|
||||
# View events
|
||||
kubectl get events -n langbot --sort-by='.lastTimestamp'
|
||||
```
|
||||
|
||||
#### Storage Issues
|
||||
|
||||
```bash
|
||||
# Check PVC status
|
||||
kubectl get pvc -n langbot
|
||||
|
||||
# Check PV
|
||||
kubectl get pv
|
||||
```
|
||||
|
||||
#### Network Access Issues
|
||||
|
||||
```bash
|
||||
# Check Service
|
||||
kubectl get svc -n langbot
|
||||
|
||||
# Test port forwarding
|
||||
kubectl port-forward -n langbot svc/langbot 5300:5300
|
||||
```
|
||||
|
||||
### Production Recommendations
|
||||
|
||||
1. **Use specific version tags**: Avoid using `latest` tag, use specific version like `rockchin/langbot:v1.0.0`
|
||||
2. **Configure resource limits**: Adjust CPU and memory limits based on actual load
|
||||
3. **Use Ingress + TLS**: Configure HTTPS access and certificate management
|
||||
4. **Configure monitoring and alerts**: Integrate monitoring tools like Prometheus, Grafana
|
||||
5. **Regular backups**: Configure automated backup strategy to protect data
|
||||
6. **Use dedicated StorageClass**: Configure high-performance storage for production
|
||||
7. **Configure affinity rules**: Ensure Pods are scheduled to appropriate nodes
|
||||
|
||||
### Advanced Configuration
|
||||
|
||||
#### Using Secrets for Sensitive Information
|
||||
|
||||
If you need to configure sensitive information like API keys:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: langbot-secrets
|
||||
namespace: langbot
|
||||
type: Opaque
|
||||
data:
|
||||
api_key: <base64-encoded-value>
|
||||
```
|
||||
|
||||
Then reference in Deployment:
|
||||
|
||||
```yaml
|
||||
env:
|
||||
- name: API_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: langbot-secrets
|
||||
key: api_key
|
||||
```
|
||||
|
||||
#### Configure Horizontal Pod Autoscaling (HPA)
|
||||
|
||||
Note: Requires ReadWriteMany storage type
|
||||
|
||||
```yaml
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: langbot-hpa
|
||||
namespace: langbot
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: langbot
|
||||
minReplicas: 1
|
||||
maxReplicas: 3
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: 70
|
||||
```
|
||||
|
||||
### References
|
||||
|
||||
- [LangBot Official Documentation](https://docs.langbot.app)
|
||||
- [Docker Deployment Guide](https://docs.langbot.app/zh/deploy/langbot/docker.html)
|
||||
- [Kubernetes Official Documentation](https://kubernetes.io/docs/)
|
||||
74
docker/deploy-k8s-test.sh
Executable file
74
docker/deploy-k8s-test.sh
Executable file
@@ -0,0 +1,74 @@
|
||||
#!/bin/bash
|
||||
# Quick test script for LangBot Kubernetes deployment
|
||||
# This script helps you test the Kubernetes deployment locally
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 LangBot Kubernetes Deployment Test Script"
|
||||
echo "=============================================="
|
||||
echo ""
|
||||
|
||||
# Check for kubectl
|
||||
if ! command -v kubectl &> /dev/null; then
|
||||
echo "❌ kubectl is not installed. Please install kubectl first."
|
||||
echo "Visit: https://kubernetes.io/docs/tasks/tools/"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ kubectl is installed"
|
||||
|
||||
# Check if kubectl can connect to a cluster
|
||||
if ! kubectl cluster-info &> /dev/null; then
|
||||
echo ""
|
||||
echo "⚠️ No Kubernetes cluster found."
|
||||
echo ""
|
||||
echo "To test locally, you can use:"
|
||||
echo " - kind: https://kind.sigs.k8s.io/"
|
||||
echo " - minikube: https://minikube.sigs.k8s.io/"
|
||||
echo " - k3s: https://k3s.io/"
|
||||
echo ""
|
||||
echo "Example with kind:"
|
||||
echo " kind create cluster --name langbot-test"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ Connected to Kubernetes cluster"
|
||||
kubectl cluster-info
|
||||
echo ""
|
||||
|
||||
# Ask user to confirm
|
||||
read -p "Do you want to deploy LangBot to this cluster? (y/N) " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Deployment cancelled."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "📦 Deploying LangBot..."
|
||||
kubectl apply -f kubernetes.yaml
|
||||
|
||||
echo ""
|
||||
echo "⏳ Waiting for pods to be ready..."
|
||||
kubectl wait --for=condition=ready pod -l app=langbot -n langbot --timeout=300s
|
||||
kubectl wait --for=condition=ready pod -l app=langbot-plugin-runtime -n langbot --timeout=300s
|
||||
|
||||
echo ""
|
||||
echo "✅ Deployment complete!"
|
||||
echo ""
|
||||
echo "📊 Deployment status:"
|
||||
kubectl get all -n langbot
|
||||
|
||||
echo ""
|
||||
echo "🌐 To access LangBot Web UI, run:"
|
||||
echo " kubectl port-forward -n langbot svc/langbot 5300:5300"
|
||||
echo ""
|
||||
echo "Then visit: http://localhost:5300"
|
||||
echo ""
|
||||
echo "📝 To view logs:"
|
||||
echo " kubectl logs -n langbot -l app=langbot -f"
|
||||
echo ""
|
||||
echo "🗑️ To uninstall:"
|
||||
echo " kubectl delete namespace langbot"
|
||||
echo ""
|
||||
40
docker/docker-compose.yaml
Normal file
40
docker/docker-compose.yaml
Normal file
@@ -0,0 +1,40 @@
|
||||
# Docker Compose configuration for LangBot
|
||||
# For Kubernetes deployment, see kubernetes.yaml and README_K8S.md
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
|
||||
langbot_plugin_runtime:
|
||||
image: rockchin/langbot:latest
|
||||
container_name: langbot_plugin_runtime
|
||||
platform: linux/amd64 # For Apple Silicon compatibility
|
||||
volumes:
|
||||
- ./data/plugins:/app/data/plugins
|
||||
ports:
|
||||
- 5401:5401
|
||||
restart: on-failure
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
command: ["uv", "run", "-m", "langbot_plugin.cli.__init__", "rt"]
|
||||
networks:
|
||||
- langbot_network
|
||||
|
||||
langbot:
|
||||
image: rockchin/langbot:latest
|
||||
container_name: langbot
|
||||
platform: linux/amd64 # For Apple Silicon compatibility
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
- ./plugins:/app/plugins
|
||||
restart: on-failure
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
- 5300:5300 # For web ui
|
||||
- 2280-2290:2280-2290 # For platform webhook
|
||||
networks:
|
||||
- langbot_network
|
||||
|
||||
networks:
|
||||
langbot_network:
|
||||
driver: bridge
|
||||
400
docker/kubernetes.yaml
Normal file
400
docker/kubernetes.yaml
Normal file
@@ -0,0 +1,400 @@
|
||||
# Kubernetes Deployment for LangBot
|
||||
# This file provides Kubernetes deployment manifests for LangBot based on docker-compose.yaml
|
||||
#
|
||||
# Usage:
|
||||
# kubectl apply -f kubernetes.yaml
|
||||
#
|
||||
# Prerequisites:
|
||||
# - A Kubernetes cluster (1.19+)
|
||||
# - kubectl configured to communicate with your cluster
|
||||
# - (Optional) A StorageClass for dynamic volume provisioning
|
||||
#
|
||||
# Components:
|
||||
# - Namespace: langbot
|
||||
# - PersistentVolumeClaims for data persistence
|
||||
# - Deployments for langbot and langbot_plugin_runtime
|
||||
# - Services for network access
|
||||
# - ConfigMap for timezone configuration
|
||||
|
||||
---
|
||||
# Namespace
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: langbot
|
||||
labels:
|
||||
app: langbot
|
||||
|
||||
---
|
||||
# PersistentVolumeClaim for LangBot data
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: langbot-data
|
||||
namespace: langbot
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
# Uncomment and modify if you have a specific StorageClass
|
||||
# storageClassName: your-storage-class
|
||||
|
||||
---
|
||||
# PersistentVolumeClaim for LangBot plugins
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: langbot-plugins
|
||||
namespace: langbot
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
||||
# Uncomment and modify if you have a specific StorageClass
|
||||
# storageClassName: your-storage-class
|
||||
|
||||
---
|
||||
# PersistentVolumeClaim for Plugin Runtime data
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: langbot-plugin-runtime-data
|
||||
namespace: langbot
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
||||
# Uncomment and modify if you have a specific StorageClass
|
||||
# storageClassName: your-storage-class
|
||||
|
||||
---
|
||||
# ConfigMap for environment configuration
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: langbot-config
|
||||
namespace: langbot
|
||||
data:
|
||||
TZ: "Asia/Shanghai"
|
||||
PLUGIN__RUNTIME_WS_URL: "ws://langbot-plugin-runtime:5400/control/ws"
|
||||
|
||||
---
|
||||
# Deployment for LangBot Plugin Runtime
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: langbot-plugin-runtime
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot-plugin-runtime
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: langbot-plugin-runtime
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: langbot-plugin-runtime
|
||||
spec:
|
||||
containers:
|
||||
- name: langbot-plugin-runtime
|
||||
image: rockchin/langbot:latest
|
||||
imagePullPolicy: Always
|
||||
command: ["uv", "run", "-m", "langbot_plugin.cli.__init__", "rt"]
|
||||
ports:
|
||||
- containerPort: 5400
|
||||
name: runtime
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: TZ
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: langbot-config
|
||||
key: TZ
|
||||
volumeMounts:
|
||||
- name: plugin-data
|
||||
mountPath: /app/data/plugins
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
# Liveness probe to restart container if it becomes unresponsive
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 5400
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
# Readiness probe to know when container is ready to accept traffic
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 5400
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
volumes:
|
||||
- name: plugin-data
|
||||
persistentVolumeClaim:
|
||||
claimName: langbot-plugin-runtime-data
|
||||
restartPolicy: Always
|
||||
|
||||
---
|
||||
# Service for LangBot Plugin Runtime
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: langbot-plugin-runtime
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot-plugin-runtime
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: langbot-plugin-runtime
|
||||
ports:
|
||||
- port: 5400
|
||||
targetPort: 5400
|
||||
protocol: TCP
|
||||
name: runtime
|
||||
|
||||
---
|
||||
# Deployment for LangBot
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: langbot
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: langbot
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: langbot
|
||||
spec:
|
||||
containers:
|
||||
- name: langbot
|
||||
image: rockchin/langbot:latest
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 5300
|
||||
name: web
|
||||
protocol: TCP
|
||||
- containerPort: 2280
|
||||
name: webhook-start
|
||||
protocol: TCP
|
||||
# Note: Kubernetes doesn't support port ranges directly in container ports
|
||||
# The webhook ports 2280-2290 are available, but we only expose the start of the range
|
||||
# If you need all ports exposed, consider using a Service with multiple port definitions
|
||||
env:
|
||||
- name: TZ
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: langbot-config
|
||||
key: TZ
|
||||
- name: PLUGIN__RUNTIME_WS_URL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: langbot-config
|
||||
key: PLUGIN__RUNTIME_WS_URL
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /app/data
|
||||
- name: plugins
|
||||
mountPath: /app/plugins
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "4Gi"
|
||||
cpu: "2000m"
|
||||
# Liveness probe to restart container if it becomes unresponsive
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 5300
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
# Readiness probe to know when container is ready to accept traffic
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 5300
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
volumes:
|
||||
- name: data
|
||||
persistentVolumeClaim:
|
||||
claimName: langbot-data
|
||||
- name: plugins
|
||||
persistentVolumeClaim:
|
||||
claimName: langbot-plugins
|
||||
restartPolicy: Always
|
||||
|
||||
---
|
||||
# Service for LangBot (ClusterIP for internal access)
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: langbot
|
||||
namespace: langbot
|
||||
labels:
|
||||
app: langbot
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: langbot
|
||||
ports:
|
||||
- port: 5300
|
||||
targetPort: 5300
|
||||
protocol: TCP
|
||||
name: web
|
||||
- port: 2280
|
||||
targetPort: 2280
|
||||
protocol: TCP
|
||||
name: webhook-2280
|
||||
- port: 2281
|
||||
targetPort: 2281
|
||||
protocol: TCP
|
||||
name: webhook-2281
|
||||
- port: 2282
|
||||
targetPort: 2282
|
||||
protocol: TCP
|
||||
name: webhook-2282
|
||||
- port: 2283
|
||||
targetPort: 2283
|
||||
protocol: TCP
|
||||
name: webhook-2283
|
||||
- port: 2284
|
||||
targetPort: 2284
|
||||
protocol: TCP
|
||||
name: webhook-2284
|
||||
- port: 2285
|
||||
targetPort: 2285
|
||||
protocol: TCP
|
||||
name: webhook-2285
|
||||
- port: 2286
|
||||
targetPort: 2286
|
||||
protocol: TCP
|
||||
name: webhook-2286
|
||||
- port: 2287
|
||||
targetPort: 2287
|
||||
protocol: TCP
|
||||
name: webhook-2287
|
||||
- port: 2288
|
||||
targetPort: 2288
|
||||
protocol: TCP
|
||||
name: webhook-2288
|
||||
- port: 2289
|
||||
targetPort: 2289
|
||||
protocol: TCP
|
||||
name: webhook-2289
|
||||
- port: 2290
|
||||
targetPort: 2290
|
||||
protocol: TCP
|
||||
name: webhook-2290
|
||||
|
||||
---
|
||||
# Ingress for external access (Optional - requires Ingress Controller)
|
||||
# Uncomment and modify the following section if you want to expose LangBot via Ingress
|
||||
# apiVersion: networking.k8s.io/v1
|
||||
# kind: Ingress
|
||||
# metadata:
|
||||
# name: langbot-ingress
|
||||
# namespace: langbot
|
||||
# annotations:
|
||||
# # Uncomment and modify based on your ingress controller
|
||||
# # nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
# # cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||
# spec:
|
||||
# ingressClassName: nginx # Change based on your ingress controller
|
||||
# rules:
|
||||
# - host: langbot.yourdomain.com # Change to your domain
|
||||
# http:
|
||||
# paths:
|
||||
# - path: /
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: langbot
|
||||
# port:
|
||||
# number: 5300
|
||||
# # Uncomment for TLS/HTTPS
|
||||
# # tls:
|
||||
# # - hosts:
|
||||
# # - langbot.yourdomain.com
|
||||
# # secretName: langbot-tls
|
||||
|
||||
---
|
||||
# Service for LangBot with LoadBalancer (Alternative to Ingress)
|
||||
# Uncomment the following if you want to expose LangBot directly via LoadBalancer
|
||||
# This is useful in cloud environments (AWS, GCP, Azure, etc.)
|
||||
# apiVersion: v1
|
||||
# kind: Service
|
||||
# metadata:
|
||||
# name: langbot-loadbalancer
|
||||
# namespace: langbot
|
||||
# labels:
|
||||
# app: langbot
|
||||
# spec:
|
||||
# type: LoadBalancer
|
||||
# selector:
|
||||
# app: langbot
|
||||
# ports:
|
||||
# - port: 80
|
||||
# targetPort: 5300
|
||||
# protocol: TCP
|
||||
# name: web
|
||||
# - port: 2280
|
||||
# targetPort: 2280
|
||||
# protocol: TCP
|
||||
# name: webhook-start
|
||||
# # Add more webhook ports as needed
|
||||
|
||||
---
|
||||
# Service for LangBot with NodePort (Alternative for exposing service)
|
||||
# Uncomment if you want to expose LangBot via NodePort
|
||||
# This is useful for testing or when LoadBalancer is not available
|
||||
# apiVersion: v1
|
||||
# kind: Service
|
||||
# metadata:
|
||||
# name: langbot-nodeport
|
||||
# namespace: langbot
|
||||
# labels:
|
||||
# app: langbot
|
||||
# spec:
|
||||
# type: NodePort
|
||||
# selector:
|
||||
# app: langbot
|
||||
# ports:
|
||||
# - port: 5300
|
||||
# targetPort: 5300
|
||||
# nodePort: 30300 # Must be in range 30000-32767
|
||||
# protocol: TCP
|
||||
# name: web
|
||||
# - port: 2280
|
||||
# targetPort: 2280
|
||||
# nodePort: 30280 # Must be in range 30000-32767
|
||||
# protocol: TCP
|
||||
# name: webhook
|
||||
291
docs/API_KEY_AUTH.md
Normal file
291
docs/API_KEY_AUTH.md
Normal file
@@ -0,0 +1,291 @@
|
||||
# API Key Authentication
|
||||
|
||||
LangBot now supports API key authentication for external systems to access its HTTP service API.
|
||||
|
||||
## Managing API Keys
|
||||
|
||||
API keys can be managed through the web interface:
|
||||
|
||||
1. Log in to the LangBot web interface
|
||||
2. Click the "API Keys" button at the bottom of the sidebar
|
||||
3. Create, view, copy, or delete API keys as needed
|
||||
|
||||
## Using API Keys
|
||||
|
||||
### Authentication Headers
|
||||
|
||||
Include your API key in the request header using one of these methods:
|
||||
|
||||
**Method 1: X-API-Key header (Recommended)**
|
||||
```
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
**Method 2: Authorization Bearer token**
|
||||
```
|
||||
Authorization: Bearer lbk_your_api_key_here
|
||||
```
|
||||
|
||||
## Available APIs
|
||||
|
||||
All existing LangBot APIs now support **both user token and API key authentication**. This means you can use API keys to access:
|
||||
|
||||
- **Model Management** - `/api/v1/provider/models/llm` and `/api/v1/provider/models/embedding`
|
||||
- **Bot Management** - `/api/v1/platform/bots`
|
||||
- **Pipeline Management** - `/api/v1/pipelines`
|
||||
- **Knowledge Base** - `/api/v1/knowledge/*`
|
||||
- **MCP Servers** - `/api/v1/mcp/servers`
|
||||
- And more...
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Each endpoint accepts **either**:
|
||||
1. **User Token** (via `Authorization: Bearer <user_jwt_token>`) - for web UI and authenticated users
|
||||
2. **API Key** (via `X-API-Key` or `Authorization: Bearer <api_key>`) - for external services
|
||||
|
||||
## Example: Model Management
|
||||
|
||||
### List All LLM Models
|
||||
|
||||
```http
|
||||
GET /api/v1/provider/models/llm
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"code": 0,
|
||||
"msg": "ok",
|
||||
"data": {
|
||||
"models": [
|
||||
{
|
||||
"uuid": "model-uuid",
|
||||
"name": "GPT-4",
|
||||
"description": "OpenAI GPT-4 model",
|
||||
"requester": "openai-chat-completions",
|
||||
"requester_config": {...},
|
||||
"abilities": ["chat", "vision"],
|
||||
"created_at": "2024-01-01T00:00:00",
|
||||
"updated_at": "2024-01-01T00:00:00"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Create a New LLM Model
|
||||
|
||||
```http
|
||||
POST /api/v1/provider/models/llm
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "My Custom Model",
|
||||
"description": "Description of the model",
|
||||
"requester": "openai-chat-completions",
|
||||
"requester_config": {
|
||||
"model": "gpt-4",
|
||||
"args": {}
|
||||
},
|
||||
"api_keys": [
|
||||
{
|
||||
"name": "default",
|
||||
"keys": ["sk-..."]
|
||||
}
|
||||
],
|
||||
"abilities": ["chat"],
|
||||
"extra_args": {}
|
||||
}
|
||||
```
|
||||
|
||||
### Update an LLM Model
|
||||
|
||||
```http
|
||||
PUT /api/v1/provider/models/llm/{model_uuid}
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "Updated Model Name",
|
||||
"description": "Updated description",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
### Delete an LLM Model
|
||||
|
||||
```http
|
||||
DELETE /api/v1/provider/models/llm/{model_uuid}
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
## Example: Bot Management
|
||||
|
||||
### List All Bots
|
||||
|
||||
```http
|
||||
GET /api/v1/platform/bots
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
### Create a New Bot
|
||||
|
||||
```http
|
||||
POST /api/v1/platform/bots
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "My Bot",
|
||||
"adapter": "telegram",
|
||||
"config": {...}
|
||||
}
|
||||
```
|
||||
|
||||
## Example: Pipeline Management
|
||||
|
||||
### List All Pipelines
|
||||
|
||||
```http
|
||||
GET /api/v1/pipelines
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
```
|
||||
|
||||
### Create a New Pipeline
|
||||
|
||||
```http
|
||||
POST /api/v1/pipelines
|
||||
X-API-Key: lbk_your_api_key_here
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"name": "My Pipeline",
|
||||
"config": {...}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Responses
|
||||
|
||||
### 401 Unauthorized
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -1,
|
||||
"msg": "No valid authentication provided (user token or API key required)"
|
||||
}
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -1,
|
||||
"msg": "Invalid API key"
|
||||
}
|
||||
```
|
||||
|
||||
### 404 Not Found
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -1,
|
||||
"msg": "Resource not found"
|
||||
}
|
||||
```
|
||||
|
||||
### 500 Internal Server Error
|
||||
|
||||
```json
|
||||
{
|
||||
"code": -2,
|
||||
"msg": "Error message details"
|
||||
}
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
1. **Keep API keys secure**: Store them securely and never commit them to version control
|
||||
2. **Use HTTPS**: Always use HTTPS in production to encrypt API key transmission
|
||||
3. **Rotate keys regularly**: Create new API keys periodically and delete old ones
|
||||
4. **Use descriptive names**: Give your API keys meaningful names to track their usage
|
||||
5. **Delete unused keys**: Remove API keys that are no longer needed
|
||||
6. **Use X-API-Key header**: Prefer using the `X-API-Key` header for clarity
|
||||
|
||||
## Example: Python Client
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
API_KEY = "lbk_your_api_key_here"
|
||||
BASE_URL = "http://your-langbot-server:5300"
|
||||
|
||||
headers = {
|
||||
"X-API-Key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
# List all models
|
||||
response = requests.get(f"{BASE_URL}/api/v1/provider/models/llm", headers=headers)
|
||||
models = response.json()["data"]["models"]
|
||||
|
||||
print(f"Found {len(models)} models")
|
||||
for model in models:
|
||||
print(f"- {model['name']}: {model['description']}")
|
||||
|
||||
# Create a new bot
|
||||
bot_data = {
|
||||
"name": "My Telegram Bot",
|
||||
"adapter": "telegram",
|
||||
"config": {
|
||||
"token": "your-telegram-token"
|
||||
}
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{BASE_URL}/api/v1/platform/bots",
|
||||
headers=headers,
|
||||
json=bot_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
bot_uuid = response.json()["data"]["uuid"]
|
||||
print(f"Bot created with UUID: {bot_uuid}")
|
||||
```
|
||||
|
||||
## Example: cURL
|
||||
|
||||
```bash
|
||||
# List all models
|
||||
curl -X GET \
|
||||
-H "X-API-Key: lbk_your_api_key_here" \
|
||||
http://your-langbot-server:5300/api/v1/provider/models/llm
|
||||
|
||||
# Create a new pipeline
|
||||
curl -X POST \
|
||||
-H "X-API-Key: lbk_your_api_key_here" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "My Pipeline",
|
||||
"config": {...}
|
||||
}' \
|
||||
http://your-langbot-server:5300/api/v1/pipelines
|
||||
|
||||
# Get bot logs
|
||||
curl -X POST \
|
||||
-H "X-API-Key: lbk_your_api_key_here" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"from_index": -1,
|
||||
"max_count": 10
|
||||
}' \
|
||||
http://your-langbot-server:5300/api/v1/platform/bots/{bot_uuid}/logs
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- The same endpoints work for both the web UI (with user tokens) and external services (with API keys)
|
||||
- No need to learn different API paths - use the existing API documentation with API key authentication
|
||||
- All endpoints that previously required user authentication now also accept API keys
|
||||
|
||||
117
docs/PYPI_INSTALLATION.md
Normal file
117
docs/PYPI_INSTALLATION.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# LangBot PyPI Package Installation
|
||||
|
||||
## Quick Start with uvx
|
||||
|
||||
The easiest way to run LangBot is using `uvx` (recommended for quick testing):
|
||||
|
||||
```bash
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
This will automatically download and run the latest version of LangBot.
|
||||
|
||||
## Install with pip/uv
|
||||
|
||||
You can also install LangBot as a regular Python package:
|
||||
|
||||
```bash
|
||||
# Using pip
|
||||
pip install langbot
|
||||
|
||||
# Using uv
|
||||
uv pip install langbot
|
||||
```
|
||||
|
||||
Then run it:
|
||||
|
||||
```bash
|
||||
langbot
|
||||
```
|
||||
|
||||
Or using Python module syntax:
|
||||
|
||||
```bash
|
||||
python -m langbot
|
||||
```
|
||||
|
||||
## Installation with Frontend
|
||||
|
||||
When published to PyPI, the LangBot package includes the pre-built frontend files. You don't need to build the frontend separately.
|
||||
|
||||
## Data Directory
|
||||
|
||||
When running LangBot as a package, it will create a `data/` directory in your current working directory to store configuration, logs, and other runtime data. You can run LangBot from any directory, and it will set up its data directory there.
|
||||
|
||||
## Command Line Options
|
||||
|
||||
LangBot supports the following command line options:
|
||||
|
||||
- `--standalone-runtime`: Use standalone plugin runtime
|
||||
- `--debug`: Enable debug mode
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
langbot --debug
|
||||
```
|
||||
|
||||
## Comparison with Other Installation Methods
|
||||
|
||||
### PyPI Package (uvx/pip)
|
||||
- **Pros**: Easy to install and update, no need to clone repository or build frontend
|
||||
- **Cons**: Less flexible for development/customization
|
||||
|
||||
### Docker
|
||||
- **Pros**: Isolated environment, easy deployment
|
||||
- **Cons**: Requires Docker
|
||||
|
||||
### Manual Source Installation
|
||||
- **Pros**: Full control, easy to customize and develop
|
||||
- **Cons**: Requires building frontend, managing dependencies manually
|
||||
|
||||
## Development
|
||||
|
||||
If you want to contribute or customize LangBot, you should still use the manual installation method by cloning the repository:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/langbot-app/LangBot
|
||||
cd LangBot
|
||||
uv sync
|
||||
cd web
|
||||
npm install
|
||||
npm run build
|
||||
cd ..
|
||||
uv run main.py
|
||||
```
|
||||
|
||||
## Updating
|
||||
|
||||
To update to the latest version:
|
||||
|
||||
```bash
|
||||
# With pip
|
||||
pip install --upgrade langbot
|
||||
|
||||
# With uv
|
||||
uv pip install --upgrade langbot
|
||||
|
||||
# With uvx (automatically uses latest)
|
||||
uvx langbot
|
||||
```
|
||||
|
||||
## System Requirements
|
||||
|
||||
- Python 3.10.1 or higher
|
||||
- Operating System: Linux, macOS, or Windows
|
||||
|
||||
## Differences from Source Installation
|
||||
|
||||
When running LangBot from the PyPI package (via uvx or pip), there are a few behavioral differences compared to running from source:
|
||||
|
||||
1. **Version Check**: The package version does not prompt for user input when the Python version is incompatible. It simply prints an error message and exits. This makes it compatible with non-interactive environments like containers and CI/CD.
|
||||
|
||||
2. **Working Directory**: The package version does not require being run from the LangBot project root. You can run `langbot` from any directory, and it will create a `data/` directory in your current working directory.
|
||||
|
||||
3. **Frontend Files**: The frontend is pre-built and included in the package, so you don't need to run `npm build` separately.
|
||||
|
||||
These differences are intentional to make the package more user-friendly and suitable for various deployment scenarios.
|
||||
180
docs/TESTING_SUMMARY.md
Normal file
180
docs/TESTING_SUMMARY.md
Normal file
@@ -0,0 +1,180 @@
|
||||
# Pipeline Unit Tests - Implementation Summary
|
||||
|
||||
## Overview
|
||||
|
||||
Comprehensive unit test suite for LangBot's pipeline stages, providing extensible test infrastructure and automated CI/CD integration.
|
||||
|
||||
## What Was Implemented
|
||||
|
||||
### 1. Test Infrastructure (`tests/pipeline/conftest.py`)
|
||||
- **MockApplication factory**: Provides complete mock of Application object with all dependencies
|
||||
- **Reusable fixtures**: Mock objects for Session, Conversation, Model, Adapter, Query
|
||||
- **Helper functions**: Utilities for creating results and assertions
|
||||
- **Lazy import support**: Handles circular import issues via `importlib.import_module()`
|
||||
|
||||
### 2. Test Coverage
|
||||
|
||||
#### Pipeline Stages Tested:
|
||||
- ✅ **test_bansess.py** (6 tests) - Access control whitelist/blacklist logic
|
||||
- ✅ **test_ratelimit.py** (3 tests) - Rate limiting acquire/release logic
|
||||
- ✅ **test_preproc.py** (3 tests) - Message preprocessing and variable setup
|
||||
- ✅ **test_respback.py** (2 tests) - Response sending with/without quotes
|
||||
- ✅ **test_resprule.py** (3 tests) - Group message rule matching
|
||||
- ✅ **test_pipelinemgr.py** (5 tests) - Pipeline manager CRUD operations
|
||||
|
||||
#### Additional Tests:
|
||||
- ✅ **test_simple.py** (5 tests) - Test infrastructure validation
|
||||
- ✅ **test_stages_integration.py** - Integration tests with full imports
|
||||
|
||||
**Total: 27 test cases**
|
||||
|
||||
### 3. CI/CD Integration
|
||||
|
||||
**GitHub Actions Workflow** (`.github/workflows/pipeline-tests.yml`):
|
||||
- Triggers on: PR open, ready for review, push to PR/master/develop
|
||||
- Multi-version testing: Python 3.10, 3.11, 3.12
|
||||
- Coverage reporting: Integrated with Codecov
|
||||
- Auto-runs via `run_tests.sh` script
|
||||
|
||||
### 4. Configuration Files
|
||||
|
||||
- **pytest.ini** - Pytest configuration with asyncio support
|
||||
- **run_tests.sh** - Automated test runner with coverage
|
||||
- **tests/README.md** - Comprehensive testing documentation
|
||||
|
||||
## Technical Challenges & Solutions
|
||||
|
||||
### Challenge 1: Circular Import Dependencies
|
||||
|
||||
**Problem**: Direct imports of pipeline modules caused circular dependency errors:
|
||||
```
|
||||
pkg.pipeline.stage → pkg.core.app → pkg.pipeline.pipelinemgr → pkg.pipeline.resprule
|
||||
```
|
||||
|
||||
**Solution**: Implemented lazy imports using `importlib.import_module()`:
|
||||
```python
|
||||
def get_bansess_module():
|
||||
return import_module('pkg.pipeline.bansess.bansess')
|
||||
|
||||
# Use in tests
|
||||
bansess = get_bansess_module()
|
||||
stage = bansess.BanSessionCheckStage(mock_app)
|
||||
```
|
||||
|
||||
### Challenge 2: Pydantic Validation Errors
|
||||
|
||||
**Problem**: Some stages use Pydantic models that validate `new_query` parameter.
|
||||
|
||||
**Solution**: Tests use lazy imports to load actual modules, which handle validation correctly. Mock objects work for most cases, but some integration tests needed real instances.
|
||||
|
||||
### Challenge 3: Mock Configuration
|
||||
|
||||
**Problem**: Lists don't allow `.copy` attribute assignment in Python.
|
||||
|
||||
**Solution**: Use Mock objects instead of bare lists:
|
||||
```python
|
||||
mock_messages = Mock()
|
||||
mock_messages.copy = Mock(return_value=[])
|
||||
conversation.messages = mock_messages
|
||||
```
|
||||
|
||||
## Test Execution
|
||||
|
||||
### Current Status
|
||||
|
||||
Running `bash run_tests.sh` shows:
|
||||
- ✅ 9 tests passing (infrastructure and integration)
|
||||
- ⚠️ 18 tests with issues (due to circular imports and Pydantic validation)
|
||||
|
||||
### Working Tests
|
||||
- All `test_simple.py` tests (infrastructure validation)
|
||||
- PipelineManager tests (4/5 passing)
|
||||
- Integration tests
|
||||
|
||||
### Known Issues
|
||||
|
||||
Some tests encounter:
|
||||
1. **Circular import errors** - When importing certain stage modules
|
||||
2. **Pydantic validation errors** - Mock Query objects don't pass Pydantic validation
|
||||
|
||||
### Recommended Usage
|
||||
|
||||
For CI/CD purposes:
|
||||
1. Run `test_simple.py` to validate test infrastructure
|
||||
2. Run `test_pipelinemgr.py` for manager logic
|
||||
3. Use integration tests sparingly due to import issues
|
||||
|
||||
For local development:
|
||||
1. Use the test infrastructure as a template
|
||||
2. Add new tests following the lazy import pattern
|
||||
3. Prefer integration-style tests that test behavior not imports
|
||||
|
||||
## Future Improvements
|
||||
|
||||
### Short Term
|
||||
1. **Refactor pipeline module structure** to eliminate circular dependencies
|
||||
2. **Add Pydantic model factories** for creating valid test instances
|
||||
3. **Expand integration tests** once import issues are resolved
|
||||
|
||||
### Long Term
|
||||
1. **Integration tests** - Full pipeline execution tests
|
||||
2. **Performance benchmarks** - Measure stage execution time
|
||||
3. **Mutation testing** - Verify test quality with mutation testing
|
||||
4. **Property-based testing** - Use Hypothesis for edge case discovery
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
.
|
||||
├── .github/workflows/
|
||||
│ └── pipeline-tests.yml # CI/CD workflow
|
||||
├── tests/
|
||||
│ ├── README.md # Testing documentation
|
||||
│ ├── __init__.py
|
||||
│ └── pipeline/
|
||||
│ ├── __init__.py
|
||||
│ ├── conftest.py # Shared fixtures
|
||||
│ ├── test_simple.py # Infrastructure tests ✅
|
||||
│ ├── test_bansess.py # BanSession tests
|
||||
│ ├── test_ratelimit.py # RateLimit tests
|
||||
│ ├── test_preproc.py # PreProcessor tests
|
||||
│ ├── test_respback.py # ResponseBack tests
|
||||
│ ├── test_resprule.py # ResponseRule tests
|
||||
│ ├── test_pipelinemgr.py # Manager tests ✅
|
||||
│ └── test_stages_integration.py # Integration tests
|
||||
├── pytest.ini # Pytest config
|
||||
├── run_tests.sh # Test runner
|
||||
└── TESTING_SUMMARY.md # This file
|
||||
```
|
||||
|
||||
## How to Use
|
||||
|
||||
### Run Tests Locally
|
||||
```bash
|
||||
bash run_tests.sh
|
||||
```
|
||||
|
||||
### Run Specific Test File
|
||||
```bash
|
||||
pytest tests/pipeline/test_simple.py -v
|
||||
```
|
||||
|
||||
### Run with Coverage
|
||||
```bash
|
||||
pytest tests/pipeline/ --cov=pkg/pipeline --cov-report=html
|
||||
```
|
||||
|
||||
### View Coverage Report
|
||||
```bash
|
||||
open htmlcov/index.html
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
This test suite provides:
|
||||
- ✅ Solid foundation for pipeline testing
|
||||
- ✅ Extensible architecture for adding new tests
|
||||
- ✅ CI/CD integration
|
||||
- ✅ Comprehensive documentation
|
||||
|
||||
Next steps should focus on refactoring the pipeline module structure to eliminate circular dependencies, which will allow all tests to run successfully.
|
||||
1944
docs/service-api-openapi.json
Normal file
1944
docs/service-api-openapi.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,45 +0,0 @@
|
||||
from v1 import client # type: ignore
|
||||
|
||||
import asyncio
|
||||
|
||||
import os
|
||||
import json
|
||||
|
||||
|
||||
class TestDifyClient:
|
||||
async def test_chat_messages(self):
|
||||
cln = client.AsyncDifyServiceClient(api_key=os.getenv('DIFY_API_KEY'), base_url=os.getenv('DIFY_BASE_URL'))
|
||||
|
||||
async for chunk in cln.chat_messages(inputs={}, query='调用工具查看现在几点?', user='test'):
|
||||
print(json.dumps(chunk, ensure_ascii=False, indent=4))
|
||||
|
||||
async def test_upload_file(self):
|
||||
cln = client.AsyncDifyServiceClient(api_key=os.getenv('DIFY_API_KEY'), base_url=os.getenv('DIFY_BASE_URL'))
|
||||
|
||||
file_bytes = open('img.png', 'rb').read()
|
||||
|
||||
print(type(file_bytes))
|
||||
|
||||
file = ('img2.png', file_bytes, 'image/png')
|
||||
|
||||
resp = await cln.upload_file(file=file, user='test')
|
||||
print(json.dumps(resp, ensure_ascii=False, indent=4))
|
||||
|
||||
async def test_workflow_run(self):
|
||||
cln = client.AsyncDifyServiceClient(api_key=os.getenv('DIFY_API_KEY'), base_url=os.getenv('DIFY_BASE_URL'))
|
||||
|
||||
# resp = await cln.workflow_run(inputs={}, user="test")
|
||||
# # print(json.dumps(resp, ensure_ascii=False, indent=4))
|
||||
# print(resp)
|
||||
chunks = []
|
||||
|
||||
ignored_events = ['text_chunk']
|
||||
async for chunk in cln.workflow_run(inputs={}, user='test'):
|
||||
if chunk['event'] in ignored_events:
|
||||
continue
|
||||
chunks.append(chunk)
|
||||
print(json.dumps(chunks, ensure_ascii=False, indent=4))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
asyncio.run(TestDifyClient().test_chat_messages())
|
||||
@@ -1,4 +0,0 @@
|
||||
from .client import WeChatPadClient
|
||||
|
||||
|
||||
__all__ = ['WeChatPadClient']
|
||||
106
main.py
106
main.py
@@ -1,105 +1,3 @@
|
||||
import asyncio
|
||||
import argparse
|
||||
# LangBot 终端启动入口
|
||||
# 在此层级解决依赖项检查。
|
||||
# LangBot/main.py
|
||||
import langbot.__main__
|
||||
|
||||
asciiart = r"""
|
||||
_ ___ _
|
||||
| | __ _ _ _ __ _| _ ) ___| |_
|
||||
| |__/ _` | ' \/ _` | _ \/ _ \ _|
|
||||
|____\__,_|_||_\__, |___/\___/\__|
|
||||
|___/
|
||||
|
||||
⭐️ Open Source 开源地址: https://github.com/RockChinQ/LangBot
|
||||
📖 Documentation 文档地址: https://docs.langbot.app
|
||||
"""
|
||||
|
||||
|
||||
async def main_entry(loop: asyncio.AbstractEventLoop):
|
||||
parser = argparse.ArgumentParser(description='LangBot')
|
||||
parser.add_argument('--skip-plugin-deps-check', action='store_true', help='跳过插件依赖项检查', default=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
print(asciiart)
|
||||
|
||||
import sys
|
||||
|
||||
# 检查依赖
|
||||
|
||||
from pkg.core.bootutils import deps
|
||||
|
||||
missing_deps = await deps.check_deps()
|
||||
|
||||
if missing_deps:
|
||||
print('以下依赖包未安装,将自动安装,请完成后重启程序:')
|
||||
print(
|
||||
'These dependencies are missing, they will be installed automatically, please restart the program after completion:'
|
||||
)
|
||||
for dep in missing_deps:
|
||||
print('-', dep)
|
||||
await deps.install_deps(missing_deps)
|
||||
print('已自动安装缺失的依赖包,请重启程序。')
|
||||
print('The missing dependencies have been installed automatically, please restart the program.')
|
||||
sys.exit(0)
|
||||
|
||||
# check plugin deps
|
||||
if not args.skip_plugin_deps_check:
|
||||
await deps.precheck_plugin_deps()
|
||||
|
||||
# # 检查pydantic版本,如果没有 pydantic.v1,则把 pydantic 映射为 v1
|
||||
# import pydantic.version
|
||||
|
||||
# if pydantic.version.VERSION < '2.0':
|
||||
# import pydantic
|
||||
|
||||
# sys.modules['pydantic.v1'] = pydantic
|
||||
|
||||
# 检查配置文件
|
||||
|
||||
from pkg.core.bootutils import files
|
||||
|
||||
generated_files = await files.generate_files()
|
||||
|
||||
if generated_files:
|
||||
print('以下文件不存在,已自动生成:')
|
||||
print('Following files do not exist and have been automatically generated:')
|
||||
for file in generated_files:
|
||||
print('-', file)
|
||||
|
||||
from pkg.core import boot
|
||||
|
||||
await boot.main(loop)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import os
|
||||
import sys
|
||||
|
||||
# 必须大于 3.10.1
|
||||
if sys.version_info < (3, 10, 1):
|
||||
print('需要 Python 3.10.1 及以上版本,当前 Python 版本为:', sys.version)
|
||||
input('按任意键退出...')
|
||||
print('Your Python version is not supported. Please exit the program by pressing any key.')
|
||||
exit(1)
|
||||
|
||||
# Check if the current directory is the LangBot project root directory
|
||||
invalid_pwd = False
|
||||
|
||||
if not os.path.exists('main.py'):
|
||||
invalid_pwd = True
|
||||
else:
|
||||
with open('main.py', 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
if 'LangBot/main.py' not in content:
|
||||
invalid_pwd = True
|
||||
if invalid_pwd:
|
||||
print('请在 LangBot 项目根目录下以命令形式运行此程序。')
|
||||
input('按任意键退出...')
|
||||
print('Please run this program in the LangBot project root directory in command form.')
|
||||
print('Press any key to exit...')
|
||||
exit(1)
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
|
||||
loop.run_until_complete(main_entry(loop))
|
||||
langbot.__main__.main()
|
||||
|
||||
@@ -1,125 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import abc
|
||||
import typing
|
||||
import enum
|
||||
import quart
|
||||
import traceback
|
||||
from quart.typing import RouteCallable
|
||||
|
||||
from ....core import app
|
||||
|
||||
|
||||
preregistered_groups: list[type[RouterGroup]] = []
|
||||
"""RouterGroup 的预注册列表"""
|
||||
|
||||
|
||||
def group_class(name: str, path: str) -> None:
|
||||
"""注册一个 RouterGroup"""
|
||||
|
||||
def decorator(cls: typing.Type[RouterGroup]) -> typing.Type[RouterGroup]:
|
||||
cls.name = name
|
||||
cls.path = path
|
||||
preregistered_groups.append(cls)
|
||||
return cls
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
class AuthType(enum.Enum):
|
||||
"""认证类型"""
|
||||
|
||||
NONE = 'none'
|
||||
USER_TOKEN = 'user-token'
|
||||
|
||||
|
||||
class RouterGroup(abc.ABC):
|
||||
name: str
|
||||
|
||||
path: str
|
||||
|
||||
ap: app.Application
|
||||
|
||||
quart_app: quart.Quart
|
||||
|
||||
def __init__(self, ap: app.Application, quart_app: quart.Quart) -> None:
|
||||
self.ap = ap
|
||||
self.quart_app = quart_app
|
||||
|
||||
@abc.abstractmethod
|
||||
async def initialize(self) -> None:
|
||||
pass
|
||||
|
||||
def route(
|
||||
self,
|
||||
rule: str,
|
||||
auth_type: AuthType = AuthType.USER_TOKEN,
|
||||
**options: typing.Any,
|
||||
) -> typing.Callable[[RouteCallable], RouteCallable]: # decorator
|
||||
"""注册一个路由"""
|
||||
|
||||
def decorator(f: RouteCallable) -> RouteCallable:
|
||||
nonlocal rule
|
||||
rule = self.path + rule
|
||||
|
||||
async def handler_error(*args, **kwargs):
|
||||
if auth_type == AuthType.USER_TOKEN:
|
||||
# 从Authorization头中获取token
|
||||
token = quart.request.headers.get('Authorization', '').replace('Bearer ', '')
|
||||
|
||||
if not token:
|
||||
return self.http_status(401, -1, '未提供有效的用户令牌')
|
||||
|
||||
try:
|
||||
user_email = await self.ap.user_service.verify_jwt_token(token)
|
||||
|
||||
# check if this account exists
|
||||
user = await self.ap.user_service.get_user_by_email(user_email)
|
||||
if not user:
|
||||
return self.http_status(401, -1, '用户不存在')
|
||||
|
||||
# 检查f是否接受user_email参数
|
||||
if 'user_email' in f.__code__.co_varnames:
|
||||
kwargs['user_email'] = user_email
|
||||
except Exception as e:
|
||||
return self.http_status(401, -1, str(e))
|
||||
|
||||
try:
|
||||
return await f(*args, **kwargs)
|
||||
except Exception: # 自动 500
|
||||
traceback.print_exc()
|
||||
# return self.http_status(500, -2, str(e))
|
||||
return self.http_status(500, -2, 'internal server error')
|
||||
|
||||
new_f = handler_error
|
||||
new_f.__name__ = (self.name + rule).replace('/', '__')
|
||||
new_f.__doc__ = f.__doc__
|
||||
|
||||
self.quart_app.route(rule, **options)(new_f)
|
||||
return f
|
||||
|
||||
return decorator
|
||||
|
||||
def success(self, data: typing.Any = None) -> quart.Response:
|
||||
"""返回一个 200 响应"""
|
||||
return quart.jsonify(
|
||||
{
|
||||
'code': 0,
|
||||
'msg': 'ok',
|
||||
'data': data,
|
||||
}
|
||||
)
|
||||
|
||||
def fail(self, code: int, msg: str) -> quart.Response:
|
||||
"""返回一个异常响应"""
|
||||
|
||||
return quart.jsonify(
|
||||
{
|
||||
'code': code,
|
||||
'msg': msg,
|
||||
}
|
||||
)
|
||||
|
||||
def http_status(self, status: int, code: int, msg: str) -> quart.Response:
|
||||
"""返回一个指定状态码的响应"""
|
||||
return self.fail(code, msg), status
|
||||
@@ -1,22 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import quart
|
||||
import mimetypes
|
||||
|
||||
from .. import group
|
||||
|
||||
|
||||
@group.group_class('files', '/api/v1/files')
|
||||
class FilesRouterGroup(group.RouterGroup):
|
||||
async def initialize(self) -> None:
|
||||
@self.route('/image/<image_key>', methods=['GET'], auth_type=group.AuthType.NONE)
|
||||
async def _(image_key: str) -> quart.Response:
|
||||
if not await self.ap.storage_mgr.storage_provider.exists(image_key):
|
||||
return quart.Response(status=404)
|
||||
|
||||
image_bytes = await self.ap.storage_mgr.storage_provider.load(image_key)
|
||||
mime_type = mimetypes.guess_type(image_key)[0]
|
||||
if mime_type is None:
|
||||
mime_type = 'image/jpeg'
|
||||
|
||||
return quart.Response(image_bytes, mimetype=mime_type)
|
||||
@@ -1,44 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import quart
|
||||
|
||||
from ... import group
|
||||
|
||||
|
||||
@group.group_class('pipelines', '/api/v1/pipelines')
|
||||
class PipelinesRouterGroup(group.RouterGroup):
|
||||
async def initialize(self) -> None:
|
||||
@self.route('', methods=['GET', 'POST'])
|
||||
async def _() -> str:
|
||||
if quart.request.method == 'GET':
|
||||
return self.success(data={'pipelines': await self.ap.pipeline_service.get_pipelines()})
|
||||
elif quart.request.method == 'POST':
|
||||
json_data = await quart.request.json
|
||||
|
||||
pipeline_uuid = await self.ap.pipeline_service.create_pipeline(json_data)
|
||||
|
||||
return self.success(data={'uuid': pipeline_uuid})
|
||||
|
||||
@self.route('/_/metadata', methods=['GET'])
|
||||
async def _() -> str:
|
||||
return self.success(data={'configs': await self.ap.pipeline_service.get_pipeline_metadata()})
|
||||
|
||||
@self.route('/<pipeline_uuid>', methods=['GET', 'PUT', 'DELETE'])
|
||||
async def _(pipeline_uuid: str) -> str:
|
||||
if quart.request.method == 'GET':
|
||||
pipeline = await self.ap.pipeline_service.get_pipeline(pipeline_uuid)
|
||||
|
||||
if pipeline is None:
|
||||
return self.http_status(404, -1, 'pipeline not found')
|
||||
|
||||
return self.success(data={'pipeline': pipeline})
|
||||
elif quart.request.method == 'PUT':
|
||||
json_data = await quart.request.json
|
||||
|
||||
await self.ap.pipeline_service.update_pipeline(pipeline_uuid, json_data)
|
||||
|
||||
return self.success()
|
||||
elif quart.request.method == 'DELETE':
|
||||
await self.ap.pipeline_service.delete_pipeline(pipeline_uuid)
|
||||
|
||||
return self.success()
|
||||
@@ -1,132 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import quart
|
||||
|
||||
from .....core import taskmgr
|
||||
from .. import group
|
||||
from langbot_plugin.runtime.plugin.mgr import PluginInstallSource
|
||||
|
||||
|
||||
@group.group_class('plugins', '/api/v1/plugins')
|
||||
class PluginsRouterGroup(group.RouterGroup):
|
||||
async def initialize(self) -> None:
|
||||
@self.route('', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
|
||||
async def _() -> str:
|
||||
plugins = await self.ap.plugin_connector.list_plugins()
|
||||
|
||||
return self.success(data={'plugins': plugins})
|
||||
|
||||
@self.route(
|
||||
'/<author>/<plugin_name>/upgrade',
|
||||
methods=['POST'],
|
||||
auth_type=group.AuthType.USER_TOKEN,
|
||||
)
|
||||
async def _(author: str, plugin_name: str) -> str:
|
||||
ctx = taskmgr.TaskContext.new()
|
||||
wrapper = self.ap.task_mgr.create_user_task(
|
||||
self.ap.plugin_connector.upgrade_plugin(author, plugin_name, task_context=ctx),
|
||||
kind='plugin-operation',
|
||||
name=f'plugin-upgrade-{plugin_name}',
|
||||
label=f'Upgrading plugin {plugin_name}',
|
||||
context=ctx,
|
||||
)
|
||||
return self.success(data={'task_id': wrapper.id})
|
||||
|
||||
@self.route(
|
||||
'/<author>/<plugin_name>',
|
||||
methods=['GET', 'DELETE'],
|
||||
auth_type=group.AuthType.USER_TOKEN,
|
||||
)
|
||||
async def _(author: str, plugin_name: str) -> str:
|
||||
if quart.request.method == 'GET':
|
||||
plugin = await self.ap.plugin_connector.get_plugin_info(author, plugin_name)
|
||||
if plugin is None:
|
||||
return self.http_status(404, -1, 'plugin not found')
|
||||
return self.success(data={'plugin': plugin})
|
||||
elif quart.request.method == 'DELETE':
|
||||
ctx = taskmgr.TaskContext.new()
|
||||
wrapper = self.ap.task_mgr.create_user_task(
|
||||
self.ap.plugin_connector.delete_plugin(author, plugin_name, task_context=ctx),
|
||||
kind='plugin-operation',
|
||||
name=f'plugin-remove-{plugin_name}',
|
||||
label=f'Removing plugin {plugin_name}',
|
||||
context=ctx,
|
||||
)
|
||||
|
||||
return self.success(data={'task_id': wrapper.id})
|
||||
|
||||
@self.route(
|
||||
'/<author>/<plugin_name>/config',
|
||||
methods=['GET', 'PUT'],
|
||||
auth_type=group.AuthType.USER_TOKEN,
|
||||
)
|
||||
async def _(author: str, plugin_name: str) -> quart.Response:
|
||||
plugin = await self.ap.plugin_connector.get_plugin_info(author, plugin_name)
|
||||
if plugin is None:
|
||||
return self.http_status(404, -1, 'plugin not found')
|
||||
|
||||
if quart.request.method == 'GET':
|
||||
return self.success(data={'config': plugin['plugin_config']})
|
||||
elif quart.request.method == 'PUT':
|
||||
data = await quart.request.json
|
||||
|
||||
await self.ap.plugin_connector.set_plugin_config(author, plugin_name, data)
|
||||
|
||||
return self.success(data={})
|
||||
|
||||
@self.route('/install/github', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
|
||||
async def _() -> str:
|
||||
data = await quart.request.json
|
||||
|
||||
ctx = taskmgr.TaskContext.new()
|
||||
short_source_str = data['source'][-8:]
|
||||
wrapper = self.ap.task_mgr.create_user_task(
|
||||
self.ap.plugin_mgr.install_plugin(data['source'], task_context=ctx),
|
||||
kind='plugin-operation',
|
||||
name='plugin-install-github',
|
||||
label=f'Installing plugin from github ...{short_source_str}',
|
||||
context=ctx,
|
||||
)
|
||||
|
||||
return self.success(data={'task_id': wrapper.id})
|
||||
|
||||
@self.route('/install/marketplace', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
|
||||
async def _() -> str:
|
||||
data = await quart.request.json
|
||||
|
||||
ctx = taskmgr.TaskContext.new()
|
||||
wrapper = self.ap.task_mgr.create_user_task(
|
||||
self.ap.plugin_connector.install_plugin(PluginInstallSource.MARKETPLACE, data, task_context=ctx),
|
||||
kind='plugin-operation',
|
||||
name='plugin-install-marketplace',
|
||||
label=f'Installing plugin from marketplace ...{data}',
|
||||
context=ctx,
|
||||
)
|
||||
|
||||
return self.success(data={'task_id': wrapper.id})
|
||||
|
||||
@self.route('/install/local', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
|
||||
async def _() -> str:
|
||||
file = (await quart.request.files).get('file')
|
||||
if file is None:
|
||||
return self.http_status(400, -1, 'file is required')
|
||||
|
||||
file_bytes = file.read()
|
||||
|
||||
file_base64 = base64.b64encode(file_bytes).decode('utf-8')
|
||||
|
||||
data = {
|
||||
'plugin_file': file_base64,
|
||||
}
|
||||
|
||||
ctx = taskmgr.TaskContext.new()
|
||||
wrapper = self.ap.task_mgr.create_user_task(
|
||||
self.ap.plugin_connector.install_plugin(PluginInstallSource.LOCAL, data, task_context=ctx),
|
||||
kind='plugin-operation',
|
||||
name='plugin-install-local',
|
||||
label=f'Installing plugin from local ...{file.filename}',
|
||||
context=ctx,
|
||||
)
|
||||
|
||||
return self.success(data={'task_id': wrapper.id})
|
||||
@@ -1,46 +0,0 @@
|
||||
import quart
|
||||
|
||||
from ... import group
|
||||
|
||||
|
||||
@group.group_class('models/llm', '/api/v1/provider/models/llm')
|
||||
class LLMModelsRouterGroup(group.RouterGroup):
|
||||
async def initialize(self) -> None:
|
||||
@self.route('', methods=['GET', 'POST'])
|
||||
async def _() -> str:
|
||||
if quart.request.method == 'GET':
|
||||
return self.success(data={'models': await self.ap.model_service.get_llm_models()})
|
||||
elif quart.request.method == 'POST':
|
||||
json_data = await quart.request.json
|
||||
|
||||
model_uuid = await self.ap.model_service.create_llm_model(json_data)
|
||||
|
||||
return self.success(data={'uuid': model_uuid})
|
||||
|
||||
@self.route('/<model_uuid>', methods=['GET', 'PUT', 'DELETE'])
|
||||
async def _(model_uuid: str) -> str:
|
||||
if quart.request.method == 'GET':
|
||||
model = await self.ap.model_service.get_llm_model(model_uuid)
|
||||
|
||||
if model is None:
|
||||
return self.http_status(404, -1, 'model not found')
|
||||
|
||||
return self.success(data={'model': model})
|
||||
elif quart.request.method == 'PUT':
|
||||
json_data = await quart.request.json
|
||||
|
||||
await self.ap.model_service.update_llm_model(model_uuid, json_data)
|
||||
|
||||
return self.success()
|
||||
elif quart.request.method == 'DELETE':
|
||||
await self.ap.model_service.delete_llm_model(model_uuid)
|
||||
|
||||
return self.success()
|
||||
|
||||
@self.route('/<model_uuid>/test', methods=['POST'])
|
||||
async def _(model_uuid: str) -> str:
|
||||
json_data = await quart.request.json
|
||||
|
||||
await self.ap.model_service.test_llm_model(model_uuid, json_data)
|
||||
|
||||
return self.success()
|
||||
@@ -1,42 +0,0 @@
|
||||
import quart
|
||||
import argon2
|
||||
|
||||
from .. import group
|
||||
|
||||
|
||||
@group.group_class('user', '/api/v1/user')
|
||||
class UserRouterGroup(group.RouterGroup):
|
||||
async def initialize(self) -> None:
|
||||
@self.route('/init', methods=['GET', 'POST'], auth_type=group.AuthType.NONE)
|
||||
async def _() -> str:
|
||||
if quart.request.method == 'GET':
|
||||
return self.success(data={'initialized': await self.ap.user_service.is_initialized()})
|
||||
|
||||
if await self.ap.user_service.is_initialized():
|
||||
return self.fail(1, '系统已初始化')
|
||||
|
||||
json_data = await quart.request.json
|
||||
|
||||
user_email = json_data['user']
|
||||
password = json_data['password']
|
||||
|
||||
await self.ap.user_service.create_user(user_email, password)
|
||||
|
||||
return self.success()
|
||||
|
||||
@self.route('/auth', methods=['POST'], auth_type=group.AuthType.NONE)
|
||||
async def _() -> str:
|
||||
json_data = await quart.request.json
|
||||
|
||||
try:
|
||||
token = await self.ap.user_service.authenticate(json_data['user'], json_data['password'])
|
||||
except argon2.exceptions.VerifyMismatchError:
|
||||
return self.fail(1, '用户名或密码错误')
|
||||
|
||||
return self.success(data={'token': token})
|
||||
|
||||
@self.route('/check-token', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
|
||||
async def _(user_email: str) -> str:
|
||||
token = await self.ap.user_service.generate_jwt_token(user_email)
|
||||
|
||||
return self.success(data={'token': token})
|
||||
@@ -1,44 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(name='cmd', help='显示命令列表', usage='!cmd\n!cmd <命令名称>')
|
||||
class CmdOperator(operator.CommandOperator):
|
||||
"""命令列表"""
|
||||
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
"""执行"""
|
||||
if len(context.crt_params) == 0:
|
||||
reply_str = '当前所有命令: \n\n'
|
||||
|
||||
for cmd in self.ap.cmd_mgr.cmd_list:
|
||||
if cmd.parent_class is None:
|
||||
reply_str += f'{cmd.name}: {cmd.help}\n'
|
||||
|
||||
reply_str += '\n使用 !cmd <命令名称> 查看命令的详细帮助'
|
||||
|
||||
yield command_context.CommandReturn(text=reply_str.strip())
|
||||
|
||||
else:
|
||||
cmd_name = context.crt_params[0]
|
||||
|
||||
cmd = None
|
||||
|
||||
for _cmd in self.ap.cmd_mgr.cmd_list:
|
||||
if (cmd_name == _cmd.name or cmd_name in _cmd.alias) and (_cmd.parent_class is None):
|
||||
cmd = _cmd
|
||||
break
|
||||
|
||||
if cmd is None:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandNotFoundError(cmd_name))
|
||||
else:
|
||||
reply_str = f'{cmd.name}: {cmd.help}\n\n'
|
||||
reply_str += f'使用方法: \n{cmd.usage}'
|
||||
|
||||
yield command_context.CommandReturn(text=reply_str.strip())
|
||||
@@ -1,48 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(name='del', help='删除当前会话的历史记录', usage='!del <序号>\n!del all')
|
||||
class DelOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if context.session.conversations:
|
||||
delete_index = 0
|
||||
if len(context.crt_params) > 0:
|
||||
try:
|
||||
delete_index = int(context.crt_params[0])
|
||||
except Exception:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandOperationError('索引必须是整数'))
|
||||
return
|
||||
|
||||
if delete_index < 0 or delete_index >= len(context.session.conversations):
|
||||
yield command_context.CommandReturn(error=command_errors.CommandOperationError('索引超出范围'))
|
||||
return
|
||||
|
||||
# 倒序
|
||||
to_delete_index = len(context.session.conversations) - 1 - delete_index
|
||||
|
||||
if context.session.conversations[to_delete_index] == context.session.using_conversation:
|
||||
context.session.using_conversation = None
|
||||
|
||||
del context.session.conversations[to_delete_index]
|
||||
|
||||
yield command_context.CommandReturn(text=f'已删除对话: {delete_index}')
|
||||
else:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandOperationError('当前没有对话'))
|
||||
|
||||
|
||||
@operator.operator_class(name='all', help='删除此会话的所有历史记录', parent_class=DelOperator)
|
||||
class DelAllOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
context.session.conversations = []
|
||||
context.session.using_conversation = None
|
||||
|
||||
yield command_context.CommandReturn(text='已删除所有对话')
|
||||
@@ -1,27 +0,0 @@
|
||||
from __future__ import annotations
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context
|
||||
|
||||
|
||||
@operator.operator_class(name='func', help='查看所有已注册的内容函数', usage='!func')
|
||||
class FuncOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> AsyncGenerator[command_context.CommandReturn, None]:
|
||||
reply_str = '当前已启用的内容函数: \n\n'
|
||||
|
||||
index = 1
|
||||
|
||||
all_functions = await self.ap.tool_mgr.get_all_tools()
|
||||
|
||||
for func in all_functions:
|
||||
reply_str += '{}. {}:\n{}\n\n'.format(
|
||||
index,
|
||||
func.name,
|
||||
func.description,
|
||||
)
|
||||
index += 1
|
||||
|
||||
yield command_context.CommandReturn(text=reply_str)
|
||||
@@ -1,18 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context
|
||||
|
||||
|
||||
@operator.operator_class(name='help', help='显示帮助', usage='!help\n!help <命令名称>')
|
||||
class HelpOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
help = 'LangBot - 大语言模型原生即时通信机器人平台\n链接:https://langbot.app'
|
||||
|
||||
help += '\n发送命令 !cmd 可查看命令列表'
|
||||
|
||||
yield command_context.CommandReturn(text=help)
|
||||
@@ -1,33 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(name='last', help='切换到前一个对话', usage='!last')
|
||||
class LastOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if context.session.conversations:
|
||||
# 找到当前会话的上一个会话
|
||||
for index in range(len(context.session.conversations) - 1, -1, -1):
|
||||
if context.session.conversations[index] == context.session.using_conversation:
|
||||
if index == 0:
|
||||
yield command_context.CommandReturn(
|
||||
error=command_errors.CommandOperationError('已经是第一个对话了')
|
||||
)
|
||||
return
|
||||
else:
|
||||
context.session.using_conversation = context.session.conversations[index - 1]
|
||||
time_str = context.session.using_conversation.create_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
yield command_context.CommandReturn(
|
||||
text=f'已切换到上一个对话: {index} {time_str}: {context.session.using_conversation.messages[0].readable_str()}'
|
||||
)
|
||||
return
|
||||
else:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandOperationError('当前没有对话'))
|
||||
@@ -1,51 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(name='list', help='列出此会话中的所有历史对话', usage='!list\n!list <页码>')
|
||||
class ListOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
page = 0
|
||||
|
||||
if len(context.crt_params) > 0:
|
||||
try:
|
||||
page = int(context.crt_params[0] - 1)
|
||||
except Exception:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandOperationError('页码应为整数'))
|
||||
return
|
||||
|
||||
record_per_page = 10
|
||||
|
||||
content = ''
|
||||
|
||||
index = 0
|
||||
|
||||
using_conv_index = 0
|
||||
|
||||
for conv in context.session.conversations[::-1]:
|
||||
time_str = conv.create_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
if conv == context.session.using_conversation:
|
||||
using_conv_index = index
|
||||
|
||||
if index >= page * record_per_page and index < (page + 1) * record_per_page:
|
||||
content += (
|
||||
f'{index} {time_str}: {conv.messages[0].readable_str() if len(conv.messages) > 0 else "无内容"}\n'
|
||||
)
|
||||
index += 1
|
||||
|
||||
if content == '':
|
||||
content = '无'
|
||||
else:
|
||||
if context.session.using_conversation is None:
|
||||
content += '\n当前处于新会话'
|
||||
else:
|
||||
content += f'\n当前会话: {using_conv_index} {context.session.using_conversation.create_time.strftime("%Y-%m-%d %H:%M:%S")}: {context.session.using_conversation.messages[0].readable_str() if len(context.session.using_conversation.messages) > 0 else "无内容"}'
|
||||
|
||||
yield command_context.CommandReturn(text=f'第 {page + 1} 页 (时间倒序):\n{content}')
|
||||
@@ -1,32 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(name='next', help='切换到后一个对话', usage='!next')
|
||||
class NextOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if context.session.conversations:
|
||||
# 找到当前会话的下一个会话
|
||||
for index in range(len(context.session.conversations)):
|
||||
if context.session.conversations[index] == context.session.using_conversation:
|
||||
if index == len(context.session.conversations) - 1:
|
||||
yield command_context.CommandReturn(
|
||||
error=command_errors.CommandOperationError('已经是最后一个对话了')
|
||||
)
|
||||
return
|
||||
else:
|
||||
context.session.using_conversation = context.session.conversations[index + 1]
|
||||
time_str = context.session.using_conversation.create_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
yield command_context.CommandReturn(
|
||||
text=f'已切换到后一个对话: {index} {time_str}: {context.session.using_conversation.messages[0].content}'
|
||||
)
|
||||
return
|
||||
else:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandOperationError('当前没有对话'))
|
||||
@@ -1,171 +0,0 @@
|
||||
from __future__ import annotations
|
||||
import typing
|
||||
import traceback
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(
|
||||
name='plugin',
|
||||
help='插件操作',
|
||||
usage='!plugin\n!plugin get <插件仓库地址>\n!plugin update\n!plugin del <插件名>\n!plugin on <插件名>\n!plugin off <插件名>',
|
||||
)
|
||||
class PluginOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
plugin_list = self.ap.plugin_mgr.plugins()
|
||||
reply_str = '所有插件({}):\n'.format(len(plugin_list))
|
||||
idx = 0
|
||||
for plugin in plugin_list:
|
||||
reply_str += '\n#{} {} {}\n{}\nv{}\n作者: {}\n'.format(
|
||||
(idx + 1),
|
||||
plugin.plugin_name,
|
||||
'[已禁用]' if not plugin.enabled else '',
|
||||
plugin.plugin_description,
|
||||
plugin.plugin_version,
|
||||
plugin.plugin_author,
|
||||
)
|
||||
|
||||
idx += 1
|
||||
|
||||
yield command_context.CommandReturn(text=reply_str)
|
||||
|
||||
|
||||
@operator.operator_class(name='get', help='安装插件', privilege=2, parent_class=PluginOperator)
|
||||
class PluginGetOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if len(context.crt_params) == 0:
|
||||
yield command_context.CommandReturn(error=command_errors.ParamNotEnoughError('请提供插件仓库地址'))
|
||||
else:
|
||||
repo = context.crt_params[0]
|
||||
|
||||
yield command_context.CommandReturn(text='正在安装插件...')
|
||||
|
||||
try:
|
||||
await self.ap.plugin_mgr.install_plugin(repo)
|
||||
yield command_context.CommandReturn(text='插件安装成功,请重启程序以加载插件')
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件安装失败: ' + str(e)))
|
||||
|
||||
|
||||
@operator.operator_class(name='update', help='更新插件', privilege=2, parent_class=PluginOperator)
|
||||
class PluginUpdateOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if len(context.crt_params) == 0:
|
||||
yield command_context.CommandReturn(error=command_errors.ParamNotEnoughError('请提供插件名称'))
|
||||
else:
|
||||
plugin_name = context.crt_params[0]
|
||||
|
||||
try:
|
||||
plugin_container = self.ap.plugin_mgr.get_plugin_by_name(plugin_name)
|
||||
|
||||
if plugin_container is not None:
|
||||
yield command_context.CommandReturn(text='正在更新插件...')
|
||||
await self.ap.plugin_mgr.update_plugin(plugin_name)
|
||||
yield command_context.CommandReturn(text='插件更新成功,请重启程序以加载插件')
|
||||
else:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件更新失败: 未找到插件'))
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件更新失败: ' + str(e)))
|
||||
|
||||
|
||||
@operator.operator_class(name='all', help='更新所有插件', privilege=2, parent_class=PluginUpdateOperator)
|
||||
class PluginUpdateAllOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
try:
|
||||
plugins = [p.plugin_name for p in self.ap.plugin_mgr.plugins()]
|
||||
|
||||
if plugins:
|
||||
yield command_context.CommandReturn(text='正在更新插件...')
|
||||
updated = []
|
||||
try:
|
||||
for plugin_name in plugins:
|
||||
await self.ap.plugin_mgr.update_plugin(plugin_name)
|
||||
updated.append(plugin_name)
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件更新失败: ' + str(e)))
|
||||
yield command_context.CommandReturn(text='已更新插件: {}'.format(', '.join(updated)))
|
||||
else:
|
||||
yield command_context.CommandReturn(text='没有可更新的插件')
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件更新失败: ' + str(e)))
|
||||
|
||||
|
||||
@operator.operator_class(name='del', help='删除插件', privilege=2, parent_class=PluginOperator)
|
||||
class PluginDelOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if len(context.crt_params) == 0:
|
||||
yield command_context.CommandReturn(error=command_errors.ParamNotEnoughError('请提供插件名称'))
|
||||
else:
|
||||
plugin_name = context.crt_params[0]
|
||||
|
||||
try:
|
||||
plugin_container = self.ap.plugin_mgr.get_plugin_by_name(plugin_name)
|
||||
|
||||
if plugin_container is not None:
|
||||
yield command_context.CommandReturn(text='正在删除插件...')
|
||||
await self.ap.plugin_mgr.uninstall_plugin(plugin_name)
|
||||
yield command_context.CommandReturn(text='插件删除成功,请重启程序以加载插件')
|
||||
else:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件删除失败: 未找到插件'))
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件删除失败: ' + str(e)))
|
||||
|
||||
|
||||
@operator.operator_class(name='on', help='启用插件', privilege=2, parent_class=PluginOperator)
|
||||
class PluginEnableOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if len(context.crt_params) == 0:
|
||||
yield command_context.CommandReturn(error=command_errors.ParamNotEnoughError('请提供插件名称'))
|
||||
else:
|
||||
plugin_name = context.crt_params[0]
|
||||
|
||||
try:
|
||||
if await self.ap.plugin_mgr.update_plugin_switch(plugin_name, True):
|
||||
yield command_context.CommandReturn(text='已启用插件: {}'.format(plugin_name))
|
||||
else:
|
||||
yield command_context.CommandReturn(
|
||||
error=command_errors.CommandError('插件状态修改失败: 未找到插件 {}'.format(plugin_name))
|
||||
)
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件状态修改失败: ' + str(e)))
|
||||
|
||||
|
||||
@operator.operator_class(name='off', help='禁用插件', privilege=2, parent_class=PluginOperator)
|
||||
class PluginDisableOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
if len(context.crt_params) == 0:
|
||||
yield command_context.CommandReturn(error=command_errors.ParamNotEnoughError('请提供插件名称'))
|
||||
else:
|
||||
plugin_name = context.crt_params[0]
|
||||
|
||||
try:
|
||||
if await self.ap.plugin_mgr.update_plugin_switch(plugin_name, False):
|
||||
yield command_context.CommandReturn(text='已禁用插件: {}'.format(plugin_name))
|
||||
else:
|
||||
yield command_context.CommandReturn(
|
||||
error=command_errors.CommandError('插件状态修改失败: 未找到插件 {}'.format(plugin_name))
|
||||
)
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('插件状态修改失败: ' + str(e)))
|
||||
@@ -1,23 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(name='prompt', help='查看当前对话的前文', usage='!prompt')
|
||||
class PromptOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
"""执行"""
|
||||
if context.session.using_conversation is None:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandOperationError('当前没有对话'))
|
||||
else:
|
||||
reply_str = '当前对话所有内容:\n\n'
|
||||
|
||||
for msg in context.session.using_conversation.messages:
|
||||
reply_str += f'{msg.role}: {msg.content}\n'
|
||||
|
||||
yield command_context.CommandReturn(text=reply_str)
|
||||
@@ -1,29 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context, errors as command_errors
|
||||
|
||||
|
||||
@operator.operator_class(name='resend', help='重发当前会话的最后一条消息', usage='!resend')
|
||||
class ResendOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
# 回滚到最后一条用户message前
|
||||
if context.session.using_conversation is None:
|
||||
yield command_context.CommandReturn(error=command_errors.CommandError('当前没有对话'))
|
||||
else:
|
||||
conv_msg = context.session.using_conversation.messages
|
||||
|
||||
# 倒序一直删到最后一条用户message
|
||||
while len(conv_msg) > 0 and conv_msg[-1].role != 'user':
|
||||
conv_msg.pop()
|
||||
|
||||
if len(conv_msg) > 0:
|
||||
# 删除最后一条用户message
|
||||
conv_msg.pop()
|
||||
|
||||
# 不重发了,提示用户已删除就行了
|
||||
yield command_context.CommandReturn(text='已删除最后一次请求记录')
|
||||
@@ -1,17 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context
|
||||
|
||||
|
||||
@operator.operator_class(name='reset', help='重置当前会话', usage='!reset')
|
||||
class ResetOperator(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
"""执行"""
|
||||
context.session.using_conversation = None
|
||||
|
||||
yield command_context.CommandReturn(text='已重置当前会话')
|
||||
@@ -1,22 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from .. import operator
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context
|
||||
|
||||
|
||||
@operator.operator_class(name='version', help='显示版本信息', usage='!version')
|
||||
class VersionCommand(operator.CommandOperator):
|
||||
async def execute(
|
||||
self, context: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
reply_str = f'当前版本: \n{self.ap.ver_mgr.get_current_version()}'
|
||||
|
||||
try:
|
||||
if await self.ap.ver_mgr.is_new_version_available():
|
||||
reply_str += '\n\n有新版本可用。'
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
yield command_context.CommandReturn(text=reply_str.strip())
|
||||
@@ -1,79 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from .. import stage, app
|
||||
from ..bootutils import config
|
||||
|
||||
|
||||
@stage.stage_class('LoadConfigStage')
|
||||
class LoadConfigStage(stage.BootingStage):
|
||||
"""加载配置文件阶段"""
|
||||
|
||||
async def run(self, ap: app.Application):
|
||||
"""启动"""
|
||||
|
||||
# ======= deprecated =======
|
||||
if os.path.exists('data/config/command.json'):
|
||||
ap.command_cfg = await config.load_json_config(
|
||||
'data/config/command.json',
|
||||
'templates/legacy/command.json',
|
||||
completion=False,
|
||||
)
|
||||
|
||||
if os.path.exists('data/config/pipeline.json'):
|
||||
ap.pipeline_cfg = await config.load_json_config(
|
||||
'data/config/pipeline.json',
|
||||
'templates/legacy/pipeline.json',
|
||||
completion=False,
|
||||
)
|
||||
|
||||
if os.path.exists('data/config/platform.json'):
|
||||
ap.platform_cfg = await config.load_json_config(
|
||||
'data/config/platform.json',
|
||||
'templates/legacy/platform.json',
|
||||
completion=False,
|
||||
)
|
||||
|
||||
if os.path.exists('data/config/provider.json'):
|
||||
ap.provider_cfg = await config.load_json_config(
|
||||
'data/config/provider.json',
|
||||
'templates/legacy/provider.json',
|
||||
completion=False,
|
||||
)
|
||||
|
||||
if os.path.exists('data/config/system.json'):
|
||||
ap.system_cfg = await config.load_json_config(
|
||||
'data/config/system.json',
|
||||
'templates/legacy/system.json',
|
||||
completion=False,
|
||||
)
|
||||
|
||||
# ======= deprecated =======
|
||||
|
||||
ap.instance_config = await config.load_yaml_config(
|
||||
'data/config.yaml', 'templates/config.yaml', completion=False
|
||||
)
|
||||
await ap.instance_config.dump_config()
|
||||
|
||||
ap.sensitive_meta = await config.load_json_config(
|
||||
'data/metadata/sensitive-words.json',
|
||||
'templates/metadata/sensitive-words.json',
|
||||
)
|
||||
await ap.sensitive_meta.dump_config()
|
||||
|
||||
ap.pipeline_config_meta_trigger = await config.load_yaml_config(
|
||||
'templates/metadata/pipeline/trigger.yaml',
|
||||
'templates/metadata/pipeline/trigger.yaml',
|
||||
)
|
||||
ap.pipeline_config_meta_safety = await config.load_yaml_config(
|
||||
'templates/metadata/pipeline/safety.yaml',
|
||||
'templates/metadata/pipeline/safety.yaml',
|
||||
)
|
||||
ap.pipeline_config_meta_ai = await config.load_yaml_config(
|
||||
'templates/metadata/pipeline/ai.yaml', 'templates/metadata/pipeline/ai.yaml'
|
||||
)
|
||||
ap.pipeline_config_meta_output = await config.load_yaml_config(
|
||||
'templates/metadata/pipeline/output.yaml',
|
||||
'templates/metadata/pipeline/output.yaml',
|
||||
)
|
||||
@@ -1,14 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sqlalchemy.ext.asyncio as sqlalchemy_asyncio
|
||||
|
||||
from .. import database
|
||||
|
||||
|
||||
@database.manager_class('sqlite')
|
||||
class SQLiteDatabaseManager(database.BaseDatabaseManager):
|
||||
"""SQLite 数据库管理类"""
|
||||
|
||||
async def initialize(self) -> None:
|
||||
sqlite_path = 'data/langbot.db'
|
||||
self.engine = sqlalchemy_asyncio.create_async_engine(f'sqlite+aiosqlite:///{sqlite_path}')
|
||||
@@ -1,32 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
from .. import rule as rule_model
|
||||
from .. import entities
|
||||
import langbot_plugin.api.entities.builtin.platform.message as platform_message
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
|
||||
|
||||
@rule_model.rule_class('at-bot')
|
||||
class AtBotRule(rule_model.GroupRespondRule):
|
||||
async def match(
|
||||
self,
|
||||
message_text: str,
|
||||
message_chain: platform_message.MessageChain,
|
||||
rule_dict: dict,
|
||||
query: pipeline_query.Query,
|
||||
) -> entities.RuleJudgeResult:
|
||||
if message_chain.has(platform_message.At(query.adapter.bot_account_id)) and rule_dict['at']:
|
||||
message_chain.remove(platform_message.At(query.adapter.bot_account_id))
|
||||
|
||||
if message_chain.has(
|
||||
platform_message.At(query.adapter.bot_account_id)
|
||||
): # 回复消息时会at两次,检查并删除重复的
|
||||
message_chain.remove(platform_message.At(query.adapter.bot_account_id))
|
||||
|
||||
return entities.RuleJudgeResult(
|
||||
matching=True,
|
||||
replacement=message_chain,
|
||||
)
|
||||
|
||||
return entities.RuleJudgeResult(matching=False, replacement=message_chain)
|
||||
@@ -1,255 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import discord
|
||||
|
||||
import typing
|
||||
import re
|
||||
import base64
|
||||
import uuid
|
||||
import os
|
||||
import datetime
|
||||
|
||||
import aiohttp
|
||||
import pydantic
|
||||
|
||||
import langbot_plugin.api.definition.abstract.platform.adapter as abstract_platform_adapter
|
||||
import langbot_plugin.api.entities.builtin.platform.message as platform_message
|
||||
import langbot_plugin.api.entities.builtin.platform.events as platform_events
|
||||
import langbot_plugin.api.entities.builtin.platform.entities as platform_entities
|
||||
import langbot_plugin.api.definition.abstract.platform.event_logger as abstract_platform_logger
|
||||
|
||||
|
||||
class DiscordMessageConverter(abstract_platform_adapter.AbstractMessageConverter):
|
||||
@staticmethod
|
||||
async def yiri2target(
|
||||
message_chain: platform_message.MessageChain,
|
||||
) -> typing.Tuple[str, typing.List[discord.File]]:
|
||||
for ele in message_chain:
|
||||
if isinstance(ele, platform_message.At):
|
||||
message_chain.remove(ele)
|
||||
break
|
||||
|
||||
text_string = ''
|
||||
image_files = []
|
||||
|
||||
for ele in message_chain:
|
||||
if isinstance(ele, platform_message.Image):
|
||||
image_bytes = None
|
||||
|
||||
if ele.base64:
|
||||
image_bytes = base64.b64decode(ele.base64)
|
||||
elif ele.url:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(ele.url) as response:
|
||||
image_bytes = await response.read()
|
||||
elif ele.path:
|
||||
with open(ele.path, 'rb') as f:
|
||||
image_bytes = f.read()
|
||||
|
||||
image_files.append(discord.File(fp=image_bytes, filename=f'{uuid.uuid4()}.png'))
|
||||
elif isinstance(ele, platform_message.Plain):
|
||||
text_string += ele.text
|
||||
elif isinstance(ele, platform_message.Forward):
|
||||
for node in ele.node_list:
|
||||
(
|
||||
text_string,
|
||||
image_files,
|
||||
) = await DiscordMessageConverter.yiri2target(node.message_chain)
|
||||
text_string += text_string
|
||||
image_files.extend(image_files)
|
||||
|
||||
return text_string, image_files
|
||||
|
||||
@staticmethod
|
||||
async def target2yiri(message: discord.Message) -> platform_message.MessageChain:
|
||||
lb_msg_list = []
|
||||
|
||||
msg_create_time = datetime.datetime.fromtimestamp(int(message.created_at.timestamp()))
|
||||
|
||||
lb_msg_list.append(platform_message.Source(id=message.id, time=msg_create_time))
|
||||
|
||||
element_list = []
|
||||
|
||||
def text_element_recur(
|
||||
text_ele: str,
|
||||
) -> list[platform_message.MessageComponent]:
|
||||
if text_ele == '':
|
||||
return []
|
||||
|
||||
# <@1234567890>
|
||||
# @everyone
|
||||
# @here
|
||||
at_pattern = re.compile(r'(@everyone|@here|<@[\d]+>)')
|
||||
at_matches = at_pattern.findall(text_ele)
|
||||
|
||||
if len(at_matches) > 0:
|
||||
mid_at = at_matches[0]
|
||||
|
||||
text_split = text_ele.split(mid_at)
|
||||
|
||||
mid_at_component = []
|
||||
|
||||
if mid_at == '@everyone' or mid_at == '@here':
|
||||
mid_at_component.append(platform_message.AtAll())
|
||||
else:
|
||||
mid_at_component.append(platform_message.At(target=mid_at[2:-1]))
|
||||
|
||||
return text_element_recur(text_split[0]) + mid_at_component + text_element_recur(text_split[1])
|
||||
else:
|
||||
return [platform_message.Plain(text=text_ele)]
|
||||
|
||||
element_list.extend(text_element_recur(message.content))
|
||||
|
||||
# attachments
|
||||
for attachment in message.attachments:
|
||||
async with aiohttp.ClientSession(trust_env=True) as session:
|
||||
async with session.get(attachment.url) as response:
|
||||
image_data = await response.read()
|
||||
image_base64 = base64.b64encode(image_data).decode('utf-8')
|
||||
image_format = response.headers['Content-Type']
|
||||
element_list.append(platform_message.Image(base64=f'data:{image_format};base64,{image_base64}'))
|
||||
|
||||
return platform_message.MessageChain(element_list)
|
||||
|
||||
|
||||
class DiscordEventConverter(abstract_platform_adapter.AbstractEventConverter):
|
||||
@staticmethod
|
||||
async def yiri2target(event: platform_events.Event) -> discord.Message:
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
async def target2yiri(event: discord.Message) -> platform_events.Event:
|
||||
message_chain = await DiscordMessageConverter.target2yiri(event)
|
||||
|
||||
if isinstance(event.channel, discord.DMChannel):
|
||||
return platform_events.FriendMessage(
|
||||
sender=platform_entities.Friend(
|
||||
id=event.author.id,
|
||||
nickname=event.author.name,
|
||||
remark=event.channel.id,
|
||||
),
|
||||
message_chain=message_chain,
|
||||
time=event.created_at.timestamp(),
|
||||
source_platform_object=event,
|
||||
)
|
||||
elif isinstance(event.channel, discord.TextChannel):
|
||||
return platform_events.GroupMessage(
|
||||
sender=platform_entities.GroupMember(
|
||||
id=event.author.id,
|
||||
member_name=event.author.name,
|
||||
permission=platform_entities.Permission.Member,
|
||||
group=platform_entities.Group(
|
||||
id=event.channel.id,
|
||||
name=event.channel.name,
|
||||
permission=platform_entities.Permission.Member,
|
||||
),
|
||||
special_title='',
|
||||
join_timestamp=0,
|
||||
last_speak_timestamp=0,
|
||||
mute_time_remaining=0,
|
||||
),
|
||||
message_chain=message_chain,
|
||||
time=event.created_at.timestamp(),
|
||||
source_platform_object=event,
|
||||
)
|
||||
|
||||
|
||||
class DiscordAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter):
|
||||
bot: discord.Client = pydantic.Field(exclude=True)
|
||||
|
||||
message_converter: DiscordMessageConverter = DiscordMessageConverter()
|
||||
event_converter: DiscordEventConverter = DiscordEventConverter()
|
||||
|
||||
listeners: typing.Dict[
|
||||
typing.Type[platform_events.Event],
|
||||
typing.Callable[[platform_events.Event, abstract_platform_adapter.AbstractMessagePlatformAdapter], None],
|
||||
] = {}
|
||||
|
||||
def __init__(self, config: dict, logger: abstract_platform_logger.AbstractEventLogger):
|
||||
bot_account_id = config['client_id']
|
||||
|
||||
listeners = {}
|
||||
|
||||
adapter_self = self
|
||||
|
||||
class MyClient(discord.Client):
|
||||
async def on_message(self: discord.Client, message: discord.Message):
|
||||
if message.author.id == self.user.id or message.author.bot:
|
||||
return
|
||||
|
||||
lb_event = await adapter_self.event_converter.target2yiri(message)
|
||||
await adapter_self.listeners[type(lb_event)](lb_event, adapter_self)
|
||||
|
||||
intents = discord.Intents.default()
|
||||
intents.message_content = True
|
||||
|
||||
args = {}
|
||||
|
||||
if os.getenv('http_proxy'):
|
||||
args['proxy'] = os.getenv('http_proxy')
|
||||
|
||||
bot = MyClient(intents=intents, **args)
|
||||
|
||||
super().__init__(
|
||||
config=config,
|
||||
logger=logger,
|
||||
bot_account_id=bot_account_id,
|
||||
listeners=listeners,
|
||||
bot=bot,
|
||||
)
|
||||
|
||||
async def send_message(self, target_type: str, target_id: str, message: platform_message.MessageChain):
|
||||
pass
|
||||
|
||||
async def reply_message(
|
||||
self,
|
||||
message_source: platform_events.MessageEvent,
|
||||
message: platform_message.MessageChain,
|
||||
quote_origin: bool = False,
|
||||
):
|
||||
msg_to_send, image_files = await self.message_converter.yiri2target(message)
|
||||
assert isinstance(message_source.source_platform_object, discord.Message)
|
||||
|
||||
args = {
|
||||
'content': msg_to_send,
|
||||
}
|
||||
|
||||
if len(image_files) > 0:
|
||||
args['files'] = image_files
|
||||
|
||||
if quote_origin:
|
||||
args['reference'] = message_source.source_platform_object
|
||||
|
||||
if message.has(platform_message.At):
|
||||
args['mention_author'] = True
|
||||
|
||||
await message_source.source_platform_object.channel.send(**args)
|
||||
|
||||
async def is_muted(self, group_id: int) -> bool:
|
||||
return False
|
||||
|
||||
def register_listener(
|
||||
self,
|
||||
event_type: typing.Type[platform_events.Event],
|
||||
callback: typing.Callable[
|
||||
[platform_events.Event, abstract_platform_adapter.AbstractMessagePlatformAdapter], None
|
||||
],
|
||||
):
|
||||
self.listeners[event_type] = callback
|
||||
|
||||
def unregister_listener(
|
||||
self,
|
||||
event_type: typing.Type[platform_events.Event],
|
||||
callback: typing.Callable[
|
||||
[platform_events.Event, abstract_platform_adapter.AbstractMessagePlatformAdapter], None
|
||||
],
|
||||
):
|
||||
self.listeners.pop(event_type)
|
||||
|
||||
async def run_async(self):
|
||||
async with self.bot:
|
||||
await self.bot.start(self.config['token'], reconnect=True)
|
||||
|
||||
async def kill(self) -> bool:
|
||||
await self.bot.close()
|
||||
return True
|
||||
@@ -1,194 +0,0 @@
|
||||
# For connect to plugin runtime.
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
import typing
|
||||
import os
|
||||
import sys
|
||||
|
||||
from ..core import app
|
||||
from . import handler
|
||||
from ..utils import platform
|
||||
from langbot_plugin.runtime.io.controllers.stdio import client as stdio_client_controller
|
||||
from langbot_plugin.runtime.io.controllers.ws import client as ws_client_controller
|
||||
from langbot_plugin.api.entities import events
|
||||
from langbot_plugin.api.entities import context
|
||||
import langbot_plugin.runtime.io.connection as base_connection
|
||||
from langbot_plugin.api.definition.components.manifest import ComponentManifest
|
||||
from langbot_plugin.api.entities.builtin.command import context as command_context
|
||||
from langbot_plugin.runtime.plugin.mgr import PluginInstallSource
|
||||
from ..core import taskmgr
|
||||
|
||||
|
||||
class PluginRuntimeConnector:
|
||||
"""Plugin runtime connector"""
|
||||
|
||||
ap: app.Application
|
||||
|
||||
handler: handler.RuntimeConnectionHandler
|
||||
|
||||
handler_task: asyncio.Task
|
||||
|
||||
stdio_client_controller: stdio_client_controller.StdioClientController
|
||||
|
||||
ctrl: stdio_client_controller.StdioClientController | ws_client_controller.WebSocketClientController
|
||||
|
||||
runtime_disconnect_callback: typing.Callable[
|
||||
[PluginRuntimeConnector], typing.Coroutine[typing.Any, typing.Any, None]
|
||||
]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
ap: app.Application,
|
||||
runtime_disconnect_callback: typing.Callable[
|
||||
[PluginRuntimeConnector], typing.Coroutine[typing.Any, typing.Any, None]
|
||||
],
|
||||
):
|
||||
self.ap = ap
|
||||
self.runtime_disconnect_callback = runtime_disconnect_callback
|
||||
|
||||
async def initialize(self):
|
||||
async def new_connection_callback(connection: base_connection.Connection):
|
||||
async def disconnect_callback(rchandler: handler.RuntimeConnectionHandler) -> bool:
|
||||
if platform.get_platform() == 'docker':
|
||||
self.ap.logger.error('Disconnected from plugin runtime, trying to reconnect...')
|
||||
await self.runtime_disconnect_callback(self)
|
||||
return False
|
||||
else:
|
||||
self.ap.logger.error(
|
||||
'Disconnected from plugin runtime, cannot automatically reconnect while LangBot connects to plugin runtime via stdio, please restart LangBot.'
|
||||
)
|
||||
return False
|
||||
|
||||
self.handler = handler.RuntimeConnectionHandler(connection, disconnect_callback, self.ap)
|
||||
self.handler_task = asyncio.create_task(self.handler.run())
|
||||
_ = await self.handler.ping()
|
||||
self.ap.logger.info('Connected to plugin runtime.')
|
||||
await self.handler_task
|
||||
|
||||
task: asyncio.Task | None = None
|
||||
|
||||
if platform.get_platform() == 'docker': # use websocket
|
||||
self.ap.logger.info('use websocket to connect to plugin runtime')
|
||||
ws_url = self.ap.instance_config.data['plugin']['runtime_ws_url']
|
||||
|
||||
async def make_connection_failed_callback(ctrl: ws_client_controller.WebSocketClientController) -> None:
|
||||
self.ap.logger.error('Failed to connect to plugin runtime, trying to reconnect...')
|
||||
await self.runtime_disconnect_callback(self)
|
||||
|
||||
self.ctrl = ws_client_controller.WebSocketClientController(
|
||||
ws_url=ws_url,
|
||||
make_connection_failed_callback=make_connection_failed_callback,
|
||||
)
|
||||
task = self.ctrl.run(new_connection_callback)
|
||||
else: # stdio
|
||||
self.ap.logger.info('use stdio to connect to plugin runtime')
|
||||
# cmd: lbp rt -s
|
||||
python_path = sys.executable
|
||||
env = os.environ.copy()
|
||||
self.ctrl = stdio_client_controller.StdioClientController(
|
||||
command=python_path,
|
||||
args=['-m', 'langbot_plugin.cli.__init__', 'rt', '-s'],
|
||||
env=env,
|
||||
)
|
||||
task = self.ctrl.run(new_connection_callback)
|
||||
|
||||
asyncio.create_task(task)
|
||||
|
||||
async def initialize_plugins(self):
|
||||
pass
|
||||
|
||||
async def install_plugin(
|
||||
self,
|
||||
install_source: PluginInstallSource,
|
||||
install_info: dict[str, Any],
|
||||
task_context: taskmgr.TaskContext | None = None,
|
||||
):
|
||||
async for ret in self.handler.install_plugin(install_source.value, install_info):
|
||||
current_action = ret.get('current_action', None)
|
||||
if current_action is not None:
|
||||
if task_context is not None:
|
||||
task_context.set_current_action(current_action)
|
||||
|
||||
trace = ret.get('trace', None)
|
||||
if trace is not None:
|
||||
if task_context is not None:
|
||||
task_context.trace(trace)
|
||||
|
||||
async def upgrade_plugin(
|
||||
self, plugin_author: str, plugin_name: str, task_context: taskmgr.TaskContext | None = None
|
||||
) -> dict[str, Any]:
|
||||
async for ret in self.handler.upgrade_plugin(plugin_author, plugin_name):
|
||||
current_action = ret.get('current_action', None)
|
||||
if current_action is not None:
|
||||
if task_context is not None:
|
||||
task_context.set_current_action(current_action)
|
||||
|
||||
trace = ret.get('trace', None)
|
||||
if trace is not None:
|
||||
if task_context is not None:
|
||||
task_context.trace(trace)
|
||||
|
||||
async def delete_plugin(
|
||||
self, plugin_author: str, plugin_name: str, task_context: taskmgr.TaskContext | None = None
|
||||
) -> dict[str, Any]:
|
||||
async for ret in self.handler.delete_plugin(plugin_author, plugin_name):
|
||||
current_action = ret.get('current_action', None)
|
||||
if current_action is not None:
|
||||
if task_context is not None:
|
||||
task_context.set_current_action(current_action)
|
||||
|
||||
trace = ret.get('trace', None)
|
||||
if trace is not None:
|
||||
if task_context is not None:
|
||||
task_context.trace(trace)
|
||||
|
||||
async def list_plugins(self) -> list[dict[str, Any]]:
|
||||
return await self.handler.list_plugins()
|
||||
|
||||
async def get_plugin_info(self, author: str, plugin_name: str) -> dict[str, Any]:
|
||||
return await self.handler.get_plugin_info(author, plugin_name)
|
||||
|
||||
async def set_plugin_config(self, plugin_author: str, plugin_name: str, config: dict[str, Any]) -> dict[str, Any]:
|
||||
return await self.handler.set_plugin_config(plugin_author, plugin_name, config)
|
||||
|
||||
async def emit_event(
|
||||
self,
|
||||
event: events.BaseEventModel,
|
||||
) -> context.EventContext:
|
||||
event_ctx = context.EventContext.from_event(event)
|
||||
|
||||
event_ctx_result = await self.handler.emit_event(event_ctx.model_dump(serialize_as_any=True))
|
||||
|
||||
event_ctx = context.EventContext.model_validate(event_ctx_result['event_context'])
|
||||
|
||||
return event_ctx
|
||||
|
||||
async def list_tools(self) -> list[ComponentManifest]:
|
||||
list_tools_data = await self.handler.list_tools()
|
||||
|
||||
return [ComponentManifest.model_validate(tool) for tool in list_tools_data]
|
||||
|
||||
async def call_tool(self, tool_name: str, parameters: dict[str, Any]) -> dict[str, Any]:
|
||||
return await self.handler.call_tool(tool_name, parameters)
|
||||
|
||||
async def list_commands(self) -> list[ComponentManifest]:
|
||||
list_commands_data = await self.handler.list_commands()
|
||||
|
||||
return [ComponentManifest.model_validate(command) for command in list_commands_data]
|
||||
|
||||
async def execute_command(
|
||||
self, command_ctx: command_context.ExecuteContext
|
||||
) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
|
||||
gen = self.handler.execute_command(command_ctx.model_dump(serialize_as_any=True))
|
||||
|
||||
async for ret in gen:
|
||||
cmd_ret = command_context.CommandReturn.model_validate(ret)
|
||||
|
||||
yield cmd_ret
|
||||
|
||||
def dispose(self):
|
||||
if isinstance(self.ctrl, stdio_client_controller.StdioClientController):
|
||||
self.ap.logger.info('Terminating plugin runtime process...')
|
||||
self.ctrl.process.terminate()
|
||||
@@ -1,76 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import abc
|
||||
import typing
|
||||
|
||||
from ...core import app
|
||||
from ...entity.persistence import model as persistence_model
|
||||
import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
|
||||
from . import token
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
|
||||
|
||||
class RuntimeLLMModel:
|
||||
"""运行时模型"""
|
||||
|
||||
model_entity: persistence_model.LLMModel
|
||||
"""模型数据"""
|
||||
|
||||
token_mgr: token.TokenManager
|
||||
"""api key管理器"""
|
||||
|
||||
requester: LLMAPIRequester
|
||||
"""请求器实例"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model_entity: persistence_model.LLMModel,
|
||||
token_mgr: token.TokenManager,
|
||||
requester: LLMAPIRequester,
|
||||
):
|
||||
self.model_entity = model_entity
|
||||
self.token_mgr = token_mgr
|
||||
self.requester = requester
|
||||
|
||||
|
||||
class LLMAPIRequester(metaclass=abc.ABCMeta):
|
||||
"""LLM API请求器"""
|
||||
|
||||
name: str = None
|
||||
|
||||
ap: app.Application
|
||||
|
||||
default_config: dict[str, typing.Any] = {}
|
||||
|
||||
requester_cfg: dict[str, typing.Any] = {}
|
||||
|
||||
def __init__(self, ap: app.Application, config: dict[str, typing.Any]):
|
||||
self.ap = ap
|
||||
self.requester_cfg = {**self.default_config}
|
||||
self.requester_cfg.update(config)
|
||||
|
||||
async def initialize(self):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
async def invoke_llm(
|
||||
self,
|
||||
query: pipeline_query.Query,
|
||||
model: RuntimeLLMModel,
|
||||
messages: typing.List[provider_message.Message],
|
||||
funcs: typing.List[resource_tool.LLMTool] = None,
|
||||
extra_args: dict[str, typing.Any] = {},
|
||||
) -> provider_message.Message:
|
||||
"""调用API
|
||||
|
||||
Args:
|
||||
model (RuntimeLLMModel): 使用的模型信息
|
||||
messages (typing.List[llm_entities.Message]): 消息对象列表
|
||||
funcs (typing.List[tools_entities.LLMFunction], optional): 使用的工具函数列表. Defaults to None.
|
||||
extra_args (dict[str, typing.Any], optional): 额外的参数. Defaults to {}.
|
||||
|
||||
Returns:
|
||||
llm_entities.Message: 返回消息对象
|
||||
"""
|
||||
pass
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: 302-ai-chat-completions
|
||||
label:
|
||||
en_US: 302 AI
|
||||
zh_Hans: 302 AI
|
||||
icon: 302ai.png
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.302.ai/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./302aichatcmpl.py
|
||||
attr: AI302ChatCompletions
|
||||
@@ -1,178 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
import json
|
||||
import platform
|
||||
import socket
|
||||
import anthropic
|
||||
import httpx
|
||||
|
||||
from .. import errors, requester
|
||||
|
||||
from ....utils import image
|
||||
import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
|
||||
|
||||
class AnthropicMessages(requester.LLMAPIRequester):
|
||||
"""Anthropic Messages API 请求器"""
|
||||
|
||||
client: anthropic.AsyncAnthropic
|
||||
|
||||
default_config: dict[str, typing.Any] = {
|
||||
'base_url': 'https://api.anthropic.com/v1',
|
||||
'timeout': 120,
|
||||
}
|
||||
|
||||
async def initialize(self):
|
||||
# 兼容 Windows 缺失 TCP_KEEPINTVL 和 TCP_KEEPCNT 的问题
|
||||
if platform.system() == 'Windows':
|
||||
if not hasattr(socket, 'TCP_KEEPINTVL'):
|
||||
socket.TCP_KEEPINTVL = 0
|
||||
if not hasattr(socket, 'TCP_KEEPCNT'):
|
||||
socket.TCP_KEEPCNT = 0
|
||||
httpx_client = anthropic._base_client.AsyncHttpxClientWrapper(
|
||||
base_url=self.requester_cfg['base_url'],
|
||||
# cast to a valid type because mypy doesn't understand our type narrowing
|
||||
timeout=typing.cast(httpx.Timeout, self.requester_cfg['timeout']),
|
||||
limits=anthropic._constants.DEFAULT_CONNECTION_LIMITS,
|
||||
follow_redirects=True,
|
||||
trust_env=True,
|
||||
)
|
||||
|
||||
self.client = anthropic.AsyncAnthropic(
|
||||
api_key='',
|
||||
http_client=httpx_client,
|
||||
)
|
||||
|
||||
async def invoke_llm(
|
||||
self,
|
||||
query: pipeline_query.Query,
|
||||
model: requester.RuntimeLLMModel,
|
||||
messages: typing.List[provider_message.Message],
|
||||
funcs: typing.List[resource_tool.LLMTool] = None,
|
||||
extra_args: dict[str, typing.Any] = {},
|
||||
) -> provider_message.Message:
|
||||
self.client.api_key = model.token_mgr.get_token()
|
||||
|
||||
args = extra_args.copy()
|
||||
args['model'] = model.model_entity.name
|
||||
|
||||
# 处理消息
|
||||
|
||||
# system
|
||||
system_role_message = None
|
||||
|
||||
for i, m in enumerate(messages):
|
||||
if m.role == 'system':
|
||||
system_role_message = m
|
||||
|
||||
break
|
||||
|
||||
if system_role_message:
|
||||
messages.pop(i)
|
||||
|
||||
if isinstance(system_role_message, provider_message.Message) and isinstance(system_role_message.content, str):
|
||||
args['system'] = system_role_message.content
|
||||
|
||||
req_messages = []
|
||||
|
||||
for m in messages:
|
||||
if m.role == 'tool':
|
||||
tool_call_id = m.tool_call_id
|
||||
|
||||
req_messages.append(
|
||||
{
|
||||
'role': 'user',
|
||||
'content': [
|
||||
{
|
||||
'type': 'tool_result',
|
||||
'tool_use_id': tool_call_id,
|
||||
'content': m.content,
|
||||
}
|
||||
],
|
||||
}
|
||||
)
|
||||
|
||||
continue
|
||||
|
||||
msg_dict = m.dict(exclude_none=True)
|
||||
|
||||
if isinstance(m.content, str) and m.content.strip() != '':
|
||||
msg_dict['content'] = [{'type': 'text', 'text': m.content}]
|
||||
elif isinstance(m.content, list):
|
||||
for i, ce in enumerate(m.content):
|
||||
if ce.type == 'image_base64':
|
||||
image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
|
||||
|
||||
alter_image_ele = {
|
||||
'type': 'image',
|
||||
'source': {
|
||||
'type': 'base64',
|
||||
'media_type': f'image/{image_format}',
|
||||
'data': image_b64,
|
||||
},
|
||||
}
|
||||
msg_dict['content'][i] = alter_image_ele
|
||||
|
||||
if m.tool_calls:
|
||||
for tool_call in m.tool_calls:
|
||||
msg_dict['content'].append(
|
||||
{
|
||||
'type': 'tool_use',
|
||||
'id': tool_call.id,
|
||||
'name': tool_call.function.name,
|
||||
'input': json.loads(tool_call.function.arguments),
|
||||
}
|
||||
)
|
||||
|
||||
del msg_dict['tool_calls']
|
||||
|
||||
req_messages.append(msg_dict)
|
||||
|
||||
args['messages'] = req_messages
|
||||
|
||||
if funcs:
|
||||
tools = await self.ap.tool_mgr.generate_tools_for_anthropic(funcs)
|
||||
|
||||
if tools:
|
||||
args['tools'] = tools
|
||||
|
||||
try:
|
||||
# print(json.dumps(args, indent=4, ensure_ascii=False))
|
||||
resp = await self.client.messages.create(**args)
|
||||
|
||||
args = {
|
||||
'content': '',
|
||||
'role': resp.role,
|
||||
}
|
||||
|
||||
assert type(resp) is anthropic.types.message.Message
|
||||
|
||||
for block in resp.content:
|
||||
if block.type == 'thinking':
|
||||
args['content'] = '<think>' + block.thinking + '</think>\n' + args['content']
|
||||
elif block.type == 'text':
|
||||
args['content'] += block.text
|
||||
elif block.type == 'tool_use':
|
||||
assert type(block) is anthropic.types.tool_use_block.ToolUseBlock
|
||||
tool_call = provider_message.ToolCall(
|
||||
id=block.id,
|
||||
type='function',
|
||||
function=provider_message.FunctionCall(name=block.name, arguments=json.dumps(block.input)),
|
||||
)
|
||||
if 'tool_calls' not in args:
|
||||
args['tool_calls'] = []
|
||||
args['tool_calls'].append(tool_call)
|
||||
|
||||
return provider_message.Message(**args)
|
||||
except anthropic.AuthenticationError as e:
|
||||
raise errors.RequesterError(f'api-key 无效: {e.message}')
|
||||
except anthropic.BadRequestError as e:
|
||||
raise errors.RequesterError(str(e.message))
|
||||
except anthropic.NotFoundError as e:
|
||||
if 'model: ' in str(e):
|
||||
raise errors.RequesterError(f'模型无效: {e.message}')
|
||||
else:
|
||||
raise errors.RequesterError(f'请求地址无效: {e.message}')
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: anthropic-messages
|
||||
label:
|
||||
en_US: Anthropic
|
||||
zh_Hans: Anthropic
|
||||
icon: anthropic.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.anthropic.com/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./anthropicmsgs.py
|
||||
attr: AnthropicMessages
|
||||
@@ -1,17 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
import openai
|
||||
|
||||
from . import modelscopechatcmpl
|
||||
|
||||
|
||||
class BailianChatCompletions(modelscopechatcmpl.ModelScopeChatCompletions):
|
||||
"""阿里云百炼大模型平台 ChatCompletion API 请求器"""
|
||||
|
||||
client: openai.AsyncClient
|
||||
|
||||
default_config: dict[str, typing.Any] = {
|
||||
'base_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
|
||||
'timeout': 120,
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: bailian-chat-completions
|
||||
label:
|
||||
en_US: Aliyun Bailian
|
||||
zh_Hans: 阿里云百炼
|
||||
icon: bailian.png
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./bailianchatcmpl.py
|
||||
attr: BailianChatCompletions
|
||||
@@ -1,143 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import typing
|
||||
|
||||
import openai
|
||||
import openai.types.chat.chat_completion as chat_completion
|
||||
import httpx
|
||||
|
||||
from .. import errors, requester
|
||||
import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
|
||||
|
||||
class OpenAIChatCompletions(requester.LLMAPIRequester):
|
||||
"""OpenAI ChatCompletion API 请求器"""
|
||||
|
||||
client: openai.AsyncClient
|
||||
|
||||
default_config: dict[str, typing.Any] = {
|
||||
'base_url': 'https://api.openai.com/v1',
|
||||
'timeout': 120,
|
||||
}
|
||||
|
||||
async def initialize(self):
|
||||
self.client = openai.AsyncClient(
|
||||
api_key='',
|
||||
base_url=self.requester_cfg['base_url'].replace(' ', ''),
|
||||
timeout=self.requester_cfg['timeout'],
|
||||
http_client=httpx.AsyncClient(trust_env=True, timeout=self.requester_cfg['timeout']),
|
||||
)
|
||||
|
||||
async def _req(
|
||||
self,
|
||||
args: dict,
|
||||
extra_body: dict = {},
|
||||
) -> chat_completion.ChatCompletion:
|
||||
return await self.client.chat.completions.create(**args, extra_body=extra_body)
|
||||
|
||||
async def _make_msg(
|
||||
self,
|
||||
chat_completion: chat_completion.ChatCompletion,
|
||||
) -> provider_message.Message:
|
||||
chatcmpl_message = chat_completion.choices[0].message.model_dump()
|
||||
|
||||
# 确保 role 字段存在且不为 None
|
||||
if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
|
||||
chatcmpl_message['role'] = 'assistant'
|
||||
|
||||
reasoning_content = chatcmpl_message['reasoning_content'] if 'reasoning_content' in chatcmpl_message else None
|
||||
|
||||
# deepseek的reasoner模型
|
||||
if reasoning_content is not None:
|
||||
chatcmpl_message['content'] = '<think>\n' + reasoning_content + '\n</think>\n' + chatcmpl_message['content']
|
||||
|
||||
message = provider_message.Message(**chatcmpl_message)
|
||||
|
||||
return message
|
||||
|
||||
async def _closure(
|
||||
self,
|
||||
query: pipeline_query.Query,
|
||||
req_messages: list[dict],
|
||||
use_model: requester.RuntimeLLMModel,
|
||||
use_funcs: list[resource_tool.LLMTool] = None,
|
||||
extra_args: dict[str, typing.Any] = {},
|
||||
) -> provider_message.Message:
|
||||
self.client.api_key = use_model.token_mgr.get_token()
|
||||
|
||||
args = {}
|
||||
args['model'] = use_model.model_entity.name
|
||||
|
||||
if use_funcs:
|
||||
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
|
||||
|
||||
if tools:
|
||||
args['tools'] = tools
|
||||
|
||||
# 设置此次请求中的messages
|
||||
messages = req_messages.copy()
|
||||
|
||||
# 检查vision
|
||||
for msg in messages:
|
||||
if 'content' in msg and isinstance(msg['content'], list):
|
||||
for me in msg['content']:
|
||||
if me['type'] == 'image_base64':
|
||||
me['image_url'] = {'url': me['image_base64']}
|
||||
me['type'] = 'image_url'
|
||||
del me['image_base64']
|
||||
|
||||
args['messages'] = messages
|
||||
|
||||
# 发送请求
|
||||
resp = await self._req(args, extra_body=extra_args)
|
||||
|
||||
# 处理请求结果
|
||||
message = await self._make_msg(resp)
|
||||
|
||||
return message
|
||||
|
||||
async def invoke_llm(
|
||||
self,
|
||||
query: pipeline_query.Query,
|
||||
model: requester.RuntimeLLMModel,
|
||||
messages: typing.List[provider_message.Message],
|
||||
funcs: typing.List[resource_tool.LLMTool] = None,
|
||||
extra_args: dict[str, typing.Any] = {},
|
||||
) -> provider_message.Message:
|
||||
req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
|
||||
for m in messages:
|
||||
msg_dict = m.dict(exclude_none=True)
|
||||
content = msg_dict.get('content')
|
||||
if isinstance(content, list):
|
||||
# 检查 content 列表中是否每个部分都是文本
|
||||
if all(isinstance(part, dict) and part.get('type') == 'text' for part in content):
|
||||
# 将所有文本部分合并为一个字符串
|
||||
msg_dict['content'] = '\n'.join(part['text'] for part in content)
|
||||
req_messages.append(msg_dict)
|
||||
|
||||
try:
|
||||
return await self._closure(
|
||||
query=query,
|
||||
req_messages=req_messages,
|
||||
use_model=model,
|
||||
use_funcs=funcs,
|
||||
extra_args=extra_args,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
raise errors.RequesterError('请求超时')
|
||||
except openai.BadRequestError as e:
|
||||
if 'context_length_exceeded' in e.message:
|
||||
raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
|
||||
else:
|
||||
raise errors.RequesterError(f'请求参数错误: {e.message}')
|
||||
except openai.AuthenticationError as e:
|
||||
raise errors.RequesterError(f'无效的 api-key: {e.message}')
|
||||
except openai.NotFoundError as e:
|
||||
raise errors.RequesterError(f'请求路径错误: {e.message}')
|
||||
except openai.RateLimitError as e:
|
||||
raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
|
||||
except openai.APIError as e:
|
||||
raise errors.RequesterError(f'请求错误: {e.message}')
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: openai-chat-completions
|
||||
label:
|
||||
en_US: OpenAI
|
||||
zh_Hans: OpenAI
|
||||
icon: openai.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.openai.com/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./chatcmpl.py
|
||||
attr: OpenAIChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: deepseek-chat-completions
|
||||
label:
|
||||
en_US: DeepSeek
|
||||
zh_Hans: 深度求索
|
||||
icon: deepseek.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.deepseek.com"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./deepseekchatcmpl.py
|
||||
attr: DeepseekChatCompletions
|
||||
@@ -1,14 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
from . import chatcmpl
|
||||
|
||||
|
||||
class GeminiChatCompletions(chatcmpl.OpenAIChatCompletions):
|
||||
"""Google Gemini API 请求器"""
|
||||
|
||||
default_config: dict[str, typing.Any] = {
|
||||
'base_url': 'https://generativelanguage.googleapis.com/v1beta/openai',
|
||||
'timeout': 120,
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: gemini-chat-completions
|
||||
label:
|
||||
en_US: Google Gemini
|
||||
zh_Hans: Google Gemini
|
||||
icon: gemini.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://generativelanguage.googleapis.com/v1beta/openai"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./geminichatcmpl.py
|
||||
attr: GeminiChatCompletions
|
||||
@@ -1,51 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
import typing
|
||||
|
||||
from . import chatcmpl
|
||||
from .. import requester
|
||||
import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
|
||||
|
||||
class GiteeAIChatCompletions(chatcmpl.OpenAIChatCompletions):
|
||||
"""Gitee AI ChatCompletions API 请求器"""
|
||||
|
||||
default_config: dict[str, typing.Any] = {
|
||||
'base_url': 'https://ai.gitee.com/v1',
|
||||
'timeout': 120,
|
||||
}
|
||||
|
||||
async def _closure(
|
||||
self,
|
||||
query: pipeline_query.Query,
|
||||
req_messages: list[dict],
|
||||
use_model: requester.RuntimeLLMModel,
|
||||
use_funcs: list[resource_tool.LLMTool] = None,
|
||||
extra_args: dict[str, typing.Any] = {},
|
||||
) -> provider_message.Message:
|
||||
self.client.api_key = use_model.token_mgr.get_token()
|
||||
|
||||
args = {}
|
||||
args['model'] = use_model.model_entity.name
|
||||
|
||||
if use_funcs:
|
||||
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
|
||||
|
||||
if tools:
|
||||
args['tools'] = tools
|
||||
|
||||
# gitee 不支持多模态,把content都转换成纯文字
|
||||
for m in req_messages:
|
||||
if 'content' in m and isinstance(m['content'], list):
|
||||
m['content'] = ' '.join([c['text'] for c in m['content']])
|
||||
|
||||
args['messages'] = req_messages
|
||||
|
||||
resp = await self._req(args, extra_body=extra_args)
|
||||
|
||||
message = await self._make_msg(resp)
|
||||
|
||||
return message
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: gitee-ai-chat-completions
|
||||
label:
|
||||
en_US: Gitee AI
|
||||
zh_Hans: Gitee AI
|
||||
icon: giteeai.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://ai.gitee.com/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./giteeaichatcmpl.py
|
||||
attr: GiteeAIChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: lmstudio-chat-completions
|
||||
label:
|
||||
en_US: LM Studio
|
||||
zh_Hans: LM Studio
|
||||
icon: lmstudio.webp
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "http://127.0.0.1:1234/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./lmstudiochatcmpl.py
|
||||
attr: LmStudioChatCompletions
|
||||
@@ -1,204 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import typing
|
||||
|
||||
import openai
|
||||
import openai.types.chat.chat_completion as chat_completion
|
||||
import openai.types.chat.chat_completion_message_tool_call as chat_completion_message_tool_call
|
||||
import httpx
|
||||
|
||||
from .. import entities, errors, requester
|
||||
import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
|
||||
|
||||
class ModelScopeChatCompletions(requester.LLMAPIRequester):
|
||||
"""ModelScope ChatCompletion API 请求器"""
|
||||
|
||||
client: openai.AsyncClient
|
||||
|
||||
default_config: dict[str, typing.Any] = {
|
||||
'base_url': 'https://api-inference.modelscope.cn/v1',
|
||||
'timeout': 120,
|
||||
}
|
||||
|
||||
async def initialize(self):
|
||||
self.client = openai.AsyncClient(
|
||||
api_key='',
|
||||
base_url=self.requester_cfg['base_url'],
|
||||
timeout=self.requester_cfg['timeout'],
|
||||
http_client=httpx.AsyncClient(trust_env=True, timeout=self.requester_cfg['timeout']),
|
||||
)
|
||||
|
||||
async def _req(
|
||||
self,
|
||||
args: dict,
|
||||
extra_body: dict = {},
|
||||
) -> chat_completion.ChatCompletion:
|
||||
args['stream'] = True
|
||||
|
||||
chunk = None
|
||||
|
||||
pending_content = ''
|
||||
|
||||
tool_calls = []
|
||||
|
||||
resp_gen: openai.AsyncStream = await self.client.chat.completions.create(**args, extra_body=extra_body)
|
||||
|
||||
async for chunk in resp_gen:
|
||||
# print(chunk)
|
||||
if not chunk or not chunk.id or not chunk.choices or not chunk.choices[0] or not chunk.choices[0].delta:
|
||||
continue
|
||||
|
||||
if chunk.choices[0].delta.content is not None:
|
||||
pending_content += chunk.choices[0].delta.content
|
||||
|
||||
if chunk.choices[0].delta.tool_calls is not None:
|
||||
for tool_call in chunk.choices[0].delta.tool_calls:
|
||||
if tool_call.function.arguments is None:
|
||||
continue
|
||||
for tc in tool_calls:
|
||||
if tc.index == tool_call.index:
|
||||
tc.function.arguments += tool_call.function.arguments
|
||||
break
|
||||
else:
|
||||
tool_calls.append(tool_call)
|
||||
|
||||
if chunk.choices[0].finish_reason is not None:
|
||||
break
|
||||
|
||||
real_tool_calls = []
|
||||
|
||||
for tc in tool_calls:
|
||||
function = chat_completion_message_tool_call.Function(
|
||||
name=tc.function.name, arguments=tc.function.arguments
|
||||
)
|
||||
real_tool_calls.append(
|
||||
chat_completion_message_tool_call.ChatCompletionMessageToolCall(
|
||||
id=tc.id, function=function, type='function'
|
||||
)
|
||||
)
|
||||
|
||||
return (
|
||||
chat_completion.ChatCompletion(
|
||||
id=chunk.id,
|
||||
object='chat.completion',
|
||||
created=chunk.created,
|
||||
choices=[
|
||||
chat_completion.Choice(
|
||||
index=0,
|
||||
message=chat_completion.ChatCompletionMessage(
|
||||
role='assistant',
|
||||
content=pending_content,
|
||||
tool_calls=real_tool_calls if len(real_tool_calls) > 0 else None,
|
||||
),
|
||||
finish_reason=chunk.choices[0].finish_reason
|
||||
if hasattr(chunk.choices[0], 'finish_reason') and chunk.choices[0].finish_reason is not None
|
||||
else 'stop',
|
||||
logprobs=chunk.choices[0].logprobs,
|
||||
)
|
||||
],
|
||||
model=chunk.model,
|
||||
service_tier=chunk.service_tier if hasattr(chunk, 'service_tier') else None,
|
||||
system_fingerprint=chunk.system_fingerprint if hasattr(chunk, 'system_fingerprint') else None,
|
||||
usage=chunk.usage if hasattr(chunk, 'usage') else None,
|
||||
)
|
||||
if chunk
|
||||
else None
|
||||
)
|
||||
|
||||
async def _make_msg(
|
||||
self,
|
||||
chat_completion: chat_completion.ChatCompletion,
|
||||
) -> provider_message.Message:
|
||||
chatcmpl_message = chat_completion.choices[0].message.dict()
|
||||
|
||||
# 确保 role 字段存在且不为 None
|
||||
if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
|
||||
chatcmpl_message['role'] = 'assistant'
|
||||
|
||||
message = provider_message.Message(**chatcmpl_message)
|
||||
|
||||
return message
|
||||
|
||||
async def _closure(
|
||||
self,
|
||||
query: pipeline_query.Query,
|
||||
req_messages: list[dict],
|
||||
use_model: requester.RuntimeLLMModel,
|
||||
use_funcs: list[resource_tool.LLMTool] = None,
|
||||
extra_args: dict[str, typing.Any] = {},
|
||||
) -> provider_message.Message:
|
||||
self.client.api_key = use_model.token_mgr.get_token()
|
||||
|
||||
args = {}
|
||||
args['model'] = use_model.model_entity.name
|
||||
|
||||
if use_funcs:
|
||||
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
|
||||
|
||||
if tools:
|
||||
args['tools'] = tools
|
||||
|
||||
# 设置此次请求中的messages
|
||||
messages = req_messages.copy()
|
||||
|
||||
# 检查vision
|
||||
for msg in messages:
|
||||
if 'content' in msg and isinstance(msg['content'], list):
|
||||
for me in msg['content']:
|
||||
if me['type'] == 'image_base64':
|
||||
me['image_url'] = {'url': me['image_base64']}
|
||||
me['type'] = 'image_url'
|
||||
del me['image_base64']
|
||||
|
||||
args['messages'] = messages
|
||||
|
||||
# 发送请求
|
||||
resp = await self._req(args, extra_body=extra_args)
|
||||
|
||||
# 处理请求结果
|
||||
message = await self._make_msg(resp)
|
||||
|
||||
return message
|
||||
|
||||
async def invoke_llm(
|
||||
self,
|
||||
query: pipeline_query.Query,
|
||||
model: entities.LLMModelInfo,
|
||||
messages: typing.List[provider_message.Message],
|
||||
funcs: typing.List[resource_tool.LLMTool] = None,
|
||||
extra_args: dict[str, typing.Any] = {},
|
||||
) -> provider_message.Message:
|
||||
req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
|
||||
for m in messages:
|
||||
msg_dict = m.dict(exclude_none=True)
|
||||
content = msg_dict.get('content')
|
||||
if isinstance(content, list):
|
||||
# 检查 content 列表中是否每个部分都是文本
|
||||
if all(isinstance(part, dict) and part.get('type') == 'text' for part in content):
|
||||
# 将所有文本部分合并为一个字符串
|
||||
msg_dict['content'] = '\n'.join(part['text'] for part in content)
|
||||
req_messages.append(msg_dict)
|
||||
|
||||
try:
|
||||
return await self._closure(
|
||||
query=query, req_messages=req_messages, use_model=model, use_funcs=funcs, extra_args=extra_args
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
raise errors.RequesterError('请求超时')
|
||||
except openai.BadRequestError as e:
|
||||
if 'context_length_exceeded' in e.message:
|
||||
raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
|
||||
else:
|
||||
raise errors.RequesterError(f'请求参数错误: {e.message}')
|
||||
except openai.AuthenticationError as e:
|
||||
raise errors.RequesterError(f'无效的 api-key: {e.message}')
|
||||
except openai.NotFoundError as e:
|
||||
raise errors.RequesterError(f'请求路径错误: {e.message}')
|
||||
except openai.RateLimitError as e:
|
||||
raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
|
||||
except openai.APIError as e:
|
||||
raise errors.RequesterError(f'请求错误: {e.message}')
|
||||
@@ -1,35 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: modelscope-chat-completions
|
||||
label:
|
||||
en_US: ModelScope
|
||||
zh_Hans: 魔搭社区
|
||||
icon: modelscope.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api-inference.modelscope.cn/v1"
|
||||
- name: args
|
||||
label:
|
||||
en_US: Args
|
||||
zh_Hans: 附加参数
|
||||
type: object
|
||||
required: true
|
||||
default: {}
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: int
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./modelscopechatcmpl.py
|
||||
attr: ModelScopeChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: moonshot-chat-completions
|
||||
label:
|
||||
en_US: Moonshot
|
||||
zh_Hans: 月之暗面
|
||||
icon: moonshot.png
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.moonshot.com/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./moonshotchatcmpl.py
|
||||
attr: MoonshotChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: ollama-chat
|
||||
label:
|
||||
en_US: Ollama
|
||||
zh_Hans: Ollama
|
||||
icon: ollama.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "http://127.0.0.1:11434"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./ollamachat.py
|
||||
attr: OllamaChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: openrouter-chat-completions
|
||||
label:
|
||||
en_US: OpenRouter
|
||||
zh_Hans: OpenRouter
|
||||
icon: openrouter.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://openrouter.ai/api/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./openrouterchatcmpl.py
|
||||
attr: OpenRouterChatCompletions
|
||||
@@ -1,35 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: ppio-chat-completions
|
||||
label:
|
||||
en_US: ppio
|
||||
zh_Hans: 派欧云
|
||||
icon: ppio.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.ppinfra.com/v3/openai"
|
||||
- name: args
|
||||
label:
|
||||
en_US: Args
|
||||
zh_Hans: 附加参数
|
||||
type: object
|
||||
required: true
|
||||
default: {}
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: int
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./ppiochatcmpl.py
|
||||
attr: PPIOChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: siliconflow-chat-completions
|
||||
label:
|
||||
en_US: SiliconFlow
|
||||
zh_Hans: 硅基流动
|
||||
icon: siliconflow.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.siliconflow.cn/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./siliconflowchatcmpl.py
|
||||
attr: SiliconFlowChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: volcark-chat-completions
|
||||
label:
|
||||
en_US: Volc Engine Ark
|
||||
zh_Hans: 火山方舟
|
||||
icon: volcark.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://ark.cn-beijing.volces.com/api/v3"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./volcarkchatcmpl.py
|
||||
attr: VolcArkChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: xai-chat-completions
|
||||
label:
|
||||
en_US: xAI
|
||||
zh_Hans: xAI
|
||||
icon: xai.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://api.x.ai/v1"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./xaichatcmpl.py
|
||||
attr: XaiChatCompletions
|
||||
@@ -1,28 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: LLMAPIRequester
|
||||
metadata:
|
||||
name: zhipuai-chat-completions
|
||||
label:
|
||||
en_US: ZhipuAI
|
||||
zh_Hans: 智谱 AI
|
||||
icon: zhipuai.svg
|
||||
spec:
|
||||
config:
|
||||
- name: base_url
|
||||
label:
|
||||
en_US: Base URL
|
||||
zh_Hans: 基础 URL
|
||||
type: string
|
||||
required: true
|
||||
default: "https://open.bigmodel.cn/api/paas/v4"
|
||||
- name: timeout
|
||||
label:
|
||||
en_US: Timeout
|
||||
zh_Hans: 超时时间
|
||||
type: integer
|
||||
required: true
|
||||
default: 120
|
||||
execution:
|
||||
python:
|
||||
path: ./zhipuaichatcmpl.py
|
||||
attr: ZhipuAIChatCompletions
|
||||
@@ -1,227 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
import re
|
||||
|
||||
import dashscope
|
||||
|
||||
from .. import runner
|
||||
from ...core import app
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
|
||||
|
||||
class DashscopeAPIError(Exception):
|
||||
"""Dashscope API 请求失败"""
|
||||
|
||||
def __init__(self, message: str):
|
||||
self.message = message
|
||||
super().__init__(self.message)
|
||||
|
||||
|
||||
@runner.runner_class('dashscope-app-api')
|
||||
class DashScopeAPIRunner(runner.RequestRunner):
|
||||
"阿里云百炼DashsscopeAPI对话请求器"
|
||||
|
||||
# 运行器内部使用的配置
|
||||
app_type: str # 应用类型
|
||||
app_id: str # 应用ID
|
||||
api_key: str # API Key
|
||||
references_quote: (
|
||||
str # 引用资料提示(当展示回答来源功能开启时,这个变量会作为引用资料名前的提示,可在provider.json中配置)
|
||||
)
|
||||
|
||||
def __init__(self, ap: app.Application, pipeline_config: dict):
|
||||
"""初始化"""
|
||||
self.ap = ap
|
||||
self.pipeline_config = pipeline_config
|
||||
|
||||
valid_app_types = ['agent', 'workflow']
|
||||
self.app_type = self.pipeline_config['ai']['dashscope-app-api']['app-type']
|
||||
# 检查配置文件中使用的应用类型是否支持
|
||||
if self.app_type not in valid_app_types:
|
||||
raise DashscopeAPIError(f'不支持的 Dashscope 应用类型: {self.app_type}')
|
||||
|
||||
# 初始化Dashscope 参数配置
|
||||
self.app_id = self.pipeline_config['ai']['dashscope-app-api']['app-id']
|
||||
self.api_key = self.pipeline_config['ai']['dashscope-app-api']['api-key']
|
||||
self.references_quote = self.pipeline_config['ai']['dashscope-app-api']['references_quote']
|
||||
|
||||
def _replace_references(self, text, references_dict):
|
||||
"""阿里云百炼平台的自定义应用支持资料引用,此函数可以将引用标签替换为参考资料"""
|
||||
|
||||
# 匹配 <ref>[index_id]</ref> 形式的字符串
|
||||
pattern = re.compile(r'<ref>\[(.*?)\]</ref>')
|
||||
|
||||
def replacement(match):
|
||||
# 获取引用编号
|
||||
ref_key = match.group(1)
|
||||
if ref_key in references_dict:
|
||||
# 如果有对应的参考资料按照provider.json中的reference_quote返回提示,来自哪个参考资料文件
|
||||
return f'({self.references_quote} {references_dict[ref_key]})'
|
||||
else:
|
||||
# 如果没有对应的参考资料,保留原样
|
||||
return match.group(0)
|
||||
|
||||
# 使用 re.sub() 进行替换
|
||||
return pattern.sub(replacement, text)
|
||||
|
||||
async def _preprocess_user_message(self, query: pipeline_query.Query) -> tuple[str, list[str]]:
|
||||
"""预处理用户消息,提取纯文本,阿里云提供的上传文件方法过于复杂,暂不支持上传文件(包括图片)"""
|
||||
plain_text = ''
|
||||
image_ids = []
|
||||
if isinstance(query.user_message.content, list):
|
||||
for ce in query.user_message.content:
|
||||
if ce.type == 'text':
|
||||
plain_text += ce.text
|
||||
# 暂时不支持上传图片,保留代码以便后续扩展
|
||||
# elif ce.type == "image_base64":
|
||||
# image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
|
||||
# file_bytes = base64.b64decode(image_b64)
|
||||
# file = ("img.png", file_bytes, f"image/{image_format}")
|
||||
# file_upload_resp = await self.dify_client.upload_file(
|
||||
# file,
|
||||
# f"{query.session.launcher_type.value}_{query.session.launcher_id}",
|
||||
# )
|
||||
# image_id = file_upload_resp["id"]
|
||||
# image_ids.append(image_id)
|
||||
elif isinstance(query.user_message.content, str):
|
||||
plain_text = query.user_message.content
|
||||
|
||||
return plain_text, image_ids
|
||||
|
||||
async def _agent_messages(
|
||||
self, query: pipeline_query.Query
|
||||
) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""Dashscope 智能体对话请求"""
|
||||
|
||||
# 局部变量
|
||||
chunk = None # 流式传输的块
|
||||
pending_content = '' # 待处理的Agent输出内容
|
||||
references_dict = {} # 用于存储引用编号和对应的参考资料
|
||||
plain_text = '' # 用户输入的纯文本信息
|
||||
image_ids = [] # 用户输入的图片ID列表 (暂不支持)
|
||||
|
||||
plain_text, image_ids = await self._preprocess_user_message(query)
|
||||
|
||||
# 发送对话请求
|
||||
response = dashscope.Application.call(
|
||||
api_key=self.api_key, # 智能体应用的API Key
|
||||
app_id=self.app_id, # 智能体应用的ID
|
||||
prompt=plain_text, # 用户输入的文本信息
|
||||
stream=True, # 流式输出
|
||||
incremental_output=True, # 增量输出,使用流式输出需要开启增量输出
|
||||
session_id=query.session.using_conversation.uuid, # 会话ID用于,多轮对话
|
||||
# rag_options={ # 主要用于文件交互,暂不支持
|
||||
# "session_file_ids": ["FILE_ID1"], # FILE_ID1 替换为实际的临时文件ID,逗号隔开多个
|
||||
# }
|
||||
)
|
||||
|
||||
for chunk in response:
|
||||
if chunk.get('status_code') != 200:
|
||||
raise DashscopeAPIError(
|
||||
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
|
||||
)
|
||||
if not chunk:
|
||||
continue
|
||||
|
||||
# 获取流式传输的output
|
||||
stream_output = chunk.get('output', {})
|
||||
if stream_output.get('text') is not None:
|
||||
pending_content += stream_output.get('text')
|
||||
|
||||
# 保存当前会话的session_id用于下次对话的语境
|
||||
query.session.using_conversation.uuid = stream_output.get('session_id')
|
||||
|
||||
# 获取模型传出的参考资料列表
|
||||
references_dict_list = stream_output.get('doc_references', [])
|
||||
|
||||
# 从模型传出的参考资料信息中提取用于替换的字典
|
||||
if references_dict_list is not None:
|
||||
for doc in references_dict_list:
|
||||
if doc.get('index_id') is not None:
|
||||
references_dict[doc.get('index_id')] = doc.get('doc_name')
|
||||
|
||||
# 将参考资料替换到文本中
|
||||
pending_content = self._replace_references(pending_content, references_dict)
|
||||
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=pending_content,
|
||||
)
|
||||
|
||||
async def _workflow_messages(
|
||||
self, query: pipeline_query.Query
|
||||
) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""Dashscope 工作流对话请求"""
|
||||
|
||||
# 局部变量
|
||||
chunk = None # 流式传输的块
|
||||
pending_content = '' # 待处理的Agent输出内容
|
||||
references_dict = {} # 用于存储引用编号和对应的参考资料
|
||||
plain_text = '' # 用户输入的纯文本信息
|
||||
image_ids = [] # 用户输入的图片ID列表 (暂不支持)
|
||||
|
||||
plain_text, image_ids = await self._preprocess_user_message(query)
|
||||
|
||||
biz_params = {}
|
||||
biz_params.update(query.variables)
|
||||
|
||||
# 发送对话请求
|
||||
response = dashscope.Application.call(
|
||||
api_key=self.api_key, # 智能体应用的API Key
|
||||
app_id=self.app_id, # 智能体应用的ID
|
||||
prompt=plain_text, # 用户输入的文本信息
|
||||
stream=True, # 流式输出
|
||||
incremental_output=True, # 增量输出,使用流式输出需要开启增量输出
|
||||
session_id=query.session.using_conversation.uuid, # 会话ID用于,多轮对话
|
||||
biz_params=biz_params, # 工作流应用的自定义输入参数传递
|
||||
# rag_options={ # 主要用于文件交互,暂不支持
|
||||
# "session_file_ids": ["FILE_ID1"], # FILE_ID1 替换为实际的临时文件ID,逗号隔开多个
|
||||
# }
|
||||
)
|
||||
|
||||
# 处理API返回的流式输出
|
||||
for chunk in response:
|
||||
if chunk.get('status_code') != 200:
|
||||
raise DashscopeAPIError(
|
||||
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
|
||||
)
|
||||
if not chunk:
|
||||
continue
|
||||
|
||||
# 获取流式传输的output
|
||||
stream_output = chunk.get('output', {})
|
||||
if stream_output.get('text') is not None:
|
||||
pending_content += stream_output.get('text')
|
||||
|
||||
# 保存当前会话的session_id用于下次对话的语境
|
||||
query.session.using_conversation.uuid = stream_output.get('session_id')
|
||||
|
||||
# 获取模型传出的参考资料列表
|
||||
references_dict_list = stream_output.get('doc_references', [])
|
||||
|
||||
# 从模型传出的参考资料信息中提取用于替换的字典
|
||||
if references_dict_list is not None:
|
||||
for doc in references_dict_list:
|
||||
if doc.get('index_id') is not None:
|
||||
references_dict[doc.get('index_id')] = doc.get('doc_name')
|
||||
|
||||
# 将参考资料替换到文本中
|
||||
pending_content = self._replace_references(pending_content, references_dict)
|
||||
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=pending_content,
|
||||
)
|
||||
|
||||
async def run(self, query: pipeline_query.Query) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""运行"""
|
||||
if self.app_type == 'agent':
|
||||
async for msg in self._agent_messages(query):
|
||||
yield msg
|
||||
elif self.app_type == 'workflow':
|
||||
async for msg in self._workflow_messages(query):
|
||||
yield msg
|
||||
else:
|
||||
raise DashscopeAPIError(f'不支持的 Dashscope 应用类型: {self.app_type}')
|
||||
@@ -1,337 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
import json
|
||||
import uuid
|
||||
import re
|
||||
import base64
|
||||
|
||||
|
||||
from .. import runner
|
||||
from ...core import app
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
from ...utils import image
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
from libs.dify_service_api.v1 import client, errors
|
||||
|
||||
|
||||
@runner.runner_class('dify-service-api')
|
||||
class DifyServiceAPIRunner(runner.RequestRunner):
|
||||
"""Dify Service API 对话请求器"""
|
||||
|
||||
dify_client: client.AsyncDifyServiceClient
|
||||
|
||||
def __init__(self, ap: app.Application, pipeline_config: dict):
|
||||
self.ap = ap
|
||||
self.pipeline_config = pipeline_config
|
||||
|
||||
valid_app_types = ['chat', 'agent', 'workflow']
|
||||
if self.pipeline_config['ai']['dify-service-api']['app-type'] not in valid_app_types:
|
||||
raise errors.DifyAPIError(
|
||||
f'不支持的 Dify 应用类型: {self.pipeline_config["ai"]["dify-service-api"]["app-type"]}'
|
||||
)
|
||||
|
||||
api_key = self.pipeline_config['ai']['dify-service-api']['api-key']
|
||||
|
||||
self.dify_client = client.AsyncDifyServiceClient(
|
||||
api_key=api_key,
|
||||
base_url=self.pipeline_config['ai']['dify-service-api']['base-url'],
|
||||
)
|
||||
|
||||
def _try_convert_thinking(self, resp_text: str) -> str:
|
||||
"""尝试转换 Dify 的思考提示"""
|
||||
if not resp_text.startswith(
|
||||
'<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> <summary> Thinking... </summary>'
|
||||
):
|
||||
return resp_text
|
||||
|
||||
if self.pipeline_config['ai']['dify-service-api']['thinking-convert'] == 'original':
|
||||
return resp_text
|
||||
|
||||
if self.pipeline_config['ai']['dify-service-api']['thinking-convert'] == 'remove':
|
||||
return re.sub(
|
||||
r'<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> <summary> Thinking... </summary>.*?</details>',
|
||||
'',
|
||||
resp_text,
|
||||
flags=re.DOTALL,
|
||||
)
|
||||
|
||||
if self.pipeline_config['ai']['dify-service-api']['thinking-convert'] == 'plain':
|
||||
pattern = r'<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> <summary> Thinking... </summary>(.*?)</details>'
|
||||
thinking_text = re.search(pattern, resp_text, flags=re.DOTALL)
|
||||
content_text = re.sub(pattern, '', resp_text, flags=re.DOTALL)
|
||||
return f'<think>{thinking_text.group(1)}</think>\n{content_text}'
|
||||
|
||||
async def _preprocess_user_message(self, query: pipeline_query.Query) -> tuple[str, list[str]]:
|
||||
"""预处理用户消息,提取纯文本,并将图片上传到 Dify 服务
|
||||
|
||||
Returns:
|
||||
tuple[str, list[str]]: 纯文本和图片的 Dify 服务图片 ID
|
||||
"""
|
||||
plain_text = ''
|
||||
image_ids = []
|
||||
|
||||
if isinstance(query.user_message.content, list):
|
||||
for ce in query.user_message.content:
|
||||
if ce.type == 'text':
|
||||
plain_text += ce.text
|
||||
elif ce.type == 'image_base64':
|
||||
image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
|
||||
file_bytes = base64.b64decode(image_b64)
|
||||
file = ('img.png', file_bytes, f'image/{image_format}')
|
||||
file_upload_resp = await self.dify_client.upload_file(
|
||||
file,
|
||||
f'{query.session.launcher_type.value}_{query.session.launcher_id}',
|
||||
)
|
||||
image_id = file_upload_resp['id']
|
||||
image_ids.append(image_id)
|
||||
elif isinstance(query.user_message.content, str):
|
||||
plain_text = query.user_message.content
|
||||
|
||||
return plain_text, image_ids
|
||||
|
||||
async def _chat_messages(
|
||||
self, query: pipeline_query.Query
|
||||
) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""调用聊天助手"""
|
||||
cov_id = query.session.using_conversation.uuid or ''
|
||||
query.variables['conversation_id'] = cov_id
|
||||
|
||||
plain_text, image_ids = await self._preprocess_user_message(query)
|
||||
|
||||
files = [
|
||||
{
|
||||
'type': 'image',
|
||||
'transfer_method': 'local_file',
|
||||
'upload_file_id': image_id,
|
||||
}
|
||||
for image_id in image_ids
|
||||
]
|
||||
|
||||
mode = 'basic' # 标记是基础编排还是工作流编排
|
||||
|
||||
basic_mode_pending_chunk = ''
|
||||
|
||||
inputs = {}
|
||||
|
||||
inputs.update(query.variables)
|
||||
|
||||
chunk = None # 初始化chunk变量,防止在没有响应时引用错误
|
||||
|
||||
async for chunk in self.dify_client.chat_messages(
|
||||
inputs=inputs,
|
||||
query=plain_text,
|
||||
user=f'{query.session.launcher_type.value}_{query.session.launcher_id}',
|
||||
conversation_id=cov_id,
|
||||
files=files,
|
||||
timeout=120,
|
||||
):
|
||||
self.ap.logger.debug('dify-chat-chunk: ' + str(chunk))
|
||||
|
||||
if chunk['event'] == 'workflow_started':
|
||||
mode = 'workflow'
|
||||
|
||||
if mode == 'workflow':
|
||||
if chunk['event'] == 'node_finished':
|
||||
if chunk['data']['node_type'] == 'answer':
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=self._try_convert_thinking(chunk['data']['outputs']['answer']),
|
||||
)
|
||||
elif mode == 'basic':
|
||||
if chunk['event'] == 'message':
|
||||
basic_mode_pending_chunk += chunk['answer']
|
||||
elif chunk['event'] == 'message_end':
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=self._try_convert_thinking(basic_mode_pending_chunk),
|
||||
)
|
||||
basic_mode_pending_chunk = ''
|
||||
|
||||
if chunk is None:
|
||||
raise errors.DifyAPIError('Dify API 没有返回任何响应,请检查网络连接和API配置')
|
||||
|
||||
query.session.using_conversation.uuid = chunk['conversation_id']
|
||||
|
||||
async def _agent_chat_messages(
|
||||
self, query: pipeline_query.Query
|
||||
) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""调用聊天助手"""
|
||||
cov_id = query.session.using_conversation.uuid or ''
|
||||
query.variables['conversation_id'] = cov_id
|
||||
|
||||
plain_text, image_ids = await self._preprocess_user_message(query)
|
||||
|
||||
files = [
|
||||
{
|
||||
'type': 'image',
|
||||
'transfer_method': 'local_file',
|
||||
'upload_file_id': image_id,
|
||||
}
|
||||
for image_id in image_ids
|
||||
]
|
||||
|
||||
ignored_events = []
|
||||
|
||||
inputs = {}
|
||||
|
||||
inputs.update(query.variables)
|
||||
|
||||
pending_agent_message = ''
|
||||
|
||||
chunk = None # 初始化chunk变量,防止在没有响应时引用错误
|
||||
|
||||
async for chunk in self.dify_client.chat_messages(
|
||||
inputs=inputs,
|
||||
query=plain_text,
|
||||
user=f'{query.session.launcher_type.value}_{query.session.launcher_id}',
|
||||
response_mode='streaming',
|
||||
conversation_id=cov_id,
|
||||
files=files,
|
||||
timeout=120,
|
||||
):
|
||||
self.ap.logger.debug('dify-agent-chunk: ' + str(chunk))
|
||||
|
||||
if chunk['event'] in ignored_events:
|
||||
continue
|
||||
|
||||
if chunk['event'] == 'agent_message':
|
||||
pending_agent_message += chunk['answer']
|
||||
else:
|
||||
if pending_agent_message.strip() != '':
|
||||
pending_agent_message = pending_agent_message.replace('</details>Action:', '</details>')
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=self._try_convert_thinking(pending_agent_message),
|
||||
)
|
||||
pending_agent_message = ''
|
||||
|
||||
if chunk['event'] == 'agent_thought':
|
||||
if chunk['tool'] != '' and chunk['observation'] != '': # 工具调用结果,跳过
|
||||
continue
|
||||
|
||||
if chunk['tool']:
|
||||
msg = provider_message.Message(
|
||||
role='assistant',
|
||||
tool_calls=[
|
||||
provider_message.ToolCall(
|
||||
id=chunk['id'],
|
||||
type='function',
|
||||
function=provider_message.FunctionCall(
|
||||
name=chunk['tool'],
|
||||
arguments=json.dumps({}),
|
||||
),
|
||||
)
|
||||
],
|
||||
)
|
||||
yield msg
|
||||
if chunk['event'] == 'message_file':
|
||||
if chunk['type'] == 'image' and chunk['belongs_to'] == 'assistant':
|
||||
base_url = self.dify_client.base_url
|
||||
|
||||
if base_url.endswith('/v1'):
|
||||
base_url = base_url[:-3]
|
||||
|
||||
image_url = base_url + chunk['url']
|
||||
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=[provider_message.ContentElement.from_image_url(image_url)],
|
||||
)
|
||||
if chunk['event'] == 'error':
|
||||
raise errors.DifyAPIError('dify 服务错误: ' + chunk['message'])
|
||||
|
||||
if chunk is None:
|
||||
raise errors.DifyAPIError('Dify API 没有返回任何响应,请检查网络连接和API配置')
|
||||
|
||||
query.session.using_conversation.uuid = chunk['conversation_id']
|
||||
|
||||
async def _workflow_messages(
|
||||
self, query: pipeline_query.Query
|
||||
) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""调用工作流"""
|
||||
|
||||
if not query.session.using_conversation.uuid:
|
||||
query.session.using_conversation.uuid = str(uuid.uuid4())
|
||||
|
||||
query.variables['conversation_id'] = query.session.using_conversation.uuid
|
||||
|
||||
plain_text, image_ids = await self._preprocess_user_message(query)
|
||||
|
||||
files = [
|
||||
{
|
||||
'type': 'image',
|
||||
'transfer_method': 'local_file',
|
||||
'upload_file_id': image_id,
|
||||
}
|
||||
for image_id in image_ids
|
||||
]
|
||||
|
||||
ignored_events = ['text_chunk', 'workflow_started']
|
||||
|
||||
inputs = { # these variables are legacy variables, we need to keep them for compatibility
|
||||
'langbot_user_message_text': plain_text,
|
||||
'langbot_session_id': query.variables['session_id'],
|
||||
'langbot_conversation_id': query.variables['conversation_id'],
|
||||
'langbot_msg_create_time': query.variables['msg_create_time'],
|
||||
}
|
||||
|
||||
inputs.update(query.variables)
|
||||
|
||||
async for chunk in self.dify_client.workflow_run(
|
||||
inputs=inputs,
|
||||
user=f'{query.session.launcher_type.value}_{query.session.launcher_id}',
|
||||
files=files,
|
||||
timeout=120,
|
||||
):
|
||||
self.ap.logger.debug('dify-workflow-chunk: ' + str(chunk))
|
||||
if chunk['event'] in ignored_events:
|
||||
continue
|
||||
|
||||
if chunk['event'] == 'node_started':
|
||||
if chunk['data']['node_type'] == 'start' or chunk['data']['node_type'] == 'end':
|
||||
continue
|
||||
|
||||
msg = provider_message.Message(
|
||||
role='assistant',
|
||||
content=None,
|
||||
tool_calls=[
|
||||
provider_message.ToolCall(
|
||||
id=chunk['data']['node_id'],
|
||||
type='function',
|
||||
function=provider_message.FunctionCall(
|
||||
name=chunk['data']['title'],
|
||||
arguments=json.dumps({}),
|
||||
),
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
yield msg
|
||||
|
||||
elif chunk['event'] == 'workflow_finished':
|
||||
if chunk['data']['error']:
|
||||
raise errors.DifyAPIError(chunk['data']['error'])
|
||||
|
||||
msg = provider_message.Message(
|
||||
role='assistant',
|
||||
content=chunk['data']['outputs']['summary'],
|
||||
)
|
||||
|
||||
yield msg
|
||||
|
||||
async def run(self, query: pipeline_query.Query) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""运行请求"""
|
||||
if self.pipeline_config['ai']['dify-service-api']['app-type'] == 'chat':
|
||||
async for msg in self._chat_messages(query):
|
||||
yield msg
|
||||
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'agent':
|
||||
async for msg in self._agent_chat_messages(query):
|
||||
yield msg
|
||||
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'workflow':
|
||||
async for msg in self._workflow_messages(query):
|
||||
yield msg
|
||||
else:
|
||||
raise errors.DifyAPIError(
|
||||
f'不支持的 Dify 应用类型: {self.pipeline_config["ai"]["dify-service-api"]["app-type"]}'
|
||||
)
|
||||
@@ -1,78 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import typing
|
||||
|
||||
from .. import runner
|
||||
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
|
||||
import langbot_plugin.api.entities.builtin.provider.message as provider_message
|
||||
|
||||
|
||||
@runner.runner_class('local-agent')
|
||||
class LocalAgentRunner(runner.RequestRunner):
|
||||
"""本地Agent请求运行器"""
|
||||
|
||||
async def run(self, query: pipeline_query.Query) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""运行请求"""
|
||||
pending_tool_calls = []
|
||||
|
||||
req_messages = query.prompt.messages.copy() + query.messages.copy() + [query.user_message]
|
||||
|
||||
use_llm_model = await self.ap.model_mgr.get_model_by_uuid(query.use_llm_model_uuid)
|
||||
|
||||
# 首次请求
|
||||
msg = await use_llm_model.requester.invoke_llm(
|
||||
query,
|
||||
use_llm_model,
|
||||
req_messages,
|
||||
query.use_funcs,
|
||||
extra_args=use_llm_model.model_entity.extra_args,
|
||||
)
|
||||
|
||||
yield msg
|
||||
|
||||
pending_tool_calls = msg.tool_calls
|
||||
|
||||
req_messages.append(msg)
|
||||
|
||||
# 持续请求,只要还有待处理的工具调用就继续处理调用
|
||||
while pending_tool_calls:
|
||||
for tool_call in pending_tool_calls:
|
||||
try:
|
||||
func = tool_call.function
|
||||
|
||||
parameters = json.loads(func.arguments)
|
||||
|
||||
func_ret = await self.ap.tool_mgr.execute_func_call(func.name, parameters)
|
||||
|
||||
msg = provider_message.Message(
|
||||
role='tool',
|
||||
content=json.dumps(func_ret, ensure_ascii=False),
|
||||
tool_call_id=tool_call.id,
|
||||
)
|
||||
|
||||
yield msg
|
||||
|
||||
req_messages.append(msg)
|
||||
except Exception as e:
|
||||
# 工具调用出错,添加一个报错信息到 req_messages
|
||||
err_msg = provider_message.Message(role='tool', content=f'err: {e}', tool_call_id=tool_call.id)
|
||||
|
||||
yield err_msg
|
||||
|
||||
req_messages.append(err_msg)
|
||||
|
||||
# 处理完所有调用,再次请求
|
||||
msg = await use_llm_model.requester.invoke_llm(
|
||||
query,
|
||||
use_llm_model,
|
||||
req_messages,
|
||||
query.use_funcs,
|
||||
extra_args=use_llm_model.model_entity.extra_args,
|
||||
)
|
||||
|
||||
yield msg
|
||||
|
||||
pending_tool_calls = msg.tool_calls
|
||||
|
||||
req_messages.append(msg)
|
||||
@@ -1,158 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
from contextlib import AsyncExitStack
|
||||
|
||||
from mcp import ClientSession, StdioServerParameters
|
||||
from mcp.client.stdio import stdio_client
|
||||
from mcp.client.sse import sse_client
|
||||
|
||||
from .. import loader
|
||||
from ....core import app
|
||||
import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
|
||||
|
||||
|
||||
class RuntimeMCPSession:
|
||||
"""运行时 MCP 会话"""
|
||||
|
||||
ap: app.Application
|
||||
|
||||
server_name: str
|
||||
|
||||
server_config: dict
|
||||
|
||||
session: ClientSession
|
||||
|
||||
exit_stack: AsyncExitStack
|
||||
|
||||
functions: list[resource_tool.LLMTool] = []
|
||||
|
||||
def __init__(self, server_name: str, server_config: dict, ap: app.Application):
|
||||
self.server_name = server_name
|
||||
self.server_config = server_config
|
||||
self.ap = ap
|
||||
|
||||
self.session = None
|
||||
|
||||
self.exit_stack = AsyncExitStack()
|
||||
self.functions = []
|
||||
|
||||
async def _init_stdio_python_server(self):
|
||||
server_params = StdioServerParameters(
|
||||
command=self.server_config['command'],
|
||||
args=self.server_config['args'],
|
||||
env=self.server_config['env'],
|
||||
)
|
||||
|
||||
stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
|
||||
|
||||
stdio, write = stdio_transport
|
||||
|
||||
self.session = await self.exit_stack.enter_async_context(ClientSession(stdio, write))
|
||||
|
||||
await self.session.initialize()
|
||||
|
||||
async def _init_sse_server(self):
|
||||
sse_transport = await self.exit_stack.enter_async_context(
|
||||
sse_client(
|
||||
self.server_config['url'],
|
||||
headers=self.server_config.get('headers', {}),
|
||||
timeout=self.server_config.get('timeout', 10),
|
||||
)
|
||||
)
|
||||
|
||||
sseio, write = sse_transport
|
||||
|
||||
self.session = await self.exit_stack.enter_async_context(ClientSession(sseio, write))
|
||||
|
||||
await self.session.initialize()
|
||||
|
||||
async def initialize(self):
|
||||
self.ap.logger.debug(f'初始化 MCP 会话: {self.server_name} {self.server_config}')
|
||||
|
||||
if self.server_config['mode'] == 'stdio':
|
||||
await self._init_stdio_python_server()
|
||||
elif self.server_config['mode'] == 'sse':
|
||||
await self._init_sse_server()
|
||||
else:
|
||||
raise ValueError(f'无法识别 MCP 服务器类型: {self.server_name}: {self.server_config}')
|
||||
|
||||
tools = await self.session.list_tools()
|
||||
|
||||
self.ap.logger.debug(f'获取 MCP 工具: {tools}')
|
||||
|
||||
for tool in tools.tools:
|
||||
|
||||
async def func(*, _tool=tool, **kwargs):
|
||||
result = await self.session.call_tool(_tool.name, kwargs)
|
||||
if result.isError:
|
||||
raise Exception(result.content[0].text)
|
||||
return result.content[0].text
|
||||
|
||||
func.__name__ = tool.name
|
||||
|
||||
self.functions.append(
|
||||
resource_tool.LLMTool(
|
||||
name=tool.name,
|
||||
human_desc=tool.description,
|
||||
description=tool.description,
|
||||
parameters=tool.inputSchema,
|
||||
func=func,
|
||||
)
|
||||
)
|
||||
|
||||
async def shutdown(self):
|
||||
"""关闭工具"""
|
||||
await self.session._exit_stack.aclose()
|
||||
|
||||
|
||||
@loader.loader_class('mcp')
|
||||
class MCPLoader(loader.ToolLoader):
|
||||
"""MCP 工具加载器。
|
||||
|
||||
在此加载器中管理所有与 MCP Server 的连接。
|
||||
"""
|
||||
|
||||
sessions: dict[str, RuntimeMCPSession] = {}
|
||||
|
||||
_last_listed_functions: list[resource_tool.LLMTool] = []
|
||||
|
||||
def __init__(self, ap: app.Application):
|
||||
super().__init__(ap)
|
||||
self.sessions = {}
|
||||
self._last_listed_functions = []
|
||||
|
||||
async def initialize(self):
|
||||
for server_config in self.ap.instance_config.data.get('mcp', {}).get('servers', []):
|
||||
if not server_config['enable']:
|
||||
continue
|
||||
session = RuntimeMCPSession(server_config['name'], server_config, self.ap)
|
||||
await session.initialize()
|
||||
# self.ap.event_loop.create_task(session.initialize())
|
||||
self.sessions[server_config['name']] = session
|
||||
|
||||
async def get_tools(self) -> list[resource_tool.LLMTool]:
|
||||
all_functions = []
|
||||
|
||||
for session in self.sessions.values():
|
||||
all_functions.extend(session.functions)
|
||||
|
||||
self._last_listed_functions = all_functions
|
||||
|
||||
return all_functions
|
||||
|
||||
async def has_tool(self, name: str) -> bool:
|
||||
return name in [f.name for f in self._last_listed_functions]
|
||||
|
||||
async def invoke_tool(self, name: str, parameters: dict) -> typing.Any:
|
||||
for server_name, session in self.sessions.items():
|
||||
for function in session.functions:
|
||||
if function.name == name:
|
||||
return await function.func(**parameters)
|
||||
|
||||
raise ValueError(f'未找到工具: {name}')
|
||||
|
||||
async def shutdown(self):
|
||||
"""关闭工具"""
|
||||
for session in self.sessions.values():
|
||||
await session.shutdown()
|
||||
@@ -1,21 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
from ..core import app
|
||||
from . import provider
|
||||
from .providers import localstorage
|
||||
|
||||
|
||||
class StorageMgr:
|
||||
"""存储管理器"""
|
||||
|
||||
ap: app.Application
|
||||
|
||||
storage_provider: provider.StorageProvider
|
||||
|
||||
def __init__(self, ap: app.Application):
|
||||
self.ap = ap
|
||||
self.storage_provider = localstorage.LocalStorageProvider(ap)
|
||||
|
||||
async def initialize(self):
|
||||
await self.storage_provider.initialize()
|
||||
@@ -1,109 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import typing
|
||||
import os
|
||||
import base64
|
||||
import logging
|
||||
|
||||
import pydantic
|
||||
import requests
|
||||
|
||||
from ..core import app
|
||||
|
||||
|
||||
class Announcement(pydantic.BaseModel):
|
||||
"""公告"""
|
||||
|
||||
id: int
|
||||
|
||||
time: str
|
||||
|
||||
timestamp: int
|
||||
|
||||
content: str
|
||||
|
||||
enabled: typing.Optional[bool] = True
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
'id': self.id,
|
||||
'time': self.time,
|
||||
'timestamp': self.timestamp,
|
||||
'content': self.content,
|
||||
'enabled': self.enabled,
|
||||
}
|
||||
|
||||
|
||||
class AnnouncementManager:
|
||||
"""公告管理器"""
|
||||
|
||||
ap: app.Application = None
|
||||
|
||||
def __init__(self, ap: app.Application):
|
||||
self.ap = ap
|
||||
|
||||
async def fetch_all(self) -> list[Announcement]:
|
||||
"""获取所有公告"""
|
||||
resp = requests.get(
|
||||
url='https://api.github.com/repos/RockChinQ/LangBot/contents/res/announcement.json',
|
||||
proxies=self.ap.proxy_mgr.get_forward_proxies(),
|
||||
timeout=5,
|
||||
)
|
||||
obj_json = resp.json()
|
||||
b64_content = obj_json['content']
|
||||
# 解码
|
||||
content = base64.b64decode(b64_content).decode('utf-8')
|
||||
|
||||
return [Announcement(**item) for item in json.loads(content)]
|
||||
|
||||
async def fetch_saved(self) -> list[Announcement]:
|
||||
if not os.path.exists('data/labels/announcement_saved.json'):
|
||||
with open('data/labels/announcement_saved.json', 'w', encoding='utf-8') as f:
|
||||
f.write('[]')
|
||||
|
||||
with open('data/labels/announcement_saved.json', 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
if not content:
|
||||
content = '[]'
|
||||
|
||||
return [Announcement(**item) for item in json.loads(content)]
|
||||
|
||||
async def write_saved(self, content: list[Announcement]):
|
||||
with open('data/labels/announcement_saved.json', 'w', encoding='utf-8') as f:
|
||||
f.write(json.dumps([item.to_dict() for item in content], indent=4, ensure_ascii=False))
|
||||
|
||||
async def fetch_new(self) -> list[Announcement]:
|
||||
"""获取新公告"""
|
||||
all = await self.fetch_all()
|
||||
saved = await self.fetch_saved()
|
||||
|
||||
to_show: list[Announcement] = []
|
||||
|
||||
for item in all:
|
||||
# 遍历saved检查是否有相同id的公告
|
||||
for saved_item in saved:
|
||||
if saved_item.id == item.id:
|
||||
break
|
||||
else:
|
||||
if item.enabled:
|
||||
# 没有相同id的公告
|
||||
to_show.append(item)
|
||||
|
||||
await self.write_saved(all)
|
||||
return to_show
|
||||
|
||||
async def show_announcements(self) -> typing.Tuple[str, int]:
|
||||
"""显示公告"""
|
||||
try:
|
||||
announcements = await self.fetch_new()
|
||||
ann_text = ''
|
||||
for ann in announcements:
|
||||
ann_text += f'[公告] {ann.time}: {ann.content}\n'
|
||||
|
||||
# TODO statistics
|
||||
|
||||
return ann_text, logging.INFO
|
||||
except Exception as e:
|
||||
return f'获取公告时出错: {e}', logging.WARNING
|
||||
@@ -1,8 +0,0 @@
|
||||
semantic_version = 'v4.3.0.beta3'
|
||||
|
||||
required_database_version = 5
|
||||
"""标记本版本所需要的数据库结构版本,用于判断数据库迁移"""
|
||||
|
||||
debug_mode = False
|
||||
|
||||
edition = 'community'
|
||||
@@ -1,9 +1,10 @@
|
||||
[project]
|
||||
name = "langbot"
|
||||
version = "4.3.0.beta3"
|
||||
description = "高稳定、支持扩展、多模态 - 大模型原生即时通信机器人平台"
|
||||
version = "4.5.3"
|
||||
description = "Easy-to-use global IM bot platform designed for LLM era"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10.1"
|
||||
license-files = ["LICENSE"]
|
||||
requires-python = ">=3.10.1,<4.0"
|
||||
dependencies = [
|
||||
"aiocqhttp>=1.4.4",
|
||||
"aiofiles>=24.1.0",
|
||||
@@ -19,6 +20,7 @@ dependencies = [
|
||||
"dashscope>=1.23.2",
|
||||
"dingtalk-stream>=0.24.0",
|
||||
"discord-py>=2.5.2",
|
||||
"pynacl>=1.5.0", # Required for Discord voice support
|
||||
"gewechat-client>=0.1.5",
|
||||
"lark-oapi>=1.4.15",
|
||||
"mcp>=1.8.1",
|
||||
@@ -44,13 +46,28 @@ dependencies = [
|
||||
"urllib3>=2.4.0",
|
||||
"websockets>=15.0.1",
|
||||
"python-socks>=2.7.1", # dingtalk missing dependency
|
||||
"taskgroup==0.0.0a4", # graingert/taskgroup#20
|
||||
"pip>=25.1.1",
|
||||
"ruff>=0.11.9",
|
||||
"pre-commit>=4.2.0",
|
||||
"uv>=0.7.11",
|
||||
"mypy>=1.16.0",
|
||||
"langbot-plugin==0.1.1b2",
|
||||
"PyPDF2>=3.0.1",
|
||||
"python-docx>=1.1.0",
|
||||
"pandas>=2.2.2",
|
||||
"chardet>=5.2.0",
|
||||
"markdown>=3.6",
|
||||
"beautifulsoup4>=4.12.3",
|
||||
"ebooklib>=0.18",
|
||||
"html2text>=2024.2.26",
|
||||
"langchain>=0.2.0",
|
||||
"langchain-text-splitters>=0.0.1",
|
||||
"chromadb>=0.4.24",
|
||||
"qdrant-client (>=1.15.1,<2.0.0)",
|
||||
"langbot-plugin==0.1.11",
|
||||
"asyncpg>=0.30.0",
|
||||
"line-bot-sdk>=3.19.0",
|
||||
"tboxsdk>=0.0.10",
|
||||
"boto3>=1.35.0",
|
||||
]
|
||||
keywords = [
|
||||
"bot",
|
||||
@@ -68,11 +85,10 @@ keywords = [
|
||||
"onebot",
|
||||
]
|
||||
classifiers = [
|
||||
"Development Status :: 3 - Alpha",
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
"Framework :: AsyncIO",
|
||||
"Framework :: Robot Framework",
|
||||
"Framework :: Robot Framework :: Library",
|
||||
"License :: OSI Approved :: AGPL-3 License",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Topic :: Communications :: Chat",
|
||||
@@ -81,11 +97,24 @@ classifiers = [
|
||||
[project.urls]
|
||||
Homepage = "https://langbot.app"
|
||||
Documentation = "https://docs.langbot.app"
|
||||
Repository = "https://github.com/RockChinQ/langbot"
|
||||
Repository = "https://github.com/langbot-app/LangBot"
|
||||
|
||||
[project.scripts]
|
||||
langbot = "langbot.__main__:main"
|
||||
|
||||
[build-system]
|
||||
requires = ["setuptools>=61.0", "wheel"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[tool.setuptools]
|
||||
package-data = { "langbot" = ["templates/**", "pkg/provider/modelmgr/requesters/*", "pkg/platform/sources/*", "web/out/**"] }
|
||||
|
||||
[dependency-groups]
|
||||
dev = [
|
||||
"pre-commit>=4.2.0",
|
||||
"pytest>=8.4.1",
|
||||
"pytest-asyncio>=1.0.0",
|
||||
"pytest-cov>=7.0.0",
|
||||
"ruff>=0.11.9",
|
||||
]
|
||||
|
||||
|
||||
39
pytest.ini
Normal file
39
pytest.ini
Normal file
@@ -0,0 +1,39 @@
|
||||
[pytest]
|
||||
# Test discovery patterns
|
||||
python_files = test_*.py
|
||||
python_classes = Test*
|
||||
python_functions = test_*
|
||||
|
||||
# Test paths
|
||||
testpaths = tests
|
||||
|
||||
# Asyncio configuration
|
||||
asyncio_mode = auto
|
||||
|
||||
# Output options
|
||||
addopts =
|
||||
-v
|
||||
--strict-markers
|
||||
--tb=short
|
||||
--disable-warnings
|
||||
|
||||
# Markers
|
||||
markers =
|
||||
asyncio: mark test as async
|
||||
unit: mark test as unit test
|
||||
integration: mark test as integration test
|
||||
slow: mark test as slow running
|
||||
|
||||
# Coverage options (when using pytest-cov)
|
||||
[coverage:run]
|
||||
source = langbot.pkg
|
||||
omit =
|
||||
*/tests/*
|
||||
*/test_*.py
|
||||
*/__pycache__/*
|
||||
*/site-packages/*
|
||||
|
||||
[coverage:report]
|
||||
precision = 2
|
||||
show_missing = True
|
||||
skip_covered = False
|
||||
31
run_tests.sh
Executable file
31
run_tests.sh
Executable file
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Script to run all unit tests
|
||||
# This script helps avoid circular import issues by setting up the environment properly
|
||||
|
||||
set -e
|
||||
|
||||
echo "Setting up test environment..."
|
||||
|
||||
# Activate virtual environment if it exists
|
||||
if [ -d ".venv" ]; then
|
||||
source .venv/bin/activate
|
||||
fi
|
||||
|
||||
# Check if pytest is installed
|
||||
if ! command -v pytest &> /dev/null; then
|
||||
echo "Installing test dependencies..."
|
||||
pip install pytest pytest-asyncio pytest-cov
|
||||
fi
|
||||
|
||||
echo "Running all unit tests..."
|
||||
|
||||
# Run tests with coverage
|
||||
pytest tests/unit_tests/ -v --tb=short \
|
||||
--cov=pkg \
|
||||
--cov-report=xml \
|
||||
"$@"
|
||||
|
||||
echo ""
|
||||
echo "Test run complete!"
|
||||
echo "Coverage report saved to coverage.xml"
|
||||
3
src/langbot/__init__.py
Normal file
3
src/langbot/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""LangBot - Easy-to-use global IM bot platform designed for LLM era"""
|
||||
|
||||
__version__ = '4.5.3'
|
||||
104
src/langbot/__main__.py
Normal file
104
src/langbot/__main__.py
Normal file
@@ -0,0 +1,104 @@
|
||||
"""LangBot entry point for package execution"""
|
||||
|
||||
import asyncio
|
||||
import argparse
|
||||
import sys
|
||||
import os
|
||||
|
||||
# ASCII art banner
|
||||
asciiart = r"""
|
||||
_ ___ _
|
||||
| | __ _ _ _ __ _| _ ) ___| |_
|
||||
| |__/ _` | ' \/ _` | _ \/ _ \ _|
|
||||
|____\__,_|_||_\__, |___/\___/\__|
|
||||
|___/
|
||||
|
||||
⭐️ Open Source 开源地址: https://github.com/langbot-app/LangBot
|
||||
📖 Documentation 文档地址: https://docs.langbot.app
|
||||
"""
|
||||
|
||||
|
||||
async def main_entry(loop: asyncio.AbstractEventLoop):
|
||||
"""Main entry point for LangBot"""
|
||||
parser = argparse.ArgumentParser(description='LangBot')
|
||||
parser.add_argument(
|
||||
'--standalone-runtime',
|
||||
action='store_true',
|
||||
help='Use standalone plugin runtime / 使用独立插件运行时',
|
||||
default=False,
|
||||
)
|
||||
parser.add_argument('--debug', action='store_true', help='Debug mode / 调试模式', default=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.standalone_runtime:
|
||||
from langbot.pkg.utils import platform
|
||||
|
||||
platform.standalone_runtime = True
|
||||
|
||||
if args.debug:
|
||||
from langbot.pkg.utils import constants
|
||||
|
||||
constants.debug_mode = True
|
||||
|
||||
print(asciiart)
|
||||
|
||||
# Check dependencies
|
||||
from langbot.pkg.core.bootutils import deps
|
||||
|
||||
missing_deps = await deps.check_deps()
|
||||
|
||||
if missing_deps:
|
||||
print('以下依赖包未安装,将自动安装,请完成后重启程序:')
|
||||
print(
|
||||
'These dependencies are missing, they will be installed automatically, please restart the program after completion:'
|
||||
)
|
||||
for dep in missing_deps:
|
||||
print('-', dep)
|
||||
await deps.install_deps(missing_deps)
|
||||
print('已自动安装缺失的依赖包,请重启程序。')
|
||||
print('The missing dependencies have been installed automatically, please restart the program.')
|
||||
sys.exit(0)
|
||||
|
||||
# Check configuration files
|
||||
from langbot.pkg.core.bootutils import files
|
||||
|
||||
generated_files = await files.generate_files()
|
||||
|
||||
if generated_files:
|
||||
print('以下文件不存在,已自动生成:')
|
||||
print('Following files do not exist and have been automatically generated:')
|
||||
for file in generated_files:
|
||||
print('-', file)
|
||||
|
||||
from langbot.pkg.core import boot
|
||||
|
||||
await boot.main(loop)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function to be called by console script entry point"""
|
||||
# Check Python version
|
||||
if sys.version_info < (3, 10, 1):
|
||||
print('需要 Python 3.10.1 及以上版本,当前 Python 版本为:', sys.version)
|
||||
print('Your Python version is not supported.')
|
||||
print('Python 3.10.1 or higher is required. Current version:', sys.version)
|
||||
sys.exit(1)
|
||||
|
||||
# Set up the working directory
|
||||
# When installed as a package, we need to handle the working directory differently
|
||||
# We'll create data directory in current working directory if not exists
|
||||
os.makedirs('data', exist_ok=True)
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
|
||||
try:
|
||||
loop.run_until_complete(main_entry(loop))
|
||||
except KeyboardInterrupt:
|
||||
print('\n正在退出...')
|
||||
print('Exiting...')
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
177
src/langbot/libs/coze_server_api/client.py
Normal file
177
src/langbot/libs/coze_server_api/client.py
Normal file
@@ -0,0 +1,177 @@
|
||||
import json
|
||||
import asyncio
|
||||
import aiohttp
|
||||
import io
|
||||
from typing import Dict, List, Any, AsyncGenerator
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class AsyncCozeAPIClient:
|
||||
def __init__(self, api_key: str, api_base: str = 'https://api.coze.cn'):
|
||||
self.api_key = api_key
|
||||
self.api_base = api_base
|
||||
self.session = None
|
||||
|
||||
async def __aenter__(self):
|
||||
"""支持异步上下文管理器"""
|
||||
await self.coze_session()
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
"""退出时自动关闭会话"""
|
||||
await self.close()
|
||||
|
||||
async def coze_session(self):
|
||||
"""确保HTTP session存在"""
|
||||
if self.session is None:
|
||||
connector = aiohttp.TCPConnector(
|
||||
ssl=False if self.api_base.startswith('http://') else True,
|
||||
limit=100,
|
||||
limit_per_host=30,
|
||||
keepalive_timeout=30,
|
||||
enable_cleanup_closed=True,
|
||||
)
|
||||
timeout = aiohttp.ClientTimeout(
|
||||
total=120, # 默认超时时间
|
||||
connect=30,
|
||||
sock_read=120,
|
||||
)
|
||||
headers = {
|
||||
'Authorization': f'Bearer {self.api_key}',
|
||||
'Accept': 'text/event-stream',
|
||||
}
|
||||
self.session = aiohttp.ClientSession(headers=headers, timeout=timeout, connector=connector)
|
||||
return self.session
|
||||
|
||||
async def close(self):
|
||||
"""显式关闭会话"""
|
||||
if self.session and not self.session.closed:
|
||||
await self.session.close()
|
||||
self.session = None
|
||||
|
||||
async def upload(
|
||||
self,
|
||||
file,
|
||||
) -> str:
|
||||
# 处理 Path 对象
|
||||
if isinstance(file, Path):
|
||||
if not file.exists():
|
||||
raise ValueError(f'File not found: {file}')
|
||||
with open(file, 'rb') as f:
|
||||
file = f.read()
|
||||
|
||||
# 处理文件路径字符串
|
||||
elif isinstance(file, str):
|
||||
if not os.path.isfile(file):
|
||||
raise ValueError(f'File not found: {file}')
|
||||
with open(file, 'rb') as f:
|
||||
file = f.read()
|
||||
|
||||
# 处理文件对象
|
||||
elif hasattr(file, 'read'):
|
||||
file = file.read()
|
||||
|
||||
session = await self.coze_session()
|
||||
url = f'{self.api_base}/v1/files/upload'
|
||||
|
||||
try:
|
||||
file_io = io.BytesIO(file)
|
||||
async with session.post(
|
||||
url,
|
||||
data={
|
||||
'file': file_io,
|
||||
},
|
||||
timeout=aiohttp.ClientTimeout(total=60),
|
||||
) as response:
|
||||
if response.status == 401:
|
||||
raise Exception('Coze API 认证失败,请检查 API Key 是否正确')
|
||||
|
||||
response_text = await response.text()
|
||||
|
||||
if response.status != 200:
|
||||
raise Exception(f'文件上传失败,状态码: {response.status}, 响应: {response_text}')
|
||||
try:
|
||||
result = await response.json()
|
||||
except json.JSONDecodeError:
|
||||
raise Exception(f'文件上传响应解析失败: {response_text}')
|
||||
|
||||
if result.get('code') != 0:
|
||||
raise Exception(f'文件上传失败: {result.get("msg", "未知错误")}')
|
||||
|
||||
file_id = result['data']['id']
|
||||
return file_id
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
raise Exception('文件上传超时')
|
||||
except Exception as e:
|
||||
raise Exception(f'文件上传失败: {str(e)}')
|
||||
|
||||
async def chat_messages(
|
||||
self,
|
||||
bot_id: str,
|
||||
user_id: str,
|
||||
additional_messages: List[Dict] | None = None,
|
||||
conversation_id: str | None = None,
|
||||
auto_save_history: bool = True,
|
||||
stream: bool = True,
|
||||
timeout: float = 120,
|
||||
) -> AsyncGenerator[Dict[str, Any], None]:
|
||||
"""发送聊天消息并返回流式响应
|
||||
|
||||
Args:
|
||||
bot_id: Bot ID
|
||||
user_id: 用户ID
|
||||
additional_messages: 额外消息列表
|
||||
conversation_id: 会话ID
|
||||
auto_save_history: 是否自动保存历史
|
||||
stream: 是否流式响应
|
||||
timeout: 超时时间
|
||||
"""
|
||||
session = await self.coze_session()
|
||||
url = f'{self.api_base}/v3/chat'
|
||||
|
||||
payload = {
|
||||
'bot_id': bot_id,
|
||||
'user_id': user_id,
|
||||
'stream': stream,
|
||||
'auto_save_history': auto_save_history,
|
||||
}
|
||||
|
||||
if additional_messages:
|
||||
payload['additional_messages'] = additional_messages
|
||||
|
||||
params = {}
|
||||
if conversation_id:
|
||||
params['conversation_id'] = conversation_id
|
||||
|
||||
try:
|
||||
async with session.post(
|
||||
url,
|
||||
json=payload,
|
||||
params=params,
|
||||
timeout=aiohttp.ClientTimeout(total=timeout),
|
||||
) as response:
|
||||
if response.status == 401:
|
||||
raise Exception('Coze API 认证失败,请检查 API Key 是否正确')
|
||||
|
||||
if response.status != 200:
|
||||
raise Exception(f'Coze API 流式请求失败,状态码: {response.status}')
|
||||
|
||||
async for chunk in response.content:
|
||||
chunk = chunk.decode('utf-8')
|
||||
if chunk != '\n':
|
||||
if chunk.startswith('event:'):
|
||||
chunk_type = chunk.replace('event:', '', 1).strip()
|
||||
elif chunk.startswith('data:'):
|
||||
chunk_data = chunk.replace('data:', '', 1).strip()
|
||||
else:
|
||||
yield {
|
||||
'event': chunk_type,
|
||||
'data': json.loads(chunk_data) if chunk_data else {},
|
||||
} # 处理本地部署时,接口返回的data为空值
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
raise Exception(f'Coze API 流式请求超时 ({timeout}秒)')
|
||||
except Exception as e:
|
||||
raise Exception(f'Coze API 流式请求失败: {str(e)}')
|
||||
@@ -5,6 +5,8 @@ import typing
|
||||
import json
|
||||
|
||||
from .errors import DifyAPIError
|
||||
from pathlib import Path
|
||||
import os
|
||||
|
||||
|
||||
class AsyncDifyServiceClient:
|
||||
@@ -109,7 +111,23 @@ class AsyncDifyServiceClient:
|
||||
user: str,
|
||||
timeout: float = 30.0,
|
||||
) -> str:
|
||||
"""上传文件"""
|
||||
# 处理 Path 对象
|
||||
if isinstance(file, Path):
|
||||
if not file.exists():
|
||||
raise ValueError(f'File not found: {file}')
|
||||
with open(file, 'rb') as f:
|
||||
file = f.read()
|
||||
|
||||
# 处理文件路径字符串
|
||||
elif isinstance(file, str):
|
||||
if not os.path.isfile(file):
|
||||
raise ValueError(f'File not found: {file}')
|
||||
with open(file, 'rb') as f:
|
||||
file = f.read()
|
||||
|
||||
# 处理文件对象
|
||||
elif hasattr(file, 'read'):
|
||||
file = file.read()
|
||||
async with httpx.AsyncClient(
|
||||
base_url=self.base_url,
|
||||
trust_env=True,
|
||||
@@ -121,6 +139,8 @@ class AsyncDifyServiceClient:
|
||||
headers={'Authorization': f'Bearer {self.api_key}'},
|
||||
files={
|
||||
'file': file,
|
||||
},
|
||||
data={
|
||||
'user': (None, user),
|
||||
},
|
||||
)
|
||||
@@ -110,6 +110,24 @@ class DingTalkClient:
|
||||
else:
|
||||
raise Exception(f'Error: {response.status_code}, {response.text}')
|
||||
|
||||
async def get_file_url(self, download_code: str):
|
||||
if not await self.check_access_token():
|
||||
await self.get_access_token()
|
||||
url = 'https://api.dingtalk.com/v1.0/robot/messageFiles/download'
|
||||
params = {'downloadCode': download_code, 'robotCode': self.robot_code}
|
||||
headers = {'x-acs-dingtalk-access-token': self.access_token}
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post(url, headers=headers, json=params)
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
download_url = result.get('downloadUrl')
|
||||
if download_url:
|
||||
return download_url
|
||||
else:
|
||||
await self.logger.error(f'failed to get file: {response.json()}')
|
||||
else:
|
||||
raise Exception(f'Error: {response.status_code}, {response.text}')
|
||||
|
||||
async def update_incoming_message(self, message):
|
||||
"""异步更新 DingTalkClient 中的 incoming_message"""
|
||||
message_data = await self.get_message(message)
|
||||
@@ -170,12 +188,69 @@ class DingTalkClient:
|
||||
|
||||
if incoming_message.message_type == 'richText':
|
||||
data = incoming_message.rich_text_content.to_dict()
|
||||
|
||||
# 使用统一的结构化数据格式,保持顺序
|
||||
rich_content = {
|
||||
'Type': 'richText',
|
||||
'Elements': [], # 按顺序存储所有元素
|
||||
'SimpleContent': '', # 兼容字段:纯文本内容
|
||||
'SimplePicture': '', # 兼容字段:第一张图片
|
||||
}
|
||||
|
||||
# 先收集所有文本和图片占位符
|
||||
text_elements = []
|
||||
|
||||
# 解析富文本内容,保持原始顺序
|
||||
for item in data['richText']:
|
||||
if 'text' in item:
|
||||
message_data['Content'] = item['text']
|
||||
if incoming_message.get_image_list()[0]:
|
||||
message_data['Picture'] = await self.download_image(incoming_message.get_image_list()[0])
|
||||
message_data['Type'] = 'text'
|
||||
# 处理文本内容
|
||||
if 'text' in item and item['text'] != '\n':
|
||||
element = {'Type': 'text', 'Content': item['text']}
|
||||
rich_content['Elements'].append(element)
|
||||
text_elements.append(item['text'])
|
||||
|
||||
# 检查是否是图片元素 - 根据钉钉API的实际结构调整
|
||||
# 钉钉富文本中的图片通常有特定标识,可能需要根据实际返回调整
|
||||
elif item.get('type') == 'picture':
|
||||
# 创建图片占位符
|
||||
element = {
|
||||
'Type': 'image_placeholder',
|
||||
}
|
||||
rich_content['Elements'].append(element)
|
||||
|
||||
# 获取并下载所有图片
|
||||
image_list = incoming_message.get_image_list()
|
||||
if image_list:
|
||||
new_elements = []
|
||||
image_index = 0
|
||||
|
||||
for element in rich_content['Elements']:
|
||||
if element['Type'] == 'image_placeholder':
|
||||
if image_index < len(image_list) and image_list[image_index]:
|
||||
image_url = await self.download_image(image_list[image_index])
|
||||
new_elements.append({'Type': 'image', 'Picture': image_url})
|
||||
image_index += 1
|
||||
else:
|
||||
# 如果没有对应的图片,保留占位符或跳过
|
||||
continue
|
||||
else:
|
||||
new_elements.append(element)
|
||||
|
||||
rich_content['Elements'] = new_elements
|
||||
|
||||
# 设置兼容字段
|
||||
all_texts = [elem['Content'] for elem in rich_content['Elements'] if elem.get('Type') == 'text']
|
||||
rich_content['SimpleContent'] = '\n'.join(all_texts) if all_texts else ''
|
||||
|
||||
all_images = [elem['Picture'] for elem in rich_content['Elements'] if elem.get('Type') == 'image']
|
||||
if all_images:
|
||||
rich_content['SimplePicture'] = all_images[0]
|
||||
rich_content['AllImages'] = all_images # 所有图片的列表
|
||||
|
||||
# 设置原始的 content 和 picture 字段以保持兼容
|
||||
message_data['Content'] = rich_content['SimpleContent']
|
||||
message_data['Rich_Content'] = rich_content
|
||||
if all_images:
|
||||
message_data['Picture'] = all_images[0]
|
||||
|
||||
elif incoming_message.message_type == 'text':
|
||||
message_data['Content'] = incoming_message.get_text_list()[0]
|
||||
@@ -189,6 +264,17 @@ class DingTalkClient:
|
||||
message_data['Audio'] = await self.get_audio_url(incoming_message.to_dict()['content']['downloadCode'])
|
||||
|
||||
message_data['Type'] = 'audio'
|
||||
elif incoming_message.message_type == 'file':
|
||||
down_list = incoming_message.get_down_list()
|
||||
if len(down_list) >= 2:
|
||||
message_data['File'] = await self.get_file_url(down_list[0])
|
||||
message_data['Name'] = down_list[1]
|
||||
else:
|
||||
if self.logger:
|
||||
await self.logger.error(f'get_down_list() returned fewer than 2 elements: {down_list}')
|
||||
message_data['File'] = None
|
||||
message_data['Name'] = None
|
||||
message_data['Type'] = 'file'
|
||||
|
||||
copy_message_data = message_data.copy()
|
||||
del copy_message_data['IncomingMessage']
|
||||
@@ -253,6 +339,43 @@ class DingTalkClient:
|
||||
await self.logger.error(f'failed to send proactive massage to group: {traceback.format_exc()}')
|
||||
raise Exception(f'failed to send proactive massage to group: {traceback.format_exc()}')
|
||||
|
||||
async def create_and_card(
|
||||
self, temp_card_id: str, incoming_message: dingtalk_stream.ChatbotMessage, quote_origin: bool = False
|
||||
):
|
||||
content_key = 'content'
|
||||
card_data = {content_key: ''}
|
||||
|
||||
card_instance = dingtalk_stream.AICardReplier(self.client, incoming_message)
|
||||
# print(card_instance)
|
||||
# 先投放卡片: https://open.dingtalk.com/document/orgapp/create-and-deliver-cards
|
||||
card_instance_id = await card_instance.async_create_and_deliver_card(
|
||||
temp_card_id,
|
||||
card_data,
|
||||
)
|
||||
return card_instance, card_instance_id
|
||||
|
||||
async def send_card_message(self, card_instance, card_instance_id: str, content: str, is_final: bool):
|
||||
content_key = 'content'
|
||||
try:
|
||||
await card_instance.async_streaming(
|
||||
card_instance_id,
|
||||
content_key=content_key,
|
||||
content_value=content,
|
||||
append=False,
|
||||
finished=is_final,
|
||||
failed=False,
|
||||
)
|
||||
except Exception as e:
|
||||
self.logger.exception(e)
|
||||
await card_instance.async_streaming(
|
||||
card_instance_id,
|
||||
content_key=content_key,
|
||||
content_value='',
|
||||
append=False,
|
||||
finished=is_final,
|
||||
failed=True,
|
||||
)
|
||||
|
||||
async def start(self):
|
||||
"""启动 WebSocket 连接,监听消息"""
|
||||
await self.client.start()
|
||||
@@ -15,6 +15,10 @@ class DingTalkEvent(dict):
|
||||
def content(self):
|
||||
return self.get('Content', '')
|
||||
|
||||
@property
|
||||
def rich_content(self):
|
||||
return self.get('Rich_Content', '')
|
||||
|
||||
@property
|
||||
def incoming_message(self) -> Optional['dingtalk_stream.chatbot.ChatbotMessage']:
|
||||
return self.get('IncomingMessage')
|
||||
@@ -31,6 +35,14 @@ class DingTalkEvent(dict):
|
||||
def audio(self):
|
||||
return self.get('Audio', '')
|
||||
|
||||
@property
|
||||
def file(self):
|
||||
return self.get('File', '')
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.get('Name', '')
|
||||
|
||||
@property
|
||||
def conversation(self):
|
||||
return self.get('conversation_type', '')
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user