Compare commits

...

187 Commits

Author SHA1 Message Date
Junyan Qin
89b25b8985 chore: release v4.2.2 2025-08-29 17:01:26 +08:00
devin-ai-integration[bot]
46b4482a7d feat: add GitHub star count component to sidebar (#1636)
* feat: add GitHub star count component to sidebar

- Add GitHub star component to sidebar bottom section
- Fetch star count from space.langbot.app API
- Display star count with proper internationalization
- Open GitHub repository in new tab when clicked
- Follow existing sidebar styling patterns

Co-Authored-By: Rock <rockchinq@gmail.com>

* perf: ui

* chore: remove githubStars text

---------

Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: Rock <rockchinq@gmail.com>
2025-08-28 21:04:36 +08:00
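The sidebar star counter described above is a TypeScript React component; below is a minimal Python sketch of the same fetch-and-fallback idea. The endpoint path and the response field name are assumptions, not the component's actual API.

```python
# Hypothetical sketch of the star-count fetch behind the sidebar component.
# The endpoint path and the 'stars' field are assumptions for illustration.
import httpx


def fetch_star_count(api_base: str = 'https://space.langbot.app') -> int | None:
    """Return the GitHub star count, or None if the request fails."""
    try:
        resp = httpx.get(f'{api_base}/api/v1/stars', timeout=5)  # hypothetical path
        resp.raise_for_status()
        return int(resp.json().get('stars', 0))  # hypothetical field name
    except (httpx.HTTPError, ValueError):
        return None  # the sidebar simply hides the count instead of breaking
```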
Bruce
d9fa1cbb06 perf: add cmd enable config & fix announce request timeout & fix sending cards when the AI platform is disconnected (#1633)
* add cmd config && fix bugs

* perf: use `get`

* update bansess fix block match rule

* perf: comment for access-control session str

---------

Co-authored-by: Junyan Qin <rockchinq@gmail.com>
2025-08-28 12:59:50 +08:00
Bruce
8858f432b5 fix dingtalk message sender id & update dingtalk streaming card without content (#1630) 2025-08-27 18:09:30 +08:00
Junyan Qin
8f5ec48522 doc: update shengsuanyun comment 2025-08-26 16:00:48 +08:00
devin-ai-integration[bot]
83ff64698b feat: add ZIP file upload support for knowledge base (#1626)
* feat: add ZIP file upload support for knowledge base

- Add _parse_zip method to FileParser class using zipfile library
- Support extraction and processing of TXT, PDF, DOCX, MD, HTML files from ZIP
- Update FileUploadZone to accept .zip files
- Add ZIP format to supported formats in internationalization files
- Implement error handling for invalid ZIP files and unsupported content
- Follow existing async parsing patterns and error handling conventions

Co-Authored-By: Rock <rockchinq@gmail.com>

* refactor: modify ZIP processing to store each document as separate file

- Remove _parse_zip method from FileParser as ZIP handling now occurs at knowledge base level
- Add _store_zip_file method to RuntimeKnowledgeBase to extract and store each document separately
- Each document in ZIP is now stored as individual file entry in knowledge base
- Process ZIP files in memory using io.BytesIO to avoid filesystem writes
- Generate unique file names for extracted documents to prevent conflicts

Co-Authored-By: Rock <rockchinq@gmail.com>

* perf: delete temp files

---------

Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: Rock <rockchinq@gmail.com>
2025-08-23 21:18:13 +08:00
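A minimal sketch of the in-memory ZIP handling described in the commit above: open the upload with zipfile over io.BytesIO, keep only supported document types, and hand each entry to the knowledge base under a unique name. The store_file callable and the suffix whitelist are illustrative assumptions.

```python
# Sketch of the in-memory ZIP extraction described above; store_file is a
# hypothetical callable standing in for the knowledge base's storage method.
import io
import uuid
import zipfile

SUPPORTED_SUFFIXES = ('.txt', '.pdf', '.docx', '.md', '.html')  # per the commit message


async def store_zip_documents(zip_bytes: bytes, store_file) -> list[str]:
    """Extract supported documents from a ZIP held in memory and store each one separately."""
    stored: list[str] = []
    with zipfile.ZipFile(io.BytesIO(zip_bytes)) as archive:  # no filesystem writes
        for info in archive.infolist():
            if info.is_dir() or not info.filename.lower().endswith(SUPPORTED_SUFFIXES):
                continue
            unique_name = f'{uuid.uuid4().hex}_{info.filename.rsplit("/", 1)[-1]}'
            stored.append(await store_file(unique_name, archive.read(info)))
    return stored
```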
Junyan Qin
87ecb4e519 feat: add note for remove_think & remove dify remove cot code 2025-08-21 21:38:58 +08:00
Ljzd_PRO
df524b8a7a Fix: Fixed the incorrect extraction method of sender ID when converting aiocqhttp reply messages (#1624)
* fix: update invoke_embedding to return only embeddings from client.embed

* fix: Fixed the incorrect extraction method of sender ID when converting aiocqhttp reply messages
2025-08-21 20:46:26 +08:00
Junyan Qin
8a7df423ab chore: update shengsuanyun url 2025-08-21 14:14:25 +08:00
Junyan Qin
cafd623c92 chore: update shengsuanyun 2025-08-21 12:03:04 +08:00
Junyan Qin
4df11ef064 chore: update for shengsuanyun 2025-08-21 11:47:40 +08:00
Junyan Qin
aa7c08ee00 chore: release v4.2.1 2025-08-21 10:15:05 +08:00
Junyan Qin
b98de29b07 feat: add shengsuanyun requester 2025-08-20 23:33:35 +08:00
fdc310
c7c2eb4518 fix: call failure caused by missing id in gemini tool_calls (#1622)
* fix: dify agent LLM return messages cannot be joined

* fix: call failure caused by missing id in gemini tool_calls
2025-08-20 22:39:16 +08:00
Ljzd_PRO
37fa318258 fix: update invoke_embedding to return only embeddings from client.embed (#1619) 2025-08-20 10:25:33 +08:00
fdc310
ff7bebb782 fix: dify agent LLM return messages cannot be joined (#1617) 2025-08-19 23:23:00 +08:00
Junyan Qin
30bb26f898 doc(README): streaming output 2025-08-18 21:21:50 +08:00
Junyan Qin
9c1f4e1690 chore: release v4.2.0 2025-08-18 18:43:20 +08:00
dependabot[bot]
865ee2ca01 Merge pull request #1612 from langbot-app/dependabot/npm_and_yarn/web/form-data-4.0.4
chore(deps): bump form-data from 4.0.2 to 4.0.4 in /web
2025-08-18 16:10:56 +08:00
Junyan Qin (Chin)
c2264080bd Merge pull request #1442 from langbot-app/feat/streaming
feat: streaming output
2025-08-17 23:36:30 +08:00
Dong_master
67b622d5a6 fix:Some adjustments to the return types 2025-08-17 23:34:19 +08:00
Dong_master
a534c02d75 fix:remove print 2025-08-17 23:34:01 +08:00
Junyan Qin
da890d3074 chore: remove fix.MD 2025-08-17 21:20:32 +08:00
Junyan Qin
3049aa7a96 feat: add migration for pipeline remove-think 2025-08-17 21:18:41 +08:00
Junyan Qin (Chin)
e66f674968 Merge branch 'master' into feat/streaming 2025-08-17 14:30:22 +08:00
Junyan Qin (Chin)
dd0e0abdc4 Merge pull request #1571 from fdc310/streaming_feature
feat:add streaming output and pipeline stream
2025-08-17 14:27:39 +08:00
Junyan Qin (Chin)
13f6396eb4 Merge pull request #1610 from langbot-app/devin/1755399221-add-password-change-feature
feat: add password change functionality
2025-08-17 14:25:24 +08:00
Junyan Qin
7bbaa4fcad feat: perf ui & complete i18n 2025-08-17 14:09:28 +08:00
Junyan Qin
e931d5eb88 chore: remove print 2025-08-17 13:52:40 +08:00
Junyan Qin
4bbfa2f1d7 fix: telegram adapter gracefully stop 2025-08-17 13:52:02 +08:00
Devin AI
dd30d08c68 feat: add password change functionality
- Add password change button to sidebar account menu
- Create PasswordChangeDialog component with shadcn UI components
- Implement backend API endpoint /api/v1/user/change-password
- Add form validation with current password verification
- Include internationalization support for Chinese and English
- Add proper error handling and success notifications

Co-Authored-By: Rock <rockchinq@gmail.com>
2025-08-17 03:03:36 +00:00
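A hedged sketch of the server-side flow behind /api/v1/user/change-password: verify the current password with argon2, then store a new hash. The user_store accessors are assumptions; only the argon2 verify step and the VerifyMismatchError it raises are reflected in the route handler shown later in this diff.

```python
# Hedged sketch of the change-password service flow; user_store and its
# methods are hypothetical. argon2.PasswordHasher.verify raises
# VerifyMismatchError on a wrong current password, which the route maps to
# HTTP 400 "Current password is incorrect".
import argon2

ph = argon2.PasswordHasher()


async def change_password(user_store, user_email: str, current_password: str, new_password: str) -> None:
    user = await user_store.get_user(user_email)  # hypothetical accessor
    if user is None:
        raise ValueError('user not found')
    ph.verify(user.password, current_password)  # raises VerifyMismatchError on mismatch
    user.password = ph.hash(new_password)
    await user_store.save_user(user)  # hypothetical persistence call
```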
Dong_master
8ccda10045 fix: in the dashscopeapi.py workflow stream bug 2025-08-16 12:11:00 +08:00
Dong_master
46fbfbefea fix: in the dashscopeapi.py stream and non-stream remove_think logic 2025-08-16 02:13:45 +08:00
Dong_master
8f863cf530 fix: remove_think bug 2025-08-15 00:55:39 +08:00
Dong_master
2351193c51 fix: add streaming in difysvapi.py, and apply remove_think per chunk 2025-08-15 00:50:32 +08:00
Dong_master
8c87a47f5a fix: add remove_think arg to the _closure func in ollamachat.py 2025-08-14 22:35:30 +08:00
Dong_master
b8b9a37825 fix: dify non-stream remove_think logic 2025-08-14 22:32:22 +08:00
Dong_master
13dd6fcee3 fix: webchat non-stream mode did not save resp_message in message_lists 2025-08-14 22:29:42 +08:00
Junyan Qin
29f0075bd8 perf: zh-Hant specs 2025-08-13 17:49:54 +08:00
Junyan Qin
8a96ffbcc0 chore: complete zh-Hant specs for top_k 2025-08-13 17:33:47 +08:00
Junyan Qin (Chin)
67f68d8101 Merge pull request #1606 from langbot-app/feat/topk_splitter
Feat/topk splitter
2025-08-13 17:31:11 +08:00
Junyan Qin
ad59d92cef perf: i18n 2025-08-13 17:28:00 +08:00
Dong_master
85f97860c5 fix: Fixed the errors in modelscopechatcmpl.py when in pseudo-non-streaming mode, regarding the display of main content and tool calls. 2025-08-13 01:55:06 +08:00
Dong_master
8fd21e76f2 fix: Only when messagechunk is present, will msg_sequence be assigned to the subsequent tool calls. 2025-08-13 00:00:10 +08:00
Dong_master
cc83ddbe21 fix: del print 2025-08-12 23:29:32 +08:00
Dong_master
99fcde1586 fix: add msg_sequence to MessageChunk, and obtain usage in the adapter 2025-08-12 23:20:41 +08:00
WangCham
eab08dfbf3 fix: format the code 2025-08-12 23:13:00 +08:00
Dong_master
dbf0200cca feat: add more attractive card templates 2025-08-12 22:36:42 +08:00
Junyan Qin
ac44f35299 chore: remove comments 2025-08-12 21:07:23 +08:00
Junyan Qin
d6a5fdd911 perf: complete sidebar menu 2025-08-12 21:02:40 +08:00
Dong_master
4668db716a fix: command reply_message error; delete some prints 2025-08-12 20:54:47 +08:00
Junyan Qin
f7cd6b76f2 feat: refactor account menu 2025-08-12 20:13:18 +08:00
Junyan Qin
b6d47187f5 perf: prettier 2025-08-12 19:39:41 +08:00
Junyan Qin
051fffd41e fix: stash 2025-08-12 19:18:49 +08:00
Junyan Qin
c5480078b3 perf: make prompt editor textarea 2025-08-12 11:30:42 +08:00
Dong_master
e744e9c4ef fix: add tool_calls arg to the MessageChunk yielded in localagent.py; after calling tool_calls, concatenate the first returned body data 2025-08-12 11:25:37 +08:00
Dong_master
9f22b8b585 fix: change the reply_message_chunk arg message_id to bot_message in adapter.py, and update the arg in pipelinemgr.py and respback.py 2025-08-12 11:21:08 +08:00
Dong_master
27cee0a4e1 fix: change the reply_message_chunk arg message_id to bot_message in adapter.py, and update the arg in dingtalk.py, lark.py, telegram.py and webchat.py 2025-08-12 11:19:27 +08:00
Dong_master
6d35fc408c fix: in anthropicmsgs.py, the message dict's 'content' is sometimes a str and cannot be appended to 2025-08-12 11:15:17 +08:00
Dong_master
0607a0fa5c fix: stream tool_calls arguments bug in modelscopechatcmpl.py 2025-08-12 00:04:21 +08:00
Dong_master
ed57d2fafa del localagent.py print 2025-08-11 23:49:19 +08:00
Junyan Qin
39ef92676b doc: add back wechat 2025-08-11 23:38:41 +08:00
Dong_master
7301476228 fix:Because the message_id was popped out, it caused the issue where the tool couldn't find the message_id after being invoked. 2025-08-11 23:36:01 +08:00
WangCham
457cc3eecd fix: wrong definition of topk 2025-08-11 23:22:36 +08:00
Dong_master
a381069bcc fix:fix tool_result argument bug 2025-08-11 23:05:47 +08:00
WangCham
146c38e64c fix: wrong positions 2025-08-11 22:58:48 +08:00
Junyan Qin (Chin)
763c41729e Merge pull request #1605 from TwperBody/master
feat: dark mode supports for webui
2025-08-11 20:51:58 +08:00
Junyan Qin
0021efebd7 perf: minor fix 2025-08-11 20:50:39 +08:00
Junyan Qin
5f18a1b13a chore: prettier 2025-08-11 20:46:08 +08:00
Junyan Qin
0124448479 perf: card shadowbox 2025-08-11 20:41:57 +08:00
WangCham
e76bc80e51 Merge branch 'feat/topk_splitter' of github.com:RockChinQ/LangBot into feat/topk_splitter 2025-08-11 00:20:13 +08:00
WangCham
a27560e804 fix: page bug 2025-08-11 00:12:06 +08:00
Dong_master
46452de7b5 fix: streaming tool-call handling is fixed, but there are still bugs in reply messages from models with reasoning (think) content. 2025-08-10 23:14:57 +08:00
TwperBody
2aef139577 dark mode 2025-08-10 22:17:06 +08:00
Dong_master
03b11481ed fix: remove_think logic, and handling of trailing <think>/</think> tags 2025-08-10 00:28:55 +08:00
Dong_master
8c5cb71812 fix: delete useless logic in chatcmpl.py; add/remove <think> handling for non-streaming in modelscopechatcmpl.py; fix ppiochatcmpl.py stream logic and make giteeaichatcmpl.py inherit from ppiochatcmpl.py 2025-08-10 00:16:13 +08:00
Dong_master
7c59bc1ce5 feat: add anthropic stream output 2025-08-10 00:09:19 +08:00
Dong_master
eede354d3b fix: delete <think> content in chatcmpl.py; fix the _closure_stream streaming logic in ppiochatcmpl.py and modelscopechatcmpl.py 2025-08-09 02:46:13 +08:00
Junyan Qin
eb7b5dcc25 chore: rename zh_Hans label of deepseek requester 2025-08-08 17:31:24 +08:00
WangCham
47e9ce96fc feat: add topk 2025-08-07 18:10:33 +08:00
WangCham
4e95bc542c fix: kb form 2025-08-07 18:10:33 +08:00
WangCham
e4f321ea7a feat: add description for topk 2025-08-07 18:10:33 +08:00
WangCham
246eb71b75 feat: add topk 2025-08-07 18:10:33 +08:00
Junyan Qin
261f50b8ec feat: refactor with cursor max mode claude 4.1 opus 2025-08-07 15:47:57 +08:00
Junyan Qin
9736d0708a fix: missing deps 2025-08-07 10:15:09 +08:00
Junyan Qin
02dbe80d2f perf: model testing 2025-08-07 10:01:04 +08:00
Dong_master
0f239ace17 Merge remote-tracking branch 'origin/streaming_feature' into streaming_feature 2025-08-06 23:02:35 +08:00
Dong_master
3a82ae8da5 fix: the bug in the "remove_think" function. 2025-08-06 23:00:57 +08:00
Junyan Qin
c33c9eaab0 chore: remove remove_think param in trigger.yaml 2025-08-06 15:45:35 +08:00
Junyan Qin
87f626f3cc doc(README): add HelloGitHub badge 2025-08-05 17:40:27 +08:00
Dong_master
e88302f1b4 fix: remove_think handling logic in the connector; temporarily disable streaming tool-call processing in the runner. 2025-08-05 04:24:03 +08:00
Dong_master
5597dffaeb Merge remote-tracking branch 'origin/streaming_feature' into streaming_feature
# Conflicts:
#	pkg/api/http/controller/groups/pipelines/webchat.py
#	pkg/pipeline/process/handlers/chat.py
#	pkg/platform/sources/aiocqhttp.py
#	pkg/platform/sources/dingtalk.py
#	pkg/platform/sources/discord.py
#	pkg/platform/sources/lark.py
#	pkg/platform/sources/telegram.py
#	pkg/platform/sources/wechatpad.py
#	pkg/provider/modelmgr/requester.py
#	pkg/provider/modelmgr/requesters/chatcmpl.py
#	pkg/provider/modelmgr/requesters/deepseekchatcmpl.py
#	pkg/provider/modelmgr/requesters/giteeaichatcmpl.py
#	pkg/provider/modelmgr/requesters/modelscopechatcmpl.py
#	pkg/provider/modelmgr/requesters/ppiochatcmpl.py
#	pkg/provider/runners/dashscopeapi.py
#	pkg/provider/runners/difysvapi.py
#	pkg/provider/runners/localagent.py
2025-08-04 23:17:36 +08:00
Junyan Qin
7f25d61531 fix: minor fix 2025-08-04 23:00:54 +08:00
Junyan Qin
15e524c6e6 perf: move remove-think to output tab 2025-08-04 19:26:19 +08:00
fdc
4a1d033ee9 fix: reduce chunk returns in the Dify and Bailian (Dashscope) runners to every 8 chunks 2025-08-04 19:26:19 +08:00
fdc
8adc88a8c0 fix: instead of the requester reading remove_think directly from the config file, retrieve it in the runner and pass it to the functions that need it 2025-08-04 19:26:18 +08:00
fdc
a62b38eda7 fix: in the adapter's reply_message_chunk, stream the message into the card or edit it only on every 8th chunk or at the end of streaming 2025-08-04 19:26:18 +08:00
Dong_master
fcef784180 fix: in the runner, yield every 8 tokens 2025-08-04 19:26:18 +08:00
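Several of the streaming commits above describe the same batching rule: buffer streamed deltas and only emit (or edit the platform card) every 8th chunk, plus once at the end. A minimal sketch of that idea, with the chunk source left abstract:

```python
# Minimal sketch of the "yield every 8 chunks" batching described above;
# the delta source and what the caller does with each emission are abstract.
from typing import AsyncGenerator


async def batch_stream(deltas: AsyncGenerator[str, None], every: int = 8) -> AsyncGenerator[str, None]:
    buffer = ''
    count = 0
    async for delta in deltas:
        buffer += delta
        count += 1
        if count % every == 0:
            yield buffer  # e.g. edit the platform card with the accumulated text
    if count % every != 0:
        yield buffer  # flush the remainder when the stream ends
```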
Junyan Qin
c3ed4ef6a1 feat: no longer use typewriter in debug dialog 2025-08-04 19:26:18 +08:00
Junyan Qin
b9f768af25 perf: minor fixes 2025-08-04 19:26:18 +08:00
Junyan Qin
47ff883fc7 perf: ruff format & remove stream params in requester 2025-08-04 19:26:18 +08:00
Dong_master
68906c43ff feat: add webchat word-by-word output
fix: webchat message stream bug
2025-08-04 19:26:18 +08:00
Dong_master
c6deed4e6e feat: webchat stream is ok 2025-08-04 19:26:18 +08:00
Dong_master
b45cc59322 fix:webchat stream judge bug and frontend bug 2025-08-04 19:26:17 +08:00
fdc
c33a96823b fix: frontend bug 2025-08-04 19:26:17 +08:00
fdc
d3ab16761d fix: some bugs 2025-08-04 19:26:17 +08:00
fdc
70f23f24b0 fix: is_stream_output_supported return value in webchat 2025-08-04 19:26:17 +08:00
fdc
00a8410c94 feat:webchat frontend stream 2025-08-04 19:26:17 +08:00
fdc
2a17e89a99 feat: add webchat streaming (partial) 2025-08-04 19:26:17 +08:00
fdc
8fe0992c15 fix: check create_message_card in chat; telegram reply_message_chunk missing message 2025-08-04 19:26:17 +08:00
Dong_master
a9776b7b53 fix: delete some prints, amend the stream check in respback, and remove the is_stream_output_supported() call in dingtalk 2025-08-04 19:26:16 +08:00
Dong_master
074d359c8e feat:add dashscopeapi stream
fix:dify 64chunk yield
2025-08-04 19:26:16 +08:00
Dong_master
7728b4262b fix:lark message_id and dingtalk incoming_message 2025-08-04 19:26:16 +08:00
Dong_master
4905b5a738 feat:add dingtalk stream
fix:adapter is_stream_output_supported bug
fix:stream message reply chunk in message_id
2025-08-04 19:26:16 +08:00
Dong_master
43a259a1ae feat:add dingtalk stream 2025-08-04 19:26:16 +08:00
Dong_master
cffe493db0 feat:add telegram stream 2025-08-04 19:26:16 +08:00
Dong_master
0042629bf0 feat: add PPIO and OpenRouter LLM streaming, and remove_think for PPIO think content
fix: giteeai stream without remove_think added a stray "<think>" to content
2025-08-04 19:26:16 +08:00
Dong_master
a7d638cc9a feat: add DeepSeek and ModelScope LLM streaming, and remove_think for giteeai think content 2025-08-04 19:26:16 +08:00
Dong_master
f84a79bf74 perf: remove the dify stream option from the ai.yaml config and enable stream in lark.yaml.
fix: localagent remove_think bug
2025-08-04 19:26:15 +08:00
Dong_master
f5a0cb9175 feat:add dify _agent_chat_message streaming 2025-08-04 19:26:15 +08:00
Dong_master
f9a5507029 fix: fixed the huge amount of data cached in memory and written to the log caused by pushing only iterated data into resq_messages and resq_message_chain; also handled <think> content from reasoning models
feat: add removal of <think> content for deep-reasoning models
feat: streaming support for chatbot and chatflow apps in dify
2025-08-04 19:26:15 +08:00
Dong_master
5ce32d2f04 fix: fixed the huge amount of data cached in memory and written to the log caused by pushing only iterated data into resq_messages and resq_message_chain; also added think handling for deep-reasoning models 2025-08-04 19:26:15 +08:00
Dong_master
4908996cac The basic streaming flow now works; fixed the problem caused by the conflict between yield and return 2025-08-04 19:26:15 +08:00
fdc
ee545a163f Added streaming for Lark (Feishu), but there still seem to be issues 2025-08-04 19:26:15 +08:00
fdc
6e0e5802cc fix: corrected a slip where message_id was written into reply_message 2025-08-04 19:26:15 +08:00
fdc
0d53843230 Streaming changes in chat 2025-08-04 19:26:14 +08:00
fdc
b65670cd1a feat: implement streaming message handling support 2025-08-04 19:26:14 +08:00
zejiewang
ba4b5255a2 feat:support dify message streaming output (#1437)
* fix:lark adapter listeners init problem

* feat:support dify streaming mode

* feat:remove some log

* fix(bot form): field desc missing

* fix: not compatible with chatflow

---------

Co-authored-by: wangzejie <wangzejie@meicai.cn>
Co-authored-by: Junyan Qin <rockchinq@gmail.com>
2025-08-04 18:45:52 +08:00
Junyan Qin (Chin)
d60af2b451 fix(pipeline dialog): config reset between tabs switching (#1597) 2025-08-04 00:05:55 +08:00
Dong_master
44ac8b2b63 fix: in the runner, yield every 8 tokens 2025-08-03 23:23:51 +08:00
Junyan Qin
b70001c579 chore: release v4.1.2 2025-08-03 22:52:47 +08:00
Junyan Qin (Chin)
4a8f5516f6 feat: add new api requester (#1596) 2025-08-03 22:30:52 +08:00
Junyan Qin
48d11540ae feat: no longer use typewriter in debug dialog 2025-08-03 17:18:44 +08:00
Junyan Qin
84129e3339 perf: minor fixes 2025-08-03 15:30:11 +08:00
Junyan Qin
377d455ec1 perf: ruff format & remove stream params in requester 2025-08-03 13:08:51 +08:00
Dong_master
52280d7a05 feat: add webchat word-by-word output
fix: webchat message stream bug
2025-08-02 01:42:22 +08:00
Dong_master
0ce81a2df2 feat: webchat stream is ok 2025-08-01 11:33:16 +08:00
Dong_master
d9a2bb9a06 fix:webchat stream judge bug and frontend bug 2025-07-31 14:49:12 +08:00
fdc
cb88da7f02 fix: frontend bug 2025-07-31 10:34:36 +08:00
fdc
5560a4f52d fix: some bugs 2025-07-31 10:28:43 +08:00
fdc
e4d951b174 fix: is_stream_output_supported return value in webchat 2025-07-31 10:01:47 +08:00
fdc
6e08bf71c9 feat:webchat frontend stream 2025-07-31 09:51:25 +08:00
fdc
daaf4b54ef feat: add webchat streaming (partial) 2025-07-30 17:06:14 +08:00
fdc
3291266f5d fix: check create_message_card in chat; telegram reply_message_chunk missing message 2025-07-30 15:21:59 +08:00
Dong_master
307f6acd8c fix: delete some prints, amend the stream check in respback, and remove the is_stream_output_supported() call in dingtalk 2025-07-29 23:09:02 +08:00
Junyan Qin
f1ac9c77e6 doc: update README_TW 2025-07-28 15:50:00 +08:00
Junyan Qin
b434a4e3d7 doc: add README_TW 2025-07-28 15:47:50 +08:00
Junyan Qin
2f209cd59f chore(i18n): add zh-Hant 2025-07-28 15:11:41 +08:00
Junyan Qin
0f585fd5ef fix(moonshot): make api.moonshot.ai the default api base url 2025-07-26 22:23:33 +08:00
Junyan Qin
a152dece9a chore: switch to pnpm 2025-07-26 19:45:38 +08:00
Junyan Qin
d3b31f7027 chore: release v4.1.1 2025-07-26 19:28:34 +08:00
How-Sean Xin
c00f05fca4 Add GitHub link redirection for front-end plugin cards (#1579)
* Update package.json

* Update PluginMarketComponent.tsx

* Update PluginMarketComponent.tsx

* Update package.json

* Update PluginCardComponent.tsx

* perf: do not display the GitHub button when a plugin has no GitHub URL

---------

Co-authored-by: Junyan Qin <rockchinq@gmail.com>
2025-07-26 19:22:00 +08:00
Junyan Qin
92c3a86356 feat: add qhaigc 2025-07-24 22:42:26 +08:00
Junyan Qin
341fdc409d perf: embedding model ui 2025-07-24 22:29:25 +08:00
Junyan Qin
ebd542f592 feat: 302.AI embeddings 2025-07-24 22:05:15 +08:00
Junyan Qin
194b2d9814 feat: supports more embedding providers 2025-07-24 22:03:20 +08:00
Junyan Qin
7aed5cf1ed feat: ollama embeddings models 2025-07-24 10:36:32 +08:00
Junyan Qin
abc88c4979 doc: update README 2025-07-23 18:53:15 +08:00
WangCham
3fa38f71f1 feat: add topk 2025-07-23 17:29:36 +08:00
WangCham
d651d956d6 Merge branch 'master' into feat/topk_splitter 2025-07-23 16:37:27 +08:00
gaord
6754666845 feat(wechatpad): add support for @everyone and unify message dispatch handling (#1588)
Add support for the AtAll component in the message converter, converting @everyone mentions to the required format. Also handle @everyone uniformly during message dispatch to ensure notifications are sent correctly.
2025-07-23 15:22:04 +08:00
Junyan Qin
08e6f46b19 fix(deps): react-focus-scope pkg bug 2025-07-22 11:05:16 +08:00
Dong_master
8f8c8ff367 feat:add dashscopeapi stream
fix:dify 64chunk yield
2025-07-21 18:45:45 +08:00
Dong_master
63ec2a8c34 fix:lark message_id and dingtalk incoming_message 2025-07-21 17:28:11 +08:00
Dong_master
f58c8497c3 feat:add dingtalk stream
fix:adapter is_stream_output_supported bug
fix:stream message reply chunk in message_id
2025-07-20 23:53:20 +08:00
Junyan Qin
1497fdae56 doc(README): adjust structure 2025-07-20 22:10:32 +08:00
Junyan Qin
10a3cb40e1 perf(retrieve): ui 2025-07-20 17:57:33 +08:00
devin-ai-integration[bot]
dd1ec15a39 feat: add knowledge base retrieve test tab with Card-based UI (#1583)
Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: Junyan Qin <Chin>, 秦骏言 in Chinese, you can call me my english name Rock Chin. <rockchinq@gmail.com>
2025-07-20 17:56:46 +08:00
devin-ai-integration[bot]
ea51cec57e feat: add pipeline sorting functionality with three sort options (#1582)
* feat: add pipeline sorting functionality with three sort options

Co-Authored-By: Junyan Qin <Chin>, 秦骏言 in Chinese, you can call me my english name Rock Chin. <rockchinq@gmail.com>

* perf: ui

---------

Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: Junyan Qin <Chin>, 秦骏言 in Chinese, you can call me my english name Rock Chin. <rockchinq@gmail.com>
2025-07-20 17:23:30 +08:00
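The sorting feature above surfaces as sort_by / sort_order query parameters on the pipelines endpoint (visible in the router diff further down). A hedged sketch of how those parameters might be applied to the persistence query; the column whitelist and the model argument are assumptions:

```python
# Sketch of applying sort_by / sort_order to a SQLAlchemy select; the model is
# passed in, and the whitelist of sortable columns is an assumption.
import sqlalchemy

ALLOWED_SORT_COLUMNS = {'created_at', 'updated_at', 'name'}  # hypothetical whitelist


def build_sorted_select(model, sort_by: str = 'created_at', sort_order: str = 'DESC'):
    if sort_by not in ALLOWED_SORT_COLUMNS:
        sort_by = 'created_at'
    column = getattr(model, sort_by)
    order = sqlalchemy.desc(column) if sort_order.upper() == 'DESC' else sqlalchemy.asc(column)
    return sqlalchemy.select(model).order_by(order)
```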
Dong_master
adb0bf2473 feat:add dingtalk stream 2025-07-19 01:05:44 +08:00
Dong_master
11e52a3ade feat:add telegram stream 2025-07-17 14:29:30 +08:00
WangCham
e986a0acaf fix: kb form 2025-07-16 22:50:17 +08:00
WangCham
e31883547d feat: add description for topk 2025-07-16 18:15:27 +08:00
WangCham
88c0066b06 feat: add topk 2025-07-16 17:20:13 +08:00
Dong_master
d15df3338f feat: add PPIO and OpenRouter LLM streaming, and remove_think for PPIO think content
fix: giteeai stream without remove_think added a stray "<think>" to content
2025-07-15 00:50:42 +08:00
Dong_master
c74cf38e9f feat: add DeepSeek and ModelScope LLM streaming, and remove_think for giteeai think content 2025-07-14 23:53:55 +08:00
Dong_master
0e68a922bd perf: remove the dify stream option from the ai.yaml config and enable stream in lark.yaml.
fix: localagent remove_think bug
2025-07-14 01:42:42 +08:00
Dong_master
4e1d81c9f8 feat:add dify _agent_chat_message streaming 2025-07-14 00:40:02 +08:00
Dong_master
0be08d8882 fix: fixed the huge amount of data cached in memory and written to the log caused by pushing only iterated data into resq_messages and resq_message_chain; also handled <think> content from reasoning models
feat: add removal of <think> content for deep-reasoning models
feat: streaming support for chatbot and chatflow apps in dify
2025-07-13 22:41:39 +08:00
Dong_master
301509b1db fix: fixed the huge amount of data cached in memory and written to the log caused by pushing only iterated data into resq_messages and resq_message_chain; also added think handling for deep-reasoning models 2025-07-12 18:09:24 +08:00
Dong_master
68cdd163d3 The basic streaming flow now works; fixed the problem caused by the conflict between yield and return 2025-07-04 03:26:44 +08:00
fdc
4005a8a3e2 Added streaming for Lark (Feishu), but there still seem to be issues 2025-07-03 22:58:17 +08:00
fdc
542409d48d Merge branch 'feat/streaming' of github.com:fdc310/LangBot into streaming_feature 2025-07-02 14:09:01 +08:00
zejiewang
3c6e858c35 feat:support dify message streaming output (#1437)
* fix:lark adapter listeners init problem

* feat:support dify streaming mode

* feat:remove some log

* fix(bot form): field desc missing

* fix: not compatible with chatflow

---------

Co-authored-by: wangzejie <wangzejie@meicai.cn>
Co-authored-by: Junyan Qin <rockchinq@gmail.com>
2025-07-02 11:07:31 +08:00
fdc
8670ae82a3 fix: corrected a slip where message_id was written into reply_message 2025-07-02 10:49:50 +08:00
fdc
48c9d66ab8 Streaming changes in chat 2025-07-01 18:03:05 +08:00
fdc
0eac9135c0 feat: implement streaming message handling support 2025-06-30 17:58:18 +08:00
149 changed files with 6180 additions and 1347 deletions

View File

@@ -6,7 +6,9 @@
<div align="center">
简体中文 / [English](README_EN.md) / [日本語](README_JP.md) / (PR for your language)
<a href="https://hellogithub.com/repository/langbot-app/LangBot" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=5ce8ae2aa4f74316bf393b57b952433c&claim_uid=gtmc6YWjMZkT21R" alt="FeaturedHelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
[English](README_EN.md) / 简体中文 / [繁體中文](README_TW.md) / [日本語](README_JP.md) / (PR for your language)
[![Discord](https://img.shields.io/discord/1335141740050649118?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.gg/wdNEHETs87)
[![QQ Group](https://img.shields.io/badge/%E7%A4%BE%E5%8C%BAQQ%E7%BE%A4-966235608-blue)](https://qm.qq.com/q/JLi38whHum)
@@ -25,13 +27,7 @@
</p>
## ✨ 特性
- 💬 大模型对话、Agent:支持多种大模型,适配群聊和私聊;具有多轮对话、工具调用、多模态能力,自带 RAG(知识库)实现,并深度适配 [Dify](https://dify.ai)。
- 🤖 多平台支持:目前支持 QQ、QQ频道、企业微信、个人微信、飞书、Discord、Telegram 等平台。
- 🛠️ 高稳定性、功能完备:原生支持访问控制、限速、敏感词过滤等机制;配置简单,支持多种部署方式。支持多流水线配置,不同机器人用于不同应用场景。
- 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;适配 Anthropic [MCP 协议](https://modelcontextprotocol.io/);目前已有数百个插件。
- 😻 Web 管理面板:支持通过浏览器管理 LangBot 实例,不再需要手动编写配置文件。
LangBot 是一个开源的大语言模型原生即时通信机器人开发平台,旨在提供开箱即用的 IM 机器人开发体验,具有 Agent、RAG、MCP 等多种 LLM 应用功能,适配全球主流即时通信平台,并提供丰富的 API 接口,支持自定义开发。
## 📦 开始使用
@@ -65,23 +61,25 @@ docker compose up -d
直接使用发行版运行,查看文档[手动部署](https://docs.langbot.app/zh/deploy/langbot/manual.html)。
## 📸 效果展示
## 😎 保持更新
<img alt="bots" src="https://docs.langbot.app/webui/bot-page.png" width="450px"/>
点击仓库右上角 Star 和 Watch 按钮,获取最新动态。
<img alt="bots" src="https://docs.langbot.app/webui/create-model.png" width="450px"/>
![star gif](https://docs.langbot.app/star.gif)
<img alt="bots" src="https://docs.langbot.app/webui/edit-pipeline.png" width="450px"/>
## ✨ 特性
<img alt="bots" src="https://docs.langbot.app/webui/plugin-market.png" width="450px"/>
- 💬 大模型对话、Agent:支持多种大模型,适配群聊和私聊;具有多轮对话、工具调用、多模态、流式输出能力,自带 RAG(知识库)实现,并深度适配 [Dify](https://dify.ai)。
- 🤖 多平台支持:目前支持 QQ、QQ频道、企业微信、个人微信、飞书、Discord、Telegram 等平台。
- 🛠️ 高稳定性、功能完备:原生支持访问控制、限速、敏感词过滤等机制;配置简单,支持多种部署方式。支持多流水线配置,不同机器人用于不同应用场景。
- 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;适配 Anthropic [MCP 协议](https://modelcontextprotocol.io/);目前已有数百个插件。
- 😻 Web 管理面板:支持通过浏览器管理 LangBot 实例,不再需要手动编写配置文件。
<img alt="回复效果(带有联网插件)" src="https://docs.langbot.app/QChatGPT-0516.png" width="500px"/>
详细规格特性请访问[文档](https://docs.langbot.app/zh/insight/features.html)。
- WebUI Demo: https://demo.langbot.dev/
- 登录信息:邮箱:`demo@langbot.app` 密码:`langbot123456`
- 注意:仅展示webui效果,公开环境,请不要在其中填入您的任何敏感信息。
## 🔌 组件兼容性
或访问 demo 环境:https://demo.langbot.dev/
- 登录信息:邮箱:`demo@langbot.app` 密码:`langbot123456`
- 注意:仅展示 WebUI 效果,公开环境,请不要在其中填入您的任何敏感信息。
### 消息平台
@@ -98,10 +96,6 @@ docker compose up -d
| Discord | ✅ | |
| Telegram | ✅ | |
| Slack | ✅ | |
| LINE | 🚧 | |
| WhatsApp | 🚧 | |
🚧: 正在开发中
### 大模型能力
@@ -113,6 +107,7 @@ docker compose up -d
| [Anthropic](https://www.anthropic.com/) | ✅ | |
| [xAI](https://x.ai/) | ✅ | |
| [智谱AI](https://open.bigmodel.cn/) | ✅ | |
| [胜算云](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | 全球大模型都可调用(友情推荐) |
| [优云智算](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型和 GPU 资源平台 |
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型和 GPU 资源平台 |
| [302.AI](https://share.302.ai/SuTG99) | ✅ | 大模型聚合平台 |
@@ -148,9 +143,3 @@ docker compose up -d
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
</a>
## 😎 保持更新
点击仓库右上角 Star 和 Watch 按钮,获取最新动态。
![star gif](https://docs.langbot.app/star.gif)

View File

@@ -5,7 +5,7 @@
<div align="center">
[简体中文](README.md) / English / [日本語](README_JP.md) / (PR for your language)
English / [简体中文](README.md) / [繁體中文](README_TW.md) / [日本語](README_JP.md) / (PR for your language)
[![Discord](https://img.shields.io/discord/1335141740050649118?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.gg/wdNEHETs87)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/langbot-app/LangBot)
@@ -21,13 +21,7 @@
</p>
## ✨ Features
- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapt to group chats and private chats; Supports multi-round conversations, tool calls, and multi-modal capabilities. Built-in RAG (knowledge base) implementation, and deeply integrates with [Dify](https://dify.ai).
- 🤖 Multi-platform Support: Currently supports QQ, QQ Channel, WeCom, personal WeChat, Lark, DingTalk, Discord, Telegram, etc.
- 🛠️ High Stability, Feature-rich: Native access control, rate limiting, sensitive word filtering, etc. mechanisms; Easy to use, supports multiple deployment methods. Supports multiple pipeline configurations, different bots can be used for different scenarios.
- 🧩 Plugin Extension, Active Community: Support event-driven, component extension, etc. plugin mechanisms; Integrate Anthropic [MCP protocol](https://modelcontextprotocol.io/); Currently has hundreds of plugins.
- 😻 [New] Web UI: Support management LangBot instance through the browser. No need to manually write configuration files.
LangBot is an open-source LLM native instant messaging robot development platform, aiming to provide out-of-the-box IM robot development experience, with Agent, RAG, MCP and other LLM application functions, adapting to global instant messaging platforms, and providing rich API interfaces, supporting custom development.
## 📦 Getting Started
@@ -61,23 +55,25 @@ Community contributed Zeabur template.
Directly use the released version to run, see the [Manual Deployment](https://docs.langbot.app/en/deploy/langbot/manual.html) documentation.
## 📸 Demo
## 😎 Stay Ahead
<img alt="bots" src="https://docs.langbot.app/webui/bot-page.png" width="400px"/>
Click the Star and Watch button in the upper right corner of the repository to get the latest updates.
<img alt="bots" src="https://docs.langbot.app/webui/create-model.png" width="400px"/>
![star gif](https://docs.langbot.app/star.gif)
<img alt="bots" src="https://docs.langbot.app/webui/edit-pipeline.png" width="400px"/>
## ✨ Features
<img alt="bots" src="https://docs.langbot.app/webui/plugin-market.png" width="400px"/>
- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapt to group chats and private chats; Supports multi-round conversations, tool calls, multi-modal, and streaming output capabilities. Built-in RAG (knowledge base) implementation, and deeply integrates with [Dify](https://dify.ai).
- 🤖 Multi-platform Support: Currently supports QQ, QQ Channel, WeCom, personal WeChat, Lark, DingTalk, Discord, Telegram, etc.
- 🛠️ High Stability, Feature-rich: Native access control, rate limiting, sensitive word filtering, etc. mechanisms; Easy to use, supports multiple deployment methods. Supports multiple pipeline configurations, different bots can be used for different scenarios.
- 🧩 Plugin Extension, Active Community: Support event-driven, component extension, etc. plugin mechanisms; Integrate Anthropic [MCP protocol](https://modelcontextprotocol.io/); Currently has hundreds of plugins.
- 😻 Web UI: Support management LangBot instance through the browser. No need to manually write configuration files.
<img alt="Reply Effect (with Internet Plugin)" src="https://docs.langbot.app/QChatGPT-0516.png" width="500px"/>
For more detailed specifications, please refer to the [documentation](https://docs.langbot.app/en/insight/features.html).
- WebUI Demo: https://demo.langbot.dev/
- Login information: Email: `demo@langbot.app` Password: `langbot123456`
- Note: Only the WebUI effect is shown, please do not fill in any sensitive information in the public environment.
## 🔌 Component Compatibility
Or visit the demo environment: https://demo.langbot.dev/
- Login information: Email: `demo@langbot.app` Password: `langbot123456`
- Note: For WebUI demo only, please do not fill in any sensitive information in the public environment.
### Message Platform
@@ -93,10 +89,6 @@ Directly use the released version to run, see the [Manual Deployment](https://do
| Discord | ✅ | |
| Telegram | ✅ | |
| Slack | ✅ | |
| LINE | 🚧 | |
| WhatsApp | 🚧 | |
🚧: In development
### LLMs
@@ -111,6 +103,7 @@ Directly use the released version to run, see the [Manual Deployment](https://do
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | LLM and GPU resource platform |
| [Dify](https://dify.ai) | ✅ | LLMOps platform |
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | LLM and GPU resource platform |
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | LLM and GPU resource platform |
| [302.AI](https://share.302.ai/SuTG99) | ✅ | LLM gateway(MaaS) |
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
| [Ollama](https://ollama.com/) | ✅ | Local LLM running platform |
@@ -129,9 +122,3 @@ Thank you for the following [code contributors](https://github.com/langbot-app/L
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
</a>
## 😎 Stay Ahead
Click the Star and Watch button in the upper right corner of the repository to get the latest updates.
![star gif](https://docs.langbot.app/star.gif)

View File

@@ -5,7 +5,7 @@
<div align="center">
[简体中文](README.md) / [English](README_EN.md) / 日本語 / (PR for your language)
[English](README_EN.md) / [简体中文](README.md) / [繁體中文](README_TW.md) / 日本語 / (PR for your language)
[![Discord](https://img.shields.io/discord/1335141740050649118?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.gg/wdNEHETs87)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/langbot-app/LangBot)
@@ -21,13 +21,7 @@
</p>
## ✨ 機能
- 💬 LLM / エージェントとのチャット: 複数のLLMをサポートし、グループチャットとプライベートチャットに対応。マルチラウンドの会話、ツールの呼び出し、マルチモーダル機能をサポート、RAG知識ベースを組み込み、[Dify](https://dify.ai) と深く統合。
- 🤖 多プラットフォーム対応: 現在、QQ、QQ チャンネル、WeChat、個人 WeChat、Lark、DingTalk、Discord、Telegram など、複数のプラットフォームをサポートしています。
- 🛠️ 高い安定性、豊富な機能: ネイティブのアクセス制御、レート制限、敏感な単語のフィルタリングなどのメカニズムをサポート。使いやすく、複数のデプロイ方法をサポート。複数のパイプライン設定をサポートし、異なるボットを異なる用途に使用できます。
- 🧩 プラグイン拡張、活発なコミュニティ: イベント駆動、コンポーネント拡張などのプラグインメカニズムをサポート。適配 Anthropic [MCP プロトコル](https://modelcontextprotocol.io/);豊富なエコシステム、現在数百のプラグインが存在。
- 😻 Web UI: ブラウザを通じてLangBotインスタンスを管理することをサポート。
LangBot は、エージェント、RAG、MCP などの LLM アプリケーション機能を備えた、オープンソースの LLM ネイティブのインスタントメッセージングロボット開発プラットフォームです。世界中のインスタントメッセージングプラットフォームに適応し、豊富な API インターフェースを提供し、カスタム開発をサポートします。
## 📦 始め方
@@ -43,7 +37,7 @@ http://localhost:5300 にアクセスして使用を開始します。
詳細なドキュメントは[Dockerデプロイ](https://docs.langbot.app/en/deploy/langbot/docker.html)を参照してください。
#### BTPanelでのワンクリックデプロイ
#### Panelでのワンクリックデプロイ
LangBotはBTPanelにリストされています。BTPanelをインストールしている場合は、[ドキュメント](https://docs.langbot.app/en/deploy/langbot/one-click/bt.html)を使用して使用できます。
@@ -61,23 +55,25 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
リリースバージョンを直接使用して実行します。[手動デプロイ](https://docs.langbot.app/en/deploy/langbot/manual.html)のドキュメントを参照してください。
## 📸 デモ
## 😎 最新情報を入手
<img alt="bots" src="https://docs.langbot.app/webui/bot-page.png" width="400px"/>
リポジトリの右上にある Star と Watch ボタンをクリックして、最新の更新を取得してください。
<img alt="bots" src="https://docs.langbot.app/webui/create-model.png" width="400px"/>
![star gif](https://docs.langbot.app/star.gif)
<img alt="bots" src="https://docs.langbot.app/webui/edit-pipeline.png" width="400px"/>
## ✨ 機能
<img alt="bots" src="https://docs.langbot.app/webui/plugin-market.png" width="400px"/>
- 💬 LLM / エージェントとのチャット: 複数のLLMをサポートし、グループチャットとプライベートチャットに対応。マルチラウンドの会話、ツールの呼び出し、マルチモーダル、ストリーミング出力機能をサポート、RAG知識ベースを組み込み、[Dify](https://dify.ai) と深く統合。
- 🤖 多プラットフォーム対応: 現在、QQ、QQ チャンネル、WeChat、個人 WeChat、Lark、DingTalk、Discord、Telegram など、複数のプラットフォームをサポートしています。
- 🛠️ 高い安定性、豊富な機能: ネイティブのアクセス制御、レート制限、敏感な単語のフィルタリングなどのメカニズムをサポート。使いやすく、複数のデプロイ方法をサポート。複数のパイプライン設定をサポートし、異なるボットを異なる用途に使用できます。
- 🧩 プラグイン拡張、活発なコミュニティ: イベント駆動、コンポーネント拡張などのプラグインメカニズムをサポート。適配 Anthropic [MCP プロトコル](https://modelcontextprotocol.io/);豊富なエコシステム、現在数百のプラグインが存在。
- 😻 Web UI: ブラウザを通じてLangBotインスタンスを管理することをサポート。
<img alt="返信効果(インターネットプラグイン付き)" src="https://docs.langbot.app/QChatGPT-0516.png" width="500px"/>
詳細な仕様については、[ドキュメント](https://docs.langbot.app/en/insight/features.html)を参照してください。
- WebUIデモ: https://demo.langbot.dev/
- ログイン情報: メール: `demo@langbot.app` パスワード: `langbot123456`
- 注意: WebUIの効果のみを示しています。公開環境では機密情報を入力しないでください。
## 🔌 コンポーネントの互換性
または、デモ環境にアクセスしてください: https://demo.langbot.dev/
- ログイン情報: メール: `demo@langbot.app` パスワード: `langbot123456`
- 注意: WebUI のデモンストレーションのみの場合、公開環境では機密情報を入力しないでください。
### メッセージプラットフォーム
@@ -93,10 +89,6 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
| Discord | ✅ | |
| Telegram | ✅ | |
| Slack | ✅ | |
| LINE | 🚧 | |
| WhatsApp | 🚧 | |
🚧: 開発中
### LLMs
@@ -110,6 +102,7 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
| [Zhipu AI](https://open.bigmodel.cn/) | ✅ | |
| [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型とGPUリソースプラットフォーム |
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型とGPUリソースプラットフォーム |
| [ShengSuanYun](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | LLMとGPUリソースプラットフォーム |
| [302.AI](https://share.302.ai/SuTG99) | ✅ | LLMゲートウェイ(MaaS) |
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
| [Dify](https://dify.ai) | ✅ | LLMOpsプラットフォーム |
@@ -129,9 +122,3 @@ LangBot への貢献に対して、以下の [コード貢献者](https://github
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
</a>
## 😎 最新情報を入手
リポジトリの右上にある Star と Watch ボタンをクリックして、最新の更新を取得してください。
![star gif](https://docs.langbot.app/star.gif)

140 README_TW.md Normal file
View File

@@ -0,0 +1,140 @@
<p align="center">
<a href="https://langbot.app">
<img src="https://docs.langbot.app/social_zh.png" alt="LangBot"/>
</a>
<div align="center"><a href="https://hellogithub.com/repository/langbot-app/LangBot" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=5ce8ae2aa4f74316bf393b57b952433c&claim_uid=gtmc6YWjMZkT21R" alt="FeaturedHelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
[English](README_EN.md) / [简体中文](README.md) / 繁體中文 / [日本語](README_JP.md) / (PR for your language)
[![Discord](https://img.shields.io/discord/1335141740050649118?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.gg/wdNEHETs87)
[![QQ Group](https://img.shields.io/badge/%E7%A4%BE%E5%8C%BAQQ%E7%BE%A4-966235608-blue)](https://qm.qq.com/q/JLi38whHum)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/langbot-app/LangBot)
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/langbot-app/LangBot)](https://github.com/langbot-app/LangBot/releases/latest)
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
[![star](https://gitcode.com/RockChinQ/LangBot/star/badge.svg)](https://gitcode.com/RockChinQ/LangBot)
<a href="https://langbot.app">主頁</a>
<a href="https://docs.langbot.app/zh/insight/guide.html">部署文件</a>
<a href="https://docs.langbot.app/zh/plugin/plugin-intro.html">外掛介紹</a>
<a href="https://github.com/langbot-app/LangBot/issues/new?assignees=&labels=%E7%8B%AC%E7%AB%8B%E6%8F%92%E4%BB%B6&projects=&template=submit-plugin.yml&title=%5BPlugin%5D%3A+%E8%AF%B7%E6%B1%82%E7%99%BB%E8%AE%B0%E6%96%B0%E6%8F%92%E4%BB%B6">提交外掛</a>
</div>
</p>
LangBot 是一個開源的大語言模型原生即時通訊機器人開發平台,旨在提供開箱即用的 IM 機器人開發體驗,具有 Agent、RAG、MCP 等多種 LLM 應用功能,適配全球主流即時通訊平台,並提供豐富的 API 介面,支援自定義開發。
## 📦 開始使用
#### Docker Compose 部署
```bash
git clone https://github.com/langbot-app/LangBot
cd LangBot
docker compose up -d
```
訪問 http://localhost:5300 即可開始使用。
詳細文件[Docker 部署](https://docs.langbot.app/zh/deploy/langbot/docker.html)。
#### 寶塔面板部署
已上架寶塔面板,若您已安裝寶塔面板,可以根據[文件](https://docs.langbot.app/zh/deploy/langbot/one-click/bt.html)使用。
#### Zeabur 雲端部署
社群貢獻的 Zeabur 模板。
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/zh-CN/templates/ZKTBDH)
#### Railway 雲端部署
[![Deploy on Railway](https://railway.com/button.svg)](https://railway.app/template/yRrAyL?referralCode=vogKPF)
#### 手動部署
直接使用發行版運行,查看文件[手動部署](https://docs.langbot.app/zh/deploy/langbot/manual.html)。
## 😎 保持更新
點擊倉庫右上角 Star 和 Watch 按鈕,獲取最新動態。
![star gif](https://docs.langbot.app/star.gif)
## ✨ 特性
- 💬 大模型對話、Agent:支援多種大模型,適配群聊和私聊;具有多輪對話、工具調用、多模態、流式輸出能力,自帶 RAG(知識庫)實現,並深度適配 [Dify](https://dify.ai)。
- 🤖 多平台支援:目前支援 QQ、QQ頻道、企業微信、個人微信、飛書、Discord、Telegram 等平台。
- 🛠️ 高穩定性、功能完備:原生支援訪問控制、限速、敏感詞過濾等機制;配置簡單,支援多種部署方式。支援多流水線配置,不同機器人用於不同應用場景。
- 🧩 外掛擴展、活躍社群:支援事件驅動、組件擴展等外掛機制;適配 Anthropic [MCP 協議](https://modelcontextprotocol.io/);目前已有數百個外掛。
- 😻 Web 管理面板:支援通過瀏覽器管理 LangBot 實例,不再需要手動編寫配置文件。
詳細規格特性請訪問[文件](https://docs.langbot.app/zh/insight/features.html)。
或訪問 demo 環境https://demo.langbot.dev/
- 登入資訊:郵箱:`demo@langbot.app` 密碼:`langbot123456`
- 注意:僅展示 WebUI 效果,公開環境,請不要在其中填入您的任何敏感資訊。
### 訊息平台
| 平台 | 狀態 | 備註 |
| --- | --- | --- |
| QQ 個人號 | ✅ | QQ 個人號私聊、群聊 |
| QQ 官方機器人 | ✅ | QQ 官方機器人,支援頻道、私聊、群聊 |
| 微信 | ✅ | |
| 企微對外客服 | ✅ | |
| 微信公眾號 | ✅ | |
| Lark | ✅ | |
| DingTalk | ✅ | |
| Discord | ✅ | |
| Telegram | ✅ | |
| Slack | ✅ | |
### 大模型能力
| 模型 | 狀態 | 備註 |
| --- | --- | --- |
| [OpenAI](https://platform.openai.com/) | ✅ | 可接入任何 OpenAI 介面格式模型 |
| [DeepSeek](https://www.deepseek.com/) | ✅ | |
| [Moonshot](https://www.moonshot.cn/) | ✅ | |
| [Anthropic](https://www.anthropic.com/) | ✅ | |
| [xAI](https://x.ai/) | ✅ | |
| [智譜AI](https://open.bigmodel.cn/) | ✅ | |
| [勝算雲](https://www.shengsuanyun.com/?from=CH_KYIPP758) | ✅ | 大模型和 GPU 資源平台 |
| [優雲智算](https://www.compshare.cn/?ytag=GPU_YY-gh_langbot) | ✅ | 大模型和 GPU 資源平台 |
| [PPIO](https://ppinfra.com/user/register?invited_by=QJKFYD&utm_source=github_langbot) | ✅ | 大模型和 GPU 資源平台 |
| [302.AI](https://share.302.ai/SuTG99) | ✅ | 大模型聚合平台 |
| [Google Gemini](https://aistudio.google.com/prompts/new_chat) | ✅ | |
| [Dify](https://dify.ai) | ✅ | LLMOps 平台 |
| [Ollama](https://ollama.com/) | ✅ | 本地大模型運行平台 |
| [LMStudio](https://lmstudio.ai/) | ✅ | 本地大模型運行平台 |
| [GiteeAI](https://ai.gitee.com/) | ✅ | 大模型介面聚合平台 |
| [SiliconFlow](https://siliconflow.cn/) | ✅ | 大模型聚合平台 |
| [阿里雲百煉](https://bailian.console.aliyun.com/) | ✅ | 大模型聚合平台, LLMOps 平台 |
| [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | 大模型聚合平台, LLMOps 平台 |
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | 大模型聚合平台 |
| [MCP](https://modelcontextprotocol.io/) | ✅ | 支援通過 MCP 協議獲取工具 |
### TTS
| 平台/模型 | 備註 |
| --- | --- |
| [FishAudio](https://fish.audio/zh-CN/discovery/) | [外掛](https://github.com/the-lazy-me/NewChatVoice) |
| [海豚 AI](https://www.ttson.cn/?source=thelazy) | [外掛](https://github.com/the-lazy-me/NewChatVoice) |
| [AzureTTS](https://portal.azure.com/) | [外掛](https://github.com/Ingnaryk/LangBot_AzureTTS) |
### 文生圖
| 平台/模型 | 備註 |
| --- | --- |
| 阿里雲百煉 | [外掛](https://github.com/Thetail001/LangBot_BailianTextToImagePlugin)
## 😘 社群貢獻
感謝以下[程式碼貢獻者](https://github.com/langbot-app/LangBot/graphs/contributors)和社群裡其他成員對 LangBot 的貢獻:
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
</a>

View File

@@ -253,6 +253,43 @@ class DingTalkClient:
await self.logger.error(f'failed to send proactive massage to group: {traceback.format_exc()}')
raise Exception(f'failed to send proactive massage to group: {traceback.format_exc()}')
async def create_and_card(
self, temp_card_id: str, incoming_message: dingtalk_stream.ChatbotMessage, quote_origin: bool = False
):
content_key = 'content'
card_data = {content_key: ''}
card_instance = dingtalk_stream.AICardReplier(self.client, incoming_message)
# print(card_instance)
# 先投放卡片: https://open.dingtalk.com/document/orgapp/create-and-deliver-cards
card_instance_id = await card_instance.async_create_and_deliver_card(
temp_card_id,
card_data,
)
return card_instance, card_instance_id
async def send_card_message(self, card_instance, card_instance_id: str, content: str, is_final: bool):
content_key = 'content'
try:
await card_instance.async_streaming(
card_instance_id,
content_key=content_key,
content_value=content,
append=False,
finished=is_final,
failed=False,
)
except Exception as e:
self.logger.exception(e)
await card_instance.async_streaming(
card_instance_id,
content_key=content_key,
content_value='',
append=False,
finished=is_final,
failed=True,
)
async def start(self):
"""启动 WebSocket 连接,监听消息"""
await self.client.start()
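A usage sketch for the two helpers added in this hunk: create the AI card once, then push the accumulated reply into it as chunks arrive, marking the last update as final. The chunk source and the surrounding adapter objects are assumptions; the method signatures match the code above.

```python
# Usage sketch for create_and_card / send_card_message added above; the
# chunks iterator and the client/template/incoming_message objects are
# assumptions standing in for the adapter's real streaming context.
async def stream_reply_to_dingtalk_card(client, temp_card_id, incoming_message, chunks):
    card_instance, card_instance_id = await client.create_and_card(temp_card_id, incoming_message)
    content = ''
    async for chunk in chunks:
        content += chunk
        await client.send_card_message(card_instance, card_instance_id, content, is_final=False)
    # the final update closes the streaming card
    await client.send_card_message(card_instance, card_instance_id, content, is_final=True)
```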

View File

@@ -104,7 +104,7 @@ class QQOfficialClient:
return {'code': 0, 'message': 'success'}
except Exception as e:
await self.logger.error(f"Error in handle_callback_request: {traceback.format_exc()}")
await self.logger.error(f'Error in handle_callback_request: {traceback.format_exc()}')
return {'error': str(e)}, 400
async def run_task(self, host: str, port: int, *args, **kwargs):
@@ -168,7 +168,6 @@ class QQOfficialClient:
if not await self.check_access_token():
await self.get_access_token()
url = self.base_url + '/v2/users/' + user_openid + '/messages'
async with httpx.AsyncClient() as client:
headers = {
@@ -193,7 +192,6 @@ class QQOfficialClient:
if not await self.check_access_token():
await self.get_access_token()
url = self.base_url + '/v2/groups/' + group_openid + '/messages'
async with httpx.AsyncClient() as client:
headers = {
@@ -209,7 +207,7 @@ class QQOfficialClient:
if response.status_code == 200:
return
else:
await self.logger.error(f"发送群聊消息失败:{response.json()}")
await self.logger.error(f'发送群聊消息失败:{response.json()}')
raise Exception(response.read().decode())
async def send_channle_group_text_msg(self, channel_id: str, content: str, msg_id: str):
@@ -217,7 +215,6 @@ class QQOfficialClient:
if not await self.check_access_token():
await self.get_access_token()
url = self.base_url + '/channels/' + channel_id + '/messages'
async with httpx.AsyncClient() as client:
headers = {
@@ -240,7 +237,6 @@ class QQOfficialClient:
"""发送频道私聊消息"""
if not await self.check_access_token():
await self.get_access_token()
url = self.base_url + '/dms/' + guild_id + '/messages'
async with httpx.AsyncClient() as client:

View File

@@ -34,7 +34,6 @@ class SlackClient:
if self.bot_user_id and bot_user_id == self.bot_user_id:
return jsonify({'status': 'ok'})
# 处理私信
if data and data.get('event', {}).get('channel_type') in ['im']:
@@ -52,7 +51,7 @@ class SlackClient:
return jsonify({'status': 'ok'})
except Exception as e:
await self.logger.error(f"Error in handle_callback_request: {traceback.format_exc()}")
await self.logger.error(f'Error in handle_callback_request: {traceback.format_exc()}')
raise (e)
async def _handle_message(self, event: SlackEvent):
@@ -82,7 +81,7 @@ class SlackClient:
self.bot_user_id = response['message']['bot_id']
return
except Exception as e:
await self.logger.error(f"Error in send_message: {e}")
await self.logger.error(f'Error in send_message: {e}')
raise e
async def send_message_to_one(self, text: str, user_id: str):
@@ -93,7 +92,7 @@ class SlackClient:
return
except Exception as e:
await self.logger.error(f"Error in send_message: {traceback.format_exc()}")
await self.logger.error(f'Error in send_message: {traceback.format_exc()}')
raise e
async def run_task(self, host: str, port: int, *args, **kwargs):

View File

@@ -12,12 +12,9 @@ class UserApi:
return get_json(base_url=url, token=self.token)
def get_qr_code(self, recover:bool=True, style:int=8):
def get_qr_code(self, recover: bool = True, style: int = 8):
"""获取自己的二维码"""
param = {
"Recover": recover,
"Style": style
}
param = {'Recover': recover, 'Style': style}
url = f'{self.base_url}/user/GetMyQRCode'
return post_json(base_url=url, token=self.token, data=param)
@@ -26,12 +23,8 @@ class UserApi:
url = f'{self.base_url}/equipment/GetSafetyInfo'
return post_json(base_url=url, token=self.token)
async def update_head_img(self, head_img_base64):
async def update_head_img(self, head_img_base64):
"""修改头像"""
param = {
"Base64": head_img_base64
}
param = {'Base64': head_img_base64}
url = f'{self.base_url}/user/UploadHeadImage'
return await async_request(base_url=url, token_key=self.token, json=param)
return await async_request(base_url=url, token_key=self.token, json=param)

View File

@@ -1,4 +1,3 @@
from libs.wechatpad_api.api.login import LoginApi
from libs.wechatpad_api.api.friend import FriendApi
from libs.wechatpad_api.api.message import MessageApi
@@ -7,9 +6,6 @@ from libs.wechatpad_api.api.downloadpai import DownloadApi
from libs.wechatpad_api.api.chatroom import ChatRoomApi
class WeChatPadClient:
def __init__(self, base_url, token, logger=None):
self._login_api = LoginApi(base_url, token)
@@ -20,16 +16,16 @@ class WeChatPadClient:
self._chatroom_api = ChatRoomApi(base_url, token)
self.logger = logger
def get_token(self,admin_key, day: int):
'''获取token'''
def get_token(self, admin_key, day: int):
"""获取token"""
return self._login_api.get_token(admin_key, day)
def get_login_qr(self, Proxy:str=""):
def get_login_qr(self, Proxy: str = ''):
"""登录二维码"""
return self._login_api.get_login_qr(Proxy=Proxy)
def awaken_login(self, Proxy:str=""):
'''唤醒登录'''
def awaken_login(self, Proxy: str = ''):
"""唤醒登录"""
return self._login_api.wake_up_login(Proxy=Proxy)
def log_out(self):
@@ -40,59 +36,57 @@ class WeChatPadClient:
"""获取登录状态"""
return self._login_api.get_login_status()
def send_text_message(self, to_wxid, message, ats: list=[]):
def send_text_message(self, to_wxid, message, ats: list = []):
"""发送文本消息"""
return self._message_api.post_text(to_wxid, message, ats)
return self._message_api.post_text(to_wxid, message, ats)
def send_image_message(self, to_wxid, img_url, ats: list=[]):
def send_image_message(self, to_wxid, img_url, ats: list = []):
"""发送图片消息"""
return self._message_api.post_image(to_wxid, img_url, ats)
return self._message_api.post_image(to_wxid, img_url, ats)
def send_voice_message(self, to_wxid, voice_data, voice_forma, voice_duration):
"""发送音频消息"""
return self._message_api.post_voice(to_wxid, voice_data, voice_forma, voice_duration)
return self._message_api.post_voice(to_wxid, voice_data, voice_forma, voice_duration)
def send_app_message(self, to_wxid, app_message, type):
"""发送app消息"""
return self._message_api.post_app_msg(to_wxid, app_message, type)
return self._message_api.post_app_msg(to_wxid, app_message, type)
def send_emoji_message(self, to_wxid, emoji_md5, emoji_size):
"""发送emoji消息"""
return self._message_api.post_emoji(to_wxid,emoji_md5,emoji_size)
return self._message_api.post_emoji(to_wxid, emoji_md5, emoji_size)
def revoke_msg(self, to_wxid, msg_id, new_msg_id, create_time):
"""撤回消息"""
return self._message_api.revoke_msg(to_wxid, msg_id, new_msg_id, create_time)
return self._message_api.revoke_msg(to_wxid, msg_id, new_msg_id, create_time)
def get_profile(self):
"""获取用户信息"""
return self._user_api.get_profile()
def get_qr_code(self, recover:bool=True, style:int=8):
def get_qr_code(self, recover: bool = True, style: int = 8):
"""获取用户二维码"""
return self._user_api.get_qr_code(recover=recover, style=style)
return self._user_api.get_qr_code(recover=recover, style=style)
def get_safety_info(self):
"""获取设备信息"""
return self._user_api.get_safety_info()
return self._user_api.get_safety_info()
def update_head_img(self, head_img_base64):
def update_head_img(self, head_img_base64):
"""上传用户头像"""
return self._user_api.update_head_img(head_img_base64)
return self._user_api.update_head_img(head_img_base64)
def cdn_download(self, aeskey, file_type, file_url):
"""cdn下载"""
return self._download_api.send_download( aeskey, file_type, file_url)
return self._download_api.send_download(aeskey, file_type, file_url)
def get_msg_voice(self,buf_id, length, msgid):
def get_msg_voice(self, buf_id, length, msgid):
"""下载语音"""
return self._download_api.get_msg_voice(buf_id, length, msgid)
async def download_base64(self,url):
async def download_base64(self, url):
return await self._download_api.download_url_to_base64(download_url=url)
def get_chatroom_member_detail(self, chatroom_name):
"""查看群成员详情"""
return self._chatroom_api.get_chatroom_member_detail(chatroom_name)

View File

@@ -1,31 +1,34 @@
import qrcode
def print_green(text):
print(f"\033[32m{text}\033[0m")
print(f'\033[32m{text}\033[0m')
def print_yellow(text):
print(f"\033[33m{text}\033[0m")
print(f'\033[33m{text}\033[0m')
def print_red(text):
print(f"\033[31m{text}\033[0m")
print(f'\033[31m{text}\033[0m')
def make_and_print_qr(url):
"""生成并打印二维码
Args:
url: 需要生成二维码的URL字符串
Returns:
None
功能:
1. 在终端打印二维码的ASCII图形
2. 同时提供在线二维码生成链接作为备选
"""
print_green("请扫描下方二维码登录")
print_green('请扫描下方二维码登录')
qr = qrcode.QRCode()
qr.add_data(url)
qr.make()
qr.print_ascii(invert=True)
print_green(f"也可以访问下方链接获取二维码:\nhttps://api.qrserver.com/v1/create-qr-code/?data={url}")
print_green(f'也可以访问下方链接获取二维码:\nhttps://api.qrserver.com/v1/create-qr-code/?data={url}')

View File

@@ -57,7 +57,7 @@ class WecomClient:
if 'access_token' in data:
return data['access_token']
else:
await self.logger.error(f"获取accesstoken失败:{response.json()}")
await self.logger.error(f'获取accesstoken失败:{response.json()}')
raise Exception(f'未获取access token: {data}')
async def get_users(self):
@@ -129,7 +129,7 @@ class WecomClient:
response = await client.post(url, json=params)
data = response.json()
except Exception as e:
await self.logger.error(f"发送图片失败:{data}")
await self.logger.error(f'发送图片失败:{data}')
raise Exception('Failed to send image: ' + str(e))
# 企业微信错误码40014和42001代表accesstoken问题
@@ -164,7 +164,7 @@ class WecomClient:
self.access_token = await self.get_access_token(self.secret)
return await self.send_private_msg(user_id, agent_id, content)
if data['errcode'] != 0:
await self.logger.error(f"发送消息失败:{data}")
await self.logger.error(f'发送消息失败:{data}')
raise Exception('Failed to send message: ' + str(data))
async def handle_callback_request(self):
@@ -181,7 +181,7 @@ class WecomClient:
echostr = request.args.get('echostr')
ret, reply_echo_str = wxcpt.VerifyURL(msg_signature, timestamp, nonce, echostr)
if ret != 0:
await self.logger.error("验证失败")
await self.logger.error('验证失败')
raise Exception(f'验证失败,错误码: {ret}')
return reply_echo_str
@@ -189,9 +189,8 @@ class WecomClient:
encrypt_msg = await request.data
ret, xml_msg = wxcpt.DecryptMsg(encrypt_msg, msg_signature, timestamp, nonce)
if ret != 0:
await self.logger.error("消息解密失败")
await self.logger.error('消息解密失败')
raise Exception(f'消息解密失败,错误码: {ret}')
# 解析消息并处理
message_data = await self.get_message(xml_msg)
@@ -202,7 +201,7 @@ class WecomClient:
return 'success'
except Exception as e:
await self.logger.error(f"Error in handle_callback_request: {traceback.format_exc()}")
await self.logger.error(f'Error in handle_callback_request: {traceback.format_exc()}')
return f'Error processing request: {str(e)}', 400
async def run_task(self, host: str, port: int, *args, **kwargs):
@@ -301,7 +300,7 @@ class WecomClient:
except binascii.Error as e:
raise ValueError(f'Invalid base64 string: {str(e)}')
else:
await self.logger.error("Image对象出错")
await self.logger.error('Image对象出错')
raise ValueError('image对象出错')
# 设置 multipart/form-data 格式的文件
@@ -325,7 +324,7 @@ class WecomClient:
self.access_token = await self.get_access_token(self.secret)
media_id = await self.upload_to_work(image)
if data.get('errcode', 0) != 0:
await self.logger.error(f"上传图片失败:{data}")
await self.logger.error(f'上传图片失败:{data}')
raise Exception('failed to upload file')
media_id = data.get('media_id')

View File

@@ -187,7 +187,7 @@ class WecomCSClient:
self.access_token = await self.get_access_token(self.secret)
return await self.send_text_msg(open_kfid, external_userid, msgid, content)
if data['errcode'] != 0:
await self.logger.error(f"发送消息失败:{data}")
await self.logger.error(f'发送消息失败:{data}')
raise Exception('Failed to send message')
return data
@@ -227,7 +227,7 @@ class WecomCSClient:
return 'success'
except Exception as e:
if self.logger:
await self.logger.error(f"Error in handle_callback_request: {traceback.format_exc()}")
await self.logger.error(f'Error in handle_callback_request: {traceback.format_exc()}')
else:
traceback.print_exc()
return f'Error processing request: {str(e)}', 400

View File

@@ -123,4 +123,4 @@ class RouterGroup(abc.ABC):
def http_status(self, status: int, code: int, msg: str) -> typing.Tuple[quart.Response, int]:
"""返回一个指定状态码的响应"""
return (self.fail(code, msg), status)
return (self.fail(code, msg), status)

View File

@@ -11,7 +11,11 @@ class PipelinesRouterGroup(group.RouterGroup):
@self.route('', methods=['GET', 'POST'])
async def _() -> str:
if quart.request.method == 'GET':
return self.success(data={'pipelines': await self.ap.pipeline_service.get_pipelines()})
sort_by = quart.request.args.get('sort_by', 'created_at')
sort_order = quart.request.args.get('sort_order', 'DESC')
return self.success(
data={'pipelines': await self.ap.pipeline_service.get_pipelines(sort_by, sort_order)}
)
elif quart.request.method == 'POST':
json_data = await quart.request.json

View File

@@ -1,3 +1,5 @@
import json
import quart
from ... import group
@@ -9,10 +11,18 @@ class WebChatDebugRouterGroup(group.RouterGroup):
@self.route('/send', methods=['POST'])
async def send_message(pipeline_uuid: str) -> str:
"""Send a message to the pipeline for debugging"""
async def stream_generator(generator):
yield 'data: {"type": "start"}\n\n'
async for message in generator:
yield f'data: {json.dumps({"message": message})}\n\n'
yield 'data: {"type": "end"}\n\n'
try:
data = await quart.request.get_json()
session_type = data.get('session_type', 'person')
message_chain_obj = data.get('message', [])
is_stream = data.get('is_stream', False)
if not message_chain_obj:
return self.http_status(400, -1, 'message is required')
@@ -25,13 +35,33 @@ class WebChatDebugRouterGroup(group.RouterGroup):
if not webchat_adapter:
return self.http_status(404, -1, 'WebChat adapter not found')
result = await webchat_adapter.send_webchat_message(pipeline_uuid, session_type, message_chain_obj)
return self.success(
data={
'message': result,
if is_stream:
generator = webchat_adapter.send_webchat_message(
pipeline_uuid, session_type, message_chain_obj, is_stream
)
# 设置正确的响应头
headers = {
'Content-Type': 'text/event-stream',
'Transfer-Encoding': 'chunked',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive'
}
)
return quart.Response(stream_generator(generator), mimetype='text/event-stream', headers=headers)
else: # non-stream
result = None
async for message in webchat_adapter.send_webchat_message(
pipeline_uuid, session_type, message_chain_obj
):
result = message
if result is not None:
return self.success(
data={
'message': result,
}
)
else:
return self.http_status(400, -1, 'message is required')
except Exception as e:
return self.http_status(500, -1, f'Internal server error: {str(e)}')
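For reference, the streaming branch above frames its output as Server-Sent Events: a start marker, one data: {"message": ...} event per chunk, then an end marker. A minimal consumer sketch, assuming an httpx client; the URL, auth, and message-chain payload format are illustrative assumptions, not taken from this diff.
# Hypothetical SSE consumer for the /send streaming response above.
import json
import httpx

payload = {
    'session_type': 'person',
    'message': [{'type': 'Plain', 'text': 'hello'}],  # payload shape is an assumption
    'is_stream': True,
}
url = 'http://localhost:5300/api/v1/pipelines/<pipeline-uuid>/chat/send'  # route prefix assumed
with httpx.stream('POST', url, json=payload, timeout=None) as resp:
    for line in resp.iter_lines():
        if not line.startswith('data: '):
            continue
        event = json.loads(line[len('data: '):])
        if event.get('type') in ('start', 'end'):
            continue  # framing markers emitted by stream_generator
        print(event['message'])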


@@ -67,3 +67,19 @@ class UserRouterGroup(group.RouterGroup):
await self.ap.user_service.reset_password(user_email, new_password)
return self.success(data={'user': user_email})
@self.route('/change-password', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
async def _(user_email: str) -> str:
json_data = await quart.request.json
current_password = json_data['current_password']
new_password = json_data['new_password']
try:
await self.ap.user_service.change_password(user_email, current_password, new_password)
except argon2.exceptions.VerifyMismatchError:
return self.http_status(400, -1, 'Current password is incorrect')
except ValueError as e:
return self.http_status(400, -1, str(e))
return self.success(data={'user': user_email})


@@ -78,7 +78,9 @@ class KnowledgeService:
runtime_kb = await self.ap.rag_mgr.get_knowledge_base_by_uuid(kb_uuid)
if runtime_kb is None:
raise Exception('Knowledge base not found')
return [result.model_dump() for result in await runtime_kb.retrieve(query)]
return [
result.model_dump() for result in await runtime_kb.retrieve(query, runtime_kb.knowledge_base_entity.top_k)
]
async def get_files_by_knowledge_base(self, kb_uuid: str) -> list[dict]:
"""获取知识库文件"""


@@ -101,7 +101,7 @@ class LLMModelsService:
model=runtime_llm_model,
messages=[llm_entities.Message(role='user', content='Hello, world!')],
funcs=[],
extra_args={},
extra_args=model_data.get('extra_args', {}),
)
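The change above forwards any provider-specific extra_args supplied with the model under test instead of always passing an empty dict. For illustration only (field names besides extra_args are assumptions), a submitted model payload might carry:
model_data = {
    'name': 'my-test-model',                                 # illustrative field
    'extra_args': {'temperature': 0.3, 'max_tokens': 512},   # forwarded verbatim to the requester
}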


@@ -38,9 +38,21 @@ class PipelineService:
self.ap.pipeline_config_meta_output.data,
]
async def get_pipelines(self) -> list[dict]:
result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_pipeline.LegacyPipeline))
async def get_pipelines(self, sort_by: str = 'created_at', sort_order: str = 'DESC') -> list[dict]:
query = sqlalchemy.select(persistence_pipeline.LegacyPipeline)
if sort_by == 'created_at':
if sort_order == 'DESC':
query = query.order_by(persistence_pipeline.LegacyPipeline.created_at.desc())
else:
query = query.order_by(persistence_pipeline.LegacyPipeline.created_at.asc())
elif sort_by == 'updated_at':
if sort_order == 'DESC':
query = query.order_by(persistence_pipeline.LegacyPipeline.updated_at.desc())
else:
query = query.order_by(persistence_pipeline.LegacyPipeline.updated_at.asc())
result = await self.ap.persistence_mgr.execute_async(query)
pipelines = result.all()
return [
self.ap.persistence_mgr.serialize_model(persistence_pipeline.LegacyPipeline, pipeline)
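A more compact equivalent of the branching above, shown only as a sketch: it assumes the same persistence_pipeline import used elsewhere in this diff and keeps created_at as the fallback column.
# Sketch: same ordering behavior via getattr (in-package relative import assumed).
import sqlalchemy
from ...entity.persistence import pipeline as persistence_pipeline

def build_pipeline_query(sort_by: str = 'created_at', sort_order: str = 'DESC'):
    if sort_by not in ('created_at', 'updated_at'):
        sort_by = 'created_at'
    column = getattr(persistence_pipeline.LegacyPipeline, sort_by)
    ordering = column.desc() if sort_order.upper() == 'DESC' else column.asc()
    return sqlalchemy.select(persistence_pipeline.LegacyPipeline).order_by(ordering)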


@@ -82,3 +82,18 @@ class UserService:
await self.ap.persistence_mgr.execute_async(
sqlalchemy.update(user.User).where(user.User.user == user_email).values(password=hashed_password)
)
async def change_password(self, user_email: str, current_password: str, new_password: str) -> None:
ph = argon2.PasswordHasher()
user_obj = await self.get_user_by_email(user_email)
if user_obj is None:
raise ValueError('User not found')
ph.verify(user_obj.password, current_password)
hashed_password = ph.hash(new_password)
await self.ap.persistence_mgr.execute_async(
sqlalchemy.update(user.User).where(user.User.user == user_email).values(password=hashed_password)
)
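For context, argon2-cffi's PasswordHasher raises VerifyMismatchError on a wrong current password, which the /change-password route above maps to HTTP 400. A standalone sketch of that flow with made-up passwords:
# Illustrative argon2 flow mirroring change_password above.
from argon2 import PasswordHasher, exceptions

ph = PasswordHasher()
stored_hash = ph.hash('old-secret')         # what user_obj.password would hold

ph.verify(stored_hash, 'old-secret')        # returns True when the password matches
try:
    ph.verify(stored_hash, 'wrong-secret')  # raises on mismatch
except exceptions.VerifyMismatchError:
    print('current password is incorrect')  # mapped to HTTP 400 by the route above

new_hash = ph.hash('new-secret')            # value written back to the users table
print(new_hash.startswith('$argon2'))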


@@ -19,7 +19,7 @@ class LifecycleControlScope(enum.Enum):
APPLICATION = 'application'
PLATFORM = 'platform'
PLUGIN = 'plugin'
PROVIDER = 'provider'
PROVIDER = 'provider'
class LauncherTypes(enum.Enum):
@@ -87,7 +87,9 @@ class Query(pydantic.BaseModel):
"""使用的函数,由前置处理器阶段设置"""
resp_messages: (
typing.Optional[list[llm_entities.Message]] | typing.Optional[list[platform_message.MessageChain]]
typing.Optional[list[llm_entities.Message]]
| typing.Optional[list[platform_message.MessageChain]]
| typing.Optional[list[llm_entities.MessageChunk]]
) = []
"""由Process阶段生成的回复消息对象列表"""


@@ -212,6 +212,7 @@ class DBMigrateV3Config(migration.DBMigration):
self.ap.instance_config.data['api']['port'] = self.ap.system_cfg.data['http-api']['port']
self.ap.instance_config.data['command'] = {
'prefix': self.ap.command_cfg.data['command-prefix'],
'enable': self.ap.command_cfg.data['command-enable'],
'privilege': self.ap.command_cfg.data['privilege'],
}
self.ap.instance_config.data['concurrency']['pipeline'] = self.ap.system_cfg.data['pipeline-concurrency']


@@ -0,0 +1,38 @@
from .. import migration
import sqlalchemy
from ...entity.persistence import pipeline as persistence_pipeline
@migration.migration_class(5)
class DBMigratePipelineRemoveCotConfig(migration.DBMigration):
"""Pipeline remove cot config"""
async def upgrade(self):
"""Upgrade"""
# read all pipelines
pipelines = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_pipeline.LegacyPipeline))
for pipeline in pipelines:
serialized_pipeline = self.ap.persistence_mgr.serialize_model(persistence_pipeline.LegacyPipeline, pipeline)
config = serialized_pipeline['config']
if 'remove-think' not in config['output']['misc']:
config['output']['misc']['remove-think'] = False
await self.ap.persistence_mgr.execute_async(
sqlalchemy.update(persistence_pipeline.LegacyPipeline)
.where(persistence_pipeline.LegacyPipeline.uuid == serialized_pipeline['uuid'])
.values(
{
'config': config,
'for_version': self.ap.ver_mgr.get_current_version(),
}
)
)
async def downgrade(self):
"""Downgrade"""
pass
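The upgrade above only back-fills the new key when it is missing; its core is equivalent to a dict setdefault, shown here with a stand-in config:
# Stand-in pipeline config illustrating the back-fill performed by the migration.
config = {'output': {'misc': {'quote-origin': False}}}
config['output']['misc'].setdefault('remove-think', False)
print(config['output']['misc'])  # {'quote-origin': False, 'remove-think': False}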


@@ -30,6 +30,10 @@ class BanSessionCheckStage(stage.PipelineStage):
if sess == f'{query.launcher_type.value}_{query.launcher_id}':
found = True
break
# 使用 *_id 来表示加白/拉黑某用户的私聊和群聊场景
if sess.startswith('*_') and (sess[2:] == query.launcher_id or sess[2:] == query.sender_id):
found = True
break
ctn = False
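Concretely, the new wildcard rule means an access-control entry such as '*_12345' matches both the private session person_12345 and any group message whose sender id is 12345. A minimal sketch of the combined predicate (parameter names mirror the Query fields used by this stage):
# Sketch of the session match including the new *_ wildcard.
def session_matches(sess: str, launcher_type: str, launcher_id: str, sender_id: str) -> bool:
    if sess == f'{launcher_type}_{launcher_id}':
        return True
    return sess.startswith('*_') and sess[2:] in (launcher_id, sender_id)

print(session_matches('*_12345', 'group', '67890', '12345'))  # True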


@@ -67,7 +67,7 @@ class ContentFilterStage(stage.PipelineStage):
if query.pipeline_config['safety']['content-filter']['scope'] == 'output-msg':
return entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
if not message.strip():
return entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
return entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
else:
for filter in self.filter_chain:
if filter_entities.EnableStage.PRE in filter.enable_stages:


@@ -93,12 +93,20 @@ class RuntimePipeline:
query.message_event, platform_events.GroupMessage
):
result.user_notice.insert(0, platform_message.At(query.message_event.sender.id))
await query.adapter.reply_message(
message_source=query.message_event,
message=result.user_notice,
quote_origin=query.pipeline_config['output']['misc']['quote-origin'],
)
if await query.adapter.is_stream_output_supported():
await query.adapter.reply_message_chunk(
message_source=query.message_event,
bot_message=query.resp_messages[-1],
message=result.user_notice,
quote_origin=query.pipeline_config['output']['misc']['quote-origin'],
is_final=[msg.is_final for msg in query.resp_messages][0]
)
else:
await query.adapter.reply_message(
message_source=query.message_event,
message=result.user_notice,
quote_origin=query.pipeline_config['output']['misc']['quote-origin'],
)
if result.debug_notice:
self.ap.logger.debug(result.debug_notice)
if result.console_notice:


@@ -1,5 +1,6 @@
from __future__ import annotations
import uuid
import typing
import traceback
@@ -22,11 +23,11 @@ class ChatMessageHandler(handler.MessageHandler):
self,
query: core_entities.Query,
) -> typing.AsyncGenerator[entities.StageProcessResult, None]:
"""Process"""
# Call API
# generator
"""处理"""
# API
# 生成器
# Trigger plugin event
# 触发插件事件
event_class = (
events.PersonNormalMessageReceived
if query.launcher_type == core_entities.LauncherTypes.PERSON
@@ -42,11 +43,10 @@ class ChatMessageHandler(handler.MessageHandler):
query=query,
)
)
is_create_card = False # 判断下是否需要创建流式卡片
if event_ctx.is_prevented_default():
if event_ctx.event.reply is not None:
mc = platform_message.MessageChain(event_ctx.event.reply)
query.resp_messages.append(mc)
yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
@@ -54,10 +54,14 @@ class ChatMessageHandler(handler.MessageHandler):
yield entities.StageProcessResult(result_type=entities.ResultType.INTERRUPT, new_query=query)
else:
if event_ctx.event.alter is not None:
# if isinstance(event_ctx.event, str): # Currently not considering multi-modal alter
# if isinstance(event_ctx.event, str): # 现在暂时不考虑多模态alter
query.user_message.content = event_ctx.event.alter
text_length = 0
try:
is_stream = await query.adapter.is_stream_output_supported()
except AttributeError:
is_stream = False
try:
for r in runner_module.preregistered_runners:
@@ -65,22 +69,45 @@ class ChatMessageHandler(handler.MessageHandler):
runner = r(self.ap, query.pipeline_config)
break
else:
raise ValueError(f'Request runner not found: {query.pipeline_config["ai"]["runner"]["runner"]}')
raise ValueError(f'未找到请求运行器: {query.pipeline_config["ai"]["runner"]["runner"]}')
if is_stream:
resp_message_id = uuid.uuid4()
async for result in runner.run(query):
query.resp_messages.append(result)
async for result in runner.run(query):
result.resp_message_id = str(resp_message_id)
if query.resp_messages:
query.resp_messages.pop()
if query.resp_message_chain:
query.resp_message_chain.pop()
# 此时连接外部 AI 服务正常,创建卡片
if not is_create_card: # 仅在第一次收到流式结果时创建卡片
await query.adapter.create_message_card(str(resp_message_id), query.message_event)
is_create_card = True
query.resp_messages.append(result)
self.ap.logger.info(f'对话({query.query_id})流式响应: {self.cut_str(result.readable_str())}')
self.ap.logger.info(f'Response({query.query_id}): {self.cut_str(result.readable_str())}')
if result.content is not None:
text_length += len(result.content)
if result.content is not None:
text_length += len(result.content)
yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
else:
async for result in runner.run(query):
query.resp_messages.append(result)
self.ap.logger.info(f'对话({query.query_id})响应: {self.cut_str(result.readable_str())}')
if result.content is not None:
text_length += len(result.content)
yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
query.session.using_conversation.messages.append(query.user_message)
query.session.using_conversation.messages.extend(query.resp_messages)
except Exception as e:
self.ap.logger.error(f'Request failed({query.query_id}): {type(e).__name__} {str(e)}')
self.ap.logger.error(f'对话({query.query_id})请求失败: {type(e).__name__} {str(e)}')
traceback.print_exc()
hide_exception_info = query.pipeline_config['output']['misc']['hide-exception']
@@ -93,4 +120,4 @@ class ChatMessageHandler(handler.MessageHandler):
)
finally:
# TODO statistics
pass
pass


@@ -42,12 +42,14 @@ class Processor(stage.PipelineStage):
async def generator():
cmd_prefix = self.ap.instance_config.data['command']['prefix']
cmd_enable = self.ap.instance_config.data['command'].get('enable', True)
if any(message_text.startswith(prefix) for prefix in cmd_prefix):
async for result in self.cmd_handler.handle(query):
yield result
if cmd_enable and any(message_text.startswith(prefix) for prefix in cmd_prefix):
handler_to_use = self.cmd_handler
else:
async for result in self.chat_handler.handle(query):
yield result
handler_to_use = self.chat_handler
async for result in handler_to_use.handle(query):
yield result
return generator()


@@ -7,6 +7,10 @@ import asyncio
from ...platform.types import events as platform_events
from ...platform.types import message as platform_message
from ...provider import entities as llm_entities
from .. import stage, entities
from ...core import entities as core_entities
@@ -36,10 +40,22 @@ class SendResponseBackStage(stage.PipelineStage):
quote_origin = query.pipeline_config['output']['misc']['quote-origin']
await query.adapter.reply_message(
message_source=query.message_event,
message=query.resp_message_chain[-1],
quote_origin=quote_origin,
)
has_chunks = any(isinstance(msg, llm_entities.MessageChunk) for msg in query.resp_messages)
# TODO 命令与流式的兼容性问题
if await query.adapter.is_stream_output_supported() and has_chunks:
is_final = [msg.is_final for msg in query.resp_messages][0]
await query.adapter.reply_message_chunk(
message_source=query.message_event,
bot_message=query.resp_messages[-1],
message=query.resp_message_chain[-1],
quote_origin=quote_origin,
is_final=is_final,
)
else:
await query.adapter.reply_message(
message_source=query.message_event,
message=query.resp_message_chain[-1],
quote_origin=quote_origin,
)
return entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)

View File

@@ -61,14 +61,40 @@ class MessagePlatformAdapter(metaclass=abc.ABCMeta):
"""
raise NotImplementedError
async def reply_message_chunk(
self,
message_source: platform_events.MessageEvent,
bot_message: dict,
message: platform_message.MessageChain,
quote_origin: bool = False,
is_final: bool = False,
):
"""回复消息(流式输出)
Args:
message_source (platform.types.MessageEvent): 消息源事件
bot_message (dict): 当前响应的消息对象(携带 resp_message_id、msg_sequence 等字段)
message (platform.types.MessageChain): 消息链
quote_origin (bool, optional): 是否引用原消息. Defaults to False.
is_final (bool, optional): 流式是否结束. Defaults to False.
"""
raise NotImplementedError
async def create_message_card(self, message_id: typing.Union[str, int], event: platform_events.MessageEvent) -> bool:
"""创建卡片消息
Args:
message_id (str): 消息ID
event (platform_events.MessageEvent): 消息源事件
"""
return False
async def is_muted(self, group_id: int) -> bool:
"""获取账号是否在指定群被禁言"""
raise NotImplementedError
def register_listener(
self,
event_type: typing.Type[platform_message.Event],
callback: typing.Callable[[platform_message.Event, MessagePlatformAdapter], None],
event_type: typing.Type[platform_events.Event],
callback: typing.Callable[[platform_events.Event, MessagePlatformAdapter], None],
):
"""注册事件监听器
@@ -80,8 +106,8 @@ class MessagePlatformAdapter(metaclass=abc.ABCMeta):
def unregister_listener(
self,
event_type: typing.Type[platform_message.Event],
callback: typing.Callable[[platform_message.Event, MessagePlatformAdapter], None],
event_type: typing.Type[platform_events.Event],
callback: typing.Callable[[platform_events.Event, MessagePlatformAdapter], None],
):
"""注销事件监听器
@@ -95,6 +121,10 @@ class MessagePlatformAdapter(metaclass=abc.ABCMeta):
"""异步运行"""
raise NotImplementedError
async def is_stream_output_supported(self) -> bool:
"""是否支持流式输出"""
return False
async def kill(self) -> bool:
"""关闭适配器
@@ -136,7 +166,7 @@ class EventConverter:
"""事件转换器基类"""
@staticmethod
def yiri2target(event: typing.Type[platform_message.Event]):
def yiri2target(event: typing.Type[platform_events.Event]):
"""将源平台事件转换为目标平台事件
Args:
@@ -148,7 +178,7 @@ class EventConverter:
raise NotImplementedError
@staticmethod
def target2yiri(event: typing.Any) -> platform_message.Event:
def target2yiri(event: typing.Any) -> platform_events.Event:
"""将目标平台事件的调用参数转换为源平台的事件参数对象
Args:
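Taken together with the pipeline changes earlier in this diff, the flow is: the pipeline calls is_stream_output_supported(), creates a card via create_message_card() when streaming starts, then pushes incremental chunks through reply_message_chunk() until is_final. A toy adapter illustrating that contract; all names here are stand-ins, not the project's real adapter or message types.
# Toy illustration of the streaming adapter contract described above.
import asyncio

class ToyStreamingAdapter:
    def __init__(self, enable_stream: bool):
        self.enable_stream = enable_stream
        self.buffer = ''

    async def is_stream_output_supported(self) -> bool:
        return self.enable_stream

    async def create_message_card(self, message_id: str, event) -> bool:
        print(f'create card for {message_id}')
        return True

    async def reply_message_chunk(self, message_source, bot_message, message,
                                  quote_origin: bool = False, is_final: bool = False):
        self.buffer += str(message)
        if is_final:
            print('final card content:', self.buffer)

async def main():
    adapter = ToyStreamingAdapter(enable_stream=True)
    if await adapter.is_stream_output_supported():
        await adapter.create_message_card('msg-1', event=None)
        for i, chunk in enumerate(['Hel', 'lo', '!']):
            await adapter.reply_message_chunk(None, None, chunk, is_final=(i == 2))

asyncio.run(main())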


@@ -120,8 +120,10 @@ class RuntimeBot:
if isinstance(e, asyncio.CancelledError):
self.task_context.set_current_action('Exited.')
return
traceback_str = traceback.format_exc()
self.task_context.set_current_action('Exited with error.')
await self.logger.error(f'平台适配器运行出错:\n{e}\n{traceback.format_exc()}')
await self.logger.error(f'平台适配器运行出错:\n{e}\n{traceback_str}')
self.task_wrapper = self.ap.task_mgr.create_task(
exception_wrapper(),


@@ -266,7 +266,7 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
await process_message_data(msg_data, reply_list)
reply_msg = platform_message.Quote(
message_id=msg.data['id'], sender_id=msg_datas['user_id'], origin=reply_list
message_id=msg.data['id'], sender_id=msg_datas['sender']['user_id'], origin=reply_list
)
yiri_msg_list.append(reply_msg)


@@ -1,3 +1,4 @@
from re import S
import traceback
import typing
from libs.dingtalk_api.dingtalkevent import DingTalkEvent
@@ -22,6 +23,9 @@ class DingTalkMessageConverter(adapter.MessageConverter):
at = True
if type(msg) is platform_message.Plain:
content += msg.text
if type(msg) is platform_message.Forward:
for node in msg.node_list:
content += (await DingTalkMessageConverter.yiri2target(node.message_chain))[0]
return content, at
@staticmethod
@@ -60,7 +64,7 @@ class DingTalkEventConverter(adapter.EventConverter):
if event.conversation == 'FriendMessage':
return platform_events.FriendMessage(
sender=platform_entities.Friend(
id=event.incoming_message.sender_id,
id=event.incoming_message.sender_staff_id,
nickname=event.incoming_message.sender_nick,
remark='',
),
@@ -70,7 +74,7 @@ class DingTalkEventConverter(adapter.EventConverter):
)
elif event.conversation == 'GroupMessage':
sender = platform_entities.GroupMember(
id=event.incoming_message.sender_id,
id=event.incoming_message.sender_staff_id,
member_name=event.incoming_message.sender_nick,
permission='MEMBER',
group=platform_entities.Group(
@@ -99,11 +103,15 @@ class DingTalkAdapter(adapter.MessagePlatformAdapter):
message_converter: DingTalkMessageConverter = DingTalkMessageConverter()
event_converter: DingTalkEventConverter = DingTalkEventConverter()
config: dict
card_instance_id_dict: dict # 回复卡片消息字典key为消息idvalue为回复卡片实例id用于在流式消息时判断是否发送到指定卡片
seq: int # 消息顺序直接以seq作为标识
def __init__(self, config: dict, ap: app.Application, logger: EventLogger):
self.config = config
self.ap = ap
self.logger = logger
self.card_instance_id_dict = {}
# self.seq = 1
required_keys = [
'client_id',
'client_secret',
@@ -139,6 +147,37 @@ class DingTalkAdapter(adapter.MessagePlatformAdapter):
content, at = await DingTalkMessageConverter.yiri2target(message)
await self.bot.send_message(content, incoming_message, at)
async def reply_message_chunk(
self,
message_source: platform_events.MessageEvent,
bot_message,
message: platform_message.MessageChain,
quote_origin: bool = False,
is_final: bool = False,
):
# event = await DingTalkEventConverter.yiri2target(
# message_source,
# )
# incoming_message = event.incoming_message
# msg_id = incoming_message.message_id
message_id = bot_message.resp_message_id
msg_seq = bot_message.msg_sequence
if (msg_seq - 1) % 8 == 0 or is_final:
content, at = await DingTalkMessageConverter.yiri2target(message)
card_instance, card_instance_id = self.card_instance_id_dict[message_id]
if not content and bot_message.content:
content = bot_message.content # 兼容直接传入content的情况
# print(card_instance_id)
if content:
await self.bot.send_card_message(card_instance, card_instance_id, content, is_final)
if is_final and bot_message.tool_calls is None:
# self.seq = 1 # 消息回复结束之后重置seq
self.card_instance_id_dict.pop(message_id) # 消息回复结束之后删除卡片实例id
async def send_message(self, target_type: str, target_id: str, message: platform_message.MessageChain):
content = await DingTalkMessageConverter.yiri2target(message)
if target_type == 'person':
@@ -146,6 +185,20 @@ class DingTalkAdapter(adapter.MessagePlatformAdapter):
if target_type == 'group':
await self.bot.send_proactive_message_to_group(target_id, content)
async def is_stream_output_supported(self) -> bool:
is_stream = False
if self.config.get('enable-stream-reply', None):
is_stream = True
return is_stream
async def create_message_card(self, message_id, event):
card_template_id = self.config['card_template_id']
incoming_message = event.source_platform_object.incoming_message
# message_id = incoming_message.message_id
card_instance, card_instance_id = await self.bot.create_and_card(card_template_id, incoming_message)
self.card_instance_id_dict[message_id] = (card_instance, card_instance_id)
return True
def register_listener(
self,
event_type: typing.Type[platform_events.Event],
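The (msg_seq - 1) % 8 == 0 guard above throttles DingTalk card updates to the 1st, 9th, 17th... chunk plus the final one. A quick check of that arithmetic:
# Which chunk sequence numbers trigger a card update, per the guard above.
def should_update(msg_seq: int, is_final: bool) -> bool:
    return (msg_seq - 1) % 8 == 0 or is_final

print([seq for seq in range(1, 20) if should_update(seq, is_final=False)])  # [1, 9, 17]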


@@ -46,6 +46,23 @@ spec:
type: boolean
required: false
default: true
- name: enable-stream-reply
label:
en_US: Enable Stream Reply Mode
zh_Hans: 启用钉钉卡片流式回复模式
description:
en_US: If enabled, the bot will reply using DingTalk streaming card mode
zh_Hans: 如果启用,将使用钉钉卡片流式方式来回复内容
type: boolean
required: true
default: false
- name: card_template_id
label:
en_US: Card Template ID
zh_Hans: 卡片模板ID
type: string
required: true
default: "填写你的卡片template_id"
execution:
python:
path: ./dingtalk.py


@@ -8,7 +8,6 @@ import base64
import uuid
import os
import datetime
import io
import asyncio
from enum import Enum
@@ -21,9 +20,11 @@ from ..types import message as platform_message
from ..types import events as platform_events
from ..types import entities as platform_entities
# 语音功能相关异常定义
class VoiceConnectionError(Exception):
"""语音连接基础异常"""
def __init__(self, message: str, error_code: str = None, guild_id: int = None):
super().__init__(message)
self.error_code = error_code
@@ -33,8 +34,9 @@ class VoiceConnectionError(Exception):
class VoicePermissionError(VoiceConnectionError):
"""语音权限异常"""
def __init__(self, message: str, missing_permissions: list = None, user_id: int = None, channel_id: int = None):
super().__init__(message, "PERMISSION_ERROR")
super().__init__(message, 'PERMISSION_ERROR')
self.missing_permissions = missing_permissions or []
self.user_id = user_id
self.channel_id = channel_id
@@ -42,40 +44,42 @@ class VoicePermissionError(VoiceConnectionError):
class VoiceNetworkError(VoiceConnectionError):
"""语音网络异常"""
def __init__(self, message: str, retry_count: int = 0):
super().__init__(message, "NETWORK_ERROR")
super().__init__(message, 'NETWORK_ERROR')
self.retry_count = retry_count
self.last_attempt = datetime.datetime.now()
class VoiceConnectionStatus(Enum):
"""语音连接状态枚举"""
IDLE = "idle"
CONNECTING = "connecting"
CONNECTED = "connected"
PLAYING = "playing"
RECONNECTING = "reconnecting"
FAILED = "failed"
IDLE = 'idle'
CONNECTING = 'connecting'
CONNECTED = 'connected'
PLAYING = 'playing'
RECONNECTING = 'reconnecting'
FAILED = 'failed'
class VoiceConnectionInfo:
"""
语音连接信息类
用于存储和管理单个语音连接的详细信息,包括连接状态、时间戳、
频道信息等。提供连接信息的标准化数据结构。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
"""
def __init__(self, guild_id: int, channel_id: int, channel_name: str = None):
"""
初始化语音连接信息
@author: @ydzat
Args:
guild_id (int): 服务器ID
channel_id (int): 语音频道ID
@@ -83,28 +87,28 @@ class VoiceConnectionInfo:
"""
self.guild_id = guild_id
self.channel_id = channel_id
self.channel_name = channel_name or f"Channel-{channel_id}"
self.channel_name = channel_name or f'Channel-{channel_id}'
self.connected = False
self.connection_time: datetime.datetime = None
self.last_activity = datetime.datetime.now()
self.status = VoiceConnectionStatus.IDLE
self.user_count = 0
self.latency = 0.0
self.connection_health = "unknown"
self.connection_health = 'unknown'
self.voice_client = None
def update_status(self, status: VoiceConnectionStatus):
"""
更新连接状态
@author: @ydzat
Args:
status (VoiceConnectionStatus): 新的连接状态
"""
self.status = status
self.last_activity = datetime.datetime.now()
if status == VoiceConnectionStatus.CONNECTED:
self.connected = True
if self.connection_time is None:
@@ -113,48 +117,48 @@ class VoiceConnectionInfo:
self.connected = False
self.connection_time = None
self.voice_client = None
def to_dict(self) -> dict:
"""
转换为字典格式
@author: @ydzat
Returns:
dict: 连接信息的字典表示
"""
return {
"guild_id": self.guild_id,
"channel_id": self.channel_id,
"channel_name": self.channel_name,
"connected": self.connected,
"connection_time": self.connection_time.isoformat() if self.connection_time else None,
"last_activity": self.last_activity.isoformat(),
"status": self.status.value,
"user_count": self.user_count,
"latency": self.latency,
"connection_health": self.connection_health
'guild_id': self.guild_id,
'channel_id': self.channel_id,
'channel_name': self.channel_name,
'connected': self.connected,
'connection_time': self.connection_time.isoformat() if self.connection_time else None,
'last_activity': self.last_activity.isoformat(),
'status': self.status.value,
'user_count': self.user_count,
'latency': self.latency,
'connection_health': self.connection_health,
}
class VoiceConnectionManager:
"""
语音连接管理器
负责管理多个服务器的语音连接,提供连接建立、断开、状态查询等功能。
采用单例模式确保全局只有一个连接管理器实例。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
"""
def __init__(self, bot: discord.Client, logger: EventLogger):
"""
初始化语音连接管理器
@author: @ydzat
Args:
bot (discord.Client): Discord 客户端实例
logger (EventLogger): 事件日志记录器
@@ -165,25 +169,24 @@ class VoiceConnectionManager:
self._connection_lock = asyncio.Lock()
self._cleanup_task = None
self._monitoring_enabled = True
async def join_voice_channel(self, guild_id: int, channel_id: int,
user_id: int = None) -> discord.VoiceClient:
async def join_voice_channel(self, guild_id: int, channel_id: int, user_id: int = None) -> discord.VoiceClient:
"""
加入语音频道
验证用户权限和频道状态后,建立到指定语音频道的连接。
支持连接复用和自动重连机制。
@author: @ydzat
Args:
guild_id (int): 服务器ID
channel_id (int): 语音频道ID
user_id (int, optional): 请求用户ID用于权限验证
Returns:
discord.VoiceClient: 语音客户端实例
Raises:
VoicePermissionError: 权限不足时抛出
VoiceNetworkError: 网络连接失败时抛出
@@ -194,370 +197,353 @@ class VoiceConnectionManager:
# 获取服务器和频道对象
guild = self.bot.get_guild(guild_id)
if not guild:
raise VoiceConnectionError(
f"无法找到服务器 {guild_id}",
"GUILD_NOT_FOUND",
guild_id
)
raise VoiceConnectionError(f'无法找到服务器 {guild_id}', 'GUILD_NOT_FOUND', guild_id)
channel = guild.get_channel(channel_id)
if not channel or not isinstance(channel, discord.VoiceChannel):
raise VoiceConnectionError(
f"无法找到语音频道 {channel_id}",
"CHANNEL_NOT_FOUND",
guild_id
)
raise VoiceConnectionError(f'无法找到语音频道 {channel_id}', 'CHANNEL_NOT_FOUND', guild_id)
# 验证用户是否在语音频道中如果提供了用户ID
if user_id:
await self._validate_user_in_channel(guild, channel, user_id)
# 验证机器人权限
await self._validate_bot_permissions(channel)
# 检查是否已有连接
if guild_id in self.connections:
existing_conn = self.connections[guild_id]
if existing_conn.connected and existing_conn.voice_client:
if existing_conn.channel_id == channel_id:
# 已连接到相同频道,返回现有连接
await self.logger.info(f"复用现有语音连接: {guild.name} -> {channel.name}")
await self.logger.info(f'复用现有语音连接: {guild.name} -> {channel.name}')
return existing_conn.voice_client
else:
# 连接到不同频道,先断开旧连接
await self._disconnect_internal(guild_id)
# 建立新连接
voice_client = await channel.connect()
# 更新连接信息
conn_info = VoiceConnectionInfo(guild_id, channel_id, channel.name)
conn_info.voice_client = voice_client
conn_info.update_status(VoiceConnectionStatus.CONNECTED)
conn_info.user_count = len(channel.members)
self.connections[guild_id] = conn_info
await self.logger.info(f"成功连接到语音频道: {guild.name} -> {channel.name}")
await self.logger.info(f'成功连接到语音频道: {guild.name} -> {channel.name}')
return voice_client
except discord.ClientException as e:
raise VoiceNetworkError(f"Discord 客户端错误: {str(e)}")
raise VoiceNetworkError(f'Discord 客户端错误: {str(e)}')
except discord.opus.OpusNotLoaded as e:
raise VoiceConnectionError(f"Opus 编码器未加载: {str(e)}", "OPUS_NOT_LOADED", guild_id)
raise VoiceConnectionError(f'Opus 编码器未加载: {str(e)}', 'OPUS_NOT_LOADED', guild_id)
except Exception as e:
await self.logger.error(f"连接语音频道时发生未知错误: {str(e)}")
raise VoiceConnectionError(f"连接失败: {str(e)}", "UNKNOWN_ERROR", guild_id)
await self.logger.error(f'连接语音频道时发生未知错误: {str(e)}')
raise VoiceConnectionError(f'连接失败: {str(e)}', 'UNKNOWN_ERROR', guild_id)
async def leave_voice_channel(self, guild_id: int) -> bool:
"""
离开语音频道
断开指定服务器的语音连接,清理相关资源和状态信息。
确保音频播放停止后再断开连接。
@author: @ydzat
Args:
guild_id (int): 服务器ID
Returns:
bool: 断开是否成功
"""
async with self._connection_lock:
return await self._disconnect_internal(guild_id)
async def _disconnect_internal(self, guild_id: int) -> bool:
"""
内部断开连接方法
@author: @ydzat
Args:
guild_id (int): 服务器ID
Returns:
bool: 断开是否成功
"""
if guild_id not in self.connections:
return True
conn_info = self.connections[guild_id]
try:
if conn_info.voice_client and conn_info.voice_client.is_connected():
# 停止当前播放
if conn_info.voice_client.is_playing():
conn_info.voice_client.stop()
# 等待播放完全停止
await asyncio.sleep(0.1)
# 断开连接
await conn_info.voice_client.disconnect()
conn_info.update_status(VoiceConnectionStatus.IDLE)
del self.connections[guild_id]
await self.logger.info(f"已断开语音连接: Guild {guild_id}")
await self.logger.info(f'已断开语音连接: Guild {guild_id}')
return True
except Exception as e:
await self.logger.error(f"断开语音连接时发生错误: {str(e)}")
await self.logger.error(f'断开语音连接时发生错误: {str(e)}')
# 即使出错也要清理连接记录
conn_info.update_status(VoiceConnectionStatus.FAILED)
if guild_id in self.connections:
del self.connections[guild_id]
return False
async def get_voice_client(self, guild_id: int) -> typing.Optional[discord.VoiceClient]:
"""
获取语音客户端
返回指定服务器的语音客户端实例,如果未连接则返回 None。
会验证连接的有效性,自动清理无效连接。
@author: @ydzat
Args:
guild_id (int): 服务器ID
Returns:
Optional[discord.VoiceClient]: 语音客户端实例或 None
"""
if guild_id not in self.connections:
return None
conn_info = self.connections[guild_id]
# 验证连接是否仍然有效
if conn_info.voice_client and not conn_info.voice_client.is_connected():
# 连接已失效,清理状态
await self._disconnect_internal(guild_id)
return None
return conn_info.voice_client if conn_info.connected else None
async def is_connected_to_voice(self, guild_id: int) -> bool:
"""
检查是否连接到语音频道
@author: @ydzat
Args:
guild_id (int): 服务器ID
Returns:
bool: 是否已连接
"""
if guild_id not in self.connections:
return False
conn_info = self.connections[guild_id]
# 检查实际连接状态
if conn_info.voice_client and not conn_info.voice_client.is_connected():
# 连接已失效,清理状态
await self._disconnect_internal(guild_id)
return False
return conn_info.connected
async def get_connection_status(self, guild_id: int) -> typing.Optional[dict]:
"""
获取连接状态信息
@author: @ydzat
Args:
guild_id (int): 服务器ID
Returns:
Optional[dict]: 连接状态信息字典或 None
"""
if guild_id not in self.connections:
return None
conn_info = self.connections[guild_id]
# 更新实时信息
if conn_info.voice_client and conn_info.voice_client.is_connected():
conn_info.latency = conn_info.voice_client.latency * 1000 # 转换为毫秒
conn_info.connection_health = "good" if conn_info.latency < 100 else "poor"
conn_info.connection_health = 'good' if conn_info.latency < 100 else 'poor'
# 更新频道用户数
guild = self.bot.get_guild(guild_id)
if guild:
channel = guild.get_channel(conn_info.channel_id)
if channel and isinstance(channel, discord.VoiceChannel):
conn_info.user_count = len(channel.members)
return conn_info.to_dict()
async def list_active_connections(self) -> typing.List[dict]:
"""
列出所有活跃连接
@author: @ydzat
Returns:
List[dict]: 活跃连接列表
"""
active_connections = []
for guild_id, conn_info in self.connections.items():
if conn_info.connected:
status = await self.get_connection_status(guild_id)
if status:
active_connections.append(status)
return active_connections
async def get_voice_channel_info(self, guild_id: int, channel_id: int) -> typing.Optional[dict]:
"""
获取语音频道信息
@author: @ydzat
Args:
guild_id (int): 服务器ID
channel_id (int): 频道ID
Returns:
Optional[dict]: 频道信息字典或 None
"""
guild = self.bot.get_guild(guild_id)
if not guild:
return None
channel = guild.get_channel(channel_id)
if not channel or not isinstance(channel, discord.VoiceChannel):
return None
# 获取用户信息
users = []
for member in channel.members:
users.append({
"id": member.id,
"name": member.display_name,
"status": str(member.status),
"is_bot": member.bot
})
users.append(
{'id': member.id, 'name': member.display_name, 'status': str(member.status), 'is_bot': member.bot}
)
# 获取权限信息
bot_member = guild.me
permissions = channel.permissions_for(bot_member)
return {
"channel_id": channel_id,
"channel_name": channel.name,
"guild_id": guild_id,
"guild_name": guild.name,
"user_limit": channel.user_limit,
"current_users": users,
"user_count": len(users),
"bitrate": channel.bitrate,
"permissions": {
"connect": permissions.connect,
"speak": permissions.speak,
"use_voice_activation": permissions.use_voice_activation,
"priority_speaker": permissions.priority_speaker
}
'channel_id': channel_id,
'channel_name': channel.name,
'guild_id': guild_id,
'guild_name': guild.name,
'user_limit': channel.user_limit,
'current_users': users,
'user_count': len(users),
'bitrate': channel.bitrate,
'permissions': {
'connect': permissions.connect,
'speak': permissions.speak,
'use_voice_activation': permissions.use_voice_activation,
'priority_speaker': permissions.priority_speaker,
},
}
async def _validate_user_in_channel(self, guild: discord.Guild,
channel: discord.VoiceChannel, user_id: int):
async def _validate_user_in_channel(self, guild: discord.Guild, channel: discord.VoiceChannel, user_id: int):
"""
验证用户是否在语音频道中
@author: @ydzat
Args:
guild: Discord 服务器对象
channel: 语音频道对象
user_id: 用户ID
Raises:
VoicePermissionError: 用户不在频道中时抛出
"""
member = guild.get_member(user_id)
if not member:
raise VoicePermissionError(
f"无法找到用户 {user_id}",
["member_not_found"],
user_id,
channel.id
)
raise VoicePermissionError(f'无法找到用户 {user_id}', ['member_not_found'], user_id, channel.id)
if not member.voice or member.voice.channel != channel:
raise VoicePermissionError(
f"用户 {member.display_name} 不在语音频道 {channel.name}",
["user_not_in_channel"],
f'用户 {member.display_name} 不在语音频道 {channel.name}',
['user_not_in_channel'],
user_id,
channel.id
channel.id,
)
async def _validate_bot_permissions(self, channel: discord.VoiceChannel):
"""
验证机器人权限
@author: @ydzat
Args:
channel: 语音频道对象
Raises:
VoicePermissionError: 权限不足时抛出
"""
bot_member = channel.guild.me
permissions = channel.permissions_for(bot_member)
missing_permissions = []
if not permissions.connect:
missing_permissions.append("connect")
missing_permissions.append('connect')
if not permissions.speak:
missing_permissions.append("speak")
missing_permissions.append('speak')
if missing_permissions:
raise VoicePermissionError(
f"机器人在频道 {channel.name} 中缺少权限: {', '.join(missing_permissions)}",
f'机器人在频道 {channel.name} 中缺少权限: {", ".join(missing_permissions)}',
missing_permissions,
channel_id=channel.id
channel_id=channel.id,
)
async def cleanup_inactive_connections(self):
"""
清理无效连接
定期检查并清理已断开或无效的语音连接,释放资源。
@author: @ydzat
"""
cleanup_guilds = []
for guild_id, conn_info in self.connections.items():
if not conn_info.voice_client or not conn_info.voice_client.is_connected():
cleanup_guilds.append(guild_id)
for guild_id in cleanup_guilds:
await self._disconnect_internal(guild_id)
if cleanup_guilds:
await self.logger.info(f"清理了 {len(cleanup_guilds)} 个无效的语音连接")
await self.logger.info(f'清理了 {len(cleanup_guilds)} 个无效的语音连接')
async def start_monitoring(self):
"""
开始连接监控
@author: @ydzat
"""
if self._cleanup_task is None and self._monitoring_enabled:
self._cleanup_task = asyncio.create_task(self._monitoring_loop())
async def stop_monitoring(self):
"""
停止连接监控
@author: @ydzat
"""
self._monitoring_enabled = False
@@ -568,11 +554,11 @@ class VoiceConnectionManager:
except asyncio.CancelledError:
pass
self._cleanup_task = None
async def _monitoring_loop(self):
"""
监控循环
@author: @ydzat
"""
try:
@@ -581,18 +567,18 @@ class VoiceConnectionManager:
await self.cleanup_inactive_connections()
except asyncio.CancelledError:
pass
async def disconnect_all(self):
"""
断开所有连接
@author: @ydzat
"""
async with self._connection_lock:
guild_ids = list(self.connections.keys())
for guild_id in guild_ids:
await self._disconnect_internal(guild_id)
await self.stop_monitoring()
@@ -815,7 +801,7 @@ class DiscordAdapter(adapter.MessagePlatformAdapter):
self.logger = logger
self.bot_account_id = self.config['client_id']
# 初始化语音连接管理器
self.voice_manager: VoiceConnectionManager = None
@@ -838,163 +824,162 @@ class DiscordAdapter(adapter.MessagePlatformAdapter):
args['proxy'] = os.getenv('http_proxy')
self.bot = MyClient(intents=intents, **args)
# Voice functionality methods
async def join_voice_channel(self, guild_id: int, channel_id: int,
user_id: int = None) -> discord.VoiceClient:
async def join_voice_channel(self, guild_id: int, channel_id: int, user_id: int = None) -> discord.VoiceClient:
"""
加入语音频道
为指定服务器的语音频道建立连接,支持用户权限验证和连接复用。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
Args:
guild_id (int): Discord 服务器ID
channel_id (int): 语音频道ID
user_id (int, optional): 请求用户ID用于权限验证
Returns:
discord.VoiceClient: 语音客户端实例
Raises:
VoicePermissionError: 权限不足
VoiceNetworkError: 网络连接失败
VoiceConnectionError: 其他连接错误
"""
if not self.voice_manager:
raise VoiceConnectionError("语音管理器未初始化", "MANAGER_NOT_READY")
raise VoiceConnectionError('语音管理器未初始化', 'MANAGER_NOT_READY')
return await self.voice_manager.join_voice_channel(guild_id, channel_id, user_id)
async def leave_voice_channel(self, guild_id: int) -> bool:
"""
离开语音频道
断开指定服务器的语音连接,清理相关资源。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
Args:
guild_id (int): Discord 服务器ID
Returns:
bool: 是否成功断开连接
"""
if not self.voice_manager:
return False
return await self.voice_manager.leave_voice_channel(guild_id)
async def get_voice_client(self, guild_id: int) -> typing.Optional[discord.VoiceClient]:
"""
获取语音客户端
返回指定服务器的语音客户端实例,用于音频播放控制。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
Args:
guild_id (int): Discord 服务器ID
Returns:
Optional[discord.VoiceClient]: 语音客户端实例或 None
"""
if not self.voice_manager:
return None
return await self.voice_manager.get_voice_client(guild_id)
async def is_connected_to_voice(self, guild_id: int) -> bool:
"""
检查语音连接状态
@author: @ydzat
@version: 1.0
@since: 2025-07-04
Args:
guild_id (int): Discord 服务器ID
Returns:
bool: 是否已连接到语音频道
"""
if not self.voice_manager:
return False
return await self.voice_manager.is_connected_to_voice(guild_id)
async def get_voice_connection_status(self, guild_id: int) -> typing.Optional[dict]:
"""
获取语音连接详细状态
返回包含连接时间、延迟、用户数等详细信息的状态字典。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
Args:
guild_id (int): Discord 服务器ID
Returns:
Optional[dict]: 连接状态信息或 None
"""
if not self.voice_manager:
return None
return await self.voice_manager.get_connection_status(guild_id)
async def list_active_voice_connections(self) -> typing.List[dict]:
"""
列出所有活跃的语音连接
@author: @ydzat
@version: 1.0
@since: 2025-07-04
Returns:
List[dict]: 活跃语音连接列表
"""
if not self.voice_manager:
return []
return await self.voice_manager.list_active_connections()
async def get_voice_channel_info(self, guild_id: int, channel_id: int) -> typing.Optional[dict]:
"""
获取语音频道详细信息
包括频道名称、用户列表、权限信息等。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
Args:
guild_id (int): Discord 服务器ID
channel_id (int): 语音频道ID
Returns:
Optional[dict]: 频道信息字典或 None
"""
if not self.voice_manager:
return None
return await self.voice_manager.get_voice_channel_info(guild_id, channel_id)
async def cleanup_voice_connections(self):
"""
清理无效的语音连接
手动触发语音连接清理,移除已断开或无效的连接。
@author: @ydzat
@version: 1.0
@since: 2025-07-04
@@ -1069,30 +1054,29 @@ class DiscordAdapter(adapter.MessagePlatformAdapter):
async def run_async(self):
"""
启动 Discord 适配器
初始化语音管理器并启动 Discord 客户端连接。
@author: @ydzat (修改)
"""
async with self.bot:
# 初始化语音管理器
self.voice_manager = VoiceConnectionManager(self.bot, self.logger)
await self.voice_manager.start_monitoring()
await self.logger.info("Discord 适配器语音功能已启用")
await self.logger.info('Discord 适配器语音功能已启用')
await self.bot.start(self.config['token'], reconnect=True)
async def kill(self) -> bool:
"""
关闭 Discord 适配器
清理语音连接并关闭 Discord 客户端。
@author: @ydzat (修改)
"""
if self.voice_manager:
await self.voice_manager.disconnect_all()
await self.bot.close()
return True


@@ -17,6 +17,7 @@ import aiohttp
import lark_oapi.ws.exception
import quart
from lark_oapi.api.im.v1 import *
from lark_oapi.api.cardkit.v1 import *
from .. import adapter
from ...core import app
@@ -320,6 +321,10 @@ class LarkEventConverter(adapter.EventConverter):
)
CARD_ID_CACHE_SIZE = 500
CARD_ID_CACHE_MAX_LIFETIME = 20 * 60 # 20分钟
class LarkAdapter(adapter.MessagePlatformAdapter):
bot: lark_oapi.ws.Client
api_client: lark_oapi.Client
@@ -339,12 +344,20 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
quart_app: quart.Quart
ap: app.Application
card_id_dict: dict[str, str] # 消息id到卡片id的映射便于创建卡片后的发送消息到指定卡片
seq: int # 用于在发送卡片消息中识别消息顺序直接以seq作为标识
def __init__(self, config: dict, ap: app.Application, logger: EventLogger):
self.config = config
self.ap = ap
self.logger = logger
self.quart_app = quart.Quart(__name__)
self.listeners = {}
self.card_id_dict = {}
self.seq = 1
@self.quart_app.route('/lark/callback', methods=['POST'])
async def lark_callback():
@@ -409,6 +422,216 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
async def send_message(self, target_type: str, target_id: str, message: platform_message.MessageChain):
pass
async def is_stream_output_supported(self) -> bool:
is_stream = False
if self.config.get('enable-stream-reply', None):
is_stream = True
return is_stream
async def create_card_id(self, message_id):
try:
self.ap.logger.debug('飞书支持stream输出,创建卡片......')
card_data = {"schema": "2.0", "config": {"update_multi": True, "streaming_mode": True,
"streaming_config": {"print_step": {"default": 1},
"print_frequency_ms": {"default": 70},
"print_strategy": "fast"}},
"body": {"direction": "vertical", "padding": "12px 12px 12px 12px", "elements": [{"tag": "div",
"text": {
"tag": "plain_text",
"content": "LangBot",
"text_size": "normal",
"text_align": "left",
"text_color": "default"},
"icon": {
"tag": "custom_icon",
"img_key": "img_v3_02p3_05c65d5d-9bad-440a-a2fb-c89571bfd5bg"}},
{
"tag": "markdown",
"content": "",
"text_align": "left",
"text_size": "normal",
"margin": "0px 0px 0px 0px",
"element_id": "streaming_txt"},
{
"tag": "markdown",
"content": "",
"text_align": "left",
"text_size": "normal",
"margin": "0px 0px 0px 0px"},
{
"tag": "column_set",
"horizontal_spacing": "8px",
"horizontal_align": "left",
"columns": [
{
"tag": "column",
"width": "weighted",
"elements": [
{
"tag": "markdown",
"content": "",
"text_align": "left",
"text_size": "normal",
"margin": "0px 0px 0px 0px"},
{
"tag": "markdown",
"content": "",
"text_align": "left",
"text_size": "normal",
"margin": "0px 0px 0px 0px"},
{
"tag": "markdown",
"content": "",
"text_align": "left",
"text_size": "normal",
"margin": "0px 0px 0px 0px"}],
"padding": "0px 0px 0px 0px",
"direction": "vertical",
"horizontal_spacing": "8px",
"vertical_spacing": "2px",
"horizontal_align": "left",
"vertical_align": "top",
"margin": "0px 0px 0px 0px",
"weight": 1}],
"margin": "0px 0px 0px 0px"},
{"tag": "hr",
"margin": "0px 0px 0px 0px"},
{
"tag": "column_set",
"horizontal_spacing": "12px",
"horizontal_align": "right",
"columns": [
{
"tag": "column",
"width": "weighted",
"elements": [
{
"tag": "markdown",
"content": "<font color=\"grey-600\">以上内容由 AI 生成,仅供参考。更多详细、准确信息可点击引用链接查看</font>",
"text_align": "left",
"text_size": "notation",
"margin": "4px 0px 0px 0px",
"icon": {
"tag": "standard_icon",
"token": "robot_outlined",
"color": "grey"}}],
"padding": "0px 0px 0px 0px",
"direction": "vertical",
"horizontal_spacing": "8px",
"vertical_spacing": "8px",
"horizontal_align": "left",
"vertical_align": "top",
"margin": "0px 0px 0px 0px",
"weight": 1},
{
"tag": "column",
"width": "20px",
"elements": [
{
"tag": "button",
"text": {
"tag": "plain_text",
"content": ""},
"type": "text",
"width": "fill",
"size": "medium",
"icon": {
"tag": "standard_icon",
"token": "thumbsup_outlined"},
"hover_tips": {
"tag": "plain_text",
"content": "有帮助"},
"margin": "0px 0px 0px 0px"}],
"padding": "0px 0px 0px 0px",
"direction": "vertical",
"horizontal_spacing": "8px",
"vertical_spacing": "8px",
"horizontal_align": "left",
"vertical_align": "top",
"margin": "0px 0px 0px 0px"},
{
"tag": "column",
"width": "30px",
"elements": [
{
"tag": "button",
"text": {
"tag": "plain_text",
"content": ""},
"type": "text",
"width": "default",
"size": "medium",
"icon": {
"tag": "standard_icon",
"token": "thumbdown_outlined"},
"hover_tips": {
"tag": "plain_text",
"content": "无帮助"},
"margin": "0px 0px 0px 0px"}],
"padding": "0px 0px 0px 0px",
"vertical_spacing": "8px",
"horizontal_align": "left",
"vertical_align": "top",
"margin": "0px 0px 0px 0px"}],
"margin": "0px 0px 4px 0px"}]}}
# delay / fast 创建卡片模板delay 延迟打印fast 实时打印,可以自定义更好看的消息模板
request: CreateCardRequest = (
CreateCardRequest.builder()
.request_body(CreateCardRequestBody.builder().type('card_json').data(json.dumps(card_data)).build())
.build()
)
# 发起请求
response: CreateCardResponse = self.api_client.cardkit.v1.card.create(request)
# 处理失败返回
if not response.success():
raise Exception(
f'client.cardkit.v1.card.create failed, code: {response.code}, msg: {response.msg}, log_id: {response.get_log_id()}, resp: \n{json.dumps(json.loads(response.raw.content), indent=4, ensure_ascii=False)}'
)
self.ap.logger.debug(f'飞书卡片创建成功,卡片ID: {response.data.card_id}')
self.card_id_dict[message_id] = response.data.card_id
card_id = response.data.card_id
return card_id
except Exception as e:
self.ap.logger.error(f'飞书卡片创建失败,错误信息: {e}')
async def create_message_card(self, message_id, event) -> str:
"""
创建卡片消息。
使用卡片消息是因为普通消息更新次数有限制而大模型流式返回结果可能很多而超过限制而飞书卡片没有这个限制api免费次数有限
"""
# message_id = event.message_chain.message_id
card_id = await self.create_card_id(message_id)
content = {
'type': 'card',
'data': {'card_id': card_id, 'template_variable': {'content': 'Thinking...'}},
} # 当收到消息时发送消息模板,可添加模板变量,详情查看飞书中接口文档
request: ReplyMessageRequest = (
ReplyMessageRequest.builder()
.message_id(event.message_chain.message_id)
.request_body(
ReplyMessageRequestBody.builder().content(json.dumps(content)).msg_type('interactive').build()
)
.build()
)
# 发起请求
response: ReplyMessageResponse = await self.api_client.im.v1.message.areply(request)
# 处理失败返回
if not response.success():
raise Exception(
f'client.im.v1.message.reply failed, code: {response.code}, msg: {response.msg}, log_id: {response.get_log_id()}, resp: \n{json.dumps(json.loads(response.raw.content), indent=4, ensure_ascii=False)}'
)
return True
async def reply_message(
self,
message_source: platform_events.MessageEvent,
@@ -447,6 +670,64 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
f'client.im.v1.message.reply failed, code: {response.code}, msg: {response.msg}, log_id: {response.get_log_id()}, resp: \n{json.dumps(json.loads(response.raw.content), indent=4, ensure_ascii=False)}'
)
async def reply_message_chunk(
self,
message_source: platform_events.MessageEvent,
bot_message,
message: platform_message.MessageChain,
quote_origin: bool = False,
is_final: bool = False,
):
"""
回复消息变成更新卡片消息
"""
# self.seq += 1
message_id = bot_message.resp_message_id
msg_seq = bot_message.msg_sequence
if msg_seq % 8 == 0 or is_final:
lark_message = await self.message_converter.yiri2target(message, self.api_client)
text_message = ''
for ele in lark_message[0]:
if ele['tag'] == 'text':
text_message += ele['text']
elif ele['tag'] == 'md':
text_message += ele['text']
# content = {
# 'type': 'card_json',
# 'data': {'card_id': self.card_id_dict[message_id], 'elements': {'content': text_message}},
# }
request: ContentCardElementRequest = (
ContentCardElementRequest.builder()
.card_id(self.card_id_dict[message_id])
.element_id('streaming_txt')
.request_body(
ContentCardElementRequestBody.builder()
# .uuid("a0d69e20-1dd1-458b-k525-dfeca4015204")
.content(text_message)
.sequence(msg_seq)
.build()
)
.build()
)
if is_final and bot_message.tool_calls is None:
# self.seq = 1 # 消息回复结束之后重置seq
self.card_id_dict.pop(message_id) # 清理已经使用过的卡片
# 发起请求
response: ContentCardElementResponse = self.api_client.cardkit.v1.card_element.content(request)
# 处理失败返回
if not response.success():
raise Exception(
f'client.im.v1.message.patch failed, code: {response.code}, msg: {response.msg}, log_id: {response.get_log_id()}, resp: \n{json.dumps(json.loads(response.raw.content), indent=4, ensure_ascii=False)}'
)
return
async def is_muted(self, group_id: int) -> bool:
return False
@@ -492,4 +773,9 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
)
async def kill(self) -> bool:
return False
# 需要断开连接,不然旧的连接会继续运行,导致飞书消息来时会随机选择一个连接
# 断开时lark.ws.Client的_receive_message_loop会打印error日志: receive message loop exit。然后进行重连
# 所以要设置_auto_reconnect=False,让其不重连。
self.bot._auto_reconnect = False
await self.bot._disconnect()
return False


@@ -65,6 +65,16 @@ spec:
type: string
required: true
default: ""
- name: enable-stream-reply
label:
en_US: Enable Stream Reply Mode
zh_Hans: 启用飞书流式回复模式
description:
en_US: If enabled, the bot will reply using Lark streaming card mode
zh_Hans: 如果启用,将使用飞书流式方式来回复内容
type: boolean
required: true
default: false
execution:
python:
path: ./lark.py


@@ -501,7 +501,7 @@ class OfficialAdapter(adapter_model.MessagePlatformAdapter):
for event_handler in event_handler_mapping[event_type]:
setattr(self.bot, event_handler, wrapper)
except Exception as e:
self.logger.error(f"Error in qqbotpy callback: {traceback.format_exc()}")
self.logger.error(f'Error in qqbotpy callback: {traceback.format_exc()}')
raise e
def unregister_listener(


@@ -1,5 +1,6 @@
from __future__ import annotations
import telegram
import telegram.ext
from telegram import Update
@@ -143,6 +144,10 @@ class TelegramAdapter(adapter.MessagePlatformAdapter):
config: dict
ap: app.Application
msg_stream_id: dict # 流式消息id字典key为流式消息idvalue为首次消息源id用于在流式消息时判断编辑那条消息
seq: int # 消息中识别消息顺序直接以seq作为标识
listeners: typing.Dict[
typing.Type[platform_events.Event],
typing.Callable[[platform_events.Event, adapter.MessagePlatformAdapter], None],
@@ -152,6 +157,8 @@ class TelegramAdapter(adapter.MessagePlatformAdapter):
self.config = config
self.ap = ap
self.logger = logger
self.msg_stream_id = {}
# self.seq = 1
async def telegram_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
if update.message.from_user.is_bot:
@@ -160,6 +167,7 @@ class TelegramAdapter(adapter.MessagePlatformAdapter):
try:
lb_event = await self.event_converter.target2yiri(update, self.bot, self.bot_account_id)
await self.listeners[type(lb_event)](lb_event, self)
await self.is_stream_output_supported()
except Exception:
await self.logger.error(f'Error in telegram callback: {traceback.format_exc()}')
@@ -200,6 +208,70 @@ class TelegramAdapter(adapter.MessagePlatformAdapter):
await self.bot.send_message(**args)
async def reply_message_chunk(
self,
message_source: platform_events.MessageEvent,
bot_message,
message: platform_message.MessageChain,
quote_origin: bool = False,
is_final: bool = False,
):
msg_seq = bot_message.msg_sequence
if (msg_seq - 1) % 8 == 0 or is_final:
assert isinstance(message_source.source_platform_object, Update)
components = await TelegramMessageConverter.yiri2target(message, self.bot)
args = {}
message_id = message_source.source_platform_object.message.id
if quote_origin:
args['reply_to_message_id'] = message_source.source_platform_object.message.id
component = components[0]
if message_id not in self.msg_stream_id: # 当消息回复第一次时,发送新消息
# time.sleep(0.6)
if component['type'] == 'text':
if self.config['markdown_card'] is True:
content = telegramify_markdown.markdownify(
content=component['text'],
)
else:
content = component['text']
args = {
'chat_id': message_source.source_platform_object.effective_chat.id,
'text': content,
}
if self.config['markdown_card'] is True:
args['parse_mode'] = 'MarkdownV2'
send_msg = await self.bot.send_message(**args)
send_msg_id = send_msg.message_id
self.msg_stream_id[message_id] = send_msg_id
else: # 存在消息的时候直接编辑消息1
if component['type'] == 'text':
if self.config['markdown_card'] is True:
content = telegramify_markdown.markdownify(
content=component['text'],
)
else:
content = component['text']
args = {
'message_id': self.msg_stream_id[message_id],
'chat_id': message_source.source_platform_object.effective_chat.id,
'text': content,
}
if self.config['markdown_card'] is True:
args['parse_mode'] = 'MarkdownV2'
await self.bot.edit_message_text(**args)
if is_final and bot_message.tool_calls is None:
# self.seq = 1 # 消息回复结束之后重置seq
self.msg_stream_id.pop(message_id) # 消息回复结束之后删除流式消息id
async def is_stream_output_supported(self) -> bool:
is_stream = False
if self.config.get('enable-stream-reply', None):
is_stream = True
return is_stream
async def is_muted(self, group_id: int) -> bool:
return False
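The Telegram implementation above streams by sending one message for the first chunk and then editing it in place for later chunks, keyed through msg_stream_id. A stripped-down model of that bookkeeping, with stand-in ids and no real Telegram calls:
# Toy model of the send-once-then-edit pattern used by reply_message_chunk above.
msg_stream_id: dict[int, int] = {}

def handle_chunk(source_message_id: int, text: str, is_final: bool) -> str:
    if source_message_id not in msg_stream_id:
        msg_stream_id[source_message_id] = 1000 + source_message_id  # pretend send_message() result id
        action = f'send_message(text={text!r})'
    else:
        action = f'edit_message_text(message_id={msg_stream_id[source_message_id]}, text={text!r})'
    if is_final:
        msg_stream_id.pop(source_message_id)  # cleaned up when the stream ends
    return action

for i, partial in enumerate(['Hel', 'Hello', 'Hello, world']):
    print(handle_chunk(42, partial, is_final=(i == 2)))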
@@ -222,8 +294,12 @@ class TelegramAdapter(adapter.MessagePlatformAdapter):
self.bot_account_id = (await self.bot.get_me()).username
await self.application.updater.start_polling(allowed_updates=Update.ALL_TYPES)
await self.application.start()
await self.logger.info('Telegram adapter running')
async def kill(self) -> bool:
if self.application.running:
await self.application.stop()
if self.application.updater:
await self.application.updater.stop()
await self.logger.info('Telegram adapter stopped')
return True


@@ -25,6 +25,16 @@ spec:
type: boolean
required: false
default: true
- name: enable-stream-reply
label:
en_US: Enable Stream Reply Mode
zh_Hans: 启用电报流式回复模式
description:
en_US: If enabled, the bot will reply using Telegram streaming mode
zh_Hans: 如果启用,将使用电报流式方式来回复内容
type: boolean
required: true
default: false
execution:
python:
path: ./telegram.py


@@ -19,17 +19,20 @@ class WebChatMessage(BaseModel):
content: str
message_chain: list[dict]
timestamp: str
is_final: bool = False
class WebChatSession:
id: str
message_lists: dict[str, list[WebChatMessage]] = {}
resp_waiters: dict[int, asyncio.Future[WebChatMessage]]
resp_queues: dict[int, asyncio.Queue[WebChatMessage]]
def __init__(self, id: str):
self.id = id
self.message_lists = {}
self.resp_waiters = {}
self.resp_queues = {}
def get_message_list(self, pipeline_uuid: str) -> list[WebChatMessage]:
if pipeline_uuid not in self.message_lists:
@@ -49,6 +52,8 @@ class WebChatAdapter(msadapter.MessagePlatformAdapter):
typing.Callable[[platform_events.Event, msadapter.MessagePlatformAdapter], None],
] = {}
is_stream: bool
def __init__(self, config: dict, ap: app.Application, logger: EventLogger):
self.ap = ap
self.logger = logger
@@ -59,6 +64,8 @@ class WebChatAdapter(msadapter.MessagePlatformAdapter):
self.bot_account_id = 'webchatbot'
self.is_stream = False
async def send_message(
self,
target_type: str,
@@ -102,12 +109,53 @@ class WebChatAdapter(msadapter.MessagePlatformAdapter):
# notify waiter
if isinstance(message_source, platform_events.FriendMessage):
self.webchat_person_session.resp_waiters[message_source.message_chain.message_id].set_result(message_data)
await self.webchat_person_session.resp_queues[message_source.message_chain.message_id].put(message_data)
elif isinstance(message_source, platform_events.GroupMessage):
self.webchat_group_session.resp_waiters[message_source.message_chain.message_id].set_result(message_data)
await self.webchat_group_session.resp_queues[message_source.message_chain.message_id].put(message_data)
return message_data.model_dump()
async def reply_message_chunk(
self,
message_source: platform_events.MessageEvent,
bot_message,
message: platform_message.MessageChain,
quote_origin: bool = False,
is_final: bool = False,
) -> dict:
"""回复消息"""
message_data = WebChatMessage(
id=-1,
role='assistant',
content=str(message),
message_chain=[component.__dict__ for component in message],
timestamp=datetime.now().isoformat(),
)
# notify waiter
session = (
self.webchat_group_session
if isinstance(message_source, platform_events.GroupMessage)
else self.webchat_person_session
)
if message_source.message_chain.message_id not in session.resp_waiters:
# session.resp_waiters[message_source.message_chain.message_id] = asyncio.Queue()
queue = session.resp_queues[message_source.message_chain.message_id]
# if isinstance(message_source, platform_events.FriendMessage):
# queue = self.webchat_person_session.resp_queues[message_source.message_chain.message_id]
# elif isinstance(message_source, platform_events.GroupMessage):
# queue = self.webchat_group_session.resp_queues[message_source.message_chain.message_id]
if is_final and bot_message.tool_calls is None:
message_data.is_final = True
# print(message_data)
await queue.put(message_data)
return message_data.model_dump()
async def is_stream_output_supported(self) -> bool:
return self.is_stream
def register_listener(
self,
event_type: typing.Type[platform_events.Event],
@@ -140,8 +188,13 @@ class WebChatAdapter(msadapter.MessagePlatformAdapter):
await self.logger.info('WebChat调试适配器正在停止')
async def send_webchat_message(
self, pipeline_uuid: str, session_type: str, message_chain_obj: typing.List[dict]
self,
pipeline_uuid: str,
session_type: str,
message_chain_obj: typing.List[dict],
is_stream: bool = False,
) -> dict:
"""发送调试消息到流水线"""
self.is_stream = is_stream
if session_type == 'person':
use_session = self.webchat_person_session
@@ -152,6 +205,9 @@ class WebChatAdapter(msadapter.MessagePlatformAdapter):
message_id = len(use_session.get_message_list(pipeline_uuid)) + 1
use_session.resp_queues[message_id] = asyncio.Queue()
logger.debug(f'Initialized queue for message_id: {message_id}')
use_session.get_message_list(pipeline_uuid).append(
WebChatMessage(
id=message_id,
@@ -185,21 +241,46 @@ class WebChatAdapter(msadapter.MessagePlatformAdapter):
self.ap.platform_mgr.webchat_proxy_bot.bot_entity.use_pipeline_uuid = pipeline_uuid
# trigger pipeline
if event.__class__ in self.listeners:
await self.listeners[event.__class__](event, self)
# set waiter
waiter = asyncio.Future[WebChatMessage]()
use_session.resp_waiters[message_id] = waiter
waiter.add_done_callback(lambda future: use_session.resp_waiters.pop(message_id))
if is_stream:
queue = use_session.resp_queues[message_id]
msg_id = len(use_session.get_message_list(pipeline_uuid)) + 1
while True:
resp_message = await queue.get()
resp_message.id = msg_id
if resp_message.is_final:
resp_message.id = msg_id
use_session.get_message_list(pipeline_uuid).append(resp_message)
yield resp_message.model_dump()
break
yield resp_message.model_dump()
use_session.resp_queues.pop(message_id)
resp_message = await waiter
else: # non-stream
# set waiter
# waiter = asyncio.Future[WebChatMessage]()
# use_session.resp_waiters[message_id] = waiter
# # waiter.add_done_callback(lambda future: use_session.resp_waiters.pop(message_id))
#
# resp_message = await waiter
#
# resp_message.id = len(use_session.get_message_list(pipeline_uuid)) + 1
#
# use_session.get_message_list(pipeline_uuid).append(resp_message)
#
# yield resp_message.model_dump()
msg_id = len(use_session.get_message_list(pipeline_uuid)) + 1
resp_message.id = len(use_session.get_message_list(pipeline_uuid)) + 1
queue = use_session.resp_queues[message_id]
resp_message = await queue.get()
use_session.get_message_list(pipeline_uuid).append(resp_message)
resp_message.id = msg_id
resp_message.is_final = True
use_session.get_message_list(pipeline_uuid).append(resp_message)
return resp_message.model_dump()
yield resp_message.model_dump()
def get_webchat_messages(self, pipeline_uuid: str, session_type: str) -> list[dict]:
"""获取调试消息历史"""

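The changes above replace one-shot futures with per-message queues. Reduced to a standalone sketch (names like Chunk and stream_consumer are illustrative, assuming the same producer/consumer split): reply_message_chunk acts as the producer pushing partial replies into an asyncio.Queue keyed by message id, and send_webchat_message consumes that queue until a chunk marked final arrives.

import asyncio
from dataclasses import dataclass

@dataclass
class Chunk:
    content: str
    is_final: bool = False

async def producer(queue: asyncio.Queue) -> None:
    # Stands in for reply_message_chunk(): push partial replies, mark the last one final.
    for part in ('Hel', 'lo, ', 'world'):
        await queue.put(Chunk(part))
    await queue.put(Chunk('', is_final=True))

async def stream_consumer(queue: asyncio.Queue):
    # Stands in for the is_stream branch of send_webchat_message().
    while True:
        chunk = await queue.get()
        yield chunk
        if chunk.is_final:
            break

async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    asyncio.create_task(producer(queue))
    async for chunk in stream_consumer(queue):
        print(chunk)

asyncio.run(main())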
View File

@@ -9,7 +9,8 @@ metadata:
en_US: "WebChat adapter for pipeline debugging"
zh_Hans: "用于流水线调试的网页聊天适配器"
icon: ""
spec: {}
spec:
config: []
execution:
python:
path: "webchat.py"

View File

@@ -29,10 +29,9 @@ import logging
class WeChatPadMessageConverter(adapter.MessageConverter):
def __init__(self, config: dict, logger: logging.Logger):
self.config = config
self.bot = WeChatPadClient(self.config["wechatpad_url"],self.config["token"])
self.bot = WeChatPadClient(self.config['wechatpad_url'], self.config['token'])
self.logger = logger
@staticmethod
@@ -40,7 +39,9 @@ class WeChatPadMessageConverter(adapter.MessageConverter):
content_list = []
for component in message_chain:
if isinstance(component, platform_message.At):
if isinstance(component, platform_message.AtAll):
content_list.append({'type': 'at', 'target': 'all'})
elif isinstance(component, platform_message.At):
content_list.append({'type': 'at', 'target': component.target})
elif isinstance(component, platform_message.Plain):
content_list.append({'type': 'text', 'content': component.text})
@@ -75,9 +76,9 @@ class WeChatPadMessageConverter(adapter.MessageConverter):
return content_list
async def target2yiri(
self,
message: dict,
bot_account_id: str,
self,
message: dict,
bot_account_id: str,
) -> platform_message.MessageChain:
"""外部消息转平台消息"""
# 数据预处理
@@ -90,18 +91,18 @@ class WeChatPadMessageConverter(adapter.MessageConverter):
if is_group_message:
ats_bot = self._ats_bot(message, bot_account_id)
self.logger.info(f"ats_bot: {ats_bot}; bot_account_id: {bot_account_id}; bot_wxid: {bot_wxid}")
if "@所有人" in content:
self.logger.info(f'ats_bot: {ats_bot}; bot_account_id: {bot_account_id}; bot_wxid: {bot_wxid}')
if '@所有人' in content:
message_list.append(platform_message.AtAll())
if ats_bot:
message_list.append(platform_message.At(target=bot_account_id))
# 解析@信息并生成At组件
at_targets = self._extract_at_targets(message)
for target_id in at_targets:
if target_id != bot_wxid: # 避免重复添加机器人的At
message_list.append(platform_message.At(target=target_id))
content_no_preifx, _ = self._extract_content_and_sender(content)
msg_type = message['msg_type']
@@ -239,8 +240,8 @@ class WeChatPadMessageConverter(adapter.MessageConverter):
# self.logger.info("_handler_compound_quote", ET.tostring(xml_data, encoding='unicode'))
appmsg_data = xml_data.find('.//appmsg')
quote_data = '' # 引用原文
quote_id = None # 引用消息的原发送者
tousername = None # 接收方: 所属微信的wxid
# quote_id = None # 引用消息的原发送者
# tousername = None # 接收方: 所属微信的wxid
user_data = '' # 用户消息
sender_id = xml_data.findtext('.//fromusername') # 发送方:单聊用户/群member
@@ -248,13 +249,10 @@ class WeChatPadMessageConverter(adapter.MessageConverter):
if appmsg_data:
user_data = appmsg_data.findtext('.//title') or ''
quote_data = appmsg_data.find('.//refermsg').findtext('.//content')
quote_id = appmsg_data.find('.//refermsg').findtext('.//chatusr')
# quote_id = appmsg_data.find('.//refermsg').findtext('.//chatusr')
message_list.append(platform_message.WeChatAppMsg(app_msg=ET.tostring(appmsg_data, encoding='unicode')))
if message:
tousername = message['to_user_name']['str']
_ = quote_id
_ = tousername
# if message:
# tousername = message['to_user_name']['str']
if quote_data:
quote_data_message_list = platform_message.MessageChain()
@@ -419,14 +417,14 @@ class WeChatPadMessageConverter(adapter.MessageConverter):
msg_source = message.get('msg_source', '') or ''
if len(msg_source) > 0:
msg_source_data = ET.fromstring(msg_source)
at_user_list = msg_source_data.findtext("atuserlist") or ""
at_user_list = msg_source_data.findtext('atuserlist') or ''
if at_user_list:
# atuserlist格式通常是逗号分隔的用户ID列表
at_targets = [user_id.strip() for user_id in at_user_list.split(',') if user_id.strip()]
except Exception as e:
self.logger.error(f"_extract_at_targets got except: {e}")
self.logger.error(f'_extract_at_targets got except: {e}')
return at_targets
# 提取一下content前面的sender_id, 和去掉前缀的内容
def _extract_content_and_sender(self, raw_content: str) -> Tuple[str, Optional[str]]:
try:
@@ -450,22 +448,20 @@ class WeChatPadMessageConverter(adapter.MessageConverter):
class WeChatPadEventConverter(adapter.EventConverter):
def __init__(self, config: dict, logger: logging.Logger):
self.config = config
self.message_converter = WeChatPadMessageConverter(config, logger)
self.logger = logger
@staticmethod
async def yiri2target(event: platform_events.MessageEvent) -> dict:
pass
async def target2yiri(
self,
event: dict,
bot_account_id: str,
self,
event: dict,
bot_account_id: str,
) -> platform_events.MessageEvent:
# 排除公众号以及微信团队消息
if (
event['from_user_name']['str'].startswith('gh_')
@@ -577,19 +573,22 @@ class WeChatPadAdapter(adapter.MessagePlatformAdapter):
for msg in content_list:
# 文本消息处理@
if msg['type'] == 'text' and at_targets:
at_nick_name_list = []
for member in member_info:
if member['user_name'] in at_targets:
at_nick_name_list.append(f'@{member["nick_name"]}')
msg['content'] = f'{" ".join(at_nick_name_list)} {msg["content"]}'
if 'all' in at_targets:
msg['content'] = f'@所有人 {msg["content"]}'
else:
at_nick_name_list = []
for member in member_info:
if member['user_name'] in at_targets:
at_nick_name_list.append(f'@{member["nick_name"]}')
msg['content'] = f'{" ".join(at_nick_name_list)} {msg["content"]}'
# 统一消息派发
handler_map = {
'text': lambda msg: self.bot.send_text_message(
to_wxid=target_id, message=msg['content'], ats=at_targets
to_wxid=target_id, message=msg['content'], ats=['notify@all'] if 'all' in at_targets else at_targets
),
'image': lambda msg: self.bot.send_image_message(
to_wxid=target_id, img_url=msg['image'], ats=at_targets
to_wxid=target_id, img_url=msg['image'], ats=['notify@all'] if 'all' in at_targets else at_targets
),
'WeChatEmoji': lambda msg: self.bot.send_emoji_message(
to_wxid=target_id, emoji_md5=msg['emoji_md5'], emoji_size=msg['emoji_size']

View File

@@ -812,12 +812,14 @@ class File(MessageComponent):
def __str__(self):
return f'[文件]{self.name}'
class Face(MessageComponent):
"""系统表情
此处将超级表情骰子/划拳一同归类于face
当face_type为rps(划拳)时 face_id 对应的是手势
当face_type为dice(骰子)时 face_id 对应的是点数
"""
type: str = 'Face'
"""表情类型"""
face_type: str = 'face'
@@ -834,15 +836,15 @@ class Face(MessageComponent):
elif self.face_type == 'rps':
return f'[表情]{self.face_name}({self.rps_data(self.face_id)})'
def rps_data(self,face_id):
rps_dict ={
1 : "",
2 : "剪刀",
3 : "石头",
def rps_data(self, face_id):
rps_dict = {
1: '',
2: '剪刀',
3: '石头',
}
return rps_dict[face_id]
# ================ 个人微信专用组件 ================
@@ -971,5 +973,6 @@ class WeChatFile(MessageComponent):
"""文件地址"""
file_base64: str = ''
"""base64"""
def __str__(self):
return f'[文件]{self.file_name}'
return f'[文件]{self.file_name}'

View File

@@ -125,6 +125,95 @@ class Message(pydantic.BaseModel):
return platform_message.MessageChain(mc)
class MessageChunk(pydantic.BaseModel):
"""消息"""
resp_message_id: typing.Optional[str] = None
"""消息id"""
role: str # user, system, assistant, tool, command, plugin
"""消息的角色"""
name: typing.Optional[str] = None
"""名称,仅函数调用返回时设置"""
all_content: typing.Optional[str] = None
"""所有内容"""
content: typing.Optional[list[ContentElement]] | typing.Optional[str] = None
"""内容"""
tool_calls: typing.Optional[list[ToolCall]] = None
"""工具调用"""
tool_call_id: typing.Optional[str] = None
is_final: bool = False
"""是否是结束"""
msg_sequence: int = 0
"""消息迭代次数"""
def readable_str(self) -> str:
if self.content is not None:
return str(self.role) + ': ' + str(self.get_content_platform_message_chain())
elif self.tool_calls is not None:
return f'调用工具: {self.tool_calls[0].id}'
else:
return '未知消息'
def get_content_platform_message_chain(self, prefix_text: str = '') -> platform_message.MessageChain | None:
"""将内容转换为平台消息 MessageChain 对象
Args:
prefix_text (str): 首个文字组件的前缀文本
"""
if self.content is None:
return None
elif isinstance(self.content, str):
return platform_message.MessageChain([platform_message.Plain(prefix_text + self.content)])
elif isinstance(self.content, list):
mc = []
for ce in self.content:
if ce.type == 'text':
mc.append(platform_message.Plain(ce.text))
elif ce.type == 'image_url':
if ce.image_url.url.startswith('http'):
mc.append(platform_message.Image(url=ce.image_url.url))
else: # base64
b64_str = ce.image_url.url
if b64_str.startswith('data:'):
b64_str = b64_str.split(',')[1]
mc.append(platform_message.Image(base64=b64_str))
# 找第一个文字组件
if prefix_text:
for i, c in enumerate(mc):
if isinstance(c, platform_message.Plain):
mc[i] = platform_message.Plain(prefix_text + c.text)
break
else:
mc.insert(0, platform_message.Plain(prefix_text))
return platform_message.MessageChain(mc)
class ToolCallChunk(pydantic.BaseModel):
"""工具调用"""
id: str
"""工具调用ID"""
type: str
"""工具调用类型"""
function: FunctionCall
"""函数调用"""
class Prompt(pydantic.BaseModel):
"""供AI使用的Prompt"""

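For orientation, a rough sketch of how the incremental MessageChunk entries above are meant to be consumed: contents are concatenated in msg_sequence order until a chunk flagged is_final arrives. MiniChunk below mirrors only a subset of the real fields and is purely illustrative.

import pydantic

class MiniChunk(pydantic.BaseModel):
    # Simplified stand-in for MessageChunk: only the fields needed for accumulation.
    content: str | None = None
    is_final: bool = False
    msg_sequence: int = 0

def accumulate(chunks: list[MiniChunk]) -> str:
    # Concatenate incremental contents in sequence order; stop at the final chunk.
    text = ''
    for chunk in sorted(chunks, key=lambda c: c.msg_sequence):
        if chunk.content:
            text += chunk.content
        if chunk.is_final:
            break
    return text

print(accumulate([
    MiniChunk(content='<think>\nplanning\n</think>\n', msg_sequence=0),
    MiniChunk(content='Hello', msg_sequence=1),
    MiniChunk(content=' world', msg_sequence=2, is_final=True),
]))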
View File

@@ -84,6 +84,7 @@ class ProviderAPIRequester(metaclass=abc.ABCMeta):
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
"""调用API
@@ -92,12 +93,36 @@ class ProviderAPIRequester(metaclass=abc.ABCMeta):
messages (typing.List[llm_entities.Message]): 消息对象列表
funcs (typing.List[tools_entities.LLMFunction], optional): 使用的工具函数列表. Defaults to None.
extra_args (dict[str, typing.Any], optional): 额外的参数. Defaults to {}.
remove_think (bool, optional): 是否移除思考中的消息. Defaults to False.
Returns:
llm_entities.Message: 返回消息对象
"""
pass
async def invoke_llm_stream(
self,
query: core_entities.Query,
model: RuntimeLLMModel,
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.MessageChunk:
"""调用API
Args:
model (RuntimeLLMModel): 使用的模型信息
messages (typing.List[llm_entities.Message]): 消息对象列表
funcs (typing.List[tools_entities.LLMFunction], optional): 使用的工具函数列表. Defaults to None.
extra_args (dict[str, typing.Any], optional): 额外的参数. Defaults to {}.
remove_think (bool, optional): 是否移除思考中的消息. Defaults to False.
Returns:
typing.AsyncGenerator[llm_entities.MessageChunk, None]: 返回消息块的异步生成器
"""
pass
async def invoke_embedding(
self,
model: RuntimeEmbeddingModel,

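A caller-side sketch of the difference between the two entry points above: invoke_llm is awaited once and yields a single Message, whereas invoke_llm_stream is consumed with async for until a chunk reports is_final. The requester, query, model and messages objects are placeholders assumed to come from the pipeline.

async def run_once(requester, query, model, messages, remove_think=False):
    # Non-streaming path: one awaited Message object.
    return await requester.invoke_llm(query, model, messages, remove_think=remove_think)

async def run_streaming(requester, query, model, messages, remove_think=False):
    # Streaming path: iterate MessageChunk objects until one is marked final.
    parts = []
    async for chunk in requester.invoke_llm_stream(query, model, messages, remove_think=remove_think):
        if chunk.content:
            parts.append(chunk.content)
        if chunk.is_final:
            break
    return ''.join(parts)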
View File

@@ -24,6 +24,7 @@ spec:
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./302aichatcmpl.py

View File

@@ -21,7 +21,7 @@ class AnthropicMessages(requester.ProviderAPIRequester):
client: anthropic.AsyncAnthropic
default_config: dict[str, typing.Any] = {
'base_url': 'https://api.anthropic.com/v1',
'base_url': 'https://api.anthropic.com',
'timeout': 120,
}
@@ -44,6 +44,7 @@ class AnthropicMessages(requester.ProviderAPIRequester):
self.client = anthropic.AsyncAnthropic(
api_key='',
http_client=httpx_client,
base_url=self.requester_cfg['base_url'],
)
async def invoke_llm(
@@ -53,6 +54,7 @@ class AnthropicMessages(requester.ProviderAPIRequester):
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
self.client.api_key = model.token_mgr.get_token()
@@ -89,7 +91,8 @@ class AnthropicMessages(requester.ProviderAPIRequester):
{
'type': 'tool_result',
'tool_use_id': tool_call_id,
'content': m.content,
'is_error': False,
'content': [{'type': 'text', 'text': m.content}],
}
],
}
@@ -133,6 +136,9 @@ class AnthropicMessages(requester.ProviderAPIRequester):
args['messages'] = req_messages
if 'thinking' in args:
args['thinking'] = {'type': 'enabled', 'budget_tokens': 10000}
if funcs:
tools = await self.ap.tool_mgr.generate_tools_for_anthropic(funcs)
@@ -140,19 +146,17 @@ class AnthropicMessages(requester.ProviderAPIRequester):
args['tools'] = tools
try:
# print(json.dumps(args, indent=4, ensure_ascii=False))
resp = await self.client.messages.create(**args)
args = {
'content': '',
'role': resp.role,
}
assert type(resp) is anthropic.types.message.Message
for block in resp.content:
if block.type == 'thinking':
args['content'] = '<think>' + block.thinking + '</think>\n' + args['content']
if not remove_think and block.type == 'thinking':
args['content'] = '<think>\n' + block.thinking + '\n</think>\n' + args['content']
elif block.type == 'text':
args['content'] += block.text
elif block.type == 'tool_use':
@@ -176,3 +180,191 @@ class AnthropicMessages(requester.ProviderAPIRequester):
raise errors.RequesterError(f'模型无效: {e.message}')
else:
raise errors.RequesterError(f'请求地址无效: {e.message}')
async def invoke_llm_stream(
self,
query: core_entities.Query,
model: requester.RuntimeLLMModel,
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.MessageChunk:
self.client.api_key = model.token_mgr.get_token()
args = extra_args.copy()
args['model'] = model.model_entity.name
args['stream'] = True
# 处理消息
# system
system_role_message = None
for i, m in enumerate(messages):
if m.role == 'system':
system_role_message = m
break
if system_role_message:
messages.pop(i)
if isinstance(system_role_message, llm_entities.Message) and isinstance(system_role_message.content, str):
args['system'] = system_role_message.content
req_messages = []
for m in messages:
if m.role == 'tool':
tool_call_id = m.tool_call_id
req_messages.append(
{
'role': 'user',
'content': [
{
'type': 'tool_result',
'tool_use_id': tool_call_id,
'is_error': False, # 暂时直接写false
'content': [
{'type': 'text', 'text': m.content}
],  # 这里要是 list 包裹,应该是多个返回的情况;type 类型好像也可以填其他的,暂时只写 text
}
],
}
)
continue
msg_dict = m.dict(exclude_none=True)
if isinstance(m.content, str) and m.content.strip() != '':
msg_dict['content'] = [{'type': 'text', 'text': m.content}]
elif isinstance(m.content, list):
for i, ce in enumerate(m.content):
if ce.type == 'image_base64':
image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
alter_image_ele = {
'type': 'image',
'source': {
'type': 'base64',
'media_type': f'image/{image_format}',
'data': image_b64,
},
}
msg_dict['content'][i] = alter_image_ele
if isinstance(msg_dict['content'], str) and msg_dict['content'] == '':
msg_dict['content'] = []  # 这里不知道为什么会莫名有个空,导致 content 为字符串
if m.tool_calls:
for tool_call in m.tool_calls:
msg_dict['content'].append(
{
'type': 'tool_use',
'id': tool_call.id,
'name': tool_call.function.name,
'input': json.loads(tool_call.function.arguments),
}
)
del msg_dict['tool_calls']
req_messages.append(msg_dict)
if 'thinking' in args:
args['thinking'] = {'type': 'enabled', 'budget_tokens': 10000}
args['messages'] = req_messages
if funcs:
tools = await self.ap.tool_mgr.generate_tools_for_anthropic(funcs)
if tools:
args['tools'] = tools
try:
role = 'assistant' # 默认角色
# chunk_idx = 0
think_started = False
think_ended = False
finish_reason = False
content = ''
tool_name = ''
tool_id = ''
async for chunk in await self.client.messages.create(**args):
tool_call = {'id': None, 'function': {'name': None, 'arguments': None}, 'type': 'function'}
if isinstance(
chunk, anthropic.types.raw_content_block_start_event.RawContentBlockStartEvent
): # 记录开始
if chunk.content_block.type == 'tool_use':
if chunk.content_block.name is not None:
tool_name = chunk.content_block.name
if chunk.content_block.id is not None:
tool_id = chunk.content_block.id
tool_call['function']['name'] = tool_name
tool_call['function']['arguments'] = ''
tool_call['id'] = tool_id
if not remove_think:
if chunk.content_block.type == 'thinking' and not remove_think:
think_started = True
elif chunk.content_block.type == 'text' and chunk.index != 0 and not remove_think:
think_ended = True
continue
elif isinstance(chunk, anthropic.types.raw_content_block_delta_event.RawContentBlockDeltaEvent):
if chunk.delta.type == 'thinking_delta':
if think_started:
think_started = False
content = '<think>\n' + chunk.delta.thinking
elif remove_think:
continue
else:
content = chunk.delta.thinking
elif chunk.delta.type == 'text_delta':
if think_ended:
think_ended = False
content = '\n</think>\n' + chunk.delta.text
else:
content = chunk.delta.text
elif chunk.delta.type == 'input_json_delta':
tool_call['function']['arguments'] = chunk.delta.partial_json
tool_call['function']['name'] = tool_name
tool_call['id'] = tool_id
elif isinstance(chunk, anthropic.types.raw_content_block_stop_event.RawContentBlockStopEvent):
continue # 记录raw_content_block结束的
elif isinstance(chunk, anthropic.types.raw_message_delta_event.RawMessageDeltaEvent):
if chunk.delta.stop_reason == 'end_turn':
finish_reason = True
elif isinstance(chunk, anthropic.types.raw_message_stop_event.RawMessageStopEvent):
continue # 这个好像是完全结束
else:
# print(chunk)
self.ap.logger.debug(f'anthropic chunk: {chunk}')
continue
args = {
'content': content,
'role': role,
'is_final': finish_reason,
'tool_calls': None if tool_call['id'] is None else [tool_call],
}
# if chunk_idx == 0:
# chunk_idx += 1
# continue
# assert type(chunk) is anthropic.types.message.Chunk
yield llm_entities.MessageChunk(**args)
# return llm_entities.Message(**args)
except anthropic.AuthenticationError as e:
raise errors.RequesterError(f'api-key 无效: {e.message}')
except anthropic.BadRequestError as e:
raise errors.RequesterError(str(e.message))
except anthropic.NotFoundError as e:
if 'model: ' in str(e):
raise errors.RequesterError(f'模型无效: {e.message}')
else:
raise errors.RequesterError(f'请求地址无效: {e.message}')

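The streaming handler above folds Anthropic's separate thinking blocks into the inline <think> convention shared by the other requesters. Stripped of the SDK event types, the transformation is roughly the following standalone function (a sketch under the assumption that deltas arrive as ordered (kind, text) pairs):

def wrap_thinking(events: list[tuple[str, str]], remove_think: bool = False) -> str:
    # events: (kind, text) pairs, where kind is 'thinking' or 'text',
    # mimicking the thinking_delta / text_delta stream deltas.
    out, in_think = [], False
    for kind, text in events:
        if kind == 'thinking':
            if remove_think:
                continue
            if not in_think:
                out.append('<think>\n')
                in_think = True
            out.append(text)
        else:  # a plain text delta closes any open thinking block
            if in_think:
                out.append('\n</think>\n')
                in_think = False
            out.append(text)
    return ''.join(out)

print(wrap_thinking([('thinking', 'let me check'), ('text', 'The answer is 4.')]))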
View File

@@ -14,7 +14,7 @@ spec:
zh_Hans: 基础 URL
type: string
required: true
default: "https://api.anthropic.com/v1"
default: "https://api.anthropic.com"
- name: timeout
label:
en_US: Timeout

View File

@@ -38,9 +38,18 @@ class OpenAIChatCompletions(requester.ProviderAPIRequester):
) -> chat_completion.ChatCompletion:
return await self.client.chat.completions.create(**args, extra_body=extra_body)
async def _req_stream(
self,
args: dict,
extra_body: dict = {},
):
async for chunk in await self.client.chat.completions.create(**args, extra_body=extra_body):
yield chunk
async def _make_msg(
self,
chat_completion: chat_completion.ChatCompletion,
remove_think: bool = False,
) -> llm_entities.Message:
chatcmpl_message = chat_completion.choices[0].message.model_dump()
@@ -48,16 +57,192 @@ class OpenAIChatCompletions(requester.ProviderAPIRequester):
if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
chatcmpl_message['role'] = 'assistant'
reasoning_content = chatcmpl_message['reasoning_content'] if 'reasoning_content' in chatcmpl_message else None
# 处理思维链
content = chatcmpl_message.get('content', '')
reasoning_content = chatcmpl_message.get('reasoning_content', None)
# deepseek的reasoner模型
if reasoning_content is not None:
chatcmpl_message['content'] = '<think>\n' + reasoning_content + '\n</think>\n' + chatcmpl_message['content']
processed_content, _ = await self._process_thinking_content(
content=content, reasoning_content=reasoning_content, remove_think=remove_think
)
chatcmpl_message['content'] = processed_content
# 移除 reasoning_content 字段,避免传递给 Message
if 'reasoning_content' in chatcmpl_message:
del chatcmpl_message['reasoning_content']
message = llm_entities.Message(**chatcmpl_message)
return message
async def _process_thinking_content(
self,
content: str,
reasoning_content: str = None,
remove_think: bool = False,
) -> tuple[str, str]:
"""处理思维链内容
Args:
content: 原始内容
reasoning_content: reasoning_content 字段内容
remove_think: 是否移除思维链
Returns:
(处理后的内容, 提取的思维链内容)
"""
thinking_content = ''
# 1. 从 reasoning_content 提取思维链
if reasoning_content:
thinking_content = reasoning_content
# 2. 从 content 中提取 <think> 标签内容
if content and '<think>' in content and '</think>' in content:
import re
think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, content, re.DOTALL)
if think_matches:
# 如果已有 reasoning_content,则追加
if thinking_content:
thinking_content += '\n' + '\n'.join(think_matches)
else:
thinking_content = '\n'.join(think_matches)
# 移除 content 中的 <think> 标签
content = re.sub(think_pattern, '', content, flags=re.DOTALL).strip()
# 3. 根据 remove_think 参数决定是否保留思维链
if remove_think:
return content, ''
else:
# 如果有思维链内容,将其以 <think> 格式添加到 content 开头
if thinking_content:
content = f'<think>\n{thinking_content}\n</think>\n{content}'.strip()
return content, thinking_content
async def _closure_stream(
self,
query: core_entities.Query,
req_messages: list[dict],
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.MessageChunk:
self.client.api_key = use_model.token_mgr.get_token()
args = {}
args['model'] = use_model.model_entity.name
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
args['tools'] = tools
# 设置此次请求中的messages
messages = req_messages.copy()
# 检查vision
for msg in messages:
if 'content' in msg and isinstance(msg['content'], list):
for me in msg['content']:
if me['type'] == 'image_base64':
me['image_url'] = {'url': me['image_base64']}
me['type'] = 'image_url'
del me['image_base64']
args['messages'] = messages
args['stream'] = True
# 流式处理状态
tool_calls_map: dict[str, llm_entities.ToolCall] = {}
chunk_idx = 0
thinking_started = False
thinking_ended = False
role = 'assistant' # 默认角色
tool_id = ""
tool_name = ''
# accumulated_reasoning = '' # 仅用于判断何时结束思维链
async for chunk in self._req_stream(args, extra_body=extra_args):
# 解析 chunk 数据
if hasattr(chunk, 'choices') and chunk.choices:
choice = chunk.choices[0]
delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
finish_reason = getattr(choice, 'finish_reason', None)
else:
delta = {}
finish_reason = None
# 从第一个 chunk 获取 role,后续使用这个 role
if 'role' in delta and delta['role']:
role = delta['role']
# 获取增量内容
delta_content = delta.get('content', '')
reasoning_content = delta.get('reasoning_content', '')
# 处理 reasoning_content
if reasoning_content:
# accumulated_reasoning += reasoning_content
# 如果设置了 remove_think,跳过 reasoning_content
if remove_think:
chunk_idx += 1
continue
# 第一次出现 reasoning_content,添加 <think> 开始标签
if not thinking_started:
thinking_started = True
delta_content = '<think>\n' + reasoning_content
else:
# 继续输出 reasoning_content
delta_content = reasoning_content
elif thinking_started and not thinking_ended and delta_content:
# reasoning_content 结束,normal content 开始,添加 </think> 结束标签
thinking_ended = True
delta_content = '\n</think>\n' + delta_content
# 处理 content 中已有的 <think> 标签(如果需要移除)
# if delta_content and remove_think and '<think>' in delta_content:
# import re
#
# # 移除 <think> 标签及其内容
# delta_content = re.sub(r'<think>.*?</think>', '', delta_content, flags=re.DOTALL)
# 处理工具调用增量
# delta_tool_calls = None
if delta.get('tool_calls'):
for tool_call in delta['tool_calls']:
if tool_call['id'] and tool_call['function']['name']:
tool_id = tool_call['id']
tool_name = tool_call['function']['name']
else:
tool_call['id'] = tool_id
tool_call['function']['name'] = tool_name
if tool_call['type'] is None:
tool_call['type'] = 'function'
# 跳过空的第一个 chunk(只有 role 没有内容)
if chunk_idx == 0 and not delta_content and not reasoning_content and not delta.get('tool_calls'):
chunk_idx += 1
continue
# 构建 MessageChunk - 只包含增量内容
chunk_data = {
'role': role,
'content': delta_content if delta_content else None,
'tool_calls': delta.get('tool_calls'),
'is_final': bool(finish_reason),
}
# 移除 None 值
chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
yield llm_entities.MessageChunk(**chunk_data)
chunk_idx += 1
async def _closure(
self,
query: core_entities.Query,
@@ -65,6 +250,7 @@ class OpenAIChatCompletions(requester.ProviderAPIRequester):
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
@@ -92,10 +278,10 @@ class OpenAIChatCompletions(requester.ProviderAPIRequester):
args['messages'] = messages
# 发送请求
resp = await self._req(args, extra_body=extra_args)
resp = await self._req(args, extra_body=extra_args)
# 处理请求结果
message = await self._make_msg(resp)
message = await self._make_msg(resp, remove_think)
return message
@@ -106,6 +292,7 @@ class OpenAIChatCompletions(requester.ProviderAPIRequester):
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
for m in messages:
@@ -119,13 +306,15 @@ class OpenAIChatCompletions(requester.ProviderAPIRequester):
req_messages.append(msg_dict)
try:
return await self._closure(
msg = await self._closure(
query=query,
req_messages=req_messages,
use_model=model,
use_funcs=funcs,
extra_args=extra_args,
remove_think=remove_think,
)
return msg
except asyncio.TimeoutError:
raise errors.RequesterError('请求超时')
except openai.BadRequestError as e:
@@ -169,6 +358,45 @@ class OpenAIChatCompletions(requester.ProviderAPIRequester):
raise errors.RequesterError('请求超时')
except openai.BadRequestError as e:
raise errors.RequesterError(f'请求参数错误: {e.message}')
async def invoke_llm_stream(
self,
query: core_entities.Query,
model: requester.RuntimeLLMModel,
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.MessageChunk:
req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
for m in messages:
msg_dict = m.dict(exclude_none=True)
content = msg_dict.get('content')
if isinstance(content, list):
# 检查 content 列表中是否每个部分都是文本
if all(isinstance(part, dict) and part.get('type') == 'text' for part in content):
# 将所有文本部分合并为一个字符串
msg_dict['content'] = '\n'.join(part['text'] for part in content)
req_messages.append(msg_dict)
try:
async for item in self._closure_stream(
query=query,
req_messages=req_messages,
use_model=model,
use_funcs=funcs,
extra_args=extra_args,
remove_think=remove_think,
):
yield item
except asyncio.TimeoutError:
raise errors.RequesterError('请求超时')
except openai.BadRequestError as e:
if 'context_length_exceeded' in e.message:
raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
else:
raise errors.RequesterError(f'请求参数错误: {e.message}')
except openai.AuthenticationError as e:
raise errors.RequesterError(f'无效的 api-key: {e.message}')
except openai.NotFoundError as e:

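To make the behaviour of _process_thinking_content above concrete, here is a small standalone function with the same contract (content plus optional reasoning_content in, processed content plus extracted thinking out). It sketches the logic shown in the diff rather than reusing the class method itself.

import re

def process_thinking(content: str, reasoning_content: str | None = None,
                     remove_think: bool = False) -> tuple[str, str]:
    content = content or ''
    thinking = reasoning_content or ''
    # Pull any inline <think>...</think> spans out of the content.
    matches = re.findall(r'<think>(.*?)</think>', content, re.DOTALL)
    if matches:
        thinking = (thinking + '\n' if thinking else '') + '\n'.join(matches)
        content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
    if remove_think:
        return content, ''
    if thinking:
        content = f'<think>\n{thinking}\n</think>\n{content}'.strip()
    return content, thinking

print(process_thinking('Answer: 42', reasoning_content='compute 6*7')[0])
print(process_thinking('<think>draft</think>Final text', remove_think=True)[0])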
View File

@@ -24,6 +24,7 @@ class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
@@ -49,10 +50,11 @@ class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
# 发送请求
resp = await self._req(args, extra_body=extra_args)
# print(resp)
if resp is None:
raise errors.RequesterError('接口返回为空,请确定模型提供商服务是否正常')
# 处理请求结果
message = await self._make_msg(resp)
message = await self._make_msg(resp, remove_think)
return message

View File

@@ -4,7 +4,7 @@ metadata:
name: deepseek-chat-completions
label:
en_US: DeepSeek
zh_Hans: 深度求索
zh_Hans: DeepSeek
icon: deepseek.svg
spec:
config:

View File

@@ -4,6 +4,13 @@ import typing
from . import chatcmpl
import uuid
from .. import errors, requester
from ....core import entities as core_entities
from ... import entities as llm_entities
from ...tools import entities as tools_entities
class GeminiChatCompletions(chatcmpl.OpenAIChatCompletions):
"""Google Gemini API 请求器"""
@@ -12,3 +19,127 @@ class GeminiChatCompletions(chatcmpl.OpenAIChatCompletions):
'base_url': 'https://generativelanguage.googleapis.com/v1beta/openai',
'timeout': 120,
}
async def _closure_stream(
self,
query: core_entities.Query,
req_messages: list[dict],
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.MessageChunk:
self.client.api_key = use_model.token_mgr.get_token()
args = {}
args['model'] = use_model.model_entity.name
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
args['tools'] = tools
# 设置此次请求中的messages
messages = req_messages.copy()
# 检查vision
for msg in messages:
if 'content' in msg and isinstance(msg['content'], list):
for me in msg['content']:
if me['type'] == 'image_base64':
me['image_url'] = {'url': me['image_base64']}
me['type'] = 'image_url'
del me['image_base64']
args['messages'] = messages
args['stream'] = True
# 流式处理状态
tool_calls_map: dict[str, llm_entities.ToolCall] = {}
chunk_idx = 0
thinking_started = False
thinking_ended = False
role = 'assistant' # 默认角色
tool_id = ""
tool_name = ''
# accumulated_reasoning = '' # 仅用于判断何时结束思维链
async for chunk in self._req_stream(args, extra_body=extra_args):
# 解析 chunk 数据
if hasattr(chunk, 'choices') and chunk.choices:
choice = chunk.choices[0]
delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
finish_reason = getattr(choice, 'finish_reason', None)
else:
delta = {}
finish_reason = None
# 从第一个 chunk 获取 role,后续使用这个 role
if 'role' in delta and delta['role']:
role = delta['role']
# 获取增量内容
delta_content = delta.get('content', '')
reasoning_content = delta.get('reasoning_content', '')
# 处理 reasoning_content
if reasoning_content:
# accumulated_reasoning += reasoning_content
# 如果设置了 remove_think,跳过 reasoning_content
if remove_think:
chunk_idx += 1
continue
# 第一次出现 reasoning_content,添加 <think> 开始标签
if not thinking_started:
thinking_started = True
delta_content = '<think>\n' + reasoning_content
else:
# 继续输出 reasoning_content
delta_content = reasoning_content
elif thinking_started and not thinking_ended and delta_content:
# reasoning_content 结束,normal content 开始,添加 </think> 结束标签
thinking_ended = True
delta_content = '\n</think>\n' + delta_content
# 处理 content 中已有的 <think> 标签(如果需要移除)
# if delta_content and remove_think and '<think>' in delta_content:
# import re
#
# # 移除 <think> 标签及其内容
# delta_content = re.sub(r'<think>.*?</think>', '', delta_content, flags=re.DOTALL)
# 处理工具调用增量
# delta_tool_calls = None
if delta.get('tool_calls'):
for tool_call in delta['tool_calls']:
if tool_call['id'] == '' and tool_id == '':
tool_id = str(uuid.uuid4())
if tool_call['function']['name']:
tool_name = tool_call['function']['name']
tool_call['id'] = tool_id
tool_call['function']['name'] = tool_name
if tool_call['type'] is None:
tool_call['type'] = 'function'
# 跳过空的第一个 chunk(只有 role 没有内容)
if chunk_idx == 0 and not delta_content and not reasoning_content and not delta.get('tool_calls'):
chunk_idx += 1
continue
# 构建 MessageChunk - 只包含增量内容
chunk_data = {
'role': role,
'content': delta_content if delta_content else None,
'tool_calls': delta.get('tool_calls'),
'is_final': bool(finish_reason),
}
# 移除 None 值
chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
yield llm_entities.MessageChunk(**chunk_data)
chunk_idx += 1

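The Gemini-specific override above exists because Gemini's OpenAI-compatible stream can emit tool_call fragments with an empty id (the failure fixed in this change set), so the requester mints one uuid per call and reuses it, along with the first seen function name, for the later argument fragments. A minimal sketch of that normalization, using the OpenAI delta field names:

import uuid

def normalize_tool_call_ids(fragments: list[dict]) -> list[dict]:
    # fragments: streamed tool_call deltas, possibly with id == '' (observed Gemini behaviour).
    tool_id, tool_name = '', ''
    for frag in fragments:
        if frag.get('id') == '' and tool_id == '':
            tool_id = str(uuid.uuid4())           # mint an id once per tool call
        if frag.get('function', {}).get('name'):
            tool_name = frag['function']['name']  # remember the name from the first fragment
        frag['id'] = frag.get('id') or tool_id
        frag.setdefault('function', {})['name'] = tool_name
        frag['type'] = frag.get('type') or 'function'
    return fragments

print(normalize_tool_call_ids([
    {'id': '', 'type': None, 'function': {'name': 'get_weather', 'arguments': '{"ci'}},
    {'id': '', 'type': None, 'function': {'name': None, 'arguments': 'ty": "SH"}'}},
]))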
View File

@@ -3,14 +3,16 @@ from __future__ import annotations
import typing
from . import chatcmpl
from . import ppiochatcmpl
from .. import requester
from ....core import entities as core_entities
from ... import entities as llm_entities
from ...tools import entities as tools_entities
import re
import openai.types.chat.chat_completion as chat_completion
class GiteeAIChatCompletions(chatcmpl.OpenAIChatCompletions):
class GiteeAIChatCompletions(ppiochatcmpl.PPIOChatCompletions):
"""Gitee AI ChatCompletions API 请求器"""
default_config: dict[str, typing.Any] = {
@@ -18,34 +20,3 @@ class GiteeAIChatCompletions(chatcmpl.OpenAIChatCompletions):
'timeout': 120,
}
async def _closure(
self,
query: core_entities.Query,
req_messages: list[dict],
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
args = {}
args['model'] = use_model.model_entity.name
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
args['tools'] = tools
# gitee 不支持多模态把content都转换成纯文字
for m in req_messages:
if 'content' in m and isinstance(m['content'], list):
m['content'] = ' '.join([c['text'] for c in m['content']])
args['messages'] = req_messages
resp = await self._req(args, extra_body=extra_args)
message = await self._make_msg(resp)
return message

View File

@@ -24,6 +24,7 @@ spec:
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./giteeaichatcmpl.py

View File

@@ -24,6 +24,7 @@ spec:
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./lmstudiochatcmpl.py

View File

@@ -1,6 +1,7 @@
from __future__ import annotations
import asyncio
import json
import typing
import openai
@@ -34,9 +35,11 @@ class ModelScopeChatCompletions(requester.ProviderAPIRequester):
async def _req(
self,
query: core_entities.Query,
args: dict,
extra_body: dict = {},
) -> chat_completion.ChatCompletion:
remove_think: bool = False,
) -> list[dict[str, typing.Any]]:
args['stream'] = True
chunk = None
@@ -47,78 +50,75 @@ class ModelScopeChatCompletions(requester.ProviderAPIRequester):
resp_gen: openai.AsyncStream = await self.client.chat.completions.create(**args, extra_body=extra_body)
chunk_idx = 0
thinking_started = False
thinking_ended = False
tool_id = ''
tool_name = ''
message_delta = {}
async for chunk in resp_gen:
# print(chunk)
if not chunk or not chunk.id or not chunk.choices or not chunk.choices[0] or not chunk.choices[0].delta:
continue
if chunk.choices[0].delta.content is not None:
pending_content += chunk.choices[0].delta.content
delta = chunk.choices[0].delta.model_dump() if hasattr(chunk.choices[0], 'delta') else {}
reasoning_content = delta.get('reasoning_content')
# 处理 reasoning_content
if reasoning_content:
# accumulated_reasoning += reasoning_content
# 如果设置了 remove_think,跳过 reasoning_content
if remove_think:
chunk_idx += 1
continue
if chunk.choices[0].delta.tool_calls is not None:
for tool_call in chunk.choices[0].delta.tool_calls:
if tool_call.function.arguments is None:
# 第一次出现 reasoning_content,添加 <think> 开始标签
if not thinking_started:
thinking_started = True
pending_content += '<think>\n' + reasoning_content
else:
# 继续输出 reasoning_content
pending_content += reasoning_content
elif thinking_started and not thinking_ended and delta.get('content'):
# reasoning_content 结束,normal content 开始,添加 </think> 结束标签
thinking_ended = True
pending_content += '\n</think>\n' + delta.get('content')
if delta.get('content') is not None:
pending_content += delta.get('content')
if delta.get('tool_calls') is not None:
for tool_call in delta.get('tool_calls'):
if tool_call['id'] != '':
tool_id = tool_call['id']
if tool_call['function']['name'] is not None:
tool_name = tool_call['function']['name']
if tool_call['function']['arguments'] is None:
continue
tool_call['id'] = tool_id
tool_call['name'] = tool_name
for tc in tool_calls:
if tc.index == tool_call.index:
tc.function.arguments += tool_call.function.arguments
if tc['index'] == tool_call['index']:
tc['function']['arguments'] += tool_call['function']['arguments']
break
else:
tool_calls.append(tool_call)
if chunk.choices[0].finish_reason is not None:
break
message_delta['content'] = pending_content
message_delta['role'] = 'assistant'
real_tool_calls = []
for tc in tool_calls:
function = chat_completion_message_tool_call.Function(
name=tc.function.name, arguments=tc.function.arguments
)
real_tool_calls.append(
chat_completion_message_tool_call.ChatCompletionMessageToolCall(
id=tc.id, function=function, type='function'
)
)
return (
chat_completion.ChatCompletion(
id=chunk.id,
object='chat.completion',
created=chunk.created,
choices=[
chat_completion.Choice(
index=0,
message=chat_completion.ChatCompletionMessage(
role='assistant',
content=pending_content,
tool_calls=real_tool_calls if len(real_tool_calls) > 0 else None,
),
finish_reason=chunk.choices[0].finish_reason
if hasattr(chunk.choices[0], 'finish_reason') and chunk.choices[0].finish_reason is not None
else 'stop',
logprobs=chunk.choices[0].logprobs,
)
],
model=chunk.model,
service_tier=chunk.service_tier if hasattr(chunk, 'service_tier') else None,
system_fingerprint=chunk.system_fingerprint if hasattr(chunk, 'system_fingerprint') else None,
usage=chunk.usage if hasattr(chunk, 'usage') else None,
)
if chunk
else None
)
message_delta['tool_calls'] = tool_calls if tool_calls else None
return [message_delta]
async def _make_msg(
self,
chat_completion: chat_completion.ChatCompletion,
chat_completion: list[dict[str, typing.Any]],
) -> llm_entities.Message:
chatcmpl_message = chat_completion.choices[0].message.dict()
chatcmpl_message = chat_completion[0]
# 确保 role 字段存在且不为 None
if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
chatcmpl_message['role'] = 'assistant'
message = llm_entities.Message(**chatcmpl_message)
return message
@@ -130,6 +130,7 @@ class ModelScopeChatCompletions(requester.ProviderAPIRequester):
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
@@ -157,13 +158,146 @@ class ModelScopeChatCompletions(requester.ProviderAPIRequester):
args['messages'] = messages
# 发送请求
resp = await self._req(args, extra_body=extra_args)
resp = await self._req(query, args, extra_body=extra_args, remove_think=remove_think)
# 处理请求结果
message = await self._make_msg(resp)
return message
async def _req_stream(
self,
args: dict,
extra_body: dict = {},
) -> chat_completion.ChatCompletion:
async for chunk in await self.client.chat.completions.create(**args, extra_body=extra_body):
yield chunk
async def _closure_stream(
self,
query: core_entities.Query,
req_messages: list[dict],
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message | typing.AsyncGenerator[llm_entities.MessageChunk, None]:
self.client.api_key = use_model.token_mgr.get_token()
args = {}
args['model'] = use_model.model_entity.name
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
args['tools'] = tools
# 设置此次请求中的messages
messages = req_messages.copy()
# 检查vision
for msg in messages:
if 'content' in msg and isinstance(msg['content'], list):
for me in msg['content']:
if me['type'] == 'image_base64':
me['image_url'] = {'url': me['image_base64']}
me['type'] = 'image_url'
del me['image_base64']
args['messages'] = messages
args['stream'] = True
# 流式处理状态
tool_calls_map: dict[str, llm_entities.ToolCall] = {}
chunk_idx = 0
thinking_started = False
thinking_ended = False
role = 'assistant' # 默认角色
# accumulated_reasoning = '' # 仅用于判断何时结束思维链
async for chunk in self._req_stream(args, extra_body=extra_args):
# 解析 chunk 数据
if hasattr(chunk, 'choices') and chunk.choices:
choice = chunk.choices[0]
delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
finish_reason = getattr(choice, 'finish_reason', None)
else:
delta = {}
finish_reason = None
# 从第一个 chunk 获取 role,后续使用这个 role
if 'role' in delta and delta['role']:
role = delta['role']
# 获取增量内容
delta_content = delta.get('content', '')
reasoning_content = delta.get('reasoning_content', '')
# 处理 reasoning_content
if reasoning_content:
# accumulated_reasoning += reasoning_content
# 如果设置了 remove_think,跳过 reasoning_content
if remove_think:
chunk_idx += 1
continue
# 第一次出现 reasoning_content,添加 <think> 开始标签
if not thinking_started:
thinking_started = True
delta_content = '<think>\n' + reasoning_content
else:
# 继续输出 reasoning_content
delta_content = reasoning_content
elif thinking_started and not thinking_ended and delta_content:
# reasoning_content 结束,normal content 开始,添加 </think> 结束标签
thinking_ended = True
delta_content = '\n</think>\n' + delta_content
# 处理 content 中已有的 <think> 标签(如果需要移除)
# if delta_content and remove_think and '<think>' in delta_content:
# import re
#
# # 移除 <think> 标签及其内容
# delta_content = re.sub(r'<think>.*?</think>', '', delta_content, flags=re.DOTALL)
# 处理工具调用增量
if delta.get('tool_calls'):
for tool_call in delta['tool_calls']:
if tool_call['id'] != '':
tool_id = tool_call['id']
if tool_call['function']['name'] is not None:
tool_name = tool_call['function']['name']
if tool_call['type'] is None:
tool_call['type'] = 'function'
tool_call['id'] = tool_id
tool_call['function']['name'] = tool_name
tool_call['function']['arguments'] = "" if tool_call['function']['arguments'] is None else tool_call['function']['arguments']
# 跳过空的第一个 chunk(只有 role 没有内容)
if chunk_idx == 0 and not delta_content and not reasoning_content and not delta.get('tool_calls'):
chunk_idx += 1
continue
# 构建 MessageChunk - 只包含增量内容
chunk_data = {
'role': role,
'content': delta_content if delta_content else None,
'tool_calls': delta.get('tool_calls'),
'is_final': bool(finish_reason),
}
# 移除 None 值
chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
yield llm_entities.MessageChunk(**chunk_data)
chunk_idx += 1
# return
async def invoke_llm(
self,
query: core_entities.Query,
@@ -171,6 +305,7 @@ class ModelScopeChatCompletions(requester.ProviderAPIRequester):
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
for m in messages:
@@ -185,7 +320,7 @@ class ModelScopeChatCompletions(requester.ProviderAPIRequester):
try:
return await self._closure(
query=query, req_messages=req_messages, use_model=model, use_funcs=funcs, extra_args=extra_args
query=query, req_messages=req_messages, use_model=model, use_funcs=funcs, extra_args=extra_args, remove_think=remove_think
)
except asyncio.TimeoutError:
raise errors.RequesterError('请求超时')
@@ -202,3 +337,50 @@ class ModelScopeChatCompletions(requester.ProviderAPIRequester):
raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
except openai.APIError as e:
raise errors.RequesterError(f'请求错误: {e.message}')
async def invoke_llm_stream(
self,
query: core_entities.Query,
model: requester.RuntimeLLMModel,
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.MessageChunk:
req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
for m in messages:
msg_dict = m.dict(exclude_none=True)
content = msg_dict.get('content')
if isinstance(content, list):
# 检查 content 列表中是否每个部分都是文本
if all(isinstance(part, dict) and part.get('type') == 'text' for part in content):
# 将所有文本部分合并为一个字符串
msg_dict['content'] = '\n'.join(part['text'] for part in content)
req_messages.append(msg_dict)
try:
async for item in self._closure_stream(
query=query,
req_messages=req_messages,
use_model=model,
use_funcs=funcs,
extra_args=extra_args,
remove_think=remove_think,
):
yield item
except asyncio.TimeoutError:
raise errors.RequesterError('请求超时')
except openai.BadRequestError as e:
if 'context_length_exceeded' in e.message:
raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
else:
raise errors.RequesterError(f'请求参数错误: {e.message}')
except openai.AuthenticationError as e:
raise errors.RequesterError(f'无效的 api-key: {e.message}')
except openai.NotFoundError as e:
raise errors.RequesterError(f'请求路径错误: {e.message}')
except openai.RateLimitError as e:
raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
except openai.APIError as e:
raise errors.RequesterError(f'请求错误: {e.message}')

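The rewritten _req above merges streamed tool_call fragments by their index, concatenating partial argument strings until the stream finishes. The merge can be reduced to this standalone sketch (field names follow the OpenAI delta format; it illustrates the technique rather than reproducing the method):

def merge_tool_call_fragments(deltas: list[list[dict]]) -> list[dict]:
    # deltas: per-chunk lists of tool_call fragments, each carrying an 'index'.
    merged: list[dict] = []
    for chunk_calls in deltas:
        for frag in chunk_calls:
            if frag['function']['arguments'] is None:
                continue  # nothing to append yet
            for call in merged:
                if call['index'] == frag['index']:
                    call['function']['arguments'] += frag['function']['arguments']
                    break
            else:
                merged.append(frag)
    return merged

print(merge_tool_call_fragments([
    [{'index': 0, 'id': 'call_1', 'function': {'name': 'lookup', 'arguments': '{"q":'}}],
    [{'index': 0, 'id': '', 'function': {'name': None, 'arguments': ' "rust"}'}}],
]))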
View File

@@ -25,6 +25,7 @@ class MoonshotChatCompletions(chatcmpl.OpenAIChatCompletions):
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
@@ -54,6 +55,6 @@ class MoonshotChatCompletions(chatcmpl.OpenAIChatCompletions):
resp = await self._req(args, extra_body=extra_args)
# 处理请求结果
message = await self._make_msg(resp)
message = await self._make_msg(resp, remove_think)
return message

View File

@@ -14,7 +14,7 @@ spec:
zh_Hans: 基础 URL
type: string
required: true
default: "https://api.moonshot.com/v1"
default: "https://api.moonshot.ai/v1"
- name: timeout
label:
en_US: Timeout

Binary file not shown (new image, 9.4 KiB)

View File

@@ -0,0 +1,17 @@
from __future__ import annotations
import typing
import openai
from . import chatcmpl
class NewAPIChatCompletions(chatcmpl.OpenAIChatCompletions):
"""New API ChatCompletion API 请求器"""
client: openai.AsyncClient
default_config: dict[str, typing.Any] = {
'base_url': 'http://localhost:3000/v1',
'timeout': 120,
}

View File

@@ -0,0 +1,31 @@
apiVersion: v1
kind: LLMAPIRequester
metadata:
name: new-api-chat-completions
label:
en_US: New API
zh_Hans: New API
icon: newapi.png
spec:
config:
- name: base_url
label:
en_US: Base URL
zh_Hans: 基础 URL
type: string
required: true
default: "http://localhost:3000/v1"
- name: timeout
label:
en_US: Timeout
zh_Hans: 超时时间
type: integer
required: true
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./newapichatcmpl.py
attr: NewAPIChatCompletions

View File

@@ -44,6 +44,7 @@ class OllamaChatCompletions(requester.ProviderAPIRequester):
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
args = extra_args.copy()
args['model'] = use_model.model_entity.name
@@ -110,6 +111,7 @@ class OllamaChatCompletions(requester.ProviderAPIRequester):
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message:
req_messages: list = []
for m in messages:
@@ -126,6 +128,19 @@ class OllamaChatCompletions(requester.ProviderAPIRequester):
use_model=model,
use_funcs=funcs,
extra_args=extra_args,
remove_think=remove_think,
)
except asyncio.TimeoutError:
raise errors.RequesterError('请求超时')
async def invoke_embedding(
self,
model: requester.RuntimeEmbeddingModel,
input_text: list[str],
extra_args: dict[str, typing.Any] = {},
) -> list[list[float]]:
return (await self.client.embed(
model=model.model_entity.name,
input=input_text,
**extra_args,
)).embeddings

View File

@@ -24,6 +24,7 @@ spec:
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./ollamachat.py

View File

@@ -24,6 +24,7 @@ spec:
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./openrouterchatcmpl.py

View File

@@ -4,6 +4,12 @@ import openai
import typing
from . import chatcmpl
import openai.types.chat.chat_completion as chat_completion
from .. import requester
from ....core import entities as core_entities
from ... import entities as llm_entities
from ...tools import entities as tools_entities
import re
class PPIOChatCompletions(chatcmpl.OpenAIChatCompletions):
@@ -15,3 +21,193 @@ class PPIOChatCompletions(chatcmpl.OpenAIChatCompletions):
'base_url': 'https://api.ppinfra.com/v3/openai',
'timeout': 120,
}
is_think: bool = False
async def _make_msg(
self,
chat_completion: chat_completion.ChatCompletion,
remove_think: bool,
) -> llm_entities.Message:
chatcmpl_message = chat_completion.choices[0].message.model_dump()
# print(chatcmpl_message.keys(), chatcmpl_message.values())
# 确保 role 字段存在且不为 None
if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
chatcmpl_message['role'] = 'assistant'
reasoning_content = chatcmpl_message['reasoning_content'] if 'reasoning_content' in chatcmpl_message else None
# deepseek的reasoner模型
chatcmpl_message["content"] = await self._process_thinking_content(
chatcmpl_message['content'],reasoning_content,remove_think)
# 移除 reasoning_content 字段,避免传递给 Message
if 'reasoning_content' in chatcmpl_message:
del chatcmpl_message['reasoning_content']
message = llm_entities.Message(**chatcmpl_message)
return message
async def _process_thinking_content(
self,
content: str,
reasoning_content: str = None,
remove_think: bool = False,
) -> str:
"""处理思维链内容
Args:
content: 原始内容
reasoning_content: reasoning_content 字段内容
remove_think: 是否移除思维链
Returns:
处理后的内容
"""
if remove_think:
content = re.sub(
r'<think>.*?</think>', '', content, flags=re.DOTALL
)
else:
if reasoning_content is not None:
content = (
'<think>\n' + reasoning_content + '\n</think>\n' + content
)
return content
async def _make_msg_chunk(
self,
delta: dict[str, typing.Any],
idx: int,
) -> llm_entities.MessageChunk:
# 处理流式chunk和完整响应的差异
# print(chat_completion.choices[0])
# 确保 role 字段存在且不为 None
if 'role' not in delta or delta['role'] is None:
delta['role'] = 'assistant'
reasoning_content = delta['reasoning_content'] if 'reasoning_content' in delta else None
delta['content'] = '' if delta['content'] is None else delta['content']
# print(reasoning_content)
# deepseek的reasoner模型
if reasoning_content is not None:
delta['content'] += reasoning_content
message = llm_entities.MessageChunk(**delta)
return message
async def _closure_stream(
self,
query: core_entities.Query,
req_messages: list[dict],
use_model: requester.RuntimeLLMModel,
use_funcs: list[tools_entities.LLMFunction] = None,
extra_args: dict[str, typing.Any] = {},
remove_think: bool = False,
) -> llm_entities.Message | typing.AsyncGenerator[llm_entities.MessageChunk, None]:
self.client.api_key = use_model.token_mgr.get_token()
args = {}
args['model'] = use_model.model_entity.name
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
args['tools'] = tools
# 设置此次请求中的messages
messages = req_messages.copy()
# 检查vision
for msg in messages:
if 'content' in msg and isinstance(msg['content'], list):
for me in msg['content']:
if me['type'] == 'image_base64':
me['image_url'] = {'url': me['image_base64']}
me['type'] = 'image_url'
del me['image_base64']
args['messages'] = messages
args['stream'] = True
tool_calls_map: dict[str, llm_entities.ToolCall] = {}
chunk_idx = 0
thinking_started = False
thinking_ended = False
role = 'assistant' # 默认角色
async for chunk in self._req_stream(args, extra_body=extra_args):
# 解析 chunk 数据
if hasattr(chunk, 'choices') and chunk.choices:
choice = chunk.choices[0]
delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
finish_reason = getattr(choice, 'finish_reason', None)
else:
delta = {}
finish_reason = None
# 从第一个 chunk 获取 role,后续使用这个 role
if 'role' in delta and delta['role']:
role = delta['role']
# 获取增量内容
delta_content = delta.get('content', '')
# reasoning_content = delta.get('reasoning_content', '')
if remove_think:
if delta['content'] is not None:
if '<think>' in delta['content'] and not thinking_started and not thinking_ended:
thinking_started = True
continue
elif delta['content'] == r'</think>' and not thinking_ended:
thinking_ended = True
continue
elif thinking_ended and delta['content'] == '\n\n' and thinking_started:
thinking_started = False
continue
elif thinking_started and not thinking_ended:
continue
delta_tool_calls = None
if delta.get('tool_calls'):
for tool_call in delta['tool_calls']:
if tool_call['id'] and tool_call['function']['name']:
tool_id = tool_call['id']
tool_name = tool_call['function']['name']
if tool_call['id'] is None:
tool_call['id'] = tool_id
if tool_call['function']['name'] is None:
tool_call['function']['name'] = tool_name
if tool_call['function']['arguments'] is None:
tool_call['function']['arguments'] = ''
if tool_call['type'] is None:
tool_call['type'] = 'function'
# 跳过空的第一个 chunk(只有 role 没有内容)
if chunk_idx == 0 and not delta_content and not delta.get('tool_calls'):
chunk_idx += 1
continue
# 构建 MessageChunk - 只包含增量内容
chunk_data = {
'role': role,
'content': delta_content if delta_content else None,
'tool_calls': delta.get('tool_calls'),
'is_final': bool(finish_reason),
}
# 移除 None 值
chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
yield llm_entities.MessageChunk(**chunk_data)
chunk_idx += 1

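Unlike the requesters above, the PPIO stream handler strips reasoning by watching for literal <think> / </think> tokens that arrive as separate deltas. The remove_think branch behaves roughly like this standalone generator (illustrative names, same basic state machine):

def strip_think_deltas(deltas):
    # Drop everything between a '<think>' delta and the matching '</think>' delta,
    # plus the blank separator that follows, mirroring the remove_think branch above.
    in_think = False
    just_closed = False
    for delta in deltas:
        if '<think>' in delta and not in_think:
            in_think = True
            continue
        if delta == '</think>' and in_think:
            in_think = False
            just_closed = True
            continue
        if just_closed and delta == '\n\n':
            just_closed = False
            continue
        just_closed = False
        if in_think:
            continue
        yield delta

print(list(strip_think_deltas(['<think>', 'reasoning...', '</think>', '\n\n', 'Final answer'])))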
View File

@@ -31,6 +31,7 @@ spec:
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./ppiochatcmpl.py

Binary file not shown (new image, 24 KiB)

View File

@@ -0,0 +1,17 @@
from __future__ import annotations
import openai
import typing
from . import chatcmpl
class QHAIGCChatCompletions(chatcmpl.OpenAIChatCompletions):
"""启航 AI ChatCompletion API 请求器"""
client: openai.AsyncClient
default_config: dict[str, typing.Any] = {
'base_url': 'https://api.qhaigc.com/v1',
'timeout': 120,
}

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: LLMAPIRequester
metadata:
name: qhaigc-chat-completions
label:
en_US: QH AI
zh_Hans: 启航 AI
icon: qhaigc.png
spec:
config:
- name: base_url
label:
en_US: Base URL
zh_Hans: 基础 URL
type: string
required: true
default: "https://api.qhaigc.net/v1"
- name: args
label:
en_US: Args
zh_Hans: 附加参数
type: object
required: true
default: {}
- name: timeout
label:
en_US: Timeout
zh_Hans: 超时时间
type: int
required: true
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./qhaigcchatcmpl.py
attr: QHAIGCChatCompletions

View File

@@ -0,0 +1,32 @@
from __future__ import annotations
import openai
import typing
from . import chatcmpl
import openai.types.chat.chat_completion as chat_completion
class ShengSuanYunChatCompletions(chatcmpl.OpenAIChatCompletions):
"""胜算云(ModelSpot.AI) ChatCompletion API 请求器"""
client: openai.AsyncClient
default_config: dict[str, typing.Any] = {
'base_url': 'https://router.shengsuanyun.com/api/v1',
'timeout': 120,
}
async def _req(
self,
args: dict,
extra_body: dict = {},
) -> chat_completion.ChatCompletion:
return await self.client.chat.completions.create(
**args,
extra_body=extra_body,
extra_headers={
'HTTP-Referer': 'https://langbot.app',
'X-Title': 'LangBot',
},
)

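The override above attributes traffic to LangBot by forwarding provider-specific headers through the OpenAI SDK's per-request extra_headers argument. A minimal usage sketch (base_url, api_key and the model name are placeholders):

import openai

async def demo() -> None:
    client = openai.AsyncClient(
        base_url='https://router.shengsuanyun.com/api/v1',
        api_key='sk-...',  # placeholder key
    )
    # extra_headers is forwarded verbatim on this single request.
    await client.chat.completions.create(
        model='some-model',  # placeholder model name
        messages=[{'role': 'user', 'content': 'hi'}],
        extra_headers={'HTTP-Referer': 'https://langbot.app', 'X-Title': 'LangBot'},
    )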
File diff suppressed because one or more lines are too long (new image, 7.4 KiB)

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: LLMAPIRequester
metadata:
name: shengsuanyun-chat-completions
label:
en_US: ShengSuanYun
zh_Hans: 胜算云
icon: shengsuanyun.svg
spec:
config:
- name: base_url
label:
en_US: Base URL
zh_Hans: 基础 URL
type: string
required: true
default: "https://router.shengsuanyun.com/api/v1"
- name: args
label:
en_US: Args
zh_Hans: 附加参数
type: object
required: true
default: {}
- name: timeout
label:
en_US: Timeout
zh_Hans: 超时时间
type: int
required: true
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./shengsuanyun.py
attr: ShengSuanYunChatCompletions

View File

@@ -24,6 +24,7 @@ spec:
default: 120
support_type:
- llm
- text-embedding
execution:
python:
path: ./siliconflowchatcmpl.py

View File

@@ -99,8 +99,14 @@ class DashScopeAPIRunner(runner.RequestRunner):
plain_text = ''  # plain-text part of the user input
image_ids = []  # image ID list from the user input (not supported yet)
plain_text, image_ids = await self._preprocess_user_message(query)
think_start = False
think_end = False
plain_text, image_ids = await self._preprocess_user_message(query)
has_thoughts = True  # include the thought process
remove_think = self.pipeline_config['output'].get('misc', '').get('remove-think')
if remove_think:
has_thoughts = False
# Send the conversation request
response = dashscope.Application.call(
api_key=self.api_key, # API key of the agent application
@@ -109,43 +115,108 @@ class DashScopeAPIRunner(runner.RequestRunner):
stream=True, # streaming output
incremental_output=True, # incremental output; required when streaming is enabled
session_id=query.session.using_conversation.uuid, # session ID, used for multi-turn conversations
has_thoughts=has_thoughts,
# rag_options={ # mainly for file interaction, not supported yet
# "session_file_ids": ["FILE_ID1"], # replace FILE_ID1 with the actual temporary file ID; separate multiple IDs with commas
# }
)
idx_chunk = 0
try:
is_stream = await query.adapter.is_stream_output_supported()
for chunk in response:
if chunk.get('status_code') != 200:
raise DashscopeAPIError(
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
)
if not chunk:
continue
except AttributeError:
is_stream = False
if is_stream:
for chunk in response:
if chunk.get('status_code') != 200:
raise DashscopeAPIError(
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
)
if not chunk:
continue
idx_chunk += 1
# 获取流式传输的output
stream_output = chunk.get('output', {})
stream_think = stream_output.get('thoughts', [])
if stream_think[0].get('thought'):
if not think_start:
think_start = True
pending_content += f"<think>\n{stream_think[0].get('thought')}"
else:
# 继续输出 reasoning_content
pending_content += stream_think[0].get('thought')
elif stream_think[0].get('thought') == "" and not think_end:
think_end = True
pending_content += "\n</think>\n"
if stream_output.get('text') is not None:
pending_content += stream_output.get('text')
# 是否是流式最后一个chunk
is_final = False if stream_output.get('finish_reason', False) == 'null' else True
# 获取流式传输的output
stream_output = chunk.get('output', {})
if stream_output.get('text') is not None:
pending_content += stream_output.get('text')
# 获取模型传出的参考资料列表
references_dict_list = stream_output.get('doc_references', [])
# 保存当前会话的session_id用于下次对话的语境
query.session.using_conversation.uuid = stream_output.get('session_id')
# 从模型传出的参考资料信息中提取用于替换的字典
if references_dict_list is not None:
for doc in references_dict_list:
if doc.get('index_id') is not None:
references_dict[doc.get('index_id')] = doc.get('doc_name')
# 获取模型传出的参考资料列表
references_dict_list = stream_output.get('doc_references', [])
# 将参考资料替换到文本中
pending_content = self._replace_references(pending_content, references_dict)
# 从模型传出的参考资料信息中提取用于替换的字典
if references_dict_list is not None:
for doc in references_dict_list:
if doc.get('index_id') is not None:
references_dict[doc.get('index_id')] = doc.get('doc_name')
if idx_chunk % 8 == 0 or is_final:
yield llm_entities.MessageChunk(
role='assistant',
content=pending_content,
is_final=is_final,
)
# 保存当前会话的session_id用于下次对话的语境
query.session.using_conversation.uuid = stream_output.get('session_id')
else:
for chunk in response:
if chunk.get('status_code') != 200:
raise DashscopeAPIError(
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
)
if not chunk:
continue
idx_chunk += 1
# 获取流式传输的output
stream_output = chunk.get('output', {})
stream_think = stream_output.get('thoughts', [])
if stream_think[0].get('thought'):
if not think_start:
think_start = True
pending_content += f"<think>\n{stream_think[0].get('thought')}"
else:
# 继续输出 reasoning_content
pending_content += stream_think[0].get('thought')
elif stream_think[0].get('thought') == "" and not think_end:
think_end = True
pending_content += "\n</think>\n"
if stream_output.get('text') is not None:
pending_content += stream_output.get('text')
# 将参考资料替换到文本中
pending_content = self._replace_references(pending_content, references_dict)
# 保存当前会话的session_id用于下次对话的语境
query.session.using_conversation.uuid = stream_output.get('session_id')
yield llm_entities.Message(
role='assistant',
content=pending_content,
)
# 获取模型传出的参考资料列表
references_dict_list = stream_output.get('doc_references', [])
# 从模型传出的参考资料信息中提取用于替换的字典
if references_dict_list is not None:
for doc in references_dict_list:
if doc.get('index_id') is not None:
references_dict[doc.get('index_id')] = doc.get('doc_name')
# 将参考资料替换到文本中
pending_content = self._replace_references(pending_content, references_dict)
yield llm_entities.Message(
role='assistant',
content=pending_content,
)
async def _workflow_messages(self, query: core_entities.Query) -> typing.AsyncGenerator[llm_entities.Message, None]:
"""Dashscope 工作流对话请求"""
@@ -171,52 +242,108 @@ class DashScopeAPIRunner(runner.RequestRunner):
incremental_output=True, # 增量输出,使用流式输出需要开启增量输出
session_id=query.session.using_conversation.uuid, # 会话ID用于多轮对话
biz_params=biz_params, # 工作流应用的自定义输入参数传递
flow_stream_mode="message_format" # 消息模式,输出/结束节点的流式结果
# rag_options={ # 主要用于文件交互,暂不支持
# "session_file_ids": ["FILE_ID1"], # FILE_ID1 替换为实际的临时文件ID,逗号隔开多个
# }
)
# 处理API返回的流式输出
for chunk in response:
if chunk.get('status_code') != 200:
raise DashscopeAPIError(
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
)
if not chunk:
continue
try:
is_stream = await query.adapter.is_stream_output_supported()
# 获取流式传输的output
stream_output = chunk.get('output', {})
if stream_output.get('text') is not None:
pending_content += stream_output.get('text')
except AttributeError:
is_stream = False
idx_chunk = 0
if is_stream:
for chunk in response:
if chunk.get('status_code') != 200:
raise DashscopeAPIError(
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
)
if not chunk:
continue
idx_chunk += 1
# 获取流式传输的output
stream_output = chunk.get('output', {})
if stream_output.get('workflow_message') is not None:
pending_content += stream_output.get('workflow_message').get('message').get('content')
# if stream_output.get('text') is not None:
# pending_content += stream_output.get('text')
# 保存当前会话的session_id用于下次对话的语境
query.session.using_conversation.uuid = stream_output.get('session_id')
is_final = False if stream_output.get('finish_reason', False) == 'null' else True
# 获取模型传出的参考资料列表
references_dict_list = stream_output.get('doc_references', [])
# 获取模型传出的参考资料列表
references_dict_list = stream_output.get('doc_references', [])
# 从模型传出的参考资料信息中提取用于替换的字典
if references_dict_list is not None:
for doc in references_dict_list:
if doc.get('index_id') is not None:
references_dict[doc.get('index_id')] = doc.get('doc_name')
# 从模型传出的参考资料信息中提取用于替换的字典
if references_dict_list is not None:
for doc in references_dict_list:
if doc.get('index_id') is not None:
references_dict[doc.get('index_id')] = doc.get('doc_name')
# 将参考资料替换到文本中
pending_content = self._replace_references(pending_content, references_dict)
# 将参考资料替换到文本中
pending_content = self._replace_references(pending_content, references_dict)
if idx_chunk % 8 == 0 or is_final:
yield llm_entities.MessageChunk(
role='assistant',
content=pending_content,
is_final=is_final,
)
yield llm_entities.Message(
role='assistant',
content=pending_content,
)
# 保存当前会话的session_id用于下次对话的语境
query.session.using_conversation.uuid = stream_output.get('session_id')
else:
for chunk in response:
if chunk.get('status_code') != 200:
raise DashscopeAPIError(
f'Dashscope API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
)
if not chunk:
continue
# 获取流式传输的output
stream_output = chunk.get('output', {})
if stream_output.get('text') is not None:
pending_content += stream_output.get('text')
is_final = False if stream_output.get('finish_reason', False) == 'null' else True
# 保存当前会话的session_id用于下次对话的语境
query.session.using_conversation.uuid = stream_output.get('session_id')
# 获取模型传出的参考资料列表
references_dict_list = stream_output.get('doc_references', [])
# 从模型传出的参考资料信息中提取用于替换的字典
if references_dict_list is not None:
for doc in references_dict_list:
if doc.get('index_id') is not None:
references_dict[doc.get('index_id')] = doc.get('doc_name')
# 将参考资料替换到文本中
pending_content = self._replace_references(pending_content, references_dict)
yield llm_entities.Message(
role='assistant',
content=pending_content,
)
async def run(self, query: core_entities.Query) -> typing.AsyncGenerator[llm_entities.Message, None]:
"""运行"""
msg_seq = 0
if self.app_type == 'agent':
async for msg in self._agent_messages(query):
if isinstance(msg, llm_entities.MessageChunk):
msg_seq += 1
msg.msg_sequence = msg_seq
yield msg
elif self.app_type == 'workflow':
async for msg in self._workflow_messages(query):
if isinstance(msg, llm_entities.MessageChunk):
msg_seq += 1
msg.msg_sequence = msg_seq
yield msg
else:
raise DashscopeAPIError(f'不支持的 Dashscope 应用类型: {self.app_type}')
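
Both the agent and workflow branches above use the same batching rule for streaming: accumulate the text and only yield a `MessageChunk` every 8 chunks or when the final chunk arrives, so the platform adapter edits its message far less often than the model emits tokens. A detached sketch of that pattern (the `Chunk` type and the chunk source are hypothetical; only the batching logic mirrors the code above):

```python
from dataclasses import dataclass
from typing import Iterable, Iterator


@dataclass
class Chunk:
    text: str
    is_final: bool = False


def batched_stream(chunks: Iterable[Chunk], every: int = 8) -> Iterator[tuple[str, bool]]:
    """Yield the accumulated text every `every` chunks and always on the final chunk."""
    pending = ''
    for idx, chunk in enumerate(chunks, start=1):
        pending += chunk.text
        if idx % every == 0 or chunk.is_final:
            yield pending, chunk.is_final


# 10 chunks -> one intermediate flush at chunk 8 and one final flush at chunk 10.
demo = [Chunk(f'token{i} ') for i in range(1, 10)] + [Chunk('end', is_final=True)]
for content, final in batched_stream(demo):
    print(final, len(content))
```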

View File

@@ -3,7 +3,6 @@ from __future__ import annotations
import typing
import json
import uuid
import re
import base64
@@ -38,29 +37,38 @@ class DifyServiceAPIRunner(runner.RequestRunner):
base_url=self.pipeline_config['ai']['dify-service-api']['base-url'],
)
def _try_convert_thinking(self, resp_text: str) -> str:
"""尝试转换 Dify 的思考提示"""
if not resp_text.startswith(
'<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> <summary> Thinking... </summary>'
):
return resp_text
def _process_thinking_content(
self,
content: str,
) -> tuple[str, str]:
"""处理思维链内容
if self.pipeline_config['ai']['dify-service-api']['thinking-convert'] == 'original':
return resp_text
Args:
content: the raw content
Returns:
(processed content, extracted chain-of-thought content)
"""
remove_think = self.pipeline_config['output'].get('misc', '').get('remove-think')
thinking_content = ''
# Extract the <think> tag content from content
if content and '<think>' in content and '</think>' in content:
import re
if self.pipeline_config['ai']['dify-service-api']['thinking-convert'] == 'remove':
return re.sub(
r'<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> <summary> Thinking... </summary>.*?</details>',
'',
resp_text,
flags=re.DOTALL,
)
think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, content, re.DOTALL)
if think_matches:
thinking_content = '\n'.join(think_matches)
# Strip the <think> tags from content
content = re.sub(think_pattern, '', content, flags=re.DOTALL).strip()
if self.pipeline_config['ai']['dify-service-api']['thinking-convert'] == 'plain':
pattern = r'<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> <summary> Thinking... </summary>(.*?)</details>'
thinking_text = re.search(pattern, resp_text, flags=re.DOTALL)
content_text = re.sub(pattern, '', resp_text, flags=re.DOTALL)
return f'<think>{thinking_text.group(1)}</think>\n{content_text}'
# 3. Decide whether to keep the chain of thought based on remove_think
if remove_think:
return content, ''
else:
# If there is chain-of-thought content, prepend it to content in <think> format
if thinking_content:
content = f'<think>\n{thinking_content}\n</think>\n{content}'.strip()
return content, thinking_content
async def _preprocess_user_message(self, query: core_entities.Query) -> tuple[str, list[str]]:
"""预处理用户消息,提取纯文本,并将图片上传到 Dify 服务
@@ -132,17 +140,20 @@ class DifyServiceAPIRunner(runner.RequestRunner):
if mode == 'workflow':
if chunk['event'] == 'node_finished':
if chunk['data']['node_type'] == 'answer':
content, _ = self._process_thinking_content(chunk['data']['outputs']['answer'])
yield llm_entities.Message(
role='assistant',
content=self._try_convert_thinking(chunk['data']['outputs']['answer']),
content=content,
)
elif mode == 'basic':
if chunk['event'] == 'message':
basic_mode_pending_chunk += chunk['answer']
elif chunk['event'] == 'message_end':
content, _ = self._process_thinking_content(basic_mode_pending_chunk)
yield llm_entities.Message(
role='assistant',
content=self._try_convert_thinking(basic_mode_pending_chunk),
content=content,
)
basic_mode_pending_chunk = ''
@@ -193,14 +204,15 @@ class DifyServiceAPIRunner(runner.RequestRunner):
if chunk['event'] in ignored_events:
continue
if chunk['event'] == 'agent_message':
if chunk['event'] == 'agent_message' or chunk['event'] == 'message':
pending_agent_message += chunk['answer']
else:
if pending_agent_message.strip() != '':
pending_agent_message = pending_agent_message.replace('</details>Action:', '</details>')
content, _ = self._process_thinking_content(pending_agent_message)
yield llm_entities.Message(
role='assistant',
content=self._try_convert_thinking(pending_agent_message),
content=content,
)
pending_agent_message = ''
@@ -308,26 +320,352 @@ class DifyServiceAPIRunner(runner.RequestRunner):
elif chunk['event'] == 'workflow_finished':
if chunk['data']['error']:
raise errors.DifyAPIError(chunk['data']['error'])
content, _ = self._process_thinking_content(chunk['data']['outputs']['summary'])
msg = llm_entities.Message(
role='assistant',
content=chunk['data']['outputs']['summary'],
content=content,
)
yield msg
async def _chat_messages_chunk(
self, query: core_entities.Query
) -> typing.AsyncGenerator[llm_entities.MessageChunk, None]:
"""调用聊天助手"""
cov_id = query.session.using_conversation.uuid or ''
query.variables['conversation_id'] = cov_id
plain_text, image_ids = await self._preprocess_user_message(query)
files = [
{
'type': 'image',
'transfer_method': 'local_file',
'upload_file_id': image_id,
}
for image_id in image_ids
]
basic_mode_pending_chunk = ''
inputs = {}
inputs.update(query.variables)
message_idx = 0
chunk = None  # initialize chunk so it is defined even when no response comes back
is_final = False
think_start = False
think_end = False
remove_think = self.pipeline_config['output'].get('misc', '').get('remove-think')
async for chunk in self.dify_client.chat_messages(
inputs=inputs,
query=plain_text,
user=f'{query.session.launcher_type.value}_{query.session.launcher_id}',
conversation_id=cov_id,
files=files,
timeout=120,
):
self.ap.logger.debug('dify-chat-chunk: ' + str(chunk))
# if chunk['event'] == 'workflow_started':
# mode = 'workflow'
# if mode == 'workflow':
# elif mode == 'basic':
# These events are all plain messages with no tool calls, so they are not further categorized for now
if chunk['event'] == 'message':
message_idx += 1
if remove_think:
if '<think>' in chunk['answer'] and not think_start:
think_start = True
continue
if '</think>' in chunk['answer'] and not think_end:
import re
content = re.sub(r'^\n</think>', '', chunk['answer'])
basic_mode_pending_chunk += content
think_end = True
elif think_end:
basic_mode_pending_chunk += chunk['answer']
if think_start:
continue
else:
basic_mode_pending_chunk += chunk['answer']
if chunk['event'] == 'message_end':
is_final = True
if is_final or message_idx % 8 == 0:
# content, _ = self._process_thinking_content(basic_mode_pending_chunk)
yield llm_entities.MessageChunk(
role='assistant',
content=basic_mode_pending_chunk,
is_final=is_final,
)
if chunk is None:
raise errors.DifyAPIError('Dify API 没有返回任何响应请检查网络连接和API配置')
query.session.using_conversation.uuid = chunk['conversation_id']
async def _agent_chat_messages_chunk(
self, query: core_entities.Query
) -> typing.AsyncGenerator[llm_entities.MessageChunk, None]:
"""调用聊天助手"""
cov_id = query.session.using_conversation.uuid or ''
query.variables['conversation_id'] = cov_id
plain_text, image_ids = await self._preprocess_user_message(query)
files = [
{
'type': 'image',
'transfer_method': 'local_file',
'upload_file_id': image_id,
}
for image_id in image_ids
]
ignored_events = []
inputs = {}
inputs.update(query.variables)
pending_agent_message = ''
chunk = None  # initialize chunk so it is defined even when no response comes back
message_idx = 0
is_final = False
think_start = False
think_end = False
remove_think = self.pipeline_config['output'].get('misc', '').get('remove-think')
async for chunk in self.dify_client.chat_messages(
inputs=inputs,
query=plain_text,
user=f'{query.session.launcher_type.value}_{query.session.launcher_id}',
response_mode='streaming',
conversation_id=cov_id,
files=files,
timeout=120,
):
self.ap.logger.debug('dify-agent-chunk: ' + str(chunk))
if chunk['event'] in ignored_events:
continue
if chunk['event'] == 'agent_message':
message_idx += 1
if remove_think:
if '<think>' in chunk['answer'] and not think_start:
think_start = True
continue
if '</think>' in chunk['answer'] and not think_end:
import re
content = re.sub(r'^\n</think>', '', chunk['answer'])
pending_agent_message += content
think_end = True
elif think_end or not think_start:
pending_agent_message += chunk['answer']
if think_start:
continue
else:
pending_agent_message += chunk['answer']
elif chunk['event'] == 'message_end':
is_final = True
else:
if chunk['event'] == 'agent_thought':
if chunk['tool'] != '' and chunk['observation'] != '': # tool call result; skip
continue
message_idx += 1
if chunk['tool']:
msg = llm_entities.MessageChunk(
role='assistant',
tool_calls=[
llm_entities.ToolCall(
id=chunk['id'],
type='function',
function=llm_entities.FunctionCall(
name=chunk['tool'],
arguments=json.dumps({}),
),
)
],
)
yield msg
if chunk['event'] == 'message_file':
message_idx += 1
if chunk['type'] == 'image' and chunk['belongs_to'] == 'assistant':
base_url = self.dify_client.base_url
if base_url.endswith('/v1'):
base_url = base_url[:-3]
image_url = base_url + chunk['url']
yield llm_entities.MessageChunk(
role='assistant',
content=[llm_entities.ContentElement.from_image_url(image_url)],
is_final=is_final,
)
if chunk['event'] == 'error':
raise errors.DifyAPIError('dify 服务错误: ' + chunk['message'])
if message_idx % 8 == 0 or is_final:
yield llm_entities.MessageChunk(
role='assistant',
content=pending_agent_message,
is_final=is_final,
)
if chunk is None:
raise errors.DifyAPIError('Dify API 没有返回任何响应请检查网络连接和API配置')
query.session.using_conversation.uuid = chunk['conversation_id']
async def _workflow_messages_chunk(
self, query: core_entities.Query
) -> typing.AsyncGenerator[llm_entities.MessageChunk, None]:
"""调用工作流"""
if not query.session.using_conversation.uuid:
query.session.using_conversation.uuid = str(uuid.uuid4())
query.variables['conversation_id'] = query.session.using_conversation.uuid
plain_text, image_ids = await self._preprocess_user_message(query)
files = [
{
'type': 'image',
'transfer_method': 'local_file',
'upload_file_id': image_id,
}
for image_id in image_ids
]
ignored_events = ['workflow_started']
inputs = { # these variables are legacy variables, we need to keep them for compatibility
'langbot_user_message_text': plain_text,
'langbot_session_id': query.variables['session_id'],
'langbot_conversation_id': query.variables['conversation_id'],
'langbot_msg_create_time': query.variables['msg_create_time'],
}
inputs.update(query.variables)
messsage_idx = 0
is_final = False
think_start = False
think_end = False
workflow_contents = ''
remove_think = self.pipeline_config['output'].get('misc', '').get('remove-think')
async for chunk in self.dify_client.workflow_run(
inputs=inputs,
user=f'{query.session.launcher_type.value}_{query.session.launcher_id}',
files=files,
timeout=120,
):
self.ap.logger.debug('dify-workflow-chunk: ' + str(chunk))
if chunk['event'] in ignored_events:
continue
if chunk['event'] == 'workflow_finished':
is_final = True
if chunk['data']['error']:
raise errors.DifyAPIError(chunk['data']['error'])
if chunk['event'] == 'text_chunk':
messsage_idx += 1
if remove_think:
if '<think>' in chunk['data']['text'] and not think_start:
think_start = True
continue
if '</think>' in chunk['data']['text'] and not think_end:
import re
content = re.sub(r'^\n</think>', '', chunk['data']['text'])
workflow_contents += content
think_end = True
elif think_end:
workflow_contents += chunk['data']['text']
if think_start:
continue
else:
workflow_contents += chunk['data']['text']
if chunk['event'] == 'node_started':
if chunk['data']['node_type'] == 'start' or chunk['data']['node_type'] == 'end':
continue
messsage_idx += 1
msg = llm_entities.MessageChunk(
role='assistant',
content=None,
tool_calls=[
llm_entities.ToolCall(
id=chunk['data']['node_id'],
type='function',
function=llm_entities.FunctionCall(
name=chunk['data']['title'],
arguments=json.dumps({}),
),
)
],
)
yield msg
if messsage_idx % 8 == 0 or is_final:
yield llm_entities.MessageChunk(
role='assistant',
content=workflow_contents,
is_final=is_final,
)
async def run(self, query: core_entities.Query) -> typing.AsyncGenerator[llm_entities.Message, None]:
"""运行请求"""
if self.pipeline_config['ai']['dify-service-api']['app-type'] == 'chat':
async for msg in self._chat_messages(query):
yield msg
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'agent':
async for msg in self._agent_chat_messages(query):
yield msg
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'workflow':
async for msg in self._workflow_messages(query):
yield msg
if await query.adapter.is_stream_output_supported():
msg_idx = 0
if self.pipeline_config['ai']['dify-service-api']['app-type'] == 'chat':
async for msg in self._chat_messages_chunk(query):
msg_idx += 1
msg.msg_sequence = msg_idx
yield msg
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'agent':
async for msg in self._agent_chat_messages_chunk(query):
msg_idx += 1
msg.msg_sequence = msg_idx
yield msg
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'workflow':
async for msg in self._workflow_messages_chunk(query):
msg_idx += 1
msg.msg_sequence = msg_idx
yield msg
else:
raise errors.DifyAPIError(
f'不支持的 Dify 应用类型: {self.pipeline_config["ai"]["dify-service-api"]["app-type"]}'
)
else:
raise errors.DifyAPIError(
f'不支持的 Dify 应用类型: {self.pipeline_config["ai"]["dify-service-api"]["app-type"]}'
)
if self.pipeline_config['ai']['dify-service-api']['app-type'] == 'chat':
async for msg in self._chat_messages(query):
yield msg
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'agent':
async for msg in self._agent_chat_messages(query):
yield msg
elif self.pipeline_config['ai']['dify-service-api']['app-type'] == 'workflow':
async for msg in self._workflow_messages(query):
yield msg
else:
raise errors.DifyAPIError(
f'不支持的 Dify 应用类型: {self.pipeline_config["ai"]["dify-service-api"]["app-type"]}'
)

View File

@@ -27,7 +27,16 @@ Respond in the same language as the user's input.
class LocalAgentRunner(runner.RequestRunner):
"""本地Agent请求运行器"""
async def run(self, query: core_entities.Query) -> typing.AsyncGenerator[llm_entities.Message, None]:
class ToolCallTracker:
"""工具调用追踪器"""
def __init__(self):
self.active_calls: dict[str, dict] = {}
self.completed_calls: list[llm_entities.ToolCall] = []
async def run(
self, query: core_entities.Query
) -> typing.AsyncGenerator[llm_entities.Message | llm_entities.MessageChunk, None]:
"""运行请求"""
pending_tool_calls = []
@@ -56,7 +65,7 @@ class LocalAgentRunner(runner.RequestRunner):
self.ap.logger.warning(f'Knowledge base {kb_uuid} not found')
raise ValueError(f'Knowledge base {kb_uuid} not found')
result = await kb.retrieve(user_message_text)
result = await kb.retrieve(user_message_text, kb.knowledge_base_entity.top_k)
final_user_message_text = ''
@@ -80,20 +89,92 @@ class LocalAgentRunner(runner.RequestRunner):
req_messages = query.prompt.messages.copy() + query.messages.copy() + [user_message]
# First request
msg = await query.use_llm_model.requester.invoke_llm(
query,
query.use_llm_model,
req_messages,
query.use_funcs,
extra_args=query.use_llm_model.model_entity.extra_args,
)
try:
is_stream = await query.adapter.is_stream_output_supported()
except AttributeError:
is_stream = False
yield msg
remove_think = self.pipeline_config['output'].get('misc', '').get('remove-think')
pending_tool_calls = msg.tool_calls
if not is_stream:
# Non-streaming output; request directly
req_messages.append(msg)
msg = await query.use_llm_model.requester.invoke_llm(
query,
query.use_llm_model,
req_messages,
query.use_funcs,
extra_args=query.use_llm_model.model_entity.extra_args,
remove_think=remove_think,
)
yield msg
final_msg = msg
else:
# Streaming output; tool calls need to be handled
tool_calls_map: dict[str, llm_entities.ToolCall] = {}
msg_idx = 0
accumulated_content = '' # 从开始累积的所有内容
last_role = 'assistant'
msg_sequence = 1
async for msg in query.use_llm_model.requester.invoke_llm_stream(
query,
query.use_llm_model,
req_messages,
query.use_funcs,
extra_args=query.use_llm_model.model_entity.extra_args,
remove_think=remove_think,
):
msg_idx = msg_idx + 1
# 记录角色
if msg.role:
last_role = msg.role
# 累积内容
if msg.content:
accumulated_content += msg.content
# 处理工具调用
if msg.tool_calls:
for tool_call in msg.tool_calls:
if tool_call.id not in tool_calls_map:
tool_calls_map[tool_call.id] = llm_entities.ToolCall(
id=tool_call.id,
type=tool_call.type,
function=llm_entities.FunctionCall(
name=tool_call.function.name if tool_call.function else '', arguments=''
),
)
if tool_call.function and tool_call.function.arguments:
# In streaming mode tool-call arguments may arrive split across multiple chunks, so append instead of overwriting
tool_calls_map[tool_call.id].function.arguments += tool_call.function.arguments
# continue
# Emit all accumulated content every 8 chunks or on the final chunk
if msg_idx % 8 == 0 or msg.is_final:
msg_sequence += 1
yield llm_entities.MessageChunk(
role=last_role,
content=accumulated_content, # 输出所有累积内容
tool_calls=list(tool_calls_map.values()) if (tool_calls_map and msg.is_final) else None,
is_final=msg.is_final,
msg_sequence=msg_sequence,
)
# 创建最终消息用于后续处理
final_msg = llm_entities.MessageChunk(
role=last_role,
content=accumulated_content,
tool_calls=list(tool_calls_map.values()) if tool_calls_map else None,
msg_sequence=msg_sequence,
)
pending_tool_calls = final_msg.tool_calls
first_content = final_msg.content
if isinstance(final_msg, llm_entities.MessageChunk):
first_end_sequence = final_msg.msg_sequence
req_messages.append(final_msg)
# Keep requesting: continue processing as long as there are pending tool calls
while pending_tool_calls:
@@ -104,12 +185,18 @@ class LocalAgentRunner(runner.RequestRunner):
parameters = json.loads(func.arguments)
func_ret = await self.ap.tool_mgr.execute_func_call(query, func.name, parameters)
msg = llm_entities.Message(
role='tool',
content=json.dumps(func_ret, ensure_ascii=False),
tool_call_id=tool_call.id,
)
if is_stream:
msg = llm_entities.MessageChunk(
role='tool',
content=json.dumps(func_ret, ensure_ascii=False),
tool_call_id=tool_call.id,
)
else:
msg = llm_entities.Message(
role='tool',
content=json.dumps(func_ret, ensure_ascii=False),
tool_call_id=tool_call.id,
)
yield msg
@@ -122,17 +209,82 @@ class LocalAgentRunner(runner.RequestRunner):
req_messages.append(err_msg)
# After all calls are handled, request again
msg = await query.use_llm_model.requester.invoke_llm(
query,
query.use_llm_model,
req_messages,
query.use_funcs,
extra_args=query.use_llm_model.model_entity.extra_args,
)
if is_stream:
tool_calls_map = {}
msg_idx = 0
accumulated_content = '' # 从开始累积的所有内容
last_role = 'assistant'
msg_sequence = first_end_sequence
yield msg
async for msg in query.use_llm_model.requester.invoke_llm_stream(
query,
query.use_llm_model,
req_messages,
query.use_funcs,
extra_args=query.use_llm_model.model_entity.extra_args,
remove_think=remove_think,
):
msg_idx += 1
pending_tool_calls = msg.tool_calls
# 记录角色
if msg.role:
last_role = msg.role
req_messages.append(msg)
# Seed with the content produced by the first (pre-tool-call) request
if msg_idx == 1:
accumulated_content = first_content if first_content is not None else accumulated_content
# 累积内容
if msg.content:
accumulated_content += msg.content
# 处理工具调用
if msg.tool_calls:
for tool_call in msg.tool_calls:
if tool_call.id not in tool_calls_map:
tool_calls_map[tool_call.id] = llm_entities.ToolCall(
id=tool_call.id,
type=tool_call.type,
function=llm_entities.FunctionCall(
name=tool_call.function.name if tool_call.function else '', arguments=''
),
)
if tool_call.function and tool_call.function.arguments:
# In streaming mode tool-call arguments may arrive split across multiple chunks, so append instead of overwriting
tool_calls_map[tool_call.id].function.arguments += tool_call.function.arguments
# Emit all accumulated content every 8 chunks or on the final chunk
if msg_idx % 8 == 0 or msg.is_final:
msg_sequence += 1
yield llm_entities.MessageChunk(
role=last_role,
content=accumulated_content, # 输出所有累积内容
tool_calls=list(tool_calls_map.values()) if (tool_calls_map and msg.is_final) else None,
is_final=msg.is_final,
msg_sequence=msg_sequence,
)
final_msg = llm_entities.MessageChunk(
role=last_role,
content=accumulated_content,
tool_calls=list(tool_calls_map.values()) if tool_calls_map else None,
msg_sequence=msg_sequence,
)
else:
# After all calls are handled, request again
msg = await query.use_llm_model.requester.invoke_llm(
query,
query.use_llm_model,
req_messages,
query.use_funcs,
extra_args=query.use_llm_model.model_entity.extra_args,
remove_think=remove_think,
)
yield msg
final_msg = msg
pending_tool_calls = final_msg.tool_calls
req_messages.append(final_msg)

View File

@@ -1,6 +1,8 @@
from __future__ import annotations
import traceback
import uuid
import zipfile
import io
from .services import parser, chunker
from pkg.core import app
from pkg.rag.knowledge.services.embedder import Embedder
@@ -89,16 +91,23 @@ class RuntimeKnowledgeBase:
)
raise
finally:
# delete file from storage
await self.ap.storage_mgr.storage_provider.delete(file.file_name)
async def store_file(self, file_id: str) -> str:
# pre checking
if not await self.ap.storage_mgr.storage_provider.exists(file_id):
raise Exception(f'File {file_id} not found')
file_name = file_id
extension = file_name.split('.')[-1].lower()
if extension == 'zip':
return await self._store_zip_file(file_id)
file_uuid = str(uuid.uuid4())
kb_id = self.knowledge_base_entity.uuid
file_name = file_id
extension = file_name.split('.')[-1]
file_obj_data = {
'uuid': file_uuid,
@@ -123,11 +132,66 @@ class RuntimeKnowledgeBase:
)
return wrapper.id
async def retrieve(self, query: str) -> list[retriever_entities.RetrieveResultEntry]:
async def _store_zip_file(self, zip_file_id: str) -> str:
"""Handle ZIP file by extracting each document and storing them separately."""
self.ap.logger.info(f'Processing ZIP file: {zip_file_id}')
zip_bytes = await self.ap.storage_mgr.storage_provider.load(zip_file_id)
supported_extensions = {'txt', 'pdf', 'docx', 'md', 'html'}
stored_file_tasks = []
# use utf-8 encoding
with zipfile.ZipFile(io.BytesIO(zip_bytes), 'r', metadata_encoding='utf-8') as zip_ref:
for file_info in zip_ref.filelist:
# skip directories and hidden files
if file_info.is_dir() or file_info.filename.startswith('.'):
continue
file_extension = file_info.filename.split('.')[-1].lower()
if file_extension not in supported_extensions:
self.ap.logger.debug(f'Skipping unsupported file in ZIP: {file_info.filename}')
continue
try:
file_content = zip_ref.read(file_info.filename)
base_name = file_info.filename.replace('/', '_').replace('\\', '_')
extension = base_name.split('.')[-1]
file_name = base_name.split('.')[0]
if file_name.startswith('__MACOSX'):
continue
extracted_file_id = file_name + '_' + str(uuid.uuid4())[:8] + '.' + extension
# save file to storage
await self.ap.storage_mgr.storage_provider.save(extracted_file_id, file_content)
task_id = await self.store_file(extracted_file_id)
stored_file_tasks.append(task_id)
self.ap.logger.info(
f'Extracted and stored file from ZIP: {file_info.filename} -> {extracted_file_id}'
)
except Exception as e:
self.ap.logger.warning(f'Failed to extract file {file_info.filename} from ZIP: {e}')
continue
if not stored_file_tasks:
raise Exception('No supported files found in ZIP archive')
self.ap.logger.info(f'Successfully processed ZIP file {zip_file_id}, extracted {len(stored_file_tasks)} files')
await self.ap.storage_mgr.storage_provider.delete(zip_file_id)
return stored_file_tasks[0] if stored_file_tasks else ''
async def retrieve(self, query: str, top_k: int) -> list[retriever_entities.RetrieveResultEntry]:
embedding_model = await self.ap.model_mgr.get_embedding_model_by_uuid(
self.knowledge_base_entity.embedding_model_uuid
)
return await self.retriever.retrieve(self.knowledge_base_entity.uuid, query, embedding_model)
return await self.retriever.retrieve(self.knowledge_base_entity.uuid, query, embedding_model, top_k)
async def delete_file(self, file_id: str):
# delete vector

View File

@@ -45,17 +45,23 @@ class AnnouncementManager:
async def fetch_all(self) -> list[Announcement]:
"""获取所有公告"""
resp = requests.get(
url='https://api.github.com/repos/langbot-app/LangBot/contents/res/announcement.json',
proxies=self.ap.proxy_mgr.get_forward_proxies(),
timeout=5,
)
obj_json = resp.json()
b64_content = obj_json['content']
# 解码
content = base64.b64decode(b64_content).decode('utf-8')
try:
resp = requests.get(
url='https://api.github.com/repos/langbot-app/LangBot/contents/res/announcement.json',
proxies=self.ap.proxy_mgr.get_forward_proxies(),
timeout=5,
)
resp.raise_for_status()  # check that the request succeeded
obj_json = resp.json()
b64_content = obj_json['content']
# decode
content = base64.b64decode(b64_content).decode('utf-8')
return [Announcement(**item) for item in json.loads(content)]
return [Announcement(**item) for item in json.loads(content)]
except (requests.RequestException, json.JSONDecodeError, KeyError) as e:
self.ap.logger.warning(f"获取公告失败: {e}")
pass
return []  # return an empty list when the request fails
async def fetch_saved(self) -> list[Announcement]:
if not os.path.exists('data/labels/announcement_saved.json'):

View File

@@ -1,6 +1,6 @@
semantic_version = 'v4.1.0'
semantic_version = 'v4.2.2'
required_database_version = 4
required_database_version = 5
"""Tag the version of the database schema, used to check if the database needs to be migrated"""
debug_mode = False

View File

@@ -204,9 +204,9 @@ async def get_slack_image_to_base64(pic_url: str, bot_token: str):
try:
async with aiohttp.ClientSession() as session:
async with session.get(pic_url, headers=headers) as resp:
mime_type = resp.headers.get("Content-Type", "application/octet-stream")
mime_type = resp.headers.get('Content-Type', 'application/octet-stream')
file_bytes = await resp.read()
base64_str = base64.b64encode(file_bytes).decode("utf-8")
return f"data:{mime_type};base64,{base64_str}"
base64_str = base64.b64encode(file_bytes).decode('utf-8')
return f'data:{mime_type};base64,{base64_str}'
except Exception as e:
raise (e)
raise (e)

View File

@@ -32,7 +32,7 @@ def import_dir(path: str):
rel_path = full_path.replace(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), '')
rel_path = rel_path[1:]
rel_path = rel_path.replace('/', '.')[:-3]
rel_path = rel_path.replace("\\",".")
rel_path = rel_path.replace('\\', '.')
importlib.import_module(rel_path)

View File

@@ -28,15 +28,19 @@ class VersionManager:
async def get_release_list(self) -> list:
"""获取发行列表"""
rls_list_resp = requests.get(
url='https://api.github.com/repos/langbot-app/LangBot/releases',
proxies=self.ap.proxy_mgr.get_forward_proxies(),
timeout=5,
)
rls_list = rls_list_resp.json()
return rls_list
try:
rls_list_resp = requests.get(
url='https://api.github.com/repos/langbot-app/LangBot/releases',
proxies=self.ap.proxy_mgr.get_forward_proxies(),
timeout=5,
)
rls_list_resp.raise_for_status()  # check that the request succeeded
rls_list = rls_list_resp.json()
return rls_list
except Exception as e:
self.ap.logger.warning(f"获取发行列表失败: {e}")
pass
return []
async def update_all(self):
"""检查更新并下载源码"""

View File

@@ -1,6 +1,6 @@
[project]
name = "langbot"
version = "4.1.0"
version = "4.2.2"
description = "高稳定、支持扩展、多模态 - 大模型原生即时通信机器人平台"
readme = "README.md"
requires-python = ">=3.10.1"

View File

@@ -2,6 +2,7 @@ admins: []
api:
port: 5300
command:
enable: true
prefix:
- '!'
-

View File

@@ -51,7 +51,6 @@
"base-url": "https://api.dify.ai/v1",
"app-type": "chat",
"api-key": "your-api-key",
"thinking-convert": "plain",
"timeout": 30
},
"dashscope-app-api": {
@@ -87,7 +86,8 @@
"hide-exception": true,
"at-sender": true,
"quote-origin": true,
"track-function-calls": false
"track-function-calls": false,
"remove-think": false
}
}
}

View File

@@ -118,26 +118,6 @@ stages:
zh_Hans: API 密钥
type: string
required: true
- name: thinking-convert
label:
en_US: CoT Convert
zh_Hans: 思维链转换策略
type: select
required: true
default: plain
options:
- name: plain
label:
en_US: Convert to <think>...</think>
zh_Hans: 转换成 <think>...</think>
- name: original
label:
en_US: Original
zh_Hans: 原始
- name: remove
label:
en_US: Remove
zh_Hans: 移除
- name: dashscope-app-api
label:
en_US: Aliyun Dashscope App API

View File

@@ -105,3 +105,13 @@ stages:
type: boolean
required: true
default: false
- name: remove-think
label:
en_US: Remove CoT
zh_Hans: 删除思维链
description:
en_US: 'If enabled, LangBot will remove the LLM thought content from responses. Note: when using streaming responses, removing the CoT may cause a long wait before the first token appears.'
zh_Hans: '如果启用,将自动删除大模型回复中的模型思考内容。注意:当您使用流式响应时,删除思维链可能会导致首个 Token 的等待时间过长'
type: boolean
required: true
default: false
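
The latency note follows from how think-stripping has to behave in a streaming pipeline: nothing can be forwarded until the closing `</think>` tag has been seen, so the whole reasoning phase is buffered before the first visible token. A rough sketch of that behaviour (illustrative only, not the actual LangBot implementation):

```python
def strip_think_stream(chunks):
    """Suppress streamed text between <think> and </think>; pass everything after through."""
    in_think = False
    done_think = False
    for piece in chunks:
        if not done_think and '<think>' in piece:
            in_think = True
            piece = piece.split('<think>', 1)[0]
        if in_think:
            if '</think>' in piece:
                in_think = False
                done_think = True
                piece = piece.split('</think>', 1)[1]
            else:
                continue  # still inside the CoT: nothing reaches the user yet
        if piece:
            yield piece


print(list(strip_think_stream(['<think>step 1', ' step 2', '</think>', 'Hello', ' world'])))
# -> ['Hello', ' world']  (everything before </think> never left the buffer)
```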

View File

@@ -79,6 +79,9 @@ stages:
label:
en_US: Blacklist
zh_Hans: 黑名单
description:
en_US: Sessions in the blacklist will be ignored. The format is `{launcher_type}_{launcher_id}` (without the quotes); for example, `person_123` matches a private chat, `group_456` matches a group chat, `person_*` matches all private chats, `group_*` matches all group chats, and `*_123` matches private and group chats with user ID 123
zh_Hans: 黑名单中的会话将被忽略;会话格式:`{launcher_type}_{launcher_id}`(删除引号),例如 `person_123` 匹配私聊会话,`group_456` 匹配群聊会话;`person_*` 匹配所有私聊会话,`group_*` 匹配所有群聊会话;`*_123` 匹配用户 ID 为 123 的私聊和群聊消息
type: array[string]
required: true
default: []
@@ -86,6 +89,9 @@ stages:
label:
en_US: Whitelist
zh_Hans: 白名单
description:
en_US: Only respond to sessions in the whitelist. The format is `{launcher_type}_{launcher_id}` (without the quotes); for example, `person_123` matches a private chat, `group_456` matches a group chat, `person_*` matches all private chats, `group_*` matches all group chats, and `*_123` matches private and group chats with user ID 123
zh_Hans: 仅响应白名单中的会话;会话格式:`{launcher_type}_{launcher_id}`(删除引号),例如 `person_123` 匹配私聊会话,`group_456` 匹配群聊会话;`person_*` 匹配所有私聊会话,`group_*` 匹配所有群聊会话;`*_123` 匹配用户 ID 为 123 的私聊和群聊消息
type: array[string]
required: true
default: []
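
The blacklist/whitelist entries described above are simple wildcard patterns over the `{launcher_type}_{launcher_id}` session string. A hedged sketch of how such entries could be evaluated, using `fnmatch` purely for illustration (the real matching code lives in the access-control stage and is not shown in this diff):

```python
from fnmatch import fnmatch


def session_matches(session: str, patterns: list[str]) -> bool:
    """session is '{launcher_type}_{launcher_id}', e.g. 'person_123' or 'group_456'."""
    return any(fnmatch(session, pattern) for pattern in patterns)


blacklist = ['person_123', 'group_*']
print(session_matches('person_123', blacklist))  # True  (exact match)
print(session_matches('group_456', blacklist))   # True  (matches 'group_*')
print(session_matches('person_999', blacklist))  # False
```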

1
web/.env.example Normal file
View File

@@ -0,0 +1 @@
NEXT_PUBLIC_API_BASE_URL=http://localhost:5300

1
web/.gitignore vendored
View File

@@ -32,6 +32,7 @@ yarn-error.log*
# env files (can opt-in for committing if needed)
.env*
!.env.example
# vercel
.vercel

View File

@@ -1,36 +1,3 @@
This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app).
# Debug LangBot Frontend
## Getting Started
First, run the development server:
```bash
npm run dev
# or
yarn dev
# or
pnpm dev
# or
bun dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel.
## Learn More
To learn more about Next.js, take a look at the following resources:
- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome!
## Deploy on Vercel
The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details.
Please refer to the [Development Guide](https://docs.langbot.app/en/develop/dev-config.html) for more information.

478
web/package-lock.json generated
View File

@@ -10,9 +10,12 @@
"dependencies": {
"@dnd-kit/core": "^6.3.1",
"@dnd-kit/sortable": "^10.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@hookform/resolvers": "^5.0.1",
"@radix-ui/react-checkbox": "^1.3.1",
"@radix-ui/react-context-menu": "^2.2.15",
"@radix-ui/react-dialog": "^1.1.14",
"@radix-ui/react-dropdown-menu": "^2.1.15",
"@radix-ui/react-hover-card": "^1.1.13",
"@radix-ui/react-label": "^2.1.6",
"@radix-ui/react-popover": "^1.1.14",
@@ -144,6 +147,7 @@
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.3.1.tgz",
"integrity": "sha512-pVGjBIt1Y6gg3EJN8jTcfpP/+uuRksIo055oE/OBkDNcjZqVbfkWCksG1Jp4yZnj3iKWyWX8fdG/j6UDYPbFog==",
"dev": true,
"optional": true,
"dependencies": {
"@emnapi/wasi-threads": "1.0.1",
@@ -163,6 +167,7 @@
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.0.1.tgz",
"integrity": "sha512-iIBu7mwkq4UQGeMEM8bLwNK962nXdhodeScX4slfQnRhEMMzvYivHhutCIk8uojvmASXXPC2WNEjwxFWk72Oqw==",
"dev": true,
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
@@ -753,6 +758,7 @@
"version": "0.2.7",
"resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.7.tgz",
"integrity": "sha512-5yximcFK5FNompXfJFoWanu5l8v1hNGqNHh9du1xETp9HWk/B/PzvchX55WYOPaIeNglG8++68AAiauBAtbnzw==",
"dev": true,
"optional": true,
"dependencies": {
"@emnapi/core": "^1.3.1",
@@ -1089,6 +1095,57 @@
}
}
},
"node_modules/@radix-ui/react-context-menu": {
"version": "2.2.15",
"resolved": "https://registry.npmjs.org/@radix-ui/react-context-menu/-/react-context-menu-2.2.15.tgz",
"integrity": "sha512-UsQUMjcYTsBjTSXw0P3GO0werEQvUY2plgRQuKoCTtkNr45q1DiL51j4m7gxhABzZ0BadoXNsIbg7F3KwiUBbw==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-menu": "2.1.15",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-controllable-state": "1.2.2"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-context-menu/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog": {
"version": "1.1.14",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.14.tgz",
@@ -1152,31 +1209,6 @@
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-scope": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz",
"integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-portal": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
@@ -1266,6 +1298,58 @@
}
}
},
"node_modules/@radix-ui/react-dropdown-menu": {
"version": "2.1.15",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.15.tgz",
"integrity": "sha512-mIBnOjgwo9AH3FyKaSWoSu/dYj6VdhJ7frEPiGTeXCdUFHjl9h3mFh2wwhEtINOmYXWhdpf1rY2minFsmaNgVQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.2",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-menu": "2.1.15",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-controllable-state": "1.2.2"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-dropdown-menu/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-focus-guards": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.2.tgz",
@@ -1282,13 +1366,13 @@
}
},
"node_modules/@radix-ui/react-focus-scope": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.6.tgz",
"integrity": "sha512-r9zpYNUQY+2jWHWZGyddQLL9YHkM/XvSFHVcWs7bdVuxMAnCwTAuy6Pf47Z4nw7dYcUou1vg/VgjjrrH03VeBw==",
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz",
"integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
@@ -1306,6 +1390,29 @@
}
}
},
"node_modules/@radix-ui/react-focus-scope/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-hover-card": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/@radix-ui/react-hover-card/-/react-hover-card-1.1.13.tgz",
@@ -1378,6 +1485,232 @@
}
}
},
"node_modules/@radix-ui/react-menu": {
"version": "2.1.15",
"resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.15.tgz",
"integrity": "sha512-tVlmA3Vb9n8SZSd+YSbuFR66l87Wiy4du+YE+0hzKQEANA+7cWKH1WgqcEX4pXqxUFQKrWQGHdvEfw00TjFiew==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.2",
"@radix-ui/react-collection": "1.1.7",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-direction": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.10",
"@radix-ui/react-focus-guards": "1.1.2",
"@radix-ui/react-focus-scope": "1.1.7",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-popper": "1.2.7",
"@radix-ui/react-portal": "1.1.9",
"@radix-ui/react-presence": "1.1.4",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-roving-focus": "1.1.10",
"@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"aria-hidden": "^1.2.4",
"react-remove-scroll": "^2.6.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-arrow": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz",
"integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-collection": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz",
"integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-dismissable-layer": {
"version": "1.1.10",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.10.tgz",
"integrity": "sha512-IM1zzRV4W3HtVgftdQiiOmA0AdJlCtMLe00FXaHwgt3rAnNsIyDqshvkIW3hj/iu5hu8ERP7KIYki6NkqDxAwQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.2",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-escape-keydown": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-popper": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.7.tgz",
"integrity": "sha512-IUFAccz1JyKcf/RjB552PlWwxjeCJB8/4KxT7EhBHOJM+mN7LdW+B3kacJXILm32xawcMMjb2i0cIZpo+f9kiQ==",
"license": "MIT",
"dependencies": {
"@floating-ui/react-dom": "^2.0.0",
"@radix-ui/react-arrow": "1.1.7",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-layout-effect": "1.1.1",
"@radix-ui/react-use-rect": "1.1.1",
"@radix-ui/react-use-size": "1.1.1",
"@radix-ui/rect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-portal": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
"integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-layout-effect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-primitive": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz",
"integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-roving-focus": {
"version": "1.1.10",
"resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.10.tgz",
"integrity": "sha512-dT9aOXUen9JSsxnMPv/0VqySQf5eDQ6LCk5Sw28kamz8wSOW2bJdlX2Bg5VUIIcV+6XlHpWTIuTPCf/UNIyq8Q==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.2",
"@radix-ui/react-collection": "1.1.7",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-direction": "1.1.1",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-controllable-state": "1.2.2"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-popover": {
"version": "1.1.14",
"resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.14.tgz",
@@ -1465,31 +1798,6 @@
}
}
},
"node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-focus-scope": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz",
"integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-popper": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.7.tgz",
@@ -2603,6 +2911,60 @@
"node": ">=14.0.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
"version": "1.4.3",
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
"@emnapi/wasi-threads": "1.0.2",
"tslib": "^2.4.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
"version": "1.4.3",
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
"version": "1.0.2",
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
"version": "0.2.9",
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
"@emnapi/core": "^1.4.0",
"@emnapi/runtime": "^1.4.0",
"@tybys/wasm-util": "^0.9.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
"version": "0.9.0",
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
"version": "2.8.0",
"inBundle": true,
"license": "0BSD",
"optional": true
},
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
"version": "4.1.5",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.5.tgz",
@@ -2685,6 +3047,7 @@
"version": "0.9.0",
"resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.9.0.tgz",
"integrity": "sha512-6+7nlbMVX/PVDCwaIQ8nTOPveOcFLSt8GcXdx8hD0bt39uWxYT88uXzqTd4fTvqta7oeUJqudepapKNt2DYJFw==",
"dev": true,
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
@@ -4762,14 +5125,15 @@
}
},
"node_modules/form-data": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz",
"integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==",
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"es-set-tostringtag": "^2.1.0",
"hasown": "^2.0.2",
"mime-types": "^2.1.12"
},
"engines": {

@@ -4,8 +4,6 @@
"private": true,
"scripts": {
"dev": "next dev --turbopack",
"dev:local": "NEXT_PUBLIC_API_BASE_URL=http://localhost:5300 next dev --turbopack",
"dev:local:win": "set NEXT_PUBLIC_API_BASE_URL=http://localhost:5300&&next dev --turbopack",
"build": "next build",
"start": "next start",
"lint": "next lint",
@@ -17,11 +15,16 @@
"prettier --write"
]
},
"overrides": {
"@radix-ui/react-focus-scope": "1.1.7"
},
"dependencies": {
"@dnd-kit/core": "^6.3.1",
"@dnd-kit/sortable": "^10.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@hookform/resolvers": "^5.0.1",
"@radix-ui/react-checkbox": "^1.3.1",
"@radix-ui/react-context-menu": "^2.2.15",
"@radix-ui/react-dialog": "^1.1.14",
"@radix-ui/react-dropdown-menu": "^2.1.15",
"@radix-ui/react-hover-card": "^1.1.13",

@@ -56,6 +56,15 @@
background: rgba(0, 0, 0, 0.35); /* darker on hover */
}
/* Scrollbar in dark mode */
.dark ::-webkit-scrollbar-thumb {
background: rgba(255, 255, 255, 0.2); /* translucent white */
}
.dark ::-webkit-scrollbar-thumb:hover {
background: rgba(255, 255, 255, 0.35); /* darker on hover */
}
/* Edge compatibility */
@supports (-ms-ime-align: auto) {
body {
@@ -108,36 +117,36 @@
}
.dark {
--background: oklch(0.141 0.005 285.823);
--background: oklch(0.08 0.002 285.823);
--foreground: oklch(0.985 0 0);
--card: oklch(0.21 0.006 285.885);
--card: oklch(0.12 0.004 285.885);
--card-foreground: oklch(0.985 0 0);
--popover: oklch(0.21 0.006 285.885);
--popover: oklch(0.12 0.004 285.885);
--popover-foreground: oklch(0.985 0 0);
--primary: oklch(0.92 0.004 286.32);
--primary-foreground: oklch(0.21 0.006 285.885);
--secondary: oklch(0.274 0.006 286.033);
--primary: oklch(0.62 0.2 255);
--primary-foreground: oklch(1 0 0);
--secondary: oklch(0.18 0.004 286.033);
--secondary-foreground: oklch(0.985 0 0);
--muted: oklch(0.274 0.006 286.033);
--muted: oklch(0.18 0.004 286.033);
--muted-foreground: oklch(0.705 0.015 286.067);
--accent: oklch(0.274 0.006 286.033);
--accent: oklch(0.18 0.004 286.033);
--accent-foreground: oklch(0.985 0 0);
--destructive: oklch(0.704 0.191 22.216);
--border: oklch(1 0 0 / 10%);
--input: oklch(1 0 0 / 15%);
--border: oklch(1 0 0 / 8%);
--input: oklch(1 0 0 / 10%);
--ring: oklch(0.552 0.016 285.938);
--chart-1: oklch(0.488 0.243 264.376);
--chart-2: oklch(0.696 0.17 162.48);
--chart-3: oklch(0.769 0.188 70.08);
--chart-4: oklch(0.627 0.265 303.9);
--chart-5: oklch(0.645 0.246 16.439);
--sidebar: oklch(0.21 0.006 285.885);
--sidebar: oklch(0.1 0.003 285.885);
--sidebar-foreground: oklch(0.985 0 0);
--sidebar-primary: oklch(0.488 0.243 264.376);
--sidebar-primary-foreground: oklch(0.985 0 0);
--sidebar-accent: oklch(0.274 0.006 286.033);
--sidebar-primary: oklch(0.62 0.2 255);
--sidebar-primary-foreground: oklch(1 0 0);
--sidebar-accent: oklch(0.18 0.004 286.033);
--sidebar-accent-foreground: oklch(0.985 0 0);
--sidebar-border: oklch(1 0 0 / 10%);
--sidebar-border: oklch(1 0 0 / 8%);
--sidebar-ring: oklch(0.552 0.016 285.938);
}

@@ -159,7 +159,7 @@ export default function BotDetailDialog({
<SidebarProvider className="items-start w-full flex">
<Sidebar
collapsible="none"
className="hidden md:flex h-[80vh] w-40 min-w-[120px] border-r bg-white"
className="hidden md:flex h-[80vh] w-40 min-w-[120px] border-r bg-white dark:bg-black"
>
<SidebarContent>
<SidebarGroup>

@@ -6,12 +6,22 @@
box-shadow: 0px 2px 2px 0 rgba(0, 0, 0, 0.2);
padding: 1.2rem;
cursor: pointer;
transition: all 0.2s ease;
}
:global(.dark) .cardContainer {
background-color: #1f1f22;
box-shadow: none;
}
.cardContainer:hover {
box-shadow: 0px 2px 8px 0 rgba(0, 0, 0, 0.1);
}
:global(.dark) .cardContainer:hover {
box-shadow: none;
}
.iconBasicInfoContainer {
width: 100%;
height: 100%;
@@ -47,6 +57,11 @@
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
color: #1a1a1a;
}
:global(.dark) .basicInfoName {
color: #f0f0f0;
}
.basicInfoDescription {
@@ -58,6 +73,10 @@
text-overflow: ellipsis;
}
:global(.dark) .basicInfoDescription {
color: #888888;
}
.basicInfoAdapterContainer {
display: flex;
flex-direction: row;
@@ -71,12 +90,20 @@
color: #626262;
}
:global(.dark) .basicInfoAdapterIcon {
color: #a0a0a0;
}
.basicInfoAdapterLabel {
font-size: 1.2rem;
font-weight: 500;
color: #626262;
}
:global(.dark) .basicInfoAdapterLabel {
color: #a0a0a0;
}
.basicInfoPipelineContainer {
display: flex;
flex-direction: row;
@@ -90,12 +117,20 @@
margin-top: 0.2rem;
}
:global(.dark) .basicInfoPipelineIcon {
color: #a0a0a0;
}
.basicInfoPipelineLabel {
font-size: 1.2rem;
font-weight: 500;
color: #626262;
}
:global(.dark) .basicInfoPipelineLabel {
color: #a0a0a0;
}
.bigText {
white-space: nowrap;
overflow: hidden;

@@ -394,7 +394,7 @@ export default function BotForm({
<FormLabel>{t('bots.bindPipeline')}</FormLabel>
<FormControl>
<Select onValueChange={field.onChange} {...field}>
<SelectTrigger>
<SelectTrigger className="bg-[#ffffff] dark:bg-[#2a2a2e]">
<SelectValue
placeholder={t('bots.selectPipeline')}
/>
@@ -467,7 +467,7 @@ export default function BotForm({
}}
value={field.value}
>
<SelectTrigger className="w-[180px]">
<SelectTrigger className="w-[180px] bg-[#ffffff] dark:bg-[#2a2a2e]">
<SelectValue placeholder={t('bots.selectAdapter')} />
</SelectTrigger>
<SelectContent className="fixed z-[1000]">

@@ -18,6 +18,11 @@
cursor: pointer;
}
:global(.dark) .botLogCardContainer {
background-color: #1f1f22;
border: 1px solid #2a2a2e;
}
.listHeader {
width: 100%;
height: 2.5rem;

@@ -26,6 +26,7 @@ import {
} from '@/components/ui/hover-card';
import { useTranslation } from 'react-i18next';
import { i18nObj } from '@/i18n/I18nProvider';
import { Textarea } from '@/components/ui/textarea';
export default function DynamicFormItemComponent({
config,
@@ -132,7 +133,7 @@ export default function DynamicFormItemComponent({
case DynamicFormItemType.SELECT:
return (
<Select value={field.value} onValueChange={field.onChange}>
<SelectTrigger>
<SelectTrigger className="bg-[#ffffff] dark:bg-[#2a2a2e]">
<SelectValue placeholder={t('common.select')} />
</SelectTrigger>
<SelectContent>
@@ -150,7 +151,7 @@ export default function DynamicFormItemComponent({
case DynamicFormItemType.LLM_MODEL_SELECTOR:
return (
<Select value={field.value} onValueChange={field.onChange}>
<SelectTrigger>
<SelectTrigger className="bg-[#ffffff] dark:bg-[#2a2a2e]">
<SelectValue placeholder={t('models.selectModel')} />
</SelectTrigger>
<SelectContent>
@@ -267,7 +268,7 @@ export default function DynamicFormItemComponent({
case DynamicFormItemType.KNOWLEDGE_BASE_SELECTOR:
return (
<Select value={field.value} onValueChange={field.onChange}>
<SelectTrigger>
<SelectTrigger className="bg-[#ffffff] dark:bg-[#2a2a2e]">
<SelectValue placeholder={t('knowledge.selectKnowledgeBase')} />
</SelectTrigger>
<SelectContent>
@@ -291,7 +292,7 @@ export default function DynamicFormItemComponent({
<div key={index} className="flex gap-2 items-center">
{/* Role selection */}
{index === 0 ? (
<div className="w-[120px] px-3 py-2 border rounded bg-gray-50 text-gray-500">
<div className="w-[120px] px-3 py-2 border rounded bg-gray-50 dark:bg-[#2a292e] text-gray-500 dark:text-white dark:border-gray-600">
system
</div>
) : (
@@ -303,7 +304,7 @@ export default function DynamicFormItemComponent({
field.onChange(newValue);
}}
>
<SelectTrigger className="w-[120px]">
<SelectTrigger className="w-[120px] bg-[#ffffff] dark:bg-[#2a2a2e]">
<SelectValue />
</SelectTrigger>
<SelectContent>
@@ -315,7 +316,7 @@ export default function DynamicFormItemComponent({
</Select>
)}
{/* Content input */}
<Input
<Textarea
className="w-[300px]"
value={item.content}
onChange={(e) => {

Some files were not shown because too many files have changed in this diff.