Compare commits

..

91 Commits

Author SHA1 Message Date
Junyan Qin (Chin)
13e29a9966 chore: release v3.4.13.1 (#1299) 2025-04-14 20:19:18 +08:00
Guanchao Wang
601b0a8964 fix(moonshot): tool_call_id not found error (#1040) (#1298) 2025-04-14 20:17:11 +08:00
Guanchao Wang
7c2ceb0aca fix: add reasoning content for deepseek-reasoner (#1296) 2025-04-14 15:05:53 +08:00
Guanchao Wang
42fabd5133 fix: delete print function in lark (#1295) 2025-04-14 14:37:34 +08:00
Guanchao Wang
210a8856e2 fix: telegram markdown & supergroup bugs (#1293) 2025-04-13 18:48:38 +08:00
Guanchao Wang
c531cb11af fix: bailian api streaming mode can't be established 2025-04-13 17:47:05 +08:00
Junyan Qin (Chin)
07e073f526 chore: perf issue template (#1289) 2025-04-11 17:52:04 +08:00
Junyan Qin (Chin)
c5457374a8 chore: release v3.4.13 (#1284) 2025-04-09 21:58:23 +08:00
Junyan Qin (Chin)
5198349591 Merge pull request #1275 from yrk111222/master
Add ModelScope Support
2025-04-03 21:00:03 +08:00
Junyan Qin
8a4967525a fix(modelscope): bad base-url in migration 2025-04-03 20:52:01 +08:00
Junyan Qin
30b068c6e2 doc: reorder modelscope in README 2025-04-03 20:44:41 +08:00
Junyan Qin
ea3fff59ac chore: remove verbose models from llm-models.json 2025-04-03 20:40:36 +08:00
yrk
b09ce8296f Add ModelScope Support 2025-04-03 16:55:14 +08:00
Junyan Qin (Chin)
f9d07779a9 fix: slack is incorrectly enabled as default (#1274) 2025-04-03 14:17:21 +08:00
Junyan Qin (Chin)
51634c1caf chore: release v3.4.12.1 (#1271) 2025-04-02 15:23:38 +08:00
Guanchao Wang
0e00da6617 Merge pull request #1270 from RockChinQ/fix/telegram-markdown
fix: markdown and image problems in tg
2025-04-02 12:33:15 +08:00
Junyan Qin (Chin)
5ee6baeaaa Merge pull request #1268 from RockChinQ/version/3.4.12
chore: release v3.4.12
2025-04-01 21:15:46 +08:00
Junyan Qin
f11a036c60 chore: release v3.4.12 2025-04-01 21:13:41 +08:00
Junyan Qin (Chin)
0ac02ff4ce Merge pull request #1267 from RockChinQ/chore/default-prompt
chore: provide default prompt
2025-04-01 20:43:33 +08:00
Junyan Qin
99cc50b5cb chore: provide default prompt 2025-04-01 20:42:23 +08:00
Junyan Qin (Chin)
1d8fb02989 Merge pull request #1218 from fdc310/master
Add WeChat support for sending mini programs, forwarding mini programs, sending emoji, and sending links
2025-04-01 20:38:32 +08:00
Junyan Qin
122cb1188c style: standardized component names 2025-04-01 20:37:39 +08:00
Junyan Qin (Chin)
ca36ade288 Merge pull request #1266 from RockChinQ/chore/slack-schema
chore: add slack config schema
2025-04-01 20:04:08 +08:00
Junyan Qin
0877046db7 chore: add slack config schema 2025-04-01 20:03:42 +08:00
Junyan Qin (Chin)
ce9615a00e Merge pull request #1265 from RockChinQ/feat/markdowncard
add support for markdown card in dingtalk & tg
2025-04-01 20:01:44 +08:00
Junyan Qin
dbe5a41395 chore: schema for markdown config 2025-04-01 20:01:20 +08:00
Junyan Qin
4a4ca54c6e feat: migration for markdown config 2025-04-01 19:59:45 +08:00
wangcham
47acb63feb add support for markdown card in dingtalk & tg 2025-04-01 07:11:48 -04:00
Junyan Qin (Chin)
038c5d41e2 Merge pull request #1258 from RockChinQ/feat/slack
feat: add slack adapter
2025-04-01 15:33:22 +08:00
Junyan Qin
011a795895 doc(README): add slack 2025-04-01 15:32:48 +08:00
wangcham
873a0339d8 feat: add support for sending active message in slack 2025-04-01 03:03:48 -04:00
wangcham
715da548c8 fix: put the link and content together 2025-04-01 02:37:25 -04:00
Junyan Qin (Chin)
5378c6ba35 chore: provides TZ=Asia/Shanghai in docker-compose.yaml as default (#1259) 2025-03-31 14:00:08 +08:00
Guanchao Wang
8799f86ea4 Update pkg/platform/sources/slack.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-03-31 13:48:37 +08:00
wangcham
686be4acbc fix: eliminate host config 2025-03-31 01:10:45 -04:00
wangcham
5744eca37a fix: bot user id in slack 2025-03-30 23:06:03 -04:00
wangcham
70f8ddb1ba fix: delete useless image function in slack 2025-03-30 22:56:51 -04:00
wangcham
be1328cee9 feat: add support for slack 2025-03-30 22:24:53 -04:00
wangcham
c0dbf6fd13 feat:add support for slack 2025-03-30 12:53:48 -04:00
Junyan Qin (Chin)
ffe9c3e0f8 chore: release v3.4.11.2 (#1257) 2025-03-31 00:02:54 +08:00
Junyan Qin (Chin)
e20b79b0ed perf(chatcmpl): remove space from base-url (#1256) 2025-03-30 23:59:55 +08:00
Junyan Qin (Chin)
e04d46db2c perf(claude): ensure system message removed (#867) (#1255) 2025-03-30 23:51:53 +08:00
Junyan Qin (Chin)
7341435127 perf(chatcmpl): use extra_body to pass args (#1254) 2025-03-30 23:43:45 +08:00
Junyan Qin (Chin)
8b56f94667 perf: add debugging msg for webhook style adapters (#1253) 2025-03-30 23:23:31 +08:00
Junyan Qin (Chin)
f5e98d4ebb fix(gewe): should not block main launching process (#1163) (#1252) 2025-03-30 23:14:56 +08:00
Junyan Qin (Chin)
23a0dba470 feat(dify): throw error event (#1251) 2025-03-30 23:04:46 +08:00
fdc310
512371cc25 Merge branch 'RockChinQ:master' into master 2025-03-30 22:55:55 +08:00
Dong_master
cd4a06b692 Fix mistyped parameter names and standardize class names 2025-03-29 01:18:30 +08:00
Junyan Qin (Chin)
629ebae0e9 chore: release v3.4.11.1 (#1244) 2025-03-28 23:48:09 +08:00
Junyan Qin (Chin)
394d4b3c1b fix: static_file sent with wrong mimetype (#1243) 2025-03-28 23:46:24 +08:00
Dong_master
432440d6bf Add sending messages and files via reply 2025-03-27 00:01:05 +08:00
Junyan Qin (Chin)
a0fd152d19 doc(README): add 3.13 in python version badge (#1232) 2025-03-24 15:43:46 +08:00
Junyan Qin (Chin)
1a62e08bab chore: update gitignore (#1231) 2025-03-24 15:40:51 +08:00
Junyan Qin (Chin)
edbc59c117 perf: use source_platform_object to pass source event in aiocqhttp (#1230) 2025-03-24 14:03:44 +08:00
WangCham
cfdd0f8cb2 fix: Format the code in a standardized way (#1222) 2025-03-21 14:04:13 +08:00
Junyan Qin (Chin)
808f30675d doc(README): contributors (#1223) 2025-03-21 10:47:32 +08:00
Junyan Qin (Chin)
46072abb41 doc(README): add planning platforms comments (#1219) 2025-03-19 23:23:56 +08:00
fdc310
71ffbb9eb5 Merge branch 'RockChinQ:master' into master 2025-03-19 23:13:58 +08:00
Junyan Qin (Chin)
27bbb2297a Merge pull request #1217 from RockChinQ/version/3.4.11
chore: bump version 3.4.11
2025-03-19 22:50:50 +08:00
Junyan Qin
0d235aaef8 chore: bump version 3.4.11 2025-03-19 22:50:15 +08:00
Dong_master
e22c804deb Add sending emoji (probably not very useful) and sending links 2025-03-19 22:47:10 +08:00
Dong_master
c136e790ef Add mini program sending; rename mini program forwarding to ForwardMiniPrograms 2025-03-19 21:56:13 +08:00
Dong_master
3697afd9d6 Add mini program sending; rename mini program forwarding to ForwardMiniPrograms 2025-03-19 21:55:36 +08:00
Dong_master
c597c6482a Add mini program forwarding 2025-03-19 20:46:56 +08:00
Junyan Qin (Chin)
dda8c637d8 Merge pull request #1216 from RockChinQ/doc/mcp-readme
doc(README): add mcp comments
2025-03-19 13:10:30 +08:00
Junyan Qin
e6d7aaa440 doc(README): add mcp comments 2025-03-19 13:10:01 +08:00
Junyan Qin (Chin)
028458b33c Merge pull request #1210 from fdc310/master
Handle AtAll when everyone is @-mentioned, and fix replying to official accounts and the WeChat Team
2025-03-19 13:03:07 +08:00
Junyan Qin (Chin)
9c7d8099cb Merge pull request #1215 from RockChinQ/feat/mcp-tools
feat: add supports for loading mcp server as LLM tools provider
2025-03-19 13:01:09 +08:00
Junyan Qin
5640dc332d feat(mcp): available for provider reloading 2025-03-19 12:41:04 +08:00
Junyan Qin
40275c3ef1 feat: add supports for loading mcp server as LLM tools provider 2025-03-19 12:08:47 +08:00
Junyan Qin (Chin)
ebe0b2f335 Merge pull request #1214 from RockChinQ/feat/tool-loaders
feat: tool loader abstraction
2025-03-19 09:37:10 +08:00
Junyan Qin
97603e8441 feat: tool loader abstraction 2025-03-19 09:36:03 +08:00
Dong_master
72cd444861 Handle AtAll when everyone is @-mentioned, and fix replying to official accounts and the WeChat Team 2025-03-18 23:14:55 +08:00
Dong_master
955b859f2c Handle AtAll when everyone is @-mentioned, and fix replying to official accounts and the WeChat Team 2025-03-18 23:14:33 +08:00
Junyan Qin
dea5cc9c0c stash 2025-03-18 21:59:53 +08:00
Junyan Qin (Chin)
d13ab1703e Merge pull request #1209 from wangcham/feat/wxoa-notice-msg
feat: add support for loading message in wxoa
2025-03-18 20:39:22 +08:00
Junyan Qin
61ab6a009b chore: migration for wxoa loading message 2025-03-18 20:38:33 +08:00
wangcham
a9ae36d362 feat: add support for loading message in wxoa 2025-03-18 06:58:35 -04:00
Junyan Qin (Chin)
f518395ce5 Merge pull request #1204 from fdc310/master
Add quoted-message replies (for now an At is built and passed along only when the quoted message is the bot's), and add detection for mini program, money transfer, red packet, and WeChat Channels messages
2025-03-18 12:04:09 +08:00
Junyan Qin (Chin)
20b17fe378 Merge pull request #1203 from IGCrystal/master
fix: Fix SSL certificate verification error during GitHub plugin installation
2025-03-17 23:54:25 +08:00
Junyan Qin
572182180c deps: add certifi 2025-03-17 23:53:29 +08:00
Dong_master
de261099aa Add quoted-message replies (for now an At is built and passed along only when the quoted message is the bot's), and add detection for mini program, money transfer, red packet, and WeChat Channels messages 2025-03-17 23:33:44 +08:00
Dong_master
50f0122955 Add quoted-message replies (for now an At is built and passed along only when the quoted message is the bot's), and add detection for mini program, money transfer, red packet, and WeChat Channels messages 2025-03-17 23:31:13 +08:00
Dong_master
fe9eff923e Add quoted-message replies (for now an At is built and passed along only when the quoted message is the bot's), and add detection for mini program, money transfer, red packet, and WeChat Channels messages 2025-03-17 23:23:21 +08:00
冰苷晶
dd36278032 fix: add certifi to requirement 2025-03-17 23:16:51 +08:00
冰苷晶
a079821976 fix: fix SSL certificate verification error during GitHub plugin installation.
- Create a custom SSL context using certifi for proper HTTPS certificate verification, meow
- Add the ssl parameter to aiohttp requests to prevent download failure due to missing root certificates, meow
- Improve error messages and enhance the overall plugin installation process, meow!
2025-03-17 23:12:23 +08:00
Junyan Qin
fa233e0a24 fix(variables): user_message_text not provided 2025-03-17 22:04:00 +08:00
Junyan Qin (Chin)
22306cb4ea Merge pull request #1199 from fdc310/master
Fix misjudgment when the bot sends messages to others without a PushContent field, and fix the bot replying to its own messages
2025-03-16 22:12:59 +08:00
Dong_master
f2d45a3668 Move interception of messages manually sent by the bot account up to the conversion layer 2025-03-16 21:43:07 +08:00
Dong_master
db91ff12f7 Fix misjudgment caused by the missing PushContent field when the bot sends messages to others, and fix the bot replying to its own messages 2025-03-16 19:10:07 +08:00
Dong_master
eb841fb73e Fix misjudgment caused by the missing PushContent field when the bot sends messages to others, and fix the bot replying to its own messages 2025-03-16 19:08:56 +08:00
55 changed files with 1707 additions and 296 deletions

View File

@@ -3,22 +3,6 @@ description: 报错或漏洞请使用这个模板创建,不使用此模板创
title: "[Bug]: "
labels: ["bug?"]
body:
- type: dropdown
attributes:
label: 消息平台适配器
description: "接入的消息平台类型"
options:
- 其他(或暂未使用)
- Nakuru(go-cqhttp)
- aiocqhttp(使用 OneBot 协议接入的)
- qq-botpy(QQ官方API WebSocket)
- qqofficial(QQ官方API Webhook)
- lark(飞书)
- wecom(企业微信)
- gewechat(个人微信)
- discord
validations:
required: true
- type: input
attributes:
label: 运行环境

.gitignore vendored
View File

@@ -19,7 +19,7 @@ cookies.json
data/labels/announcement_saved.json
cmdpriv.json
tips.py
.venv
venv*
bin/
.vscode
test_*
@@ -39,3 +39,4 @@ botpy.log*
/libs/wecom_api/test.py
/venv
/jp-tyo-churros-05.rockchin.top
test.py

View File

@@ -26,7 +26,7 @@
[![QQ Group](https://img.shields.io/badge/%E7%A4%BE%E5%8C%BAQQ%E7%BE%A4-966235608-blue)](https://qm.qq.com/q/JLi38whHum)
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/RockChinQ/LangBot)](https://github.com/RockChinQ/LangBot/releases/latest)
![Dynamic JSON Badge](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.qchatgpt.rockchin.top%2Fapi%2Fv2%2Fview%2Frealtime%2Fcount_query%3Fminute%3D10080&query=%24.data.count&label=%E4%BD%BF%E7%94%A8%E9%87%8F%EF%BC%887%E6%97%A5%EF%BC%89)
<img src="https://img.shields.io/badge/python-3.10 | 3.11 | 3.12-blue.svg" alt="python">
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
[![star](https://gitcode.com/RockChinQ/LangBot/star/badge.svg)](https://gitcode.com/RockChinQ/LangBot)
[简体中文](README.md) / [English](README_EN.md) / [日本語](README_JP.md)
@@ -39,7 +39,7 @@
- 💬 大模型对话、Agent支持多种大模型适配群聊和私聊具有多轮对话、工具调用、多模态能力并深度适配 [Dify](https://dify.ai)。目前支持 QQ、QQ频道、企业微信、个人微信、飞书、Discord、Telegram 等平台。
- 🛠️ 高稳定性、功能完备:原生支持访问控制、限速、敏感词过滤等机制;配置简单,支持多种部署方式。
- 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;丰富生态,目前已有数十个[插件](https://docs.langbot.app/plugin/plugin-intro.html)
- 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;适配 Anthropic [MCP 协议](https://modelcontextprotocol.io/)目前已有数十个[插件](https://docs.langbot.app/plugin/plugin-intro.html)
- 😻 [New] Web 管理面板:支持通过浏览器管理 LangBot 实例,具体支持功能,查看[文档](https://docs.langbot.app/webui/intro.html)
## 📦 开始使用
@@ -93,11 +93,13 @@
| 钉钉 | ✅ | |
| Discord | ✅ | |
| Telegram | ✅ | |
| Slack | ✅ | |
| LINE | 🚧 | |
| WhatsApp | 🚧 | |
🚧: 正在开发中
### 大模型
### 大模型能力
| 模型 | 状态 | 备注 |
| --- | --- | --- |
@@ -114,6 +116,8 @@
| [SiliconFlow](https://siliconflow.cn/) | ✅ | 大模型聚合平台 |
| [阿里云百炼](https://bailian.console.aliyun.com/) | ✅ | 大模型聚合平台, LLMOps 平台 |
| [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | 大模型聚合平台, LLMOps 平台 |
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | 大模型聚合平台 |
| [MCP](https://modelcontextprotocol.io/) | ✅ | 支持通过 MCP 协议获取工具 |
### TTS
@@ -131,9 +135,15 @@
## 😘 社区贡献
LangBot 离不开以下贡献者和社区内所有人的贡献,我们欢迎任何形式的贡献和反馈。
感谢以下[代码贡献者](https://github.com/RockChinQ/LangBot/graphs/contributors)和社区里其他成员对 LangBot 的贡献:
<a href="https://github.com/RockChinQ/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=RockChinQ/LangBot" />
</a>
以及 LangBot 核心团队成员:
- [RockChinQ](https://github.com/RockChinQ)
- [the-lazy-me](https://github.com/the-lazy-me)
- [wangcham](https://github.com/wangcham)
- [KaedeSAMA](https://github.com/KaedeSAMA)

View File

@@ -24,7 +24,7 @@
[![Discord](https://img.shields.io/discord/1335141740050649118?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.gg/wdNEHETs87)
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/RockChinQ/LangBot)](https://github.com/RockChinQ/LangBot/releases/latest)
![Dynamic JSON Badge](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.qchatgpt.rockchin.top%2Fapi%2Fv2%2Fview%2Frealtime%2Fcount_query%3Fminute%3D10080&query=%24.data.count&label=Usage(7days))
<img src="https://img.shields.io/badge/python-3.10 | 3.11 | 3.12-blue.svg" alt="python">
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
[简体中文](README.md) / [English](README_EN.md) / [日本語](README_JP.md)
@@ -36,7 +36,7 @@
- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapt to group chats and private chats; Supports multi-round conversations, tool calls, and multi-modal capabilities. Deeply integrates with [Dify](https://dify.ai). Currently supports QQ, QQ Channel, WeCom, personal WeChat, Lark, DingTalk, Discord, Telegram, etc.
- 🛠️ High Stability, Feature-rich: Native access control, rate limiting, sensitive word filtering, etc. mechanisms; Easy to use, supports multiple deployment methods.
- 🧩 Plugin Extension, Active Community: Support event-driven, component extension, etc. plugin mechanisms; Rich ecology, currently has dozens of [plugins](https://docs.langbot.app/plugin/plugin-intro.html)
- 🧩 Plugin Extension, Active Community: Support event-driven, component extension, etc. plugin mechanisms; Integrate Anthropic [MCP protocol](https://modelcontextprotocol.io/); Currently has dozens of [plugins](https://docs.langbot.app/plugin/plugin-intro.html)
- 😻 [New] Web UI: Support management LangBot instance through the browser, for details, see [documentation](https://docs.langbot.app/webui/intro.html)
## 📦 Getting Started
@@ -90,6 +90,8 @@ Directly use the released version to run, see the [Manual Deployment](https://do
| DingTalk | ✅ | |
| Discord | ✅ | |
| Telegram | ✅ | |
| Slack | ✅ | |
| LINE | 🚧 | |
| WhatsApp | 🚧 | |
🚧: In development
@@ -111,14 +113,20 @@ Directly use the released version to run, see the [Manual Deployment](https://do
| [SiliconFlow](https://siliconflow.cn/) | ✅ | LLM gateway(MaaS) |
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | LLM gateway(MaaS), LLMOps platform |
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | LLM gateway(MaaS), LLMOps platform |
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | LLM gateway(MaaS) |
| [MCP](https://modelcontextprotocol.io/) | ✅ | Support tool access through MCP protocol |
## 🤝 Community Contribution
Thanks to the following contributors and everyone in the community for their contributions.
Thank you for the following [code contributors](https://github.com/RockChinQ/LangBot/graphs/contributors) and other members in the community for their contributions to LangBot:
<a href="https://github.com/RockChinQ/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=RockChinQ/LangBot" />
</a>
And the core team members of LangBot:
- [RockChinQ](https://github.com/RockChinQ)
- [the-lazy-me](https://github.com/the-lazy-me)
- [wangcham](https://github.com/wangcham)
- [KaedeSAMA](https://github.com/KaedeSAMA)

View File

@@ -23,7 +23,7 @@
[![Discord](https://img.shields.io/discord/1335141740050649118?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.gg/wdNEHETs87)
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/RockChinQ/LangBot)](https://github.com/RockChinQ/LangBot/releases/latest)
![Dynamic JSON Badge](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.qchatgpt.rockchin.top%2Fapi%2Fv2%2Fview%2Frealtime%2Fcount_query%3Fminute%3D10080&query=%24.data.count&label=Usage(7days))
<img src="https://img.shields.io/badge/python-3.10 | 3.11 | 3.12-blue.svg" alt="python">
<img src="https://img.shields.io/badge/python-3.10 ~ 3.13 -blue.svg" alt="python">
[简体中文](README.md) / [English](README_EN.md) / [日本語](README_JP.md)
@@ -35,7 +35,7 @@
- 💬 LLM / エージェントとのチャット: 複数のLLMをサポートし、グループチャットとプライベートチャットに対応。マルチラウンドの会話、ツールの呼び出し、マルチモーダル機能をサポート。 [Dify](https://dify.ai) と深く統合。現在、QQ、QQ チャンネル、WeChat、個人 WeChat、Lark、DingTalk、Discord、Telegram など、複数のプラットフォームをサポートしています。
- 🛠️ 高い安定性、豊富な機能: ネイティブのアクセス制御、レート制限、敏感な単語のフィルタリングなどのメカニズムをサポート。使いやすく、複数のデプロイ方法をサポート。
- 🧩 プラグイン拡張、活発なコミュニティ: イベント駆動、コンポーネント拡張などのプラグインメカニズムをサポート。豊富なエコシステム、現在数十の[プラグイン](https://docs.langbot.app/plugin/plugin-intro.html)が存在。
- 🧩 プラグイン拡張、活発なコミュニティ: イベント駆動、コンポーネント拡張などのプラグインメカニズムをサポート。適配 Anthropic [MCP プロトコル](https://modelcontextprotocol.io/)豊富なエコシステム、現在数十の[プラグイン](https://docs.langbot.app/plugin/plugin-intro.html)が存在。
- 😻 [新機能] Web UI: ブラウザを通じてLangBotインスタンスを管理することをサポート。詳細は[ドキュメント](https://docs.langbot.app/webui/intro.html)を参照。
## 📦 始め方
@@ -89,6 +89,8 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
| DingTalk | ✅ | |
| Discord | ✅ | |
| Telegram | ✅ | |
| Slack | ✅ | |
| LINE | 🚧 | |
| WhatsApp | 🚧 | |
🚧: 開発中
@@ -110,14 +112,20 @@ LangBotはBTPanelにリストされています。BTPanelをインストール
| [SiliconFlow](https://siliconflow.cn/) | ✅ | LLMゲートウェイ(MaaS) |
| [Aliyun Bailian](https://bailian.console.aliyun.com/) | ✅ | LLMゲートウェイ(MaaS), LLMOpsプラットフォーム |
| [Volc Engine Ark](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | LLMゲートウェイ(MaaS), LLMOpsプラットフォーム |
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | LLMゲートウェイ(MaaS) |
| [MCP](https://modelcontextprotocol.io/) | ✅ | MCPプロトコルをサポート |
## 🤝 コミュニティ貢献
以下の貢献者とコミュニティの皆さんの貢献に感謝します。
LangBot への貢献に対して、以下の [コード貢献者](https://github.com/RockChinQ/LangBot/graphs/contributors) とコミュニティの他のメンバーに感謝します。
<a href="https://github.com/RockChinQ/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=RockChinQ/LangBot" />
</a>
LangBot の核心チームメンバー:
- [RockChinQ](https://github.com/RockChinQ)
- [the-lazy-me](https://github.com/the-lazy-me)
- [wangcham](https://github.com/wangcham)
- [KaedeSAMA](https://github.com/KaedeSAMA)

View File

@@ -8,6 +8,8 @@ services:
- ./data:/app/data
- ./plugins:/app/plugins
restart: on-failure
environment:
- TZ=Asia/Shanghai
ports:
- 5300:5300 # 供 WebUI 使用
- 2280-2290:2280-2290 # 供消息平台适配器方向连接

View File

@@ -10,7 +10,7 @@ import traceback
class DingTalkClient:
def __init__(self, client_id: str, client_secret: str,robot_name:str,robot_code:str):
def __init__(self, client_id: str, client_secret: str,robot_name:str,robot_code:str,markdown_card:bool):
"""初始化 WebSocket 连接并自动启动"""
self.credential = dingtalk_stream.Credential(client_id, client_secret)
self.client = dingtalk_stream.DingTalkStreamClient(self.credential)
@@ -26,6 +26,7 @@ class DingTalkClient:
self.robot_name = robot_name
self.robot_code = robot_code
self.access_token_expiry_time = ''
self.markdown_card = markdown_card
@@ -128,7 +129,10 @@ class DingTalkClient:
async def send_message(self,content:str,incoming_message):
self.EchoTextHandler.reply_text(content,incoming_message)
if self.markdown_card:
self.EchoTextHandler.reply_markdown(title=self.robot_name+'的回答',text=content,incoming_message=incoming_message)
else:
self.EchoTextHandler.reply_text(content,incoming_message)
async def get_incoming_message(self):

View File

@@ -27,7 +27,6 @@ xml_template = """
</xml>
"""
user_msg_queue = {}
class OAClient():
@@ -45,6 +44,7 @@ class OAClient():
}
self.access_token_expiry_time = None
self.msg_id_map = {}
self.generated_content = {}
async def handle_callback_request(self):
@@ -87,12 +87,10 @@ class OAClient():
from_user = root.find("FromUserName").text # 发送者
to_user = root.find("ToUserName").text # 机器人
from pkg.platform.sources import officialaccount
timeout = 4.80
interval = 0.1
while True:
content = officialaccount.generated_content.pop(message_data["MsgId"], None)
content = self.generated_content.pop(message_data["MsgId"], None)
if content:
response_xml = xml_template.format(
to_user=from_user,
@@ -172,11 +170,14 @@ class OAClient():
for handler in self._message_handlers[msg_type]:
await handler(event)
async def set_message(self,msg_id:int,content:str):
self.generated_content[msg_id] = content
class OAClientForLongerResponse():
def __init__(self,token:str,EncodingAESKey:str,AppID:str,Appsecret:str):
def __init__(self,token:str,EncodingAESKey:str,AppID:str,Appsecret:str,LoadingMessage:str):
self.token = token
self.aes = EncodingAESKey
self.appid = AppID
@@ -189,6 +190,9 @@ class OAClientForLongerResponse():
"example":[],
}
self.access_token_expiry_time = None
self.loading_message = LoadingMessage
self.msg_queue = {}
self.user_msg_queue = {}
async def handle_callback_request(self):
try:
@@ -221,17 +225,15 @@ class OAClientForLongerResponse():
from_user = root.find("FromUserName").text
to_user = root.find("ToUserName").text
from pkg.platform.sources import officialaccount as oa
if oa.msg_queue.get(from_user) and oa.msg_queue[from_user][0]["content"]:
queue_top = oa.msg_queue[from_user].pop(0)
if self.msg_queue.get(from_user) and self.msg_queue[from_user][0]["content"]:
queue_top = self.msg_queue[from_user].pop(0)
queue_content = queue_top["content"]
# 弹出用户消息
if user_msg_queue.get(from_user) and user_msg_queue[from_user]:
user_msg_queue[from_user].pop(0)
if self.user_msg_queue.get(from_user) and self.user_msg_queue[from_user]:
self.user_msg_queue[from_user].pop(0)
response_xml = xml_template.format(
to_user=from_user,
@@ -246,10 +248,10 @@ class OAClientForLongerResponse():
to_user=from_user,
from_user=to_user,
create_time=int(time.time()),
content="AI正在思考中请发送任意内容获取回答。"
content=self.loading_message
)
if user_msg_queue.get(from_user) and user_msg_queue[from_user][0]["content"]:
if self.user_msg_queue.get(from_user) and self.user_msg_queue[from_user][0]["content"]:
return response_xml
else:
message_data = await self.get_message(xml_msg)
@@ -257,7 +259,7 @@ class OAClientForLongerResponse():
if message_data:
event = OAEvent.from_payload(message_data)
if event:
user_msg_queue.setdefault(from_user,[]).append(
self.user_msg_queue.setdefault(from_user,[]).append(
{
"content":event.message,
}
@@ -317,6 +319,18 @@ class OAClientForLongerResponse():
for handler in self._message_handlers[msg_type]:
await handler(event)
async def set_message(self,from_user:int,message_id:int,content:str):
if from_user not in self.msg_queue:
self.msg_queue[from_user] = []
self.msg_queue[from_user].append(
{
"msg_id":message_id,
"content":content,
}
)

View File

libs/slack_api/api.py Normal file
View File

@@ -0,0 +1,111 @@
import json
from quart import Quart, jsonify,request
from slack_sdk.web.async_client import AsyncWebClient
from .slackevent import SlackEvent
from typing import Callable, Dict, Any
from pkg.platform.types import events as platform_events, message as platform_message
class SlackClient():
def __init__(self,bot_token:str,signing_secret:str):
self.bot_token = bot_token
self.signing_secret = signing_secret
self.app = Quart(__name__)
self.client = AsyncWebClient(self.bot_token)
self.app.add_url_rule('/callback/command', 'handle_callback', self.handle_callback_request, methods=['GET', 'POST'])
self._message_handlers = {
"example":[],
}
self.bot_user_id = None # 避免机器人回复自己的消息
async def handle_callback_request(self):
try:
body = await request.get_data()
data = json.loads(body)
if 'type' in data:
if data['type'] == 'url_verification':
return data['challenge']
bot_user_id = data.get("event",{}).get("bot_id","")
if self.bot_user_id and bot_user_id == self.bot_user_id:
return jsonify({'status': 'ok'})
# 处理私信
if data and data.get("event", {}).get("channel_type") in ["im"]:
event = SlackEvent.from_payload(data)
await self._handle_message(event)
return jsonify({'status': 'ok'})
#处理群聊
if data.get("event",{}).get("type") == 'app_mention':
data.setdefault("event", {})["channel_type"] = "channel"
event = SlackEvent.from_payload(data)
await self._handle_message(event)
return jsonify({'status':'ok'})
return jsonify({'status': 'ok'})
except Exception as e:
raise(e)
async def _handle_message(self, event: SlackEvent):
"""
处理消息事件。
"""
msg_type = event.type
if msg_type in self._message_handlers:
for handler in self._message_handlers[msg_type]:
await handler(event)
def on_message(self, msg_type: str):
"""注册消息类型处理器"""
def decorator(func: Callable[[platform_events.Event], None]):
if msg_type not in self._message_handlers:
self._message_handlers[msg_type] = []
self._message_handlers[msg_type].append(func)
return func
return decorator
async def send_message_to_channel(self,text:str,channel_id:str):
try:
response = await self.client.chat_postMessage(
channel=channel_id,
text=text
)
if self.bot_user_id is None and response.get("ok"):
self.bot_user_id = response["message"]["bot_id"]
return
except Exception as e:
raise e
async def send_message_to_one(self,text:str,user_id:str):
try:
response = await self.client.chat_postMessage(
channel = '@'+user_id,
text= text
)
if self.bot_user_id is None and response.get("ok"):
self.bot_user_id = response["message"]["bot_id"]
return
except Exception as e:
raise e
async def run_task(self, host: str, port: int, *args, **kwargs):
"""
启动 Quart 应用。
"""
await self.app.run_task(host=host, port=port, *args, **kwargs)
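For orientation, a minimal wiring sketch of the new SlackClient above, assuming a bot token and signing secret are already at hand (token values and port are placeholders, not taken from this diff):

import asyncio
from libs.slack_api.api import SlackClient

client = SlackClient(bot_token="xoxb-placeholder", signing_secret="placeholder")

@client.on_message("im")
async def echo_dm(event):
    # echo direct messages back to the sender
    await client.send_message_to_one(event.text, event.user_id)

asyncio.run(client.run_task(host="0.0.0.0", port=2288))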

View File

@@ -0,0 +1,91 @@
from typing import Dict, Any, Optional
class SlackEvent(dict):
@staticmethod
def from_payload(payload: Dict[str, Any]) -> Optional["SlackEvent"]:
try:
event = SlackEvent(payload)
return event
except KeyError:
return None
@property
def text(self) -> str:
if self.get("event", {}).get("channel_type") == "im":
blocks = self.get("event", {}).get("blocks", [])
if not blocks:
return ""
elements = blocks[0].get("elements", [])
if not elements:
return ""
elements = elements[0].get("elements", [])
text = ""
for el in elements:
if el.get("type") == "text":
text += el.get("text", "")
elif el.get("type") == "link":
text += el.get("url", "")
return text
if self.get("event",{}).get("channel_type") == 'channel':
message_text = ""
for block in self.get("event", {}).get("blocks", []):
if block.get("type") == "rich_text":
for element in block.get("elements", []):
if element.get("type") == "rich_text_section":
parts = []
for el in element.get("elements", []):
if el.get("type") == "text":
parts.append(el["text"])
elif el.get("type") == "link":
parts.append(el["url"])
message_text = "".join(parts)
return message_text
@property
def user_id(self) -> Optional[str]:
return self.get("event", {}).get("user","")
@property
def channel_id(self) -> Optional[str]:
return self.get("event", {}).get("channel","")
@property
def type(self) -> str:
""" message对应私聊app_mention对应频道at """
return self.get("event", {}).get("channel_type", "")
@property
def message_id(self) -> str:
return self.get("event_id","")
@property
def pic_url(self) -> str:
"""提取 Slack 事件中的图片 URL"""
files = self.get("event", {}).get("files", [])
if files:
return files[0].get("url_private", "")
return None
@property
def sender_name(self) -> str:
return self.get("event", {}).get("user","")
def __getattr__(self, key: str) -> Optional[Any]:
return self.get(key)
def __setattr__(self, key: str, value: Any) -> None:
self[key] = value
def __repr__(self) -> str:
return f"<SlackEvent {super().__repr__()}>"

View File

@@ -66,8 +66,42 @@ class HTTPController:
@self.quart_app.route("/")
async def index():
return await quart.send_from_directory(frontend_path, "index.html")
return await quart.send_from_directory(
frontend_path,
"index.html",
mimetype="text/html"
)
@self.quart_app.route("/<path:path>")
async def static_file(path: str):
return await quart.send_from_directory(frontend_path, path)
mimetype = None
if path.endswith(".html"):
mimetype = "text/html"
elif path.endswith(".js"):
mimetype = "application/javascript"
elif path.endswith(".css"):
mimetype = "text/css"
elif path.endswith(".png"):
mimetype = "image/png"
elif path.endswith(".jpg"):
mimetype = "image/jpeg"
elif path.endswith(".jpeg"):
mimetype = "image/jpeg"
elif path.endswith(".gif"):
mimetype = "image/gif"
elif path.endswith(".svg"):
mimetype = "image/svg+xml"
elif path.endswith(".ico"):
mimetype = "image/x-icon"
elif path.endswith(".json"):
mimetype = "application/json"
elif path.endswith(".txt"):
mimetype = "text/plain"
return await quart.send_from_directory(
frontend_path,
path,
mimetype=mimetype
)
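The hand-written extension-to-MIME-type chain above could also be expressed with the standard library; a sketch of the equivalent idea, assuming the same quart_app/frontend_path context as the surrounding method (this is not what the commit does):

import mimetypes
import quart

@self.quart_app.route("/<path:path>")
async def static_file(path: str):
    # guess_type returns (type, encoding); None lets Quart fall back to its default
    mimetype, _ = mimetypes.guess_type(path)
    return await quart.send_from_directory(frontend_path, path, mimetype=mimetype)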

View File

@@ -16,7 +16,6 @@ class FuncOperator(operator.CommandOperator):
all_functions = await self.ap.tool_mgr.get_all_functions(
plugin_enabled=True,
plugin_status=plugin_context.RuntimeContainerStatus.INITIALIZED,
)
for func in all_functions:

View File

@@ -204,6 +204,8 @@ class Application:
case core_entities.LifecycleControlScope.PROVIDER.value:
self.logger.info("执行热重载 scope="+scope)
await self.tool_mgr.shutdown()
latest_llm_model_config = await config.load_json_config("data/metadata/llm-models.json", "templates/metadata/llm-models.json")
self.llm_models_meta = latest_llm_model_config
llm_model_mgr_inst = llm_model_mgr.ModelManager(self)

View File

@@ -33,6 +33,9 @@ required_deps = {
"dingtalk_stream": "dingtalk_stream",
"dashscope": "dashscope",
"telegram": "python-telegram-bot",
"certifi": "certifi",
"mcp": "mcp",
"telegramify_markdown":"telegramify-markdown",
}
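The mapping keys are import names and the values are pip package names; a hypothetical consumer of such a mapping (the installer logic itself is not part of this diff):

import importlib

missing = []
for module_name, pip_name in required_deps.items():
    try:
        importlib.import_module(module_name)
    except ImportError:
        # collect the pip names of modules that fail to import
        missing.append(pip_name)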

View File

@@ -0,0 +1,26 @@
from __future__ import annotations
from .. import migration
@migration.migration_class("wxoa-loading-message", 36)
class WxoaLoadingMessageMigration(migration.Migration):
"""迁移"""
async def need_migrate(self) -> bool:
"""判断当前环境是否需要运行此迁移"""
for adapter in self.ap.platform_cfg.data['platform-adapters']:
if adapter['adapter'] == 'officialaccount':
if 'LoadingMessage' not in adapter:
return True
return False
async def run(self):
"""执行迁移"""
for adapter in self.ap.platform_cfg.data['platform-adapters']:
if adapter['adapter'] == 'officialaccount':
if 'LoadingMessage' not in adapter:
adapter['LoadingMessage'] = 'AI正在思考中请发送任意内容获取回复。'
await self.ap.platform_cfg.dump_config()

View File

@@ -0,0 +1,20 @@
from __future__ import annotations
from .. import migration
@migration.migration_class("mcp-config", 37)
class MCPConfigMigration(migration.Migration):
"""迁移"""
async def need_migrate(self) -> bool:
"""判断当前环境是否需要运行此迁移"""
return 'mcp' not in self.ap.provider_cfg.data
async def run(self):
"""执行迁移"""
self.ap.provider_cfg.data['mcp'] = {
"servers": []
}
await self.ap.provider_cfg.dump_config()
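After migration 37 runs, provider.json simply contains an (initially empty) MCP server list; a sketch of the resulting fragment:

mcp_fragment = {
    "mcp": {
        "servers": []   # populated later with MCP server definitions
    }
}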

View File

@@ -0,0 +1,26 @@
from __future__ import annotations
from .. import migration
@migration.migration_class("tg-dingtalk-markdown", 38)
class TgDingtalkMarkdownMigration(migration.Migration):
"""迁移"""
async def need_migrate(self) -> bool:
"""判断当前环境是否需要运行此迁移"""
for adapter in self.ap.platform_cfg.data['platform-adapters']:
if adapter['adapter'] in ['dingtalk','telegram']:
if 'markdown_card' not in adapter:
return True
return False
async def run(self):
"""执行迁移"""
for adapter in self.ap.platform_cfg.data['platform-adapters']:
if adapter['adapter'] in ['dingtalk','telegram']:
if 'markdown_card' not in adapter:
adapter['markdown_card'] = False
await self.ap.platform_cfg.dump_config()
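Likewise, migration 38 only adds one key to each dingtalk/telegram adapter entry; a hypothetical entry afterwards (the remaining keys come from the user's existing config and are omitted here):

adapter_entry = {
    "adapter": "telegram",
    "markdown_card": False,  # default written by the migration; set True to enable markdown cards
}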

View File

@@ -0,0 +1,30 @@
from __future__ import annotations
from .. import migration
@migration.migration_class("modelscope-config-completion", 4)
class ModelScopeConfigCompletionMigration(migration.Migration):
"""OpenAI配置迁移
"""
async def need_migrate(self) -> bool:
"""判断当前环境是否需要运行此迁移
"""
return 'modelscope-chat-completions' not in self.ap.provider_cfg.data['requester'] \
or 'modelscope' not in self.ap.provider_cfg.data['keys']
async def run(self):
"""执行迁移
"""
if 'modelscope-chat-completions' not in self.ap.provider_cfg.data['requester']:
self.ap.provider_cfg.data['requester']['modelscope-chat-completions'] = {
'base-url': 'https://api-inference.modelscope.cn/v1',
'args': {},
'timeout': 120,
}
if 'modelscope' not in self.ap.provider_cfg.data['keys']:
self.ap.provider_cfg.data['keys']['modelscope'] = []
await self.ap.provider_cfg.dump_config()

View File

@@ -11,7 +11,9 @@ from ..migrations import m015_gitee_ai_config, m016_dify_service_api, m017_dify_
from ..migrations import m020_wecom_config, m021_lark_config, m022_lmstudio_config, m023_siliconflow_config, m024_discord_config, m025_gewechat_config
from ..migrations import m026_qqofficial_config, m027_wx_official_account_config, m028_aliyun_requester_config
from ..migrations import m029_dashscope_app_api_config, m030_lark_config_cmpl, m031_dingtalk_config, m032_volcark_config
from ..migrations import m033_dify_thinking_config, m034_gewechat_file_url_config, m035_wxoa_mode
from ..migrations import m033_dify_thinking_config, m034_gewechat_file_url_config, m035_wxoa_mode, m036_wxoa_loading_message
from ..migrations import m037_mcp_config, m038_tg_dingtalk_markdown, m039_modelscope_cfg_completion
@stage.stage_class("MigrationStage")
class MigrationStage(stage.BootingStage):

View File

@@ -60,11 +60,14 @@ class PreProcessor(stage.PipelineStage):
content_list = []
plain_text = ""
for me in query.message_chain:
if isinstance(me, platform_message.Plain):
content_list.append(
llm_entities.ContentElement.from_text(me.text)
)
plain_text += me.text
elif isinstance(me, platform_message.Image):
if self.ap.provider_cfg.data['enable-vision'] and (self.ap.provider_cfg.data['runner'] != 'local-agent' or query.use_model.vision_supported):
if me.base64 is not None:
@@ -72,6 +75,8 @@ class PreProcessor(stage.PipelineStage):
llm_entities.ContentElement.from_image_base64(me.base64)
)
query.variables['user_message_text'] = plain_text
query.user_message = llm_entities.Message(
role='user',
content=content_list

View File

@@ -110,7 +110,7 @@ class PlatformManager:
if len(self.adapters) == 0:
self.ap.logger.warning('未运行平台适配器,请根据文档配置并启用平台适配器。')
async def write_back_config(self, adapter_name: str, adapter_inst: msadapter.MessagePlatformAdapter, config: dict):
def write_back_config(self, adapter_name: str, adapter_inst: msadapter.MessagePlatformAdapter, config: dict):
index = -2
for i, adapter in enumerate(self.adapters):
@@ -137,7 +137,7 @@ class PlatformManager:
**config
}
self.ap.platform_cfg.data['platform-adapters'][real_index] = new_cfg
await self.ap.platform_cfg.dump_config()
self.ap.platform_cfg.dump_config_sync()
async def send(self, event: platform_events.MessageEvent, msg: platform_message.MessageChain, adapter: msadapter.MessagePlatformAdapter):
@@ -170,7 +170,8 @@ class PlatformManager:
self.ap.logger.debug(f"Traceback: {traceback.format_exc()}")
tasks.append(exception_wrapper(adapter))
for task in tasks:
self.ap.task_mgr.create_task(
task,

View File

@@ -57,7 +57,7 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
elif msg.path:
arg = msg.path
msg_list.append(aiocqhttp.MessageSegment.record(msg.path))
elif type(msg) is forward.Forward:
elif type(msg) is platform_message.Forward:
for node in msg.node_list:
msg_list.extend((await AiocqhttpMessageConverter.yiri2target(node.message_chain))[0])
@@ -101,69 +101,8 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
class AiocqhttpEventConverter(adapter.EventConverter):
@staticmethod
async def yiri2target(event: platform_events.Event, bot_account_id: int):
msg, msg_id, msg_time = await AiocqhttpMessageConverter.yiri2target(event.message_chain)
if type(event) is platform_events.GroupMessage:
role = "member"
if event.sender.permission == "ADMINISTRATOR":
role = "admin"
elif event.sender.permission == "OWNER":
role = "owner"
payload = {
"post_type": "message",
"message_type": "group",
"time": int(msg_time.timestamp()),
"self_id": bot_account_id,
"sub_type": "normal",
"anonymous": None,
"font": 0,
"message": str(msg),
"raw_message": str(msg),
"sender": {
"age": 0,
"area": "",
"card": "",
"level": "",
"nickname": event.sender.member_name,
"role": role,
"sex": "unknown",
"title": "",
"user_id": event.sender.id,
},
"user_id": event.sender.id,
"message_id": msg_id,
"group_id": event.group.id,
"message_seq": 0,
}
return aiocqhttp.Event.from_payload(payload)
elif type(event) is platform_events.FriendMessage:
payload = {
"post_type": "message",
"message_type": "private",
"time": int(msg_time.timestamp()),
"self_id": bot_account_id,
"sub_type": "friend",
"target_id": bot_account_id,
"message": str(msg),
"raw_message": str(msg),
"font": 0,
"sender": {
"age": 0,
"nickname": event.sender.nickname,
"sex": "unknown",
"user_id": event.sender.id,
},
"message_id": msg_id,
"user_id": event.sender.id,
}
return aiocqhttp.Event.from_payload(payload)
async def yiri2target(event: platform_events.MessageEvent, bot_account_id: int):
return event.source_platform_object
@staticmethod
async def target2yiri(event: aiocqhttp.Event):
@@ -196,6 +135,7 @@ class AiocqhttpEventConverter(adapter.EventConverter):
),
message_chain=yiri_chain,
time=event.time,
source_platform_object=event
)
return converted_event
elif event.message_type == "private":
@@ -207,6 +147,7 @@ class AiocqhttpEventConverter(adapter.EventConverter):
),
message_chain=yiri_chain,
time=event.time,
source_platform_object=event
)

View File

@@ -131,7 +131,8 @@ class DingTalkAdapter(adapter.MessagePlatformAdapter):
client_id=config["client_id"],
client_secret=config["client_secret"],
robot_name = config["robot_name"],
robot_code=config["robot_code"]
robot_code=config["robot_code"],
markdown_card=config["markdown_card"]
)
async def reply_message(

View File

@@ -25,6 +25,7 @@ from ..types import message as platform_message
from ..types import events as platform_events
from ..types import entities as platform_entities
from ...utils import image
import xml.etree.ElementTree as ET
class GewechatMessageConverter(adapter.MessageConverter):
@@ -46,6 +47,17 @@ class GewechatMessageConverter(adapter.MessageConverter):
if not component.url:
pass
content_list.append({"type": "image", "image": component.url})
elif isinstance(component, platform_message.WeChatMiniPrograms):
content_list.append({"type": 'WeChatMiniPrograms', 'mini_app_id': component.mini_app_id, 'display_name': component.display_name,
'page_path': component.page_path, 'cover_img_url': component.image_url, 'title': component.title,
'user_name': component.user_name})
elif isinstance(component, platform_message.WeChatForwardMiniPrograms):
content_list.append({"type": 'WeChatForwardMiniPrograms', 'xml_data': component.xml_data, 'image_url': component.image_url})
elif isinstance(component, platform_message.WeChatEmoji):
content_list.append({'type': 'WeChatEmoji', 'emoji_md5': component.emoji_md5, 'emoji_size': component.emoji_size})
elif isinstance(component, platform_message.WeChatLink):
content_list.append({'type': 'WeChatLink', 'link_title': component.link_title, 'link_desc': component.link_desc,
'link_thumb_url': component.link_thumb_url, 'link_url': component.link_url})
elif isinstance(component, platform_message.Voice):
@@ -74,6 +86,7 @@ class GewechatMessageConverter(adapter.MessageConverter):
if len(line_split) > 0 and regex.match(line_split[0]):
message["Data"]["Content"]["string"] = "\n".join(line_split[1:])
# 正则表达式模式,匹配'@'后跟任意数量的非空白字符
pattern = r'@\S+'
at_string = f"@{bot_account_id}"
@@ -82,8 +95,11 @@ class GewechatMessageConverter(adapter.MessageConverter):
content_list.append(platform_message.At(target=bot_account_id))
content_list.append(platform_message.Plain(message["Data"]["Content"]["string"].replace(at_string, '', 1)))
# 更优雅的替换改名后@机器人仅仅限于单独AT的情况
elif '在群聊中@了你' in message["Data"]["PushContent"]:
content_list.append(platform_message.At(target=bot_account_id))
elif "PushContent" in message['Data'] and '在群聊中@了你' in message["Data"]["PushContent"]:
if '@所有人' in message["Data"]["Content"]["string"]: # at全员时候传入atll不当作at自己
content_list.append(platform_message.AtAll())
else:
content_list.append(platform_message.At(target=bot_account_id))
content_list.append(platform_message.Plain(re.sub(pattern, '', message["Data"]["Content"]["string"])))
else:
content_list = [platform_message.Plain(message["Data"]["Content"]["string"])]
@@ -127,17 +143,78 @@ class GewechatMessageConverter(adapter.MessageConverter):
# 支持微信聊天记录的消息类型,将 XML 内容转换为 MessageChain 传递
try:
content = message["Data"]["Content"]["string"]
# 有三种可能的消息结构:wxid开头(私聊)、直接<?xml>和直接<msg>
if content.startswith('wxid'):
xml_list = content.split('\n')[2:]
xml_data = '\n'.join(xml_list)
elif content.startswith('<?xml'):
xml_list = content.split('\n')[1:]
xml_data = '\n'.join(xml_list)
else:
xml_data = content
try:
content_bytes = content.encode('utf-8')
decoded_content = base64.b64decode(content_bytes)
content_data = ET.fromstring(xml_data)
# print(xml_data)
# 拿到细分消息类型按照gewe接口中描述
'''
小程序:33/36
引用消息:57
转账消息:2000
红包消息:2001
视频号消息:51
'''
appmsg_data = content_data.find('.//appmsg')
data_type = appmsg_data.find('.//type').text
if data_type == '57':
user_data = appmsg_data.find('.//title').text # 拿到用户消息
quote_data = appmsg_data.find('.//refermsg').find('.//content').text # 引用原文
sender_id = appmsg_data.find('.//refermsg').find('.//chatusr').text # 引用用户id
from_name = message['Data']['FromUserName']['string']
message_list =[]
if message['Wxid'] == sender_id and from_name.endswith('@chatroom'): # 因为引用机制暂时无法响应用户,所以当引用用户是机器人时,构建一个at激活机器人
message_list.append(platform_message.At(target=bot_account_id))
message_list.append(platform_message.Quote(
sender_id=sender_id,
origin=platform_message.MessageChain(
[platform_message.Plain(quote_data)]
)))
message_list.append(platform_message.Plain(user_data))
return platform_message.MessageChain(message_list)
elif data_type == '51':
return platform_message.MessageChain(
[platform_message.Unknown(content=decoded_content)]
[platform_message.Plain(text=f'[视频号消息]')]
)
except Exception as e:
# print(content_data)
elif data_type == '2000':
return platform_message.MessageChain(
[platform_message.Plain(text=content)]
[platform_message.Plain(text=f'[转账消息]')]
)
elif data_type == '2001':
return platform_message.MessageChain(
[platform_message.Plain(text=f'[红包消息]')]
)
elif data_type == '5':
return platform_message.MessageChain(
[platform_message.Plain(text=f'[公众号消息]')]
)
elif data_type == '33' or data_type == '36':
return platform_message.MessageChain(
[platform_message.Plain(text=f'[小程序消息]')]
)
# print(data_type.text)
else:
try:
content_bytes = content.encode('utf-8')
decoded_content = base64.b64decode(content_bytes)
return platform_message.MessageChain(
[platform_message.Unknown(content=decoded_content)]
)
except Exception as e:
return platform_message.MessageChain(
[platform_message.Plain(text=content)]
)
except Exception as e:
print(f"Error processing type 49 message: {str(e)}")
return platform_message.MessageChain(
@@ -161,6 +238,14 @@ class GewechatEventConverter(adapter.EventConverter):
event: dict,
bot_account_id: str
) -> platform_events.MessageEvent:
# print(event)
# 排除自己发消息回调回答问题
if event['Wxid'] == event['Data']['FromUserName']['string']:
return None
# 排除公众号以及微信团队消息
if event['Data']['FromUserName']['string'].startswith('gh_')\
or event['Data']['FromUserName']['string'].startswith('weixin'):
return None
message_chain = await self.message_converter.target2yiri(copy.deepcopy(event), bot_account_id)
if not message_chain:
@@ -236,6 +321,9 @@ class GeWeChatAdapter(adapter.MessagePlatformAdapter):
async def gewechat_callback():
data = await quart.request.json
# print(json.dumps(data, indent=4, ensure_ascii=False))
self.ap.logger.debug(
f"Gewechat callback event: {data}"
)
if 'data' in data:
data['Data'] = data['data']
@@ -287,6 +375,19 @@ class GeWeChatAdapter(adapter.MessagePlatformAdapter):
elif msg['type'] == 'image':
self.bot.post_image(app_id=self.config['app_id'], to_wxid=target_id, img_url=msg["image"])
elif msg['type'] == 'WeChatMiniPrograms':
self.bot.post_mini_app(app_id=self.config['app_id'], to_wxid=target_id, mini_app_id=msg['mini_app_id']
, display_name=msg['display_name'], page_path=msg['page_path']
, cover_img_url=msg['cover_img_url'], title=msg['title'], user_name=msg['user_name'])
elif msg['type'] == 'WeChatForwardMiniPrograms':
self.bot.forward_mini_app(app_id=self.config['app_id'], to_wxid=target_id, xml=msg['xml_data'], cover_img_url=msg['image_url'])
elif msg['type'] == 'WeChatEmoji':
self.bot.post_emoji(app_id=self.config['app_id'], to_wxid=target_id,
emoji_md5=msg['emoji_md5'], emoji_size=msg['emoji_size'])
elif msg['type'] == 'WeChatLink':
self.bot.post_link(app_id=self.config['app_id'], to_wxid=target_id
,title=msg['link_title'], desc=msg['link_desc']
, link_url=msg['link_url'], thumb_url=msg['link_thumb_url'])
@@ -299,6 +400,7 @@ class GeWeChatAdapter(adapter.MessagePlatformAdapter):
content_list = await self.message_converter.yiri2target(message)
ats = [item["target"] for item in content_list if item["type"] == "at"]
target_id = message_source.source_platform_object["Data"]["FromUserName"]["string"]
for msg in content_list:
if msg["type"] == "text":
@@ -319,6 +421,22 @@ class GeWeChatAdapter(adapter.MessagePlatformAdapter):
content=msg["content"],
ats=",".join(ats)
)
elif msg['type'] == 'image':
self.bot.post_image(app_id=self.config['app_id'], to_wxid=target_id, img_url=msg["image"])
elif msg['type'] == 'WeChatMiniPrograms':
self.bot.post_mini_app(app_id=self.config['app_id'], to_wxid=target_id, mini_app_id=msg['mini_app_id']
, display_name=msg['display_name'], page_path=msg['page_path']
, cover_img_url=msg['cover_img_url'], title=msg['title'], user_name=msg['user_name'])
elif msg['type'] == 'WeChatForwardMiniPrograms':
self.bot.forward_mini_app(app_id=self.config['app_id'], to_wxid=target_id, xml=msg['xml_data'], cover_img_url=msg['image_url'])
elif msg['type'] == 'WeChatEmoji':
self.bot.post_emoji(app_id=self.config['app_id'], to_wxid=target_id,
emoji_md5=msg['emoji_md5'], emoji_size=msg['emoji_size'])
elif msg['type'] == 'WeChatLink':
self.bot.post_link(app_id=self.config['app_id'], to_wxid=target_id
, title=msg['link_title'], desc=msg['link_desc']
, link_url=msg['link_url'], thumb_url=msg['link_thumb_url'])
async def is_muted(self, group_id: int) -> bool:
pass
@@ -354,26 +472,28 @@ class GeWeChatAdapter(adapter.MessagePlatformAdapter):
self.config["token"]
)
app_id, error_msg = self.bot.login(self.config["app_id"])
if error_msg:
raise Exception(f"Gewechat 登录失败: {error_msg}")
def gewechat_login_process():
self.config["app_id"] = app_id
app_id, error_msg = self.bot.login(self.config["app_id"])
if error_msg:
raise Exception(f"Gewechat 登录失败: {error_msg}")
self.ap.logger.info(f"Gewechat 登录成功app_id: {app_id}")
self.config["app_id"] = app_id
await self.ap.platform_mgr.write_back_config('gewechat', self, self.config)
self.ap.logger.info(f"Gewechat 登录成功app_id: {app_id}")
# 获取 nickname
profile = self.bot.get_profile(self.config["app_id"])
self.bot_account_id = profile["data"]["nickName"]
self.ap.platform_mgr.write_back_config('gewechat', self, self.config)
# 获取 nickname
profile = self.bot.get_profile(self.config["app_id"])
self.bot_account_id = profile["data"]["nickName"]
time.sleep(2)
def thread_set_callback():
time.sleep(3)
ret = self.bot.set_callback(self.config["token"], self.config["callback_url"])
print('设置 Gewechat 回调:', ret)
threading.Thread(target=thread_set_callback).start()
threading.Thread(target=gewechat_login_process).start()
async def shutdown_trigger_placeholder():
while True:

View File

@@ -328,6 +328,10 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
try:
data = await quart.request.json
self.ap.logger.debug(
f"Lark callback event: {data}"
)
if 'encrypt' in data:
cipher = AESCipher(self.config['encrypt-key'])
data = cipher.decrypt_string(data['encrypt'])
@@ -339,7 +343,6 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
type = context.header.event_type
if 'url_verification' == type:
print(data.get("challenge"))
# todo 验证verification token
return {
"challenge": data.get("challenge")

View File

@@ -22,10 +22,6 @@ from ..types import entities as platform_entities
from ...command.errors import ParamNotEnoughError
# 生成的ai回答
generated_content = {}
msg_queue = {}
class OAMessageConverter(adapter.MessageConverter):
@staticmethod
async def yiri2target(message_chain: platform_message.MessageChain):
@@ -69,7 +65,7 @@ class OAEventConverter(adapter.EventConverter):
class OfficialAccountAdapter(adapter.MessagePlatformAdapter):
bot : OAClient
bot : OAClient | OAClientForLongerResponse
ap : app.Application
bot_account_id: str
message_converter: OAMessageConverter = OAMessageConverter()
@@ -107,32 +103,22 @@ class OfficialAccountAdapter(adapter.MessagePlatformAdapter):
EncodingAESKey=config['EncodingAESKey'],
Appsecret=config['AppSecret'],
AppID=config['AppID'],
LoadingMessage=config['LoadingMessage']
)
else:
raise KeyError("请设置微信公众号通信模式")
async def reply_message(self, message_source: platform_events.FriendMessage, message: platform_message.MessageChain, quote_origin: bool = False):
global generated_content
content = await OAMessageConverter.yiri2target(
message
)
generated_content[message_source.message_chain.message_id] = content
from_user = message_source.sender.id
if from_user not in msg_queue:
msg_queue[from_user] = []
msg_queue[from_user].append(
{
"msg_id":message_source.message_chain.message_id,
"content":content,
}
)
if type(self.bot) == OAClient:
await self.bot.set_message(message_source.message_chain.message_id,content)
if type(self.bot) == OAClientForLongerResponse:
from_user = message_source.sender.id
await self.bot.set_message(from_user,message_source.message_chain.message_id,content)
async def send_message(

View File

@@ -0,0 +1,204 @@
from __future__ import annotations
import typing
import asyncio
import traceback
import datetime
from libs.slack_api.api import SlackClient
from pkg.platform.adapter import MessagePlatformAdapter
from pkg.platform.types import events as platform_events, message as platform_message
from libs.slack_api.slackevent import SlackEvent
from pkg.core import app
from .. import adapter
from ...core import app
from ..types import message as platform_message
from ..types import events as platform_events
from ..types import entities as platform_entities
from ...command.errors import ParamNotEnoughError
from ...utils import image
class SlackMessageConverter(adapter.MessageConverter):
@staticmethod
async def yiri2target(message_chain:platform_message.MessageChain):
content_list = []
for msg in message_chain:
if type(msg) is platform_message.Plain:
content_list.append({
"content":msg.text,
})
return content_list
@staticmethod
async def target2yiri(message:str,message_id:str,pic_url:str,bot:SlackClient):
yiri_msg_list = []
yiri_msg_list.append(
platform_message.Source(id=message_id,time=datetime.datetime.now())
)
if pic_url is not None:
base64_url = await image.get_slack_image_to_base64(pic_url=pic_url,bot_token=bot.bot_token)
yiri_msg_list.append(
platform_message.Image(base64=base64_url)
)
yiri_msg_list.append(platform_message.Plain(text=message))
chain = platform_message.MessageChain(yiri_msg_list)
return chain
class SlackEventConverter(adapter.EventConverter):
@staticmethod
async def yiri2target(event:platform_events.MessageEvent) -> SlackEvent:
return event.source_platform_object
@staticmethod
async def target2yiri(event:SlackEvent,bot:SlackClient):
yiri_chain = await SlackMessageConverter.target2yiri(
message=event.text,message_id=event.message_id,pic_url=event.pic_url,bot=bot
)
if event.type == 'channel':
yiri_chain.insert(0, platform_message.At(target="SlackBot"))
sender = platform_entities.GroupMember(
id = event.user_id,
member_name= str(event.sender_name),
permission= 'MEMBER',
group = platform_entities.Group(
id = event.channel_id,
name = 'MEMBER',
permission= platform_entities.Permission.Member
),
special_title='',
join_timestamp=0,
last_speak_timestamp=0,
mute_time_remaining=0
)
time = int(datetime.datetime.utcnow().timestamp())
return platform_events.GroupMessage(
sender = sender,
message_chain=yiri_chain,
time = time,
source_platform_object=event
)
if event.type == 'im':
return platform_events.FriendMessage(
sender=platform_entities.Friend(
id=event.user_id,
nickname = event.sender_name,
remark=""
),
message_chain = yiri_chain,
time = float(datetime.datetime.now().timestamp()),
source_platform_object=event,
)
class SlackAdapter(adapter.MessagePlatformAdapter):
bot: SlackClient
ap: app.Application
bot_account_id: str
message_converter: SlackMessageConverter = SlackMessageConverter()
event_converter: SlackEventConverter = SlackEventConverter()
config: dict
def __init__(self,config:dict,ap:app.Application):
self.config = config
self.ap = ap
required_keys = [
"bot_token",
"signing_secret",
]
missing_keys = [key for key in required_keys if key not in config]
if missing_keys:
raise ParamNotEnoughError("Slack机器人缺少相关配置项请查看文档或联系管理员")
self.bot = SlackClient(
bot_token=self.config["bot_token"],
signing_secret=self.config["signing_secret"]
)
async def reply_message(
self,
message_source: platform_events.MessageEvent,
message: platform_message.MessageChain,
quote_origin: bool = False,
):
slack_event = await SlackEventConverter.yiri2target(
message_source
)
content_list = await SlackMessageConverter.yiri2target(message)
for content in content_list:
if slack_event.type == 'channel':
await self.bot.send_message_to_channel(
content['content'],slack_event.channel_id
)
if slack_event.type == 'im':
await self.bot.send_message_to_one(
content['content'],slack_event.user_id
)
async def send_message(self, target_type: str, target_id: str, message: platform_message.MessageChain):
content_list = await SlackMessageConverter.yiri2target(message)
for content in content_list:
if target_type == 'person':
await self.bot.send_message_to_one(content['content'],target_id)
if target_type == 'group':
await self.bot.send_message_to_channel(content['content'],target_id)
def register_listener(
self,
event_type: typing.Type[platform_events.Event],
callback: typing.Callable[
[platform_events.Event, adapter.MessagePlatformAdapter], None
],
):
async def on_message(event:SlackEvent):
self.bot_account_id = 'SlackBot'
try:
return await callback(
await self.event_converter.target2yiri(event,self.bot),self
)
except:
traceback.print_exc()
if event_type == platform_events.FriendMessage:
self.bot.on_message("im")(on_message)
elif event_type == platform_events.GroupMessage:
self.bot.on_message("channel")(on_message)
async def run_async(self):
async def shutdown_trigger_placeholder():
while True:
await asyncio.sleep(1)
await self.bot.run_task(
host="0.0.0.0",
port=self.config["port"],
shutdown_trigger=shutdown_trigger_placeholder,
)
async def kill(self) -> bool:
return False
async def unregister_listener(
self,
event_type: type,
callback: typing.Callable[[platform_events.Event, MessagePlatformAdapter], None],
):
return super().unregister_listener(event_type, callback)

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: MessagePlatformAdapter
metadata:
name: slack
label:
en_US: Slack API
zh_CN: Slack API
description:
en_US: Slack API
zh_CN: Slack API
spec:
config:
- name: bot_token
label:
en_US: Bot Token
zh_CN: 机器人令牌
type: string
required: true
default: ""
- name: signing_secret
label:
en_US: signing_secret
zh_CN: 密钥
type: string
required: true
default: ""
- name: port
label:
en_US: Port
zh_CN: 监听端口
type: int
required: true
default: 2288
execution:
python:
path: ./slack.py
attr: SlackAdapter
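A hypothetical platform-adapters entry matching this schema; bot_token, signing_secret and port are the keys the adapter reads, while the enable flag is assumed from the project's adapter config convention:

{
    "adapter": "slack",
    "enable": True,
    "bot_token": "xoxb-placeholder",
    "signing_secret": "placeholder",
    "port": 2288,
}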

View File

@@ -4,7 +4,7 @@ import telegram
import telegram.ext
from telegram import Update
from telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, MessageHandler, filters
import telegramify_markdown
import typing
import asyncio
import traceback
@@ -86,9 +86,10 @@ class TelegramMessageConverter(adapter.MessageConverter):
if message.text:
message_text = message.text
message_components.extend(parse_message_text(message_text))
if message.photo:
message_components.extend(parse_message_text(message.caption))
if message.caption:
message_components.extend(parse_message_text(message.caption))
file = await message.photo[-1].get_file()
@@ -126,7 +127,7 @@ class TelegramEventConverter(adapter.EventConverter):
time=event.message.date.timestamp(),
source_platform_object=event
)
elif event.effective_chat.type == 'group':
elif event.effective_chat.type in ('group', 'supergroup'):
return platform_events.GroupMessage(
sender=platform_entities.GroupMember(
id=event.effective_chat.id,
@@ -201,16 +202,23 @@ class TelegramAdapter(adapter.MessagePlatformAdapter):
for component in components:
if component['type'] == 'text':
if self.config['markdown_card'] is True:
content = telegramify_markdown.markdownify(
content= component['text'],
)
else:
content = component['text']
args = {
"chat_id": message_source.source_platform_object.effective_chat.id,
"text": component['text'],
"text": content,
}
if self.config['markdown_card'] is True:
args["parse_mode"] = "MarkdownV2"
if quote_origin:
args['reply_to_message_id'] = message_source.source_platform_object.message.id
if quote_origin:
args['reply_to_message_id'] = message_source.source_platform_object.message.id
await self.bot.send_message(**args)
await self.bot.send_message(**args)
async def is_muted(self, group_id: int) -> bool:
return False
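For context on the markdown_card branch above: telegramify_markdown converts ordinary Markdown into Telegram's MarkdownV2 dialect, escaping the characters the Bot API would otherwise reject. A minimal standalone sketch (the sample text is made up; the telegramify-markdown package from requirements.txt must be installed):

import telegramify_markdown

raw = "**Release v3.4.13** - markdown and supergroup fixes!"
safe = telegramify_markdown.markdownify(content=raw)
# 'safe' has MarkdownV2-reserved characters such as '.', '-' and '!' escaped, so it can
# be sent with parse_mode="MarkdownV2" without a 400 error from the Bot API.
print(safe)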

View File

@@ -807,6 +807,56 @@ class File(MessageComponent):
"""文件名称。"""
size: int
"""文件大小。"""
def __str__(self):
return f'[文件]{self.name}'
# ================ 个人微信专用组件 ================
class WeChatMiniPrograms(MessageComponent):
"""小程序。个人微信专用组件。"""
type: str = 'WeChatMiniPrograms'
mini_app_id: str
"""小程序id"""
user_name: str
"""小程序归属用户id"""
display_name: typing.Optional[str] = ''
"""小程序名称"""
page_path: typing.Optional[str] = ''
"""打开地址"""
title: typing.Optional[str] = ''
"""小程序标题"""
image_url: typing.Optional[str] = ''
"""首页图片"""
class WeChatForwardMiniPrograms(MessageComponent):
"""转发小程序。个人微信专用组件。"""
type: str = 'WeChatForwardMiniPrograms'
xml_data: str
"""xml数据"""
image_url: typing.Optional[str] = None
"""首页图片"""
class WeChatEmoji(MessageComponent):
"""emoji表情。个人微信专用组件。"""
type: str = 'WeChatEmoji'
emoji_md5: str
"""emoji md5"""
emoji_size: int
"""emoji大小"""
class WeChatLink(MessageComponent):
"""发送链接。个人微信专用组件。"""
type: str = 'WeChatLink'
link_title: str = ''
"""标题"""
link_desc: str = ''
"""链接描述"""
link_url: str = ''
"""链接地址"""
link_thumb_url: str = ''
"""链接缩略图"""
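A short sketch of constructing one of the new components; the field values are placeholders, and the surrounding MessageChain usage is assumed to match the other components in this module:

link = WeChatLink(
    link_title='LangBot release notes',
    link_desc='v3.4.13.1 changelog',
    link_url='https://example.com/changelog',
    link_thumb_url='https://example.com/thumb.png',
)
# chain = MessageChain([link])  # an adapter that understands this component is expected
#                               # to turn these fields into a WeChat link message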

View File

@@ -4,6 +4,8 @@ import re
import os
import shutil
import zipfile
import ssl
import certifi
import aiohttp
import aiofiles
@@ -21,44 +23,39 @@ class GitHubRepoInstaller(installer.PluginInstaller):
def get_github_plugin_repo_label(self, repo_url: str) -> list[str]:
"""获取username, repo"""
# 提取 username/repo , 正则表达式
repo = re.findall(
r"(?:https?://github\.com/|git@github\.com:)([^/]+/[^/]+?)(?:\.git|/|$)",
repo_url,
)
if len(repo) > 0: # github
if len(repo) > 0:
return repo[0].split("/")
else:
return None
async def download_plugin_source_code(self, repo_url: str, target_path: str, task_context: taskmgr.TaskContext = taskmgr.TaskContext.placeholder()) -> str:
"""下载插件源码(全异步)"""
# 提取 username/repo , 正则表达式
repo = self.get_github_plugin_repo_label(repo_url)
target_path += repo[1]
if repo is None:
raise errors.PluginInstallerError('仅支持GitHub仓库地址')
target_path += repo[1]
self.ap.logger.debug("正在下载源码...")
task_context.trace("下载源码...", "download-plugin-source-code")
zipball_url = f"https://api.github.com/repos/{'/'.join(repo)}/zipball/HEAD"
zip_resp: bytes = None
# 创建自定义SSL上下文使用certifi提供的根证书
ssl_context = ssl.create_default_context(cafile=certifi.where())
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
url=zipball_url,
timeout=aiohttp.ClientTimeout(total=300)
timeout=aiohttp.ClientTimeout(total=300),
ssl=ssl_context # 使用自定义SSL上下文来验证证书
) as resp:
if resp.status != 200:
raise errors.PluginInstallerError(f"下载源码失败: {resp.text}")
raise errors.PluginInstallerError(f"下载源码失败: {await resp.text()}")
zip_resp = await resp.read()
if await aiofiles_os.path.exists("temp/" + target_path):
@@ -80,15 +77,11 @@ class GitHubRepoInstaller(installer.PluginInstaller):
await aiofiles_os.remove("temp/" + target_path + "/source.zip")
import glob
unzip_dir = glob.glob("temp/" + target_path + "/*")[0]
await aioshutil.copytree(unzip_dir, target_path + "/")
await aioshutil.rmtree(unzip_dir)
self.ap.logger.debug("源码下载完成。")
return repo[1]
async def install_requirements(self, path: str):
@@ -100,20 +93,14 @@ class GitHubRepoInstaller(installer.PluginInstaller):
plugin_source: str,
task_context: taskmgr.TaskContext = taskmgr.TaskContext.placeholder(),
):
"""安装插件
"""
"""安装插件"""
task_context.trace("下载插件源码...", "install-plugin")
repo_label = await self.download_plugin_source_code(plugin_source, "plugins/", task_context)
task_context.trace("安装插件依赖...", "install-plugin")
await self.install_requirements("plugins/" + repo_label)
task_context.trace("完成.", "install-plugin")
await self.ap.plugin_mgr.setting.record_installed_plugin_source(
"plugins/"+repo_label+'/', plugin_source
"plugins/" + repo_label + '/', plugin_source
)
async def uninstall_plugin(
@@ -121,10 +108,8 @@ class GitHubRepoInstaller(installer.PluginInstaller):
plugin_name: str,
task_context: taskmgr.TaskContext = taskmgr.TaskContext.placeholder(),
):
"""卸载插件
"""
"""卸载插件"""
plugin_container = self.ap.plugin_mgr.get_plugin_by_name(plugin_name)
if plugin_container is None:
raise errors.PluginInstallerError('插件不存在或未成功加载')
else:
@@ -135,24 +120,18 @@ class GitHubRepoInstaller(installer.PluginInstaller):
async def update_plugin(
self,
plugin_name: str,
plugin_source: str=None,
plugin_source: str = None,
task_context: taskmgr.TaskContext = taskmgr.TaskContext.placeholder(),
):
"""更新插件
"""
"""更新插件"""
task_context.trace("更新插件...", "update-plugin")
plugin_container = self.ap.plugin_mgr.get_plugin_by_name(plugin_name)
if plugin_container is None:
raise errors.PluginInstallerError('插件不存在或未成功加载')
else:
if plugin_container.plugin_source:
plugin_source = plugin_container.plugin_source
task_context.trace("转交安装任务.", "update-plugin")
await self.install_plugin(plugin_source, task_context)
else:
raise errors.PluginInstallerError('插件无源码信息,无法更新')
raise errors.PluginInstallerError('插件无源码信息,无法更新')
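The username/repo extraction above accepts both HTTPS and SSH GitHub remotes; a standalone check of the same regular expression (the URLs are examples):

import re

PATTERN = r"(?:https?://github\.com/|git@github\.com:)([^/]+/[^/]+?)(?:\.git|/|$)"

for url in (
    "https://github.com/RockChinQ/QChatGPT",
    "git@github.com:RockChinQ/QChatGPT.git",
):
    found = re.findall(PATTERN, url)
    # findall yields ['RockChinQ/QChatGPT']; splitting gives ['RockChinQ', 'QChatGPT'],
    # i.e. the [username, repo] pair the installer builds target paths from.
    print(found[0].split("/"))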

View File

@@ -6,7 +6,7 @@ from . import entities, requester
from ...core import app
from ...discover import engine
from . import token
from .requesters import bailianchatcmpl, chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, volcarkchatcmpl, xaichatcmpl, zhipuaichatcmpl, lmstudiochatcmpl, siliconflowchatcmpl, volcarkchatcmpl
from .requesters import bailianchatcmpl, chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, volcarkchatcmpl, xaichatcmpl, zhipuaichatcmpl, lmstudiochatcmpl, siliconflowchatcmpl, volcarkchatcmpl, modelscopechatcmpl
FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"

View File

@@ -25,7 +25,7 @@ class AnthropicMessages(requester.LLMAPIRequester):
async def initialize(self):
httpx_client = anthropic._base_client.AsyncHttpxClientWrapper(
base_url=self.ap.provider_cfg.data['requester']['anthropic-messages']['base-url'],
base_url=self.ap.provider_cfg.data['requester']['anthropic-messages']['base-url'].replace(' ', ''),
# cast to a valid type because mypy doesn't understand our type narrowing
timeout=typing.cast(httpx.Timeout, self.ap.provider_cfg.data['requester']['anthropic-messages']['timeout']),
limits=anthropic._constants.DEFAULT_CONNECTION_LIMITS,
@@ -59,9 +59,11 @@ class AnthropicMessages(requester.LLMAPIRequester):
if m.role == "system":
system_role_message = m
messages.pop(i)
break
if system_role_message:
messages.pop(i)
if isinstance(system_role_message, llm_entities.Message) \
and isinstance(system_role_message.content, str):
args['system'] = system_role_message.content

View File

@@ -2,12 +2,12 @@ from __future__ import annotations
import openai
from . import chatcmpl
from . import chatcmpl, modelscopechatcmpl
from .. import requester
from ....core import app
class BailianChatCompletions(chatcmpl.OpenAIChatCompletions):
class BailianChatCompletions(modelscopechatcmpl.ModelScopeChatCompletions):
"""阿里云百炼大模型平台 ChatCompletion API 请求器"""
client: openai.AsyncClient
@@ -18,3 +18,4 @@ class BailianChatCompletions(chatcmpl.OpenAIChatCompletions):
self.ap = ap
self.requester_cfg = self.ap.provider_cfg.data['requester']['bailian-chat-completions']

View File

@@ -36,7 +36,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
self.client = openai.AsyncClient(
api_key="",
base_url=self.requester_cfg['base-url'],
base_url=self.requester_cfg['base-url'].replace(' ', ''),
timeout=self.requester_cfg['timeout'],
http_client=httpx.AsyncClient(
trust_env=True,
@@ -47,8 +47,9 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
async def _req(
self,
args: dict,
extra_body: dict = {},
) -> chat_completion.ChatCompletion:
return await self.client.chat.completions.create(**args)
return await self.client.chat.completions.create(**args, extra_body=extra_body)
async def _make_msg(
self,
@@ -60,6 +61,12 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
chatcmpl_message['role'] = 'assistant'
reasoning_content = chatcmpl_message['reasoning_content'] if 'reasoning_content' in chatcmpl_message else None
# deepseek的reasoner模型
if reasoning_content is not None:
chatcmpl_message['content'] = "<think>\n" + reasoning_content + "\n</think>\n\n"+ chatcmpl_message['content']
message = llm_entities.Message(**chatcmpl_message)
return message
@@ -73,7 +80,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
args = self.requester_cfg['args'].copy()
args = {}
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_funcs:
@@ -99,7 +106,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
args["messages"] = messages
# 发送请求
resp = await self._req(args)
resp = await self._req(args, extra_body=self.requester_cfg['args'])
# 处理请求结果
message = await self._make_msg(resp)
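The _req change above forwards the user-configured requester args via the OpenAI SDK's extra_body instead of merging them into the top-level call, so provider-specific keys no longer collide with the standard Chat Completions parameters. A minimal sketch of the same pattern (the endpoint, key and the enable_search flag are placeholders):

import openai

client = openai.AsyncClient(api_key="sk-placeholder", base_url="https://api.example.com/v1")

async def ask(messages: list[dict], provider_args: dict):
    # Standard fields go through named parameters; anything extra the provider
    # defines is passed through extra_body untouched.
    return await client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        extra_body=provider_args,  # e.g. {"enable_search": True}
    )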

View File

@@ -23,7 +23,7 @@ class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
args = self.requester_cfg['args'].copy()
args = {}
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_funcs:
@@ -43,7 +43,7 @@ class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
args["messages"] = messages
# 发送请求
resp = await self._req(args)
resp = await self._req(args, extra_body=self.requester_cfg['args'])
if resp is None:
raise errors.RequesterError('接口返回为空,请确定模型提供商服务是否正常')

View File

@@ -30,7 +30,7 @@ class GiteeAIChatCompletions(chatcmpl.OpenAIChatCompletions):
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
args = self.requester_cfg['args'].copy()
args = {}
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_funcs:
@@ -46,7 +46,7 @@ class GiteeAIChatCompletions(chatcmpl.OpenAIChatCompletions):
args["messages"] = req_messages
resp = await self._req(args)
resp = await self._req(args, extra_body=self.requester_cfg['args'])
message = await self._make_msg(resp)

View File

@@ -0,0 +1,207 @@
from __future__ import annotations
import asyncio
import typing
import json
import base64
from typing import AsyncGenerator
import openai
import openai.types.chat.chat_completion as chat_completion
import openai.types.chat.chat_completion_message_tool_call as chat_completion_message_tool_call
import httpx
import aiohttp
import async_lru
from .. import entities, errors, requester
from ....core import entities as core_entities, app
from ... import entities as llm_entities
from ...tools import entities as tools_entities
from ....utils import image
class ModelScopeChatCompletions(requester.LLMAPIRequester):
"""ModelScope ChatCompletion API 请求器"""
client: openai.AsyncClient
requester_cfg: dict
def __init__(self, ap: app.Application):
self.ap = ap
self.requester_cfg = self.ap.provider_cfg.data['requester']['modelscope-chat-completions']
async def initialize(self):
self.client = openai.AsyncClient(
api_key="",
base_url=self.requester_cfg['base-url'],
timeout=self.requester_cfg['timeout'],
http_client=httpx.AsyncClient(
trust_env=True,
timeout=self.requester_cfg['timeout']
)
)
async def _req(
self,
args: dict,
) -> chat_completion.ChatCompletion:
args["stream"] = True
chunk = None
pending_content = ""
tool_calls = []
resp_gen: openai.AsyncStream = await self.client.chat.completions.create(**args)
async for chunk in resp_gen:
# print(chunk)
if not chunk or not chunk.id or not chunk.choices or not chunk.choices[0] or not chunk.choices[0].delta:
continue
if chunk.choices[0].delta.content is not None:
pending_content += chunk.choices[0].delta.content
if chunk.choices[0].delta.tool_calls is not None:
for tool_call in chunk.choices[0].delta.tool_calls:
for tc in tool_calls:
if tc.index == tool_call.index:
tc.function.arguments += tool_call.function.arguments
break
else:
tool_calls.append(tool_call)
if chunk.choices[0].finish_reason is not None:
break
real_tool_calls = []
for tc in tool_calls:
function = chat_completion_message_tool_call.Function(
name=tc.function.name,
arguments=tc.function.arguments
)
real_tool_calls.append(chat_completion_message_tool_call.ChatCompletionMessageToolCall(
id=tc.id,
function=function,
type="function"
))
return chat_completion.ChatCompletion(
id=chunk.id,
object="chat.completion",
created=chunk.created,
choices=[
chat_completion.Choice(
index=0,
message=chat_completion.ChatCompletionMessage(
role="assistant",
content=pending_content,
tool_calls=real_tool_calls if len(real_tool_calls) > 0 else None
),
finish_reason=chunk.choices[0].finish_reason if hasattr(chunk.choices[0], 'finish_reason') and chunk.choices[0].finish_reason is not None else 'stop',
logprobs=chunk.choices[0].logprobs,
)
],
model=chunk.model,
service_tier=chunk.service_tier if hasattr(chunk, 'service_tier') else None,
system_fingerprint=chunk.system_fingerprint if hasattr(chunk, 'system_fingerprint') else None,
usage=chunk.usage if hasattr(chunk, 'usage') else None
) if chunk else None
async def _make_msg(
self,
chat_completion: chat_completion.ChatCompletion,
) -> llm_entities.Message:
chatcmpl_message = chat_completion.choices[0].message.dict()
# 确保 role 字段存在且不为 None
if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
chatcmpl_message['role'] = 'assistant'
message = llm_entities.Message(**chatcmpl_message)
return message
async def _closure(
self,
query: core_entities.Query,
req_messages: list[dict],
use_model: entities.LLMModelInfo,
use_funcs: list[tools_entities.LLMFunction] = None,
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
args = self.requester_cfg['args'].copy()
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_funcs:
tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
if tools:
args["tools"] = tools
# 设置此次请求中的messages
messages = req_messages.copy()
# 检查vision
for msg in messages:
if 'content' in msg and isinstance(msg["content"], list):
for me in msg["content"]:
if me["type"] == "image_base64":
me["image_url"] = {
"url": me["image_base64"]
}
me["type"] = "image_url"
del me["image_base64"]
args["messages"] = messages
# 发送请求
resp = await self._req(args)
# 处理请求结果
message = await self._make_msg(resp)
return message
async def call(
self,
query: core_entities.Query,
model: entities.LLMModelInfo,
messages: typing.List[llm_entities.Message],
funcs: typing.List[tools_entities.LLMFunction] = None,
) -> llm_entities.Message:
req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
for m in messages:
msg_dict = m.dict(exclude_none=True)
content = msg_dict.get("content")
if isinstance(content, list):
# 检查 content 列表中是否每个部分都是文本
if all(isinstance(part, dict) and part.get("type") == "text" for part in content):
# 将所有文本部分合并为一个字符串
msg_dict["content"] = "\n".join(part["text"] for part in content)
req_messages.append(msg_dict)
try:
return await self._closure(query=query, req_messages=req_messages, use_model=model, use_funcs=funcs)
except asyncio.TimeoutError:
raise errors.RequesterError('请求超时')
except openai.BadRequestError as e:
if 'context_length_exceeded' in e.message:
raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
else:
raise errors.RequesterError(f'请求参数错误: {e.message}')
except openai.AuthenticationError as e:
raise errors.RequesterError(f'无效的 api-key: {e.message}')
except openai.NotFoundError as e:
raise errors.RequesterError(f'请求路径错误: {e.message}')
except openai.RateLimitError as e:
raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
except openai.APIError as e:
raise errors.RequesterError(f'请求错误: {e.message}')
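The streaming _req above rebuilds one complete response from deltas: text fragments are concatenated and tool-call argument fragments are merged per tool-call index. A tiny self-contained illustration of that merging rule (the fragments are invented):

fragments = [
    (0, '{"ci'),      # (tool_call index, partial arguments JSON)
    (0, 'ty": "Sha'),
    (0, 'nghai"}'),
]
merged: dict[int, str] = {}
for index, piece in fragments:
    merged[index] = merged.get(index, "") + piece
print(merged)  # {0: '{"city": "Shanghai"}'} - one complete arguments string per tool call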

View File

@@ -0,0 +1,34 @@
apiVersion: v1
kind: LLMAPIRequester
metadata:
name: modelscope-chat-completions
label:
en_US: ModelScope
zh_CN: 魔搭社区
spec:
config:
- name: base-url
label:
en_US: Base URL
zh_CN: 基础 URL
type: string
required: true
default: "https://api-inference.modelscope.cn/v1"
- name: args
label:
en_US: Args
zh_CN: 附加参数
type: object
required: true
default: {}
- name: timeout
label:
en_US: Timeout
zh_CN: 超时时间
type: int
required: true
default: 120
execution:
python:
path: ./modelscopechatcmpl.py
attr: ModelScopeChatCompletions

View File

@@ -25,7 +25,7 @@ class MoonshotChatCompletions(chatcmpl.OpenAIChatCompletions):
) -> llm_entities.Message:
self.client.api_key = use_model.token_mgr.get_token()
args = self.requester_cfg['args'].copy()
args = {}
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
if use_funcs:
@@ -42,13 +42,13 @@ class MoonshotChatCompletions(chatcmpl.OpenAIChatCompletions):
if 'content' in m and isinstance(m["content"], list):
m["content"] = " ".join([c["text"] for c in m["content"]])
# 删除空的
messages = [m for m in messages if m["content"].strip() != ""]
# 删除空的,不知道干嘛的,直接删了。
# messages = [m for m in messages if m["content"].strip() != "" and ('tool_calls' not in m or not m['tool_calls'])]
args["messages"] = messages
# 发送请求
resp = await self._req(args)
resp = await self._req(args, extra_body=self.requester_cfg['args'])
# 处理请求结果
message = await self._make_msg(resp)

View File

@@ -225,6 +225,8 @@ class DifyServiceAPIRunner(runner.RequestRunner):
role="assistant",
content=[llm_entities.ContentElement.from_image_url(image_url)],
)
if chunk['event'] == 'error':
raise errors.DifyAPIError("dify 服务错误: " + chunk['message'])
query.session.using_conversation.uuid = chunk["conversation_id"]

View File

@@ -54,7 +54,6 @@ class SessionManager:
use_model=await self.ap.model_mgr.get_model_by_name(self.ap.provider_cfg.data['model']),
use_funcs=await self.ap.tool_mgr.get_all_functions(
plugin_enabled=True,
plugin_status=plugin_context.RuntimeContainerStatus.INITIALIZED,
),
)
session.conversations.append(conversation)

View File

@@ -0,0 +1,54 @@
from __future__ import annotations
import abc
import typing
from ...core import app, entities as core_entities
from . import entities as tools_entities
preregistered_loaders: list[typing.Type[ToolLoader]] = []
def loader_class(name: str):
"""注册一个工具加载器
"""
def decorator(cls: typing.Type[ToolLoader]) -> typing.Type[ToolLoader]:
cls.name = name
preregistered_loaders.append(cls)
return cls
return decorator
class ToolLoader(abc.ABC):
"""工具加载器"""
name: str = None
ap: app.Application
def __init__(self, ap: app.Application):
self.ap = ap
async def initialize(self):
pass
@abc.abstractmethod
async def get_tools(self, enabled: bool=True) -> list[tools_entities.LLMFunction]:
"""获取所有工具"""
pass
@abc.abstractmethod
async def has_tool(self, name: str) -> bool:
"""检查工具是否存在"""
pass
@abc.abstractmethod
async def invoke_tool(self, query: core_entities.Query, name: str, parameters: dict) -> typing.Any:
"""执行工具调用"""
pass
@abc.abstractmethod
async def shutdown(self):
"""关闭工具"""
pass
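Any additional tool source only has to subclass ToolLoader and register itself with the decorator above. A minimal sketch of such a loader, exposing one invented echo tool; the LLMFunction field names mirror how the MCP loader in this changeset constructs them:

@loader_class("example")
class ExampleLoader(ToolLoader):
    """Example tool loader, for illustration only."""

    async def get_tools(self, enabled: bool = True) -> list[tools_entities.LLMFunction]:
        return [
            tools_entities.LLMFunction(
                name="echo",
                human_desc="Echo text back",
                description="Echo the given text back to the model",
                parameters={"type": "object", "properties": {"text": {"type": "string"}}},
                func=self._echo,
            )
        ]

    async def has_tool(self, name: str) -> bool:
        return name == "echo"

    async def invoke_tool(self, query: core_entities.Query, name: str, parameters: dict) -> typing.Any:
        return await self._echo(query, **parameters)

    async def shutdown(self):
        pass

    async def _echo(self, query: core_entities.Query, text: str) -> str:
        return text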

View File

View File

@@ -0,0 +1,161 @@
from __future__ import annotations
import typing
from contextlib import AsyncExitStack
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.client.sse import sse_client
from .. import loader, entities as tools_entities
from ....core import app, entities as core_entities
class RuntimeMCPSession:
"""运行时 MCP 会话"""
ap: app.Application
server_name: str
server_config: dict
session: ClientSession
exit_stack: AsyncExitStack
functions: list[tools_entities.LLMFunction] = []
def __init__(self, server_name: str, server_config: dict, ap: app.Application):
self.server_name = server_name
self.server_config = server_config
self.ap = ap
self.session = None
self.exit_stack = AsyncExitStack()
self.functions = []
async def _init_stdio_python_server(self):
server_params = StdioServerParameters(
command=self.server_config["command"],
args=self.server_config["args"],
env=self.server_config["env"],
)
stdio_transport = await self.exit_stack.enter_async_context(
stdio_client(server_params)
)
stdio, write = stdio_transport
self.session = await self.exit_stack.enter_async_context(
ClientSession(stdio, write)
)
await self.session.initialize()
async def _init_sse_server(self):
sse_transport = await self.exit_stack.enter_async_context(
sse_client(
self.server_config["url"],
headers=self.server_config.get("headers", {}),
timeout=self.server_config.get("timeout", 10),
)
)
sseio, write = sse_transport
self.session = await self.exit_stack.enter_async_context(
ClientSession(sseio, write)
)
await self.session.initialize()
async def initialize(self):
self.ap.logger.debug(f"初始化 MCP 会话: {self.server_name} {self.server_config}")
if self.server_config["mode"] == "stdio":
await self._init_stdio_python_server()
elif self.server_config["mode"] == "sse":
await self._init_sse_server()
else:
raise ValueError(f"无法识别 MCP 服务器类型: {self.server_name}: {self.server_config}")
tools = await self.session.list_tools()
self.ap.logger.debug(f"获取 MCP 工具: {tools}")
for tool in tools.tools:
async def func(query: core_entities.Query, _tool_name: str = tool.name, **kwargs):
# bind tool.name at definition time so every generated func calls its own tool,
# not the last one produced by the loop (late-binding closure pitfall)
result = await self.session.call_tool(_tool_name, kwargs)
if result.isError:
raise Exception(result.content[0].text)
return result.content[0].text
func.__name__ = tool.name
self.functions.append(tools_entities.LLMFunction(
name=tool.name,
human_desc=tool.description,
description=tool.description,
parameters=tool.inputSchema,
func=func,
))
async def shutdown(self):
"""关闭工具"""
await self.exit_stack.aclose()  # close the stdio/sse transports entered on this session's own exit stack
@loader.loader_class("mcp")
class MCPLoader(loader.ToolLoader):
"""MCP 工具加载器。
在此加载器中管理所有与 MCP Server 的连接。
"""
sessions: dict[str, RuntimeMCPSession] = {}
_last_listed_functions: list[tools_entities.LLMFunction] = []
def __init__(self, ap: app.Application):
super().__init__(ap)
self.sessions = {}
self._last_listed_functions = []
async def initialize(self):
for server_config in self.ap.provider_cfg.data.get("mcp", {}).get("servers", []):
if not server_config["enable"]:
continue
session = RuntimeMCPSession(server_config["name"], server_config, self.ap)
await session.initialize()
# self.ap.event_loop.create_task(session.initialize())
self.sessions[server_config["name"]] = session
async def get_tools(self, enabled: bool=True) -> list[tools_entities.LLMFunction]:
all_functions = []
for session in self.sessions.values():
all_functions.extend(session.functions)
self._last_listed_functions = all_functions
return all_functions
async def has_tool(self, name: str) -> bool:
return name in [f.name for f in self._last_listed_functions]
async def invoke_tool(self, query: core_entities.Query, name: str, parameters: dict) -> typing.Any:
for server_name, session in self.sessions.items():
for function in session.functions:
if function.name == name:
return await function.func(query, **parameters)
raise ValueError(f"未找到工具: {name}")
async def shutdown(self):
"""关闭工具"""
for session in self.sessions.values():
await session.shutdown()
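RuntimeMCPSession.initialize dispatches on mode: the stdio path reads command/args/env, while the sse path reads url plus optional headers and timeout. The dictionaries below show the two shapes the loader consumes from the mcp.servers list in provider.json; the names, command and URL are placeholders:

stdio_server = {
    "enable": True,
    "name": "filesystem",
    "mode": "stdio",
    "command": "npx",
    "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
    "env": {},
}

sse_server = {
    "enable": True,
    "name": "remote-tools",
    "mode": "sse",
    "url": "http://127.0.0.1:8000/sse",
    "headers": {},
    "timeout": 10,
}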

View File

@@ -0,0 +1,92 @@
from __future__ import annotations
import typing
import traceback
from .. import loader, entities as tools_entities
from ....core import app, entities as core_entities
from ....plugin import context as plugin_context
@loader.loader_class("plugin-tool-loader")
class PluginToolLoader(loader.ToolLoader):
"""插件工具加载器。
本加载器中不存储工具信息,仅负责从插件系统中获取工具信息。
"""
async def get_tools(self, enabled: bool=True) -> list[tools_entities.LLMFunction]:
# 从插件系统获取工具(内容函数)
all_functions: list[tools_entities.LLMFunction] = []
for plugin in self.ap.plugin_mgr.plugins(
enabled=enabled, status=plugin_context.RuntimeContainerStatus.INITIALIZED
):
all_functions.extend(plugin.content_functions)
return all_functions
async def has_tool(self, name: str) -> bool:
"""检查工具是否存在"""
for plugin in self.ap.plugin_mgr.plugins(
enabled=True, status=plugin_context.RuntimeContainerStatus.INITIALIZED
):
for function in plugin.content_functions:
if function.name == name:
return True
return False
async def _get_function_and_plugin(
self, name: str
) -> typing.Tuple[tools_entities.LLMFunction, plugin_context.BasePlugin]:
"""获取函数和插件实例"""
for plugin in self.ap.plugin_mgr.plugins(
enabled=True, status=plugin_context.RuntimeContainerStatus.INITIALIZED
):
for function in plugin.content_functions:
if function.name == name:
return function, plugin.plugin_inst
return None, None
async def invoke_tool(self, query: core_entities.Query, name: str, parameters: dict) -> typing.Any:
try:
function, plugin = await self._get_function_and_plugin(name)
if function is None:
return None
parameters = parameters.copy()
parameters = {"query": query, **parameters}
return await function.func(plugin, **parameters)
except Exception as e:
self.ap.logger.error(f"执行函数 {name} 时发生错误: {e}")
traceback.print_exc()
return f"error occurred when executing function {name}: {e}"
finally:
plugin = None
for p in self.ap.plugin_mgr.plugins():
if function in p.content_functions:
plugin = p
break
if plugin is not None:
await self.ap.ctr_mgr.usage.post_function_record(
plugin={
"name": plugin.plugin_name,
"remote": plugin.plugin_source,
"version": plugin.plugin_version,
"author": plugin.plugin_author,
},
function_name=function.name,
function_description=function.description,
)
async def shutdown(self):
"""关闭工具"""
pass

View File

@@ -4,8 +4,9 @@ import typing
import traceback
from ...core import app, entities as core_entities
from . import entities
from . import entities, loader as tools_loader
from ...plugin import context as plugin_context
from .loaders import plugin, mcp
class ToolManager:
@@ -13,33 +14,26 @@ class ToolManager:
ap: app.Application
loaders: list[tools_loader.ToolLoader]
def __init__(self, ap: app.Application):
self.ap = ap
self.all_functions = []
self.loaders = []
async def initialize(self):
pass
async def get_function_and_plugin(
self, name: str
) -> typing.Tuple[entities.LLMFunction, plugin_context.BasePlugin]:
"""获取函数和插件实例"""
for plugin in self.ap.plugin_mgr.plugins(
enabled=True, status=plugin_context.RuntimeContainerStatus.INITIALIZED
):
for function in plugin.content_functions:
if function.name == name:
return function, plugin.plugin_inst
return None, None
for loader_cls in tools_loader.preregistered_loaders:
loader_inst = loader_cls(self.ap)
await loader_inst.initialize()
self.loaders.append(loader_inst)
async def get_all_functions(self, plugin_enabled: bool=None, plugin_status: plugin_context.RuntimeContainerStatus=None) -> list[entities.LLMFunction]:
async def get_all_functions(self, plugin_enabled: bool=None) -> list[entities.LLMFunction]:
"""获取所有函数"""
all_functions: list[entities.LLMFunction] = []
for plugin in self.ap.plugin_mgr.plugins(
enabled=plugin_enabled, status=plugin_status
):
all_functions.extend(plugin.content_functions)
for loader in self.loaders:
all_functions.extend(await loader.get_tools(plugin_enabled))
return all_functions
@@ -102,38 +96,13 @@ class ToolManager:
) -> typing.Any:
"""执行函数调用"""
try:
for loader in self.loaders:
if await loader.has_tool(name):
return await loader.invoke_tool(query, name, parameters)
else:
raise ValueError(f"未找到工具: {name}")
function, plugin = await self.get_function_and_plugin(name)
if function is None:
return None
parameters = parameters.copy()
parameters = {"query": query, **parameters}
return await function.func(plugin, **parameters)
except Exception as e:
self.ap.logger.error(f"执行函数 {name} 时发生错误: {e}")
traceback.print_exc()
return f"error occurred when executing function {name}: {e}"
finally:
plugin = None
for p in self.ap.plugin_mgr.plugins():
if function in p.content_functions:
plugin = p
break
if plugin is not None:
await self.ap.ctr_mgr.usage.post_function_record(
plugin={
"name": plugin.plugin_name,
"remote": plugin.plugin_source,
"version": plugin.plugin_version,
"author": plugin.plugin_author,
},
function_name=function.name,
function_description=function.description,
)
async def shutdown(self):
"""关闭所有工具"""
for loader in self.loaders:
await loader.shutdown()

View File

@@ -1,4 +1,4 @@
semantic_version = "v3.4.10.4"
semantic_version = "v3.4.13.1"
debug_mode = False

View File

@@ -15,6 +15,9 @@ import asyncio
from urllib.parse import urlparse
async def get_gewechat_image_base64(
gewechat_url: str,
gewechat_file_url: str,
@@ -67,6 +70,7 @@ async def get_gewechat_image_base64(
}
) as response:
if response.status != 200:
# print(response)
raise Exception(f"获取gewechat图片下载失败: {await response.text()}")
resp_data = await response.json()
@@ -108,6 +112,9 @@ async def get_gewechat_image_base64(
raise Exception(f"获取图片失败: {str(e)}") from e
async def get_wecom_image_base64(pic_url: str) -> tuple[str, str]:
"""
下载企业微信图片并转换为 base64
@@ -205,4 +212,19 @@ async def extract_b64_and_format(image_base64_data: str) -> typing.Tuple[str, st
"""
base64_str = image_base64_data.split(',')[-1]
image_format = image_base64_data.split(':')[-1].split(';')[0].split('/')[-1]
return base64_str, image_format
return base64_str, image_format
async def get_slack_image_to_base64(pic_url: str, bot_token: str):
headers = {"Authorization": f"Bearer {bot_token}"}
try:
async with aiohttp.ClientSession() as session:
async with session.get(pic_url, headers=headers) as resp:
image_data = await resp.read()
return base64.b64encode(image_data).decode('utf-8')
except Exception as e:
raise
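A short usage sketch for the new helper, run from the same module; the file URL and token are placeholders, and Slack file URLs generally need the Bearer token shown here to be downloadable:

import asyncio

async def main():
    b64 = await get_slack_image_to_base64(
        pic_url="https://files.slack.com/files-pri/T000-F000/image.png",
        bot_token="xoxb-placeholder",
    )
    print(b64[:32], "...")

asyncio.run(main())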

View File

@@ -32,6 +32,10 @@ gewechat-client
dingtalk_stream
dashscope
python-telegram-bot
certifi
mcp
slack_sdk
telegramify-markdown
# indirect
taskgroup==0.0.0a4
taskgroup==0.0.0a4
python-socks

View File

@@ -71,28 +71,38 @@
"token": ""
},
{
"adapter":"officialaccount",
"adapter": "officialaccount",
"enable": false,
"token": "",
"EncodingAESKey":"",
"AppID":"",
"AppSecret":"",
"Mode":"drop",
"EncodingAESKey": "",
"AppID": "",
"AppSecret": "",
"Mode": "drop",
"LoadingMessage": "AI正在思考中请发送任意内容获取回复。",
"host": "0.0.0.0",
"port": 2287
},
{
"adapter":"dingtalk",
"adapter": "dingtalk",
"enable": false,
"client_id":"",
"client_secret":"",
"robot_code":"",
"robot_name":""
"client_id": "",
"client_secret": "",
"robot_code": "",
"robot_name": "",
"markdown_card": false
},
{
"adapter":"telegram",
"adapter": "telegram",
"enable": false,
"token":""
"token": "",
"markdown_card": false
},
{
"adapter": "slack",
"enable": false,
"bot_token": "",
"signing_secret": "",
"port": 2288
}
],
"track-function-calls": true,

View File

@@ -31,6 +31,9 @@
],
"volcark": [
"xxxxxxxx"
],
"modelscope": [
"xxxxxxxx"
]
},
"requester": {
@@ -95,12 +98,17 @@
"args": {},
"base-url": "https://ark.cn-beijing.volces.com/api/v3",
"timeout": 120
},
"modelscope-chat-completions": {
"base-url": "https://api-inference.modelscope.cn/v1",
"args": {},
"timeout": 120
}
},
"model": "gpt-4o",
"prompt-mode": "normal",
"prompt": {
"default": ""
"default": "You are a helpful assistant."
},
"runner": "local-agent",
"dify-service-api": {
@@ -138,5 +146,8 @@
"date": "2023-08-10"
}
}
},
"mcp": {
"servers": []
}
}

View File

@@ -391,6 +391,11 @@
"description": "对于超过15s的响应的处理模式",
"enum": ["drop", "passive"]
},
"LoadingMessage": {
"type": "string",
"default": "AI正在思考中请发送任意内容获取回复。",
"description": "当使用被动模式时,显示给用户的提示信息"
},
"host": {
"type": "string",
"default": "0.0.0.0",
@@ -441,6 +446,11 @@
"type": "string",
"default": "",
"description": "钉钉的robot_name"
},
"markdown_card": {
"type": "boolean",
"default": false,
"description": "是否使用 Markdown 卡片发送消息"
}
}
},
@@ -461,6 +471,41 @@
"type": "string",
"default": "",
"description": "Telegram 的 token"
},
"markdown_card": {
"type": "boolean",
"default": false,
"description": "是否使用 Markdown 卡片发送消息"
}
}
},
{
"title": "Slack 适配器",
"description": "用于接入 Slack",
"properties": {
"adapter": {
"type": "string",
"const": "slack"
},
"enable": {
"type": "boolean",
"default": false,
"description": "是否启用此适配器"
},
"bot_token": {
"type": "string",
"default": "",
"description": "Slack 的 bot_token"
},
"signing_secret": {
"type": "string",
"default": "",
"description": "Slack 的 signing_secret"
},
"port": {
"type": "integer",
"default": 2288,
"description": "监听的端口"
}
}
}

View File

@@ -368,7 +368,7 @@
"type": "string",
"title": "默认情景预设",
"description": "设置默认情景预设。值为空字符串时,将不使用情景预设(人格)",
"default": ""
"default": "You are a helpful assistant."
}
},
"patternProperties": {
@@ -520,6 +520,87 @@
}
}
}
},
"mcp": {
"type": "object",
"title": "MCP 配置",
"properties": {
"servers": {
"type": "array",
"title": "MCP 服务器配置",
"default": [],
"items": {
"type": "object",
"oneOf": [
{
"title": "Stdio 模式服务器",
"properties": {
"mode": {
"type": "string",
"title": "模式",
"const": "stdio"
},
"enable": {
"type": "boolean",
"title": "启用"
},
"name": {
"type": "string",
"title": "名称"
},
"command": {
"type": "string",
"title": "启动命令"
},
"args": {
"type": "array",
"title": "启动参数",
"items": {
"type": "string"
},
"default": []
},
"env": {
"type": "object",
"default": {}
}
}
},
{
"title": "SSE 模式服务器",
"properties": {
"mode": {
"type": "string",
"title": "模式",
"const": "sse"
},
"enable": {
"type": "boolean",
"title": "启用"
},
"name": {
"type": "string",
"title": "名称"
},
"url": {
"type": "string",
"title": "URL"
},
"headers": {
"type": "object",
"default": {}
},
"timeout": {
"type": "number",
"title": "请求超时时间",
"default": 10
}
}
}
]
}
}
}
}
}
}