Compare commits


16 Commits

Author SHA1 Message Date
Junyan Qin
de2bf79004 chore: release v3.4.4 2025-01-30 00:16:33 +08:00
Junyan Qin (Chin)
83ed7a9f38 Merge pull request #991 from RockChinQ/feat/lark
feat: add adapter `lark`
2025-01-30 00:15:27 +08:00
Junyan Qin
c326e72758 fix: migration not imported 2025-01-29 23:43:32 +08:00
Junyan Qin
ac9cef82cc chore: migrations 2025-01-29 23:41:29 +08:00
Junyan Qin
ea254d57d2 feat: lark adapter 2025-01-29 23:31:40 +08:00
Junyan Qin
a661f24ae0 doc: add contributors graph 2025-01-29 16:53:09 +08:00
Junyan Qin
afabf9256b chore: add model info deepseek-reasoner 2025-01-28 15:14:23 +08:00
Junyan Qin
74a8f9c9e2 fix: deps Crypto not checked 2025-01-27 21:33:10 +08:00
Junyan Qin
1d11e448f9 doc(README): update slogan 2025-01-26 10:15:14 +08:00
Junyan Qin
e3e23cbccb chore: release v3.4.3.2 2025-01-25 17:25:06 +08:00
Junyan Qin (Chin)
79132aa11d Merge pull request #988 from wangcham/bugfix-branch
fix: fix the WeCom (企业微信) access token issue
2025-01-25 17:23:19 +08:00
wangcham
7bb9e6e951 fix: fix the WeCom (企业微信) access token issue 2025-01-25 04:17:01 -05:00
Junyan Qin
37dc5b4135 chore: release v3.4.3.1 2025-01-23 13:32:51 +08:00
Junyan Qin
d588faf470 fix(httpx): deprecated proxies param 2025-01-23 13:32:27 +08:00
Junyan Qin
8b51a81158 doc(README): update qq group badge 2025-01-22 00:11:43 +08:00
Junyan Qin
9f125974bf doc: update qq group 2025-01-22 00:07:16 +08:00
15 changed files with 581 additions and 21 deletions

View File

@@ -17,19 +17,13 @@
<div align="center">
-😎 Highly stable, 🧩 extensible, 🦄 multimodal - an instant messaging bot platform built on large language models 🤖
+😎 Highly stable, 🧩 extensible, 🦄 multimodal - an LLM-native instant messaging bot platform 🤖
</div>
<br/>
-<a href="http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=66-aWvn8cbP4c1ut_1YYkvvGVeEtyTH8&authKey=pTaKBK5C%2B8dFzQ4XlENf6MHTCLaHnlKcCRx7c14EeVVlpX2nRSaS8lJm8YeM4mCU&noverify=0&group_code=195992197">
-<img alt="Static Badge" src="https://img.shields.io/badge/%E5%AE%98%E6%96%B9%E7%BE%A4-195992197-green">
-</a>
-<a href="https://qm.qq.com/q/PClALFK242">
-<img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-619154800-green">
-</a>
-<br/>
+[![QQ Group](https://img.shields.io/badge/%E7%A4%BE%E5%8C%BA%E7%BE%A4-619154800-blue)](https://qm.qq.com/q/PClALFK242)
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/RockChinQ/LangBot)](https://github.com/RockChinQ/LangBot/releases/latest)
![Dynamic JSON Badge](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.qchatgpt.rockchin.top%2Fapi%2Fv2%2Fview%2Frealtime%2Fcount_query%3Fminute%3D10080&query=%24.data.count&label=%E4%BD%BF%E7%94%A8%E9%87%8F%EF%BC%887%E6%97%A5%EF%BC%89)
<img src="https://img.shields.io/badge/python-3.10 | 3.11 | 3.12-blue.svg" alt="python">
@@ -39,7 +33,7 @@
## ✨ Features
-- 💬 LLM chat and Agents: supports multiple LLMs for group and private chats, with multi-turn conversation, tool calling, and multimodal capabilities, deeply integrated with [Dify](https://dify.ai). Currently supports QQ and QQ Channels; WeChat, WhatsApp, Discord, and other platforms are planned.
+- 💬 LLM chat and Agents: supports multiple LLMs for group and private chats, with multi-turn conversation, tool calling, and multimodal capabilities, deeply integrated with [Dify](https://dify.ai). Currently supports QQ, QQ Channels, WeCom, and Lark; WeChat, WhatsApp, Discord, and other platforms are planned.
- 🛠️ High stability, full-featured: native access control, rate limiting, and sensitive-word filtering; simple configuration with multiple deployment options.
- 🧩 Plugin extensions, active community: event-driven and component-based plugin mechanisms; a rich ecosystem with dozens of [plugins](https://docs.langbot.app/plugin/plugin-intro.html)
- 😻 [New] Web admin panel: manage LangBot instances from a browser; see the [docs](https://docs.langbot.app/webui/intro.html) for supported features
@@ -89,6 +83,7 @@
| QQ personal account | ✅ | Private and group chats on a personal QQ account |
| Official QQ bot | ✅ | QQ Channel bot; supports channels, private chats, and group chats |
| WeCom (企业微信) | ✅ | |
| Lark (飞书) | ✅ | |
| DingTalk (钉钉) | 🚧 | |
🚧: in development
@@ -106,3 +101,12 @@
| [Dify](https://dify.ai) | ✅ | LLMOps platform |
| [Ollama](https://ollama.com/) | ✅ | Local LLM management platform |
| [GiteeAI](https://ai.gitee.com/) | ✅ | LLM API aggregation platform |

## 😘 Community Contributions

LangBot would not be what it is without the contributors below and everyone in the community; contributions and feedback of any kind are welcome.

<a href="https://github.com/RockChinQ/LangBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=RockChinQ/LangBot" />
</a>

View File

@@ -110,8 +110,17 @@ class WecomClient():
"enable_duplicate_check": 0, "enable_duplicate_check": 0,
"duplicate_check_interval": 1800 "duplicate_check_interval": 1800
} }
response = await client.post(url,json=params) try:
data = response.json() response = await client.post(url,json=params)
data = response.json()
except Exception as e:
raise Exception("Failed to send image: "+str(e))
# 企业微信错误码40014和42001代表accesstoken问题
if data['errcode'] == 40014 or data['errcode'] == 42001:
self.access_token = await self.get_access_token(self.secret)
return await self.send_image(user_id,agent_id,media_id)
if data['errcode'] != 0: if data['errcode'] != 0:
raise Exception("Failed to send image: "+str(data)) raise Exception("Failed to send image: "+str(data))
@@ -136,7 +145,9 @@ class WecomClient():
            }
            response = await client.post(url,json=params)
            data = response.json()
+            if data['errcode'] == 40014 or data['errcode'] == 42001:
+                self.access_token = await self.get_access_token(self.secret)
+                return await self.send_private_msg(user_id,agent_id,content)
            if data['errcode'] != 0:
                raise Exception("Failed to send message: "+str(data))
@@ -286,11 +297,14 @@ class WecomClient():
        async with httpx.AsyncClient() as client:
            response = await client.post(url, headers=headers, content=body)
            data = response.json()
+            if data['errcode'] == 40014 or data['errcode'] == 42001:
+                self.access_token = await self.get_access_token(self.secret)
+                media_id = await self.upload_to_work(image)
            if data.get('errcode', 0) != 0:
                raise Exception("failed to upload file")
-            return data.get('media_id')
+            media_id = data.get('media_id')
+            return media_id

    async def download_image_to_bytes(self,url:str) -> bytes:
        async with httpx.AsyncClient() as client:
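Taken together, these hunks retry a WeCom API call once after refreshing the cached access token whenever the API returns errcode 40014 or 42001. Below is a minimal sketch of that refresh-and-retry pattern; the helper name, the `make_url`/`refresh_token` callables, and the token placement are illustrative assumptions rather than the adapter's actual interface.

```python
import typing
import httpx

WECOM_TOKEN_ERRORS = {40014, 42001}  # WeCom: invalid or expired access token


async def post_with_token_retry(
    client: httpx.AsyncClient,
    make_url: typing.Callable[[], str],  # builds the endpoint URL with the current token
    params: dict,
    refresh_token: typing.Callable[[], typing.Awaitable[None]],  # refreshes the cached token
) -> dict:
    """POST to a WeCom endpoint; on errcode 40014/42001 refresh the token and retry once."""
    for attempt in range(2):
        response = await client.post(make_url(), json=params)
        data = response.json()
        if data.get("errcode") in WECOM_TOKEN_ERRORS and attempt == 0:
            await refresh_token()
            continue
        if data.get("errcode", 0) != 0:
            raise Exception(f"WeCom API call failed: {data}")
        return data
    raise Exception("WeCom API call still failing after refreshing the access token")
```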

View File

@@ -1,5 +1,7 @@
import pip

# Check dependencies to guard against users who have not installed them
# keys are import names, values are pip package names
required_deps = {
    "requests": "requests",
    "openai": "openai",
@@ -23,6 +25,8 @@ required_deps = {
"aioshutil": "aioshutil", "aioshutil": "aioshutil",
"argon2": "argon2-cffi", "argon2": "argon2-cffi",
"jwt": "pyjwt", "jwt": "pyjwt",
"Crypto": "pycryptodome",
"lark_oapi": "lark-oapi"
} }
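The `required_deps` mapping pairs each import name with the pip package that provides it (e.g. `Crypto` comes from `pycryptodome`). The checker that consumes this mapping is not part of this diff; a hypothetical sketch of how such a check could work, under that assumption, is:

```python
import importlib

def find_missing_deps(required_deps: dict[str, str]) -> list[str]:
    """Return the pip package names whose import name cannot be imported."""
    missing = []
    for import_name, package_name in required_deps.items():
        try:
            importlib.import_module(import_name)
        except ImportError:
            missing.append(package_name)
    return missing

# Example: if pycryptodome is not installed, importing "Crypto" fails and the
# missing dependency is reported under its installable name "pycryptodome".
```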

View File

@@ -0,0 +1,29 @@
from __future__ import annotations

from .. import migration


@migration.migration_class("lark-config", 21)
class LarkConfigMigration(migration.Migration):
    """Migration"""

    async def need_migrate(self) -> bool:
        """Determine whether this migration needs to run in the current environment"""
        for adapter in self.ap.platform_cfg.data['platform-adapters']:
            if adapter['adapter'] == 'lark':
                return False

        return True

    async def run(self):
        """Run the migration"""
        self.ap.platform_cfg.data['platform-adapters'].append({
            "adapter": "lark",
            "enable": False,
            "app_id": "cli_abcdefgh",
            "app_secret": "XXXXXXXXXX",
            "bot_name": "LangBot"
        })

        await self.ap.platform_cfg.dump_config()

View File

@@ -8,7 +8,7 @@ from ..migrations import m001_sensitive_word_migration, m002_openai_config_migra
from ..migrations import m005_deepseek_cfg_completion, m006_vision_config, m007_qcg_center_url, m008_ad_fixwin_config_migrate, m009_msg_truncator_cfg
from ..migrations import m010_ollama_requester_config, m011_command_prefix_config, m012_runner_config, m013_http_api_config, m014_force_delay_config
from ..migrations import m015_gitee_ai_config, m016_dify_service_api, m017_dify_api_timeout_params, m018_xai_config, m019_zhipuai_config
-from ..migrations import m020_wecom_config
+from ..migrations import m020_wecom_config, m021_lark_config
@stage.stage_class("MigrationStage")

View File

@@ -37,7 +37,7 @@ class PlatformManager:
    async def initialize(self):
-        from .sources import nakuru, aiocqhttp, qqbotpy,wecom
+        from .sources import nakuru, aiocqhttp, qqbotpy, wecom, lark
        async def on_friend_message(event: platform_events.FriendMessage, adapter: msadapter.MessageSourceAdapter):

View File

@@ -0,0 +1,404 @@
from __future__ import annotations
import lark_oapi
import typing
import asyncio
import traceback
import time
import re
import base64
import uuid
import json
import datetime
import aiohttp
import lark_oapi.ws.exception
from lark_oapi.api.im.v1 import *
from .. import adapter
from ...pipeline.longtext.strategies import forward
from ...core import app
from ..types import message as platform_message
from ..types import events as platform_events
from ..types import entities as platform_entities
from ...utils import image
class LarkMessageConverter(adapter.MessageConverter):
@staticmethod
async def yiri2target(
message_chain: platform_message.MessageChain, api_client: lark_oapi.Client
) -> typing.Tuple[list]:
message_elements = []
pending_paragraph = []
for msg in message_chain:
if isinstance(msg, platform_message.Plain):
pending_paragraph.append({"tag": "md", "text": msg.text})
elif isinstance(msg, platform_message.At):
pending_paragraph.append(
{"tag": "at", "user_id": msg.target, "style": []}
)
elif isinstance(msg, platform_message.AtAll):
pending_paragraph.append({"tag": "at", "user_id": "all", "style": []})
elif isinstance(msg, platform_message.Image):
image_bytes = None
if msg.base64:
image_bytes = base64.b64decode(msg.base64)
elif msg.url:
async with aiohttp.ClientSession() as session:
async with session.get(msg.url) as response:
image_bytes = await response.read()
elif msg.path:
with open(msg.path, "rb") as f:
image_bytes = f.read()
request: CreateImageRequest = (
CreateImageRequest.builder()
.request_body(
CreateImageRequestBody.builder()
.image_type("message")
.image(image_bytes)
.build()
)
.build()
)
response: CreateImageResponse = await api_client.im.v1.image.acreate(
request
)
if not response.success():
raise Exception(
f"client.im.v1.image.create failed, code: {response.code}, msg: {response.msg}, log_id: {response.get_log_id()}, resp: \n{json.dumps(json.loads(response.raw.content), indent=4, ensure_ascii=False)}"
)
image_key = response.data.image_key
message_elements.append(pending_paragraph)
message_elements.append(
[
{
"tag": "img",
"image_key": image_key,
}
]
)
pending_paragraph = []
if pending_paragraph:
message_elements.append(pending_paragraph)
return message_elements
@staticmethod
async def target2yiri(
message: lark_oapi.api.im.v1.model.event_message.EventMessage,
api_client: lark_oapi.Client,
) -> platform_message.MessageChain:
message_content = json.loads(message.content)
lb_msg_list = []
msg_create_time = datetime.datetime.fromtimestamp(
int(message.create_time) / 1000
)
lb_msg_list.append(
platform_message.Source(id=message.message_id, time=msg_create_time)
)
if message.message_type == "text":
element_list = []
def text_element_recur(text_ele: dict) -> list[dict]:
if text_ele["text"] == "":
return []
at_pattern = re.compile(r"@_user_[\d]+")
at_matches = at_pattern.findall(text_ele["text"])
name_mapping = {}
for mathc in at_matches:
for mention in message.mentions:
if mention.key == mathc:
name_mapping[mathc] = mention.name
break
if len(name_mapping.keys()) == 0:
return [text_ele]
# Handle only the first @ mention here; the rest are processed recursively
text_split = text_ele["text"].split(list(name_mapping.keys())[0])
new_list = []
left_text = text_split[0]
right_text = text_split[1]
new_list.extend(
text_element_recur({"tag": "text", "text": left_text, "style": []})
)
new_list.append(
{
"tag": "at",
"user_id": list(name_mapping.keys())[0],
"user_name": name_mapping[list(name_mapping.keys())[0]],
"style": [],
}
)
new_list.extend(
text_element_recur({"tag": "text", "text": right_text, "style": []})
)
return new_list
element_list = text_element_recur(
{"tag": "text", "text": message_content["text"], "style": []}
)
message_content = {"title": "", "content": element_list}
elif message.message_type == "post":
new_list = []
for ele in message_content["content"]:
if type(ele) is dict:
new_list.append(ele)
elif type(ele) is list:
new_list.extend(ele)
message_content["content"] = new_list
elif message.message_type == "image":
message_content["content"] = [
{"tag": "img", "image_key": message_content["image_key"], "style": []}
]
for ele in message_content["content"]:
if ele["tag"] == "text":
lb_msg_list.append(platform_message.Plain(text=ele["text"]))
elif ele["tag"] == "at":
lb_msg_list.append(platform_message.At(target=ele["user_name"]))
elif ele["tag"] == "img":
image_key = ele["image_key"]
request: GetMessageResourceRequest = (
GetMessageResourceRequest.builder()
.message_id(message.message_id)
.file_key(image_key)
.type("image")
.build()
)
response: GetMessageResourceResponse = (
await api_client.im.v1.message_resource.aget(request)
)
if not response.success():
raise Exception(
f"client.im.v1.message_resource.get failed, code: {response.code}, msg: {response.msg}, log_id: {response.get_log_id()}, resp: \n{json.dumps(json.loads(response.raw.content), indent=4, ensure_ascii=False)}"
)
image_bytes = response.file.read()
image_base64 = base64.b64encode(image_bytes).decode()
image_format = response.raw.headers["content-type"]
lb_msg_list.append(
platform_message.Image(
base64=f"data:{image_format};base64,{image_base64}"
)
)
return platform_message.MessageChain(lb_msg_list)
class LarkEventConverter(adapter.EventConverter):
@staticmethod
async def yiri2target(
event: platform_events.MessageEvent,
) -> lark_oapi.im.v1.P2ImMessageReceiveV1:
pass
@staticmethod
async def target2yiri(
event: lark_oapi.im.v1.P2ImMessageReceiveV1, api_client: lark_oapi.Client
) -> platform_events.Event:
message_chain = await LarkMessageConverter.target2yiri(
event.event.message, api_client
)
if event.event.message.chat_type == "p2p":
return platform_events.FriendMessage(
sender=platform_entities.Friend(
id=event.event.sender.sender_id.open_id,
nickname=event.event.sender.sender_id.union_id,
remark="",
),
message_chain=message_chain,
time=event.event.message.create_time,
)
elif event.event.message.chat_type == "group":
return platform_events.GroupMessage(
sender=platform_entities.GroupMember(
id=event.event.sender.sender_id.open_id,
member_name=event.event.sender.sender_id.union_id,
permission=platform_entities.Permission.Member,
group=platform_entities.Group(
id=event.event.message.chat_id,
name="",
permission=platform_entities.Permission.Member,
),
special_title="",
join_timestamp=0,
last_speak_timestamp=0,
mute_time_remaining=0,
),
message_chain=message_chain,
time=event.event.message.create_time,
)
@adapter.adapter_class("lark")
class LarkMessageSourceAdapter(adapter.MessageSourceAdapter):
bot: lark_oapi.ws.Client
api_client: lark_oapi.Client
bot_account_id: str  # Used in the pipeline to tell whether an @ targets this bot; bot_name is used directly as the identifier
lark_tenant_key: str  # Lark tenant key
message_converter: LarkMessageConverter = LarkMessageConverter()
event_converter: LarkEventConverter = LarkEventConverter()
listeners: typing.Dict[
typing.Type[platform_events.Event],
typing.Callable[[platform_events.Event, adapter.MessageSourceAdapter], None],
] = {}
config: dict
ap: app.Application
def __init__(self, config: dict, ap: app.Application):
self.config = config
self.ap = ap
async def on_message(event: lark_oapi.im.v1.P2ImMessageReceiveV1):
lb_event = await self.event_converter.target2yiri(event, self.api_client)
await self.listeners[type(lb_event)](lb_event, self)
def sync_on_message(event: lark_oapi.im.v1.P2ImMessageReceiveV1):
asyncio.create_task(on_message(event))
event_handler = (
lark_oapi.EventDispatcherHandler.builder("", "")
.register_p2_im_message_receive_v1(sync_on_message)
.build()
)
self.bot_account_id = config["bot_name"]
self.bot = lark_oapi.ws.Client(
config["app_id"], config["app_secret"], event_handler=event_handler
)
self.api_client = (
lark_oapi.Client.builder()
.app_id(config["app_id"])
.app_secret(config["app_secret"])
.build()
)
async def send_message(
self, target_type: str, target_id: str, message: platform_message.MessageChain
):
pass
async def reply_message(
self,
message_source: platform_events.MessageEvent,
message: platform_message.MessageChain,
quote_origin: bool = False,
):
# No longer needed, since message_id is already carried in the message_chain
# lark_event = await self.event_converter.yiri2target(message_source)
lark_message = await self.message_converter.yiri2target(
message, self.api_client
)
final_content = {
"zh_cn": {
"title": "",
"content": lark_message,
},
}
request: ReplyMessageRequest = (
ReplyMessageRequest.builder()
.message_id(message_source.message_chain.message_id)
.request_body(
ReplyMessageRequestBody.builder()
.content(json.dumps(final_content))
.msg_type("post")
.reply_in_thread(False)
.uuid(str(uuid.uuid4()))
.build()
)
.build()
)
response: ReplyMessageResponse = await self.api_client.im.v1.message.areply(
request
)
if not response.success():
raise Exception(
f"client.im.v1.message.reply failed, code: {response.code}, msg: {response.msg}, log_id: {response.get_log_id()}, resp: \n{json.dumps(json.loads(response.raw.content), indent=4, ensure_ascii=False)}"
)
async def is_muted(self, group_id: int) -> bool:
return False
def register_listener(
self,
event_type: typing.Type[platform_events.Event],
callback: typing.Callable[
[platform_events.Event, adapter.MessageSourceAdapter], None
],
):
self.listeners[event_type] = callback
def unregister_listener(
self,
event_type: typing.Type[platform_events.Event],
callback: typing.Callable[
[platform_events.Event, adapter.MessageSourceAdapter], None
],
):
self.listeners.pop(event_type)
async def run_async(self):
try:
await self.bot._connect()
except lark_oapi.ws.exception.ClientException as e:
raise e
except Exception as e:
await self.bot._disconnect()
if self.bot._auto_reconnect:
await self.bot._reconnect()
else:
raise e
async def kill(self) -> bool:
return False
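For orientation, here is a hypothetical wiring sketch showing how the adapter above might be driven directly: construct it with a config dict, register a listener for an event type, and start the websocket loop. The import paths and the `ap` application object are assumptions; in the real project the platform manager (see the manager hunk above) performs this wiring.

```python
# Assumed import paths; the actual package layout may differ.
from pkg.core import app
from pkg.platform.sources.lark import LarkMessageSourceAdapter
from pkg.platform.types import events as platform_events


async def main(ap: app.Application):
    adapter = LarkMessageSourceAdapter(
        {
            "app_id": "cli_abcdefgh",        # placeholder credentials
            "app_secret": "XXXXXXXXXX",
            "bot_name": "LangBot",
        },
        ap,
    )

    async def on_friend_message(event: platform_events.FriendMessage, source):
        # Echo the incoming chain back; reply_message wraps it in a Lark "post" message.
        await source.reply_message(event, event.message_chain)

    adapter.register_listener(platform_events.FriendMessage, on_friend_message)
    await adapter.run_async()  # opens the Lark websocket and blocks until it disconnects
```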

View File

@@ -460,7 +460,7 @@ class Source(MessageComponent):
"""源。包含消息的基本信息。""" """源。包含消息的基本信息。"""
type: str = "Source" type: str = "Source"
"""消息组件类型。""" """消息组件类型。"""
id: int id: typing.Union[int, str]
"""消息的识别号用于引用回复Source 类型永远为 MessageChain 的第一个元素)。""" """消息的识别号用于引用回复Source 类型永远为 MessageChain 的第一个元素)。"""
time: datetime time: datetime
"""消息时间。""" """消息时间。"""
@@ -503,7 +503,7 @@ class At(MessageComponent):
"""At某人。""" """At某人。"""
type: str = "At" type: str = "At"
"""消息组件类型。""" """消息组件类型。"""
target: int target: typing.Union[int, str]
"""群员 QQ 号。""" """群员 QQ 号。"""
display: typing.Optional[str] = None display: typing.Optional[str] = None
"""At时显示的文字发送消息时无效自动使用群名片。""" """At时显示的文字发送消息时无效自动使用群名片。"""

View File

@@ -30,7 +30,7 @@ class AnthropicMessages(requester.LLMAPIRequester):
            timeout=typing.cast(httpx.Timeout, self.ap.provider_cfg.data['requester']['anthropic-messages']['timeout']),
            limits=anthropic._constants.DEFAULT_CONNECTION_LIMITS,
            follow_redirects=True,
-            proxies=self.ap.proxy_mgr.get_forward_proxies()
+            trust_env=True,
        )

        self.client = anthropic.AsyncAnthropic(

View File

@@ -39,7 +39,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
            base_url=self.requester_cfg['base-url'],
            timeout=self.requester_cfg['timeout'],
            http_client=httpx.AsyncClient(
-                proxies=self.ap.proxy_mgr.get_forward_proxies()
+                trust_env=True,
            )
        )
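Both requester hunks above drop the `proxies=` keyword, which newer httpx releases no longer accept, in favour of `trust_env=True`, which lets httpx pick up `HTTP_PROXY`/`HTTPS_PROXY`/`ALL_PROXY` from the environment. As a general illustration (not code from this repository, and with a placeholder proxy URL), the two common replacements look like this:

```python
import httpx

# Option 1: what this diff switches to - read proxy settings from environment variables.
env_client = httpx.AsyncClient(trust_env=True)

# Option 2: route all traffic through an explicit proxy via transport mounts
# (supported by recent httpx versions; the URL is a placeholder).
mounted_client = httpx.AsyncClient(
    mounts={"all://": httpx.AsyncHTTPTransport(proxy="http://127.0.0.1:7890")}
)
```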

View File

@@ -1,4 +1,4 @@
semantic_version = "v3.4.3" semantic_version = "v3.4.4"
debug_mode = False debug_mode = False

View File

@@ -25,6 +25,7 @@ aioshutil
argon2-cffi
pyjwt
pycryptodome
lark-oapi

# indirect
taskgroup==0.0.0a4

View File

@@ -116,6 +116,11 @@
"requester": "deepseek-chat-completions", "requester": "deepseek-chat-completions",
"token_mgr": "deepseek" "token_mgr": "deepseek"
}, },
{
"name": "deepseek-reasoner",
"requester": "deepseek-chat-completions",
"token_mgr": "deepseek"
},
{ {
"name": "grok-2-latest", "name": "grok-2-latest",
"requester": "xai-chat-completions", "requester": "xai-chat-completions",

View File

@@ -35,6 +35,13 @@
"token": "", "token": "",
"EncodingAESKey": "", "EncodingAESKey": "",
"contacts_secret": "" "contacts_secret": ""
},
{
"adapter": "lark",
"enable": false,
"app_id": "cli_abcdefgh",
"app_secret": "XXXXXXXXXX",
"bot_name": "LangBot"
} }
], ],
"track-function-calls": true, "track-function-calls": true,

View File

@@ -121,6 +121,98 @@
]
}
}
},
{
"title": "WeCom adapter",
"description": "For connecting to WeCom (企业微信)",
"properties": {
"adapter": {
"type": "string",
"const": "wecom"
},
"enable": {
"type": "boolean",
"default": false,
"description": "是否启用此适配器",
"layout": {
"comp": "switch",
"props": {
"color": "primary"
}
}
},
"host": {
"type": "string",
"default": "0.0.0.0",
"description": "监听的IP地址"
},
"port": {
"type": "integer",
"default": 2290,
"description": "监听的端口"
},
"corpid": {
"type": "string",
"default": "",
"description": "企业微信的corpid"
},
"secret": {
"type": "string",
"default": "",
"description": "企业微信的secret"
},
"token": {
"type": "string",
"default": "",
"description": "企业微信的token"
},
"EncodingAESKey": {
"type": "string",
"default": "",
"description": "企业微信的EncodingAESKey"
},
"contacts_secret": {
"type": "string",
"default": "",
"description": "企业微信的contacts_secret"
}
}
},
{
"title": "飞书适配器",
"description": "用于接入飞书",
"properties": {
"adapter": {
"type": "string",
"const": "lark"
},
"enable": {
"type": "boolean",
"default": false,
"description": "是否启用此适配器",
"layout": {
"comp": "switch",
"props": {
"color": "primary"
}
}
},
"app_id": {
"type": "string",
"default": "",
"description": "飞书的app_id"
},
"app_secret": {
"type": "string",
"default": "",
"description": "飞书的app_secret"
},
"bot_name": {
"type": "string",
"default": "",
"description": "飞书的bot_name"
}
}
}
]
}