Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-25 19:37:36 +08:00)

Compare commits (6 commits)
| SHA1 |
|---|
| aa7c08ee00 |
| b98de29b07 |
| c7c2eb4518 |
| 37fa318258 |
| ff7bebb782 |
| 30bb26f898 |
@@ -69,7 +69,7 @@ docker compose up -d

 ## ✨ Features

-- 💬 LLM chat and Agent: supports multiple LLMs, works in both group chats and private chats; multi-turn conversation, tool calling, and multimodal capabilities, with a built-in RAG (knowledge base) implementation and deep integration with [Dify](https://dify.ai).
+- 💬 LLM chat and Agent: supports multiple LLMs, works in both group chats and private chats; multi-turn conversation, tool calling, multimodal, and streaming output capabilities, with a built-in RAG (knowledge base) implementation and deep integration with [Dify](https://dify.ai).
 - 🤖 Multi-platform support: currently supports QQ, QQ Channels, WeCom, personal WeChat, Lark, Discord, Telegram, and more.
 - 🛠️ High stability, complete features: native access control, rate limiting, sensitive-word filtering, and other mechanisms; simple configuration with multiple deployment options. Supports multiple pipeline configurations, so different bots can serve different scenarios.
 - 🧩 Plugin extensions, active community: supports event-driven and component-extension plugin mechanisms; compatible with the Anthropic [MCP protocol](https://modelcontextprotocol.io/); hundreds of plugins already available.
@@ -63,7 +63,7 @@ Click the Star and Watch button in the upper right corner of the repository to g

 ## ✨ Features

-- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapts to group chats and private chats; Supports multi-round conversations, tool calls, and multi-modal capabilities. Built-in RAG (knowledge base) implementation, and deeply integrates with [Dify](https://dify.ai).
+- 💬 Chat with LLM / Agent: Supports multiple LLMs, adapts to group chats and private chats; Supports multi-round conversations, tool calls, multi-modal, and streaming output capabilities. Built-in RAG (knowledge base) implementation, and deeply integrates with [Dify](https://dify.ai).
 - 🤖 Multi-platform Support: Currently supports QQ, QQ Channel, WeCom, personal WeChat, Lark, DingTalk, Discord, Telegram, etc.
 - 🛠️ High Stability, Feature-rich: Native access control, rate limiting, sensitive word filtering, and other mechanisms; Easy to use, supports multiple deployment methods. Supports multiple pipeline configurations; different bots can be used for different scenarios.
 - 🧩 Plugin Extension, Active Community: Supports event-driven and component-extension plugin mechanisms; Integrates the Anthropic [MCP protocol](https://modelcontextprotocol.io/); Currently has hundreds of plugins.
@@ -63,7 +63,7 @@ LangBot is listed in BTPanel. After installing BTPanel

 ## ✨ Features

-- 💬 Chat with LLM / Agent: supports multiple LLMs, works in group chats and private chats; supports multi-round conversations, tool calling, and multimodal capabilities, with built-in RAG (knowledge base) and deep integration with [Dify](https://dify.ai).
+- 💬 Chat with LLM / Agent: supports multiple LLMs, works in group chats and private chats; supports multi-round conversations, tool calling, multimodal, and streaming output capabilities, with built-in RAG (knowledge base) and deep integration with [Dify](https://dify.ai).
 - 🤖 Multi-platform support: currently supports QQ, QQ Channel, WeChat, personal WeChat, Lark, DingTalk, Discord, Telegram, and more.
 - 🛠️ High stability, rich features: native access control, rate limiting, sensitive-word filtering, and other mechanisms; easy to use, supports multiple deployment methods. Supports multiple pipeline configurations, so different bots can be used for different purposes.
 - 🧩 Plugin extensions, active community: supports event-driven and component-extension plugin mechanisms; compatible with the Anthropic [MCP protocol](https://modelcontextprotocol.io/); a rich ecosystem with hundreds of plugins.
@@ -65,7 +65,7 @@ docker compose up -d

 ## ✨ Features

-- 💬 LLM chat and Agent: supports multiple LLMs, works in both group chats and private chats; multi-turn conversation, tool calling, and multimodal capabilities, with a built-in RAG (knowledge base) implementation and deep integration with [Dify](https://dify.ai).
+- 💬 LLM chat and Agent: supports multiple LLMs, works in both group chats and private chats; multi-turn conversation, tool calling, multimodal, and streaming output capabilities, with a built-in RAG (knowledge base) implementation and deep integration with [Dify](https://dify.ai).
 - 🤖 Multi-platform support: currently supports QQ, QQ Channels, WeCom, personal WeChat, Lark, Discord, Telegram, and more.
 - 🛠️ High stability, complete features: native access control, rate limiting, sensitive-word filtering, and other mechanisms; simple configuration with multiple deployment options. Supports multiple pipeline configurations, so different bots can serve different scenarios.
 - 🧩 Plugin extensions, active community: supports event-driven and component-extension plugin mechanisms; compatible with the Anthropic [MCP protocol](https://modelcontextprotocol.io/); hundreds of plugins already available.
@@ -4,6 +4,13 @@ import typing

from . import chatcmpl
import uuid
from .. import errors, requester
from ....core import entities as core_entities
from ... import entities as llm_entities
from ...tools import entities as tools_entities


class GeminiChatCompletions(chatcmpl.OpenAIChatCompletions):
    """Google Gemini API requester"""
@@ -12,3 +19,127 @@ class GeminiChatCompletions(chatcmpl.OpenAIChatCompletions):

        'base_url': 'https://generativelanguage.googleapis.com/v1beta/openai',
        'timeout': 120,
    }

    async def _closure_stream(
        self,
        query: core_entities.Query,
        req_messages: list[dict],
        use_model: requester.RuntimeLLMModel,
        use_funcs: list[tools_entities.LLMFunction] = None,
        extra_args: dict[str, typing.Any] = {},
        remove_think: bool = False,
    ) -> llm_entities.MessageChunk:
        self.client.api_key = use_model.token_mgr.get_token()

        args = {}
        args['model'] = use_model.model_entity.name

        if use_funcs:
            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
            if tools:
                args['tools'] = tools

        # Set the messages for this request
        messages = req_messages.copy()

        # Check vision: convert base64 images to the OpenAI image_url format
        for msg in messages:
            if 'content' in msg and isinstance(msg['content'], list):
                for me in msg['content']:
                    if me['type'] == 'image_base64':
                        me['image_url'] = {'url': me['image_base64']}
                        me['type'] = 'image_url'
                        del me['image_base64']

        args['messages'] = messages
        args['stream'] = True

        # Streaming state
        tool_calls_map: dict[str, llm_entities.ToolCall] = {}
        chunk_idx = 0
        thinking_started = False
        thinking_ended = False
        role = 'assistant'  # default role
        tool_id = ''
        tool_name = ''
        # accumulated_reasoning = ''  # only used to decide when the chain of thought ends

        async for chunk in self._req_stream(args, extra_body=extra_args):
            # Parse the chunk data
            if hasattr(chunk, 'choices') and chunk.choices:
                choice = chunk.choices[0]
                delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
                finish_reason = getattr(choice, 'finish_reason', None)
            else:
                delta = {}
                finish_reason = None

            # Take the role from the first chunk and reuse it afterwards
            if 'role' in delta and delta['role']:
                role = delta['role']

            # Incremental content
            delta_content = delta.get('content', '')
            reasoning_content = delta.get('reasoning_content', '')

            # Handle reasoning_content
            if reasoning_content:
                # accumulated_reasoning += reasoning_content
                # If remove_think is set, skip reasoning_content
                if remove_think:
                    chunk_idx += 1
                    continue

                # First reasoning_content chunk: prepend the opening <think> tag
                if not thinking_started:
                    thinking_started = True
                    delta_content = '<think>\n' + reasoning_content
                else:
                    # Keep streaming reasoning_content
                    delta_content = reasoning_content
            elif thinking_started and not thinking_ended and delta_content:
                # reasoning_content ended and normal content started: append the closing </think> tag
                thinking_ended = True
                delta_content = '\n</think>\n' + delta_content

            # Handle <think> tags already present in content (if they need to be removed)
            # if delta_content and remove_think and '<think>' in delta_content:
            #     import re
            #
            #     # Strip <think> tags and their content
            #     delta_content = re.sub(r'<think>.*?</think>', '', delta_content, flags=re.DOTALL)

            # Handle incremental tool calls
            # delta_tool_calls = None
            if delta.get('tool_calls'):
                for tool_call in delta['tool_calls']:
                    if tool_call['id'] == '' and tool_id == '':
                        tool_id = str(uuid.uuid4())
                    if tool_call['function']['name']:
                        tool_name = tool_call['function']['name']
                    tool_call['id'] = tool_id
                    tool_call['function']['name'] = tool_name
                    if tool_call['type'] is None:
                        tool_call['type'] = 'function'

            # Skip an empty first chunk (role only, no content)
            if chunk_idx == 0 and not delta_content and not reasoning_content and not delta.get('tool_calls'):
                chunk_idx += 1
                continue

            # Build the MessageChunk with incremental content only
            chunk_data = {
                'role': role,
                'content': delta_content if delta_content else None,
                'tool_calls': delta.get('tool_calls'),
                'is_final': bool(finish_reason),
            }

            # Drop None values
            chunk_data = {k: v for k, v in chunk_data.items() if v is not None}

            yield llm_entities.MessageChunk(**chunk_data)
            chunk_idx += 1
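The core of the new streaming path is the small state machine that wraps `reasoning_content` deltas in a `<think>...</think>` block while passing normal content through unchanged. Below is a minimal, self-contained sketch of just that wrapping logic, outside of any LangBot types; the function name and the sample delta stream are illustrative, not taken from the diff.

```python
# Minimal sketch of the <think>-tag wrapping used above.
# Each input pair is (reasoning_delta, content_delta).
def wrap_think(deltas, remove_think=False):
    thinking_started = False
    thinking_ended = False
    out = []
    for reasoning, content in deltas:
        piece = content
        if reasoning:
            if remove_think:
                continue  # drop reasoning deltas entirely
            if not thinking_started:
                thinking_started = True
                piece = '<think>\n' + reasoning  # open the think block
            else:
                piece = reasoning
        elif thinking_started and not thinking_ended and content:
            thinking_ended = True
            piece = '\n</think>\n' + content  # close it once normal content starts
        if piece:
            out.append(piece)
    return ''.join(out)


# Illustrative delta stream: two reasoning chunks followed by the answer.
print(wrap_think([('I should greet', ''), (' politely.', ''), ('', 'Hello!')]))
# -> '<think>\nI should greet politely.\n</think>\nHello!'
print(wrap_think([('hidden', ''), ('', 'Hello!')], remove_think=True))
# -> 'Hello!'
```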
@@ -139,8 +139,8 @@ class OllamaChatCompletions(requester.ProviderAPIRequester):
         input_text: list[str],
         extra_args: dict[str, typing.Any] = {},
     ) -> list[list[float]]:
-        return await self.client.embed(
+        return (await self.client.embed(
             model=model.model_entity.name,
             input=input_text,
             **extra_args,
-        )
+        )).embeddings
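This fix reflects that the Ollama client's `embed()` call resolves to a response object rather than the raw vectors, so the `.embeddings` field has to be unwrapped to actually satisfy the `list[list[float]]` return annotation. A hedged, standalone sketch of the same pattern (assumes the `ollama` Python package and a locally running Ollama server; the model name is a placeholder):

```python
import asyncio

import ollama


async def embed_texts(texts: list[str]) -> list[list[float]]:
    client = ollama.AsyncClient()
    # embed() returns a response object; the vectors live in its `.embeddings`
    # attribute, which is why the requester now unwraps that attribute.
    response = await client.embed(model='nomic-embed-text', input=texts)
    return response.embeddings


if __name__ == '__main__':
    vectors = asyncio.run(embed_texts(['hello world']))
    print(len(vectors), len(vectors[0]))
```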
pkg/provider/modelmgr/requesters/shengsuanyun.py (new file, 32 lines)

@@ -0,0 +1,32 @@
from __future__ import annotations

import openai
import typing

from . import chatcmpl
import openai.types.chat.chat_completion as chat_completion


class ShengSuanYunChatCompletions(chatcmpl.OpenAIChatCompletions):
    """ShengSuanYun (胜算云) ChatCompletion API requester"""

    client: openai.AsyncClient

    default_config: dict[str, typing.Any] = {
        'base_url': 'https://router.shengsuanyun.com/api/v1',
        'timeout': 120,
    }

    async def _req(
        self,
        args: dict,
        extra_body: dict = {},
    ) -> chat_completion.ChatCompletion:
        return await self.client.chat.completions.create(
            **args,
            extra_body=extra_body,
            extra_headers={
                'HTTP-Referer': 'https://langbot.app',
                'X-Title': 'LangBot',
            },
        )
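The new requester leans entirely on the OpenAI-compatible base class and only overrides `_req` to attach attribution headers to each request. Roughly equivalent standalone usage with the plain OpenAI SDK might look like the sketch below; the API key and model id are placeholders, not taken from the diff.

```python
import asyncio

import openai


async def main():
    client = openai.AsyncClient(
        base_url='https://router.shengsuanyun.com/api/v1',
        api_key='sk-...',  # placeholder key
    )
    resp = await client.chat.completions.create(
        model='some-model',  # placeholder model id
        messages=[{'role': 'user', 'content': 'Hello'}],
        # Same attribution headers the overridden _req adds on every call.
        extra_headers={
            'HTTP-Referer': 'https://langbot.app',
            'X-Title': 'LangBot',
        },
    )
    print(resp.choices[0].message.content)


asyncio.run(main())
```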
pkg/provider/modelmgr/requesters/shengsuanyun.svg (new file, 1 line, 7.4 KiB; diff suppressed because one or more lines are too long)
pkg/provider/modelmgr/requesters/shengsuanyun.yaml (new file, 38 lines)

@@ -0,0 +1,38 @@
apiVersion: v1
kind: LLMAPIRequester
metadata:
  name: shengsuanyun-chat-completions
  label:
    en_US: ShengSuanYun
    zh_Hans: 胜算云
  icon: shengsuanyun.svg
spec:
  config:
    - name: base_url
      label:
        en_US: Base URL
        zh_Hans: 基础 URL
      type: string
      required: true
      default: "https://router.shengsuanyun.com/api/v1"
    - name: args
      label:
        en_US: Args
        zh_Hans: 附加参数
      type: object
      required: true
      default: {}
    - name: timeout
      label:
        en_US: Timeout
        zh_Hans: 超时时间
      type: int
      required: true
      default: 120
  support_type:
    - llm
    - text-embedding
execution:
  python:
    path: ./shengsuanyun.py
    attr: ShengSuanYunChatCompletions
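The manifest's `execution.python` block is what ties this YAML entry to the requester class defined in shengsuanyun.py. As a rough illustration only (not LangBot's actual loader), a manifest like this could be resolved to its import target as follows:

```python
import yaml  # PyYAML


def resolve_execution(manifest_path: str) -> tuple[str, str]:
    """Return the (path, attr) pair a loader would import for this requester (illustrative only)."""
    with open(manifest_path, 'r', encoding='utf-8') as f:
        manifest = yaml.safe_load(f)
    python_exec = manifest['execution']['python']
    return python_exec['path'], python_exec['attr']


# e.g. resolve_execution('pkg/provider/modelmgr/requesters/shengsuanyun.yaml')
# -> ('./shengsuanyun.py', 'ShengSuanYunChatCompletions')
```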
@@ -499,7 +499,7 @@ class DifyServiceAPIRunner(runner.RequestRunner):
|
||||
content = re.sub(r'^\n</think>', '', chunk['answer'])
|
||||
pending_agent_message += content
|
||||
think_end = True
|
||||
elif think_end:
|
||||
elif think_end or not think_start:
|
||||
pending_agent_message += chunk['answer']
|
||||
if think_start:
|
||||
continue
|
||||
|
||||
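Judging from this hunk alone, the one-line change matters for Dify responses that never emit a `<think>` block: with the old condition, answer chunks arriving while both flags were still false were not appended, whereas `or not think_start` lets them accumulate. A toy reproduction of just that branch condition (simplified, not the actual runner):

```python
def keep_answer_chunk(think_start: bool, think_end: bool, fixed: bool = True) -> bool:
    """Decide whether a plain answer chunk gets appended to the pending message."""
    if fixed:
        return think_end or not think_start  # new condition
    return think_end  # old condition: drops answers when no <think> block ever started


# A response with no reasoning block at all: think_start and think_end stay False.
print(keep_answer_chunk(think_start=False, think_end=False, fixed=False))  # False -> chunk lost
print(keep_answer_chunk(think_start=False, think_end=False, fixed=True))   # True  -> chunk kept
```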
@@ -1,4 +1,4 @@
|
||||
semantic_version = 'v4.2.0'
|
||||
semantic_version = 'v4.2.1'
|
||||
|
||||
required_database_version = 5
|
||||
"""Tag the version of the database schema, used to check if the database needs to be migrated"""
|
||||
|
||||