Mirror of https://github.com/langbot-app/LangBot.git, synced 2025-11-26 03:44:58 +08:00
perf: remove the dify stream options from the ai.yaml config and enable stream reply in lark.yaml
fix: local-agent remove_think bug
@@ -407,7 +407,7 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
     async def is_stream_output_supported() -> bool:
         is_stream = False
-        if self.config.get("enable-card-reply",None):
+        if self.config.get("enable-stream-reply",None):
             is_stream = True
         self.is_stream = is_stream
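
As committed, streaming only engages when both flags are truthy: enable-stream-reply is consulted inside the enable-card-reply branch, since a Lark streaming reply is delivered as a card that gets patched in place. A minimal standalone sketch of the combined check (not the adapter's literal code):

```python
def stream_enabled(config: dict) -> bool:
    # Streaming rides on card replies: the stream flag only matters
    # when card reply mode is also switched on.
    return bool(config.get('enable-card-reply')) and bool(config.get('enable-stream-reply'))


assert stream_enabled({'enable-card-reply': True, 'enable-stream-reply': True})
assert not stream_enabled({'enable-stream-reply': True})  # card mode still required
```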
@@ -603,8 +603,8 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
         """
         lark_message = await self.message_converter.yiri2target(message, self.api_client)

-        if not is_final:
-            self.seq += 1
+        self.seq += 1
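
The sequence counter now advances for the final chunk too, not only for partial updates. Lark orders card-element patches by this number, so the closing update has to outrank every partial before it; that ordering requirement is inferred from the surrounding code, not stated in the commit. A standalone sketch:

```python
class CardStream:
    """Tracks the per-card sequence number used to order partial updates."""

    def __init__(self) -> None:
        self.seq = 0

    def next_seq(self) -> int:
        # Bump unconditionally: the final update must also carry a
        # sequence number newer than every partial update before it.
        self.seq += 1
        return self.seq
```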
@@ -620,7 +620,7 @@ class LarkAdapter(adapter.MessagePlatformAdapter):
                 'type': 'card_json',
                 'data': {'card_id': self.card_id_dict[message_id], 'elements': {'content': text_message}},
             }
-            print(self.seq)

             request: ContentCardElementRequest = ContentCardElementRequest.builder() \
                 .card_id(self.card_id_dict[message_id]) \
                 .element_id("markdown_1") \
@@ -65,23 +65,16 @@ spec:
         type: string
         required: true
         default: ""
-      - name: enable-card-reply
+      - name: enable-stream-reply
         label:
-          en_US: Enable Card Reply Mode
-          zh_Hans: 启用飞书卡片回复模式
+          en_US: Enable Stream Reply Mode
+          zh_Hans: 启用飞书流式回复模式
         description:
-          en_US: If enabled, the bot will use the card of lark reply mode
-          zh_Hans: 如果启用,将使用飞书卡片方式来回复内容
+          en_US: If enabled, the bot will use the stream of lark reply mode
+          zh_Hans: 如果启用,将使用飞书流式方式来回复内容
         type: boolean
         required: true
         default: false
-      - name: card_template_id
-        label:
-          en_US: card template id
-          zh_Hans: 卡片模板ID
-        type: string
-        required: true
-        default: "填写你的卡片template_id"
 execution:
   python:
     path: ./lark.py
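
For reference, a minimal adapter config exercising the renamed flag might look like the sketch below; app_id is an assumed neighboring field, and with default: false streaming stays off unless explicitly enabled:

```python
# Illustrative adapter settings matching the manifest above (values are examples).
lark_adapter_config = {
    'app_id': 'cli_xxxxxxxx',     # assumed field, defined elsewhere in the manifest
    'enable-stream-reply': True,  # the flag introduced by this commit; defaults to false
}
```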
@@ -64,7 +64,6 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
         reasoning_content = chatcmpl_message['reasoning_content'] if 'reasoning_content' in chatcmpl_message else None

         # deepseek的reasoner模型
-        print(pipeline_config['trigger'].get('misc', '').get('remove_think'))
         if pipeline_config['trigger'].get('misc', '').get('remove_think'):
             pass
         else:
@@ -79,6 +78,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
         self,
         pipeline_config: dict[str, typing.Any],
         chat_completion: chat_completion.ChatCompletion,
+        idx: int,
     ) -> llm_entities.MessageChunk:

         # 处理流式chunk和完整响应的差异
@@ -106,7 +106,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
             else:
                 delta['content'] = delta['content']
         else:
-            if reasoning_content is not None:
+            if reasoning_content is not None and idx == 0:
                 delta['content'] += f'<think>\n{reasoning_content}'
             elif reasoning_content is None:
                 if self.is_content:
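
The new idx parameter lets the requester open a `<think>` block only on the first streamed chunk; without the idx == 0 guard, every reasoning delta would re-emit the opening tag. A self-contained sketch of the gating pattern, where the (reasoning, content) pairs are a simplification of the real chunk deltas:

```python
def wrap_reasoning(chunks):
    """Prefix '<think>' once, close it when ordinary content starts."""
    in_think = False
    for idx, (reasoning, content) in enumerate(chunks):
        out = ''
        if reasoning is not None:
            if idx == 0:  # open the think block on the first chunk only
                out += '<think>\n'
                in_think = True
            out += reasoning
        elif in_think:  # first ordinary chunk closes the block
            out += '\n</think>\n'
            in_think = False
        if content:
            out += content
        yield out


print(''.join(wrap_reasoning([('let me see', None), (' more', None), (None, 'Answer')])))
# -> '<think>\nlet me see more\n</think>\nAnswer'
```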
@@ -165,11 +165,10 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
         pipeline_config = query.pipeline_config
         async for chunk in self._req_stream(args, extra_body=extra_args):
             # 处理流式消息
-            delta_message = await self._make_msg_chunk(pipeline_config,chunk)
+            delta_message = await self._make_msg_chunk(pipeline_config,chunk,chunk_idx)
             if delta_message.content:
                 current_content += delta_message.content
                 delta_message.content = current_content
-                print(current_content)
             # delta_message.all_content = current_content
             if delta_message.tool_calls:
                 for tool_call in delta_message.tool_calls:
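
Each yielded chunk now carries the accumulated content so far rather than just the delta, and the call site threads a chunk index into _make_msg_chunk. Maintaining chunk_idx happens outside this hunk; a manual counter is the usual shape, since the stdlib has no aenumerate for async generators:

```python
import asyncio


async def stream():
    for piece in ('Hel', 'lo', '!'):
        yield piece


async def consume() -> str:
    chunk_idx = 0  # threaded by hand, as the hunk above does with chunk_idx
    parts = []
    async for chunk in stream():
        parts.append(f'{chunk_idx}:{chunk}')
        chunk_idx += 1
    return ' '.join(parts)


print(asyncio.run(consume()))  # -> '0:Hel 1:lo 2:!'
```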
@@ -324,7 +323,6 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
                 extra_args=extra_args,
             ):
                 yield item
-                print(item)

         except asyncio.TimeoutError:
             raise errors.RequesterError('请求超时')
@@ -115,9 +115,7 @@ class DifyServiceAPIRunner(runner.RequestRunner):

         stream_output_pending_chunk = ''

-        batch_pending_max_size = self.pipeline_config['ai']['dify-service-api'].get(
-            'output-batch-size', 0
-        )  # 积累一定量的消息更新消息一次
+        batch_pending_max_size = 64  # 积累一定量的消息更新消息一次

         batch_pending_index = 0
@@ -255,14 +253,13 @@
             if chunk['event'] == 'agent_message' or chunk['event'] == 'message_end':
                 if chunk['event'] == 'message_end':
-                    print(chunk['event'])
                     # break
                     is_final = True
                 else:
                     is_final = False
                 pending_agent_message += chunk['answer']
                 if is_stream:
-                    if batch_pending_index % 64 == 0 or is_final:
+                    if batch_pending_index % 32 == 0 or is_final:
                         yield llm_entities.MessageChunk(
                             role='assistant',
                             content=self._try_convert_thinking(pending_agent_message),
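
The flush cadence for streamed Dify output is now fixed in code: buffered text is emitted every 32 events, or immediately on the final event, rather than on a boundary read from the removed output-batch-size option. A self-contained sketch of the flush-every-N-or-final pattern, with (text, is_final) pairs standing in for Dify stream chunks:

```python
def flush_points(events, batch_size=32):
    """Collect the accumulated text at every Nth event and at the final one."""
    pending, flushes = '', []
    for index, (text, is_final) in enumerate(events):
        pending += text
        if index % batch_size == 0 or is_final:
            flushes.append(pending)  # cumulative snapshot, like pending_agent_message
    return flushes


events = [(f'tok{i} ', i == 99) for i in range(100)]
print(len(flush_points(events)))  # 5 flushes: at indexes 0, 32, 64, 96 and the final event
```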
@@ -276,7 +273,7 @@
                             role='assistant',
                             content=self._try_convert_thinking(pending_agent_message),
                         )
                         pending_agent_message = ''

-            if chunk['event'] == 'agent_thought':
+            elif chunk['event'] == 'agent_thought':
                 if chunk['tool'] != '' and chunk['observation'] != '':  # 工具调用结果,跳过
@@ -312,7 +309,7 @@
                         ],
                     )
                     yield msg
-            if chunk['event'] == 'message_file':
+            elif chunk['event'] == 'message_file':
                 if chunk['type'] == 'image' and chunk['belongs_to'] == 'assistant':
                     base_url = self.dify_client.base_url
@@ -330,8 +327,10 @@
                         role='assistant',
                         content=[llm_entities.ContentElement.from_image_url(image_url)],
                     )
-            if chunk['event'] == 'error':
+            elif chunk['event'] == 'error':
                 raise errors.DifyAPIError('dify 服务错误: ' + chunk['message'])
+            else:
+                pending_agent_message = ''

         if chunk is None:
             raise errors.DifyAPIError('Dify API 没有返回任何响应,请检查网络连接和API配置')
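
Rewriting the event checks as one if/elif chain makes the branches mutually exclusive, and the new trailing else clears pending_agent_message when an unrecognized event arrives. A standalone sketch of the dispatch shape, with handler bodies elided:

```python
def handle(chunk: dict, state: dict) -> None:
    # Mutually exclusive dispatch, mirroring the if/elif chain above.
    event = chunk.get('event')
    if event in ('agent_message', 'message_end'):
        state['pending'] += chunk.get('answer', '')
    elif event == 'agent_thought':
        pass  # tool-call bookkeeping elided
    elif event == 'message_file':
        pass  # image handling elided
    elif event == 'error':
        raise RuntimeError(chunk.get('message', 'dify error'))
    else:
        state['pending'] = ''  # unknown event: drop the buffered text


state = {'pending': ''}
handle({'event': 'agent_message', 'answer': 'hi'}, state)
handle({'event': 'ping'}, state)  # unknown event resets the buffer
print(repr(state['pending']))     # -> ''
```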
@@ -128,20 +128,7 @@ stages:
           label:
             en_US: Remove
             zh_Hans: 移除
-      - name: enable-streaming
-        label:
-          en_US: enable streaming mode
-          zh_Hans: 开启流式输出
-        type: boolean
-        required: true
-        default: false
-      - name: output-batch-size
-        label:
-          en_US: output batch size
-          zh_Hans: 输出批次大小(积累多少条消息后一起输出)
-        type: integer
-        required: true
-        default: 10
-
       - name: dashscope-app-api
         label:
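
With enable-streaming and output-batch-size gone from the pipeline manifest, whether output streams is decided by the message platform adapter (the Lark flag added above) rather than by per-pipeline ai.yaml options. A hedged sketch of that decision point; is_stream_output_supported is the name from the Lark hunk, while the surrounding helper is assumed:

```python
# Sketch, not LangBot's literal pipeline code: ask the adapter whether it
# can render a stream instead of reading an ai.yaml option.
async def should_stream(adapter) -> bool:
    checker = getattr(adapter, 'is_stream_output_supported', None)
    if checker is None:
        return False  # adapter predates streaming support
    return await checker()
```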