from __future__ import annotations

import asyncio
import typing

import openai
import openai.types.chat.chat_completion as chat_completion
import openai.types.chat.chat_completion_chunk as chat_completion_chunk
import httpx

from .. import errors, requester
from ....core import entities as core_entities, app
from ... import entities as llm_entities
from ...tools import entities as tools_entities


class OpenAIChatCompletions(requester.ProviderAPIRequester):
    """OpenAI ChatCompletion API requester"""

    client: openai.AsyncClient

    # Per-stream state for reasoner models: whether the regular-content phase has
    # started, and whether a <think> block has been opened in the current stream.
    is_content: bool
    is_think: bool

    default_config: dict[str, typing.Any] = {
        'base_url': 'https://api.openai.com/v1',
        'timeout': 120,
    }

    async def initialize(self):
        self.client = openai.AsyncClient(
            api_key='',
            base_url=self.requester_cfg['base_url'].replace(' ', ''),
            timeout=self.requester_cfg['timeout'],
            http_client=httpx.AsyncClient(trust_env=True, timeout=self.requester_cfg['timeout']),
        )
        # Reset the reasoning flags; they are also reset at the start of each stream.
        self.is_content = False
        self.is_think = False
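
    # Minimal driving sketch (hedged: `requester_cfg` is normally injected by the
    # framework rather than assigned directly; the values mirror default_config):
    #
    #   requester.requester_cfg = {'base_url': 'https://api.openai.com/v1', 'timeout': 120}
    #   await requester.initialize()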

    async def _req(
        self,
        args: dict,
        extra_body: dict = {},
    ) -> chat_completion.ChatCompletion:
        return await self.client.chat.completions.create(**args, extra_body=extra_body)

    async def _req_stream(
        self,
        args: dict,
        extra_body: dict = {},
    ) -> typing.AsyncGenerator[chat_completion_chunk.ChatCompletionChunk, None]:
        # With args['stream'] = True the SDK returns an async stream of chunks;
        # re-yield them so callers can iterate this method directly.
        async for chunk in await self.client.chat.completions.create(**args, extra_body=extra_body):
            yield chunk
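
    # Consumption sketch (hedged: the model name is illustrative only):
    #
    #   args = {'model': 'gpt-4o', 'messages': [...], 'stream': True}
    #   async for chunk in self._req_stream(args):
    #       print(chunk.choices[0].delta.content or '', end='')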

    async def _make_msg(
        self,
        pipeline_config: dict[str, typing.Any],
        chat_completion: chat_completion.ChatCompletion,
    ) -> llm_entities.Message:
        chatcmpl_message = chat_completion.choices[0].message.model_dump()

        # Ensure the role field exists and is not None
        if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
            chatcmpl_message['role'] = 'assistant'

        reasoning_content = chatcmpl_message.get('reasoning_content')

        # DeepSeek-style reasoner models return reasoning_content separately; unless
        # the pipeline is configured to strip it, fold it into content wrapped in
        # <think> tags. content may be None (e.g. pure tool calls), so guard it.
        remove_think = pipeline_config['trigger'].get('misc', {}).get('remove_think')
        if not remove_think and reasoning_content is not None:
            chatcmpl_message['content'] = '<think>\n' + reasoning_content + '\n</think>\n' + (chatcmpl_message['content'] or '')

        message = llm_entities.Message(**chatcmpl_message)

        return message
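
    # Resulting content shape for a reasoner response (sketch):
    #
    #   <think>
    #   ...reasoning_content...
    #   </think>
    #   ...content...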

    async def _make_msg_chunk(
        self,
        pipeline_config: dict[str, typing.Any],
        chat_completion: chat_completion_chunk.ChatCompletionChunk,
    ) -> llm_entities.MessageChunk:
        # Handle the difference between streaming chunks and full responses:
        # chunk choices carry a `delta`, full-response choices carry a `message`.
        if hasattr(chat_completion, 'choices'):
            choice = chat_completion.choices[0]
            delta = choice.delta.model_dump() if hasattr(choice, 'delta') else choice.message.model_dump()
        else:
            delta = chat_completion.delta.model_dump() if hasattr(chat_completion, 'delta') else {}

        # Ensure the role field exists and is not None
        if 'role' not in delta or delta['role'] is None:
            delta['role'] = 'assistant'

        reasoning_content = delta.get('reasoning_content')

        # DeepSeek-style reasoner models stream reasoning_content separately.
        if pipeline_config['trigger'].get('misc', {}).get('remove_think'):
            # Reasoning is stripped: drop reasoning_content and keep content as-is.
            pass
        elif reasoning_content is not None:
            # Reasoning phase: open the <think> block on the first reasoning chunk.
            delta['content'] = ('' if self.is_think else '<think>\n') + reasoning_content
            self.is_think = True
        elif self.is_think and not self.is_content:
            # First regular-content chunk after reasoning: close the <think> block.
            delta['content'] = '\n</think>\n\n' + (delta.get('content') or '')
            self.is_content = True

        message = llm_entities.MessageChunk(**delta)

        return message
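
    # Across a stream, the chunk contents above concatenate to the same shape as
    # the non-streaming path: '<think>\n' + r1 + r2 + '\n</think>\n\n' + c1 + c2.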

    async def _closure_stream(
        self,
        query: core_entities.Query,
        req_messages: list[dict],
        use_model: requester.RuntimeLLMModel,
        use_funcs: list[tools_entities.LLMFunction] = None,
        stream: bool = False,
        extra_args: dict[str, typing.Any] = {},
    ) -> typing.AsyncGenerator[llm_entities.MessageChunk, None]:
        self.client.api_key = use_model.token_mgr.get_token()

        args = {}
        args['model'] = use_model.model_entity.name

        if use_funcs:
            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)

            if tools:
                args['tools'] = tools

        # Set the messages for this request
        messages = req_messages.copy()

        # Check vision: convert image_base64 parts to the OpenAI image_url format
        for msg in messages:
            if 'content' in msg and isinstance(msg['content'], list):
                for me in msg['content']:
                    if me['type'] == 'image_base64':
                        me['image_url'] = {'url': me['image_base64']}
                        me['type'] = 'image_url'
                        del me['image_base64']

        args['messages'] = messages

        if stream:
            current_content = ''
            args['stream'] = True
            chunk_idx = 0
            self.is_content = False
            self.is_think = False
            tool_calls_map: dict[str, llm_entities.ToolCall] = {}
            pipeline_config = query.pipeline_config
            async for chunk in self._req_stream(args, extra_body=extra_args):
                # Process the streamed chunk
                delta_message = await self._make_msg_chunk(pipeline_config, chunk)
                if delta_message.content:
                    current_content += delta_message.content
                    delta_message.content = current_content
                if delta_message.tool_calls:
                    for tool_call in delta_message.tool_calls:
                        if tool_call.id not in tool_calls_map:
                            tool_calls_map[tool_call.id] = llm_entities.ToolCall(
                                id=tool_call.id,
                                type=tool_call.type,
                                function=llm_entities.FunctionCall(
                                    name=tool_call.function.name if tool_call.function else '',
                                    arguments='',
                                ),
                            )
                        if tool_call.function and tool_call.function.arguments:
                            # In streaming, tool-call arguments may arrive across
                            # several chunks; append rather than overwrite.
                            tool_calls_map[tool_call.id].function.arguments += tool_call.function.arguments

                chunk_idx += 1
                chunk_choices = getattr(chunk, 'choices', None)
                if chunk_choices and getattr(chunk_choices[0], 'finish_reason', None):
                    delta_message.is_final = True
                    delta_message.content = current_content

                # Throttle yields: every 64th chunk, plus the final one.
                if chunk_idx % 64 == 0 or delta_message.is_final:
                    yield delta_message
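
    # Tool-call argument reassembly example (sketch):
    #   chunk 1 arrives with function.arguments == '{"loca'
    #   chunk 2 arrives with function.arguments == 'tion": "Paris"}'
    #   -> tool_calls_map[id].function.arguments == '{"location": "Paris"}'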

    async def _closure(
        self,
        query: core_entities.Query,
        req_messages: list[dict],
        use_model: requester.RuntimeLLMModel,
        use_funcs: list[tools_entities.LLMFunction] = None,
        stream: bool = False,
        extra_args: dict[str, typing.Any] = {},
    ) -> llm_entities.Message:
        self.client.api_key = use_model.token_mgr.get_token()

        args = {}
        args['model'] = use_model.model_entity.name

        if use_funcs:
            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)

            if tools:
                args['tools'] = tools

        # Set the messages for this request
        messages = req_messages.copy()

        # Check vision: convert image_base64 parts to the OpenAI image_url format
        for msg in messages:
            if 'content' in msg and isinstance(msg['content'], list):
                for me in msg['content']:
                    if me['type'] == 'image_base64':
                        me['image_url'] = {'url': me['image_base64']}
                        me['type'] = 'image_url'
                        del me['image_base64']

        args['messages'] = messages

        # Send the request
        resp = await self._req(args, extra_body=extra_args)

        # Process the response
        pipeline_config = query.pipeline_config
        message = await self._make_msg(pipeline_config, resp)

        return message

    async def invoke_llm(
        self,
        query: core_entities.Query,
        model: requester.RuntimeLLMModel,
        messages: typing.List[llm_entities.Message],
        funcs: typing.List[tools_entities.LLMFunction] = None,
        extra_args: dict[str, typing.Any] = {},
    ) -> llm_entities.Message:
        req_messages = []  # req_messages is only used within this class; external state is synced via query.messages
        for m in messages:
            msg_dict = m.dict(exclude_none=True)
            content = msg_dict.get('content')
            if isinstance(content, list):
                # Check whether every part of the content list is text
                if all(isinstance(part, dict) and part.get('type') == 'text' for part in content):
                    # Merge all text parts into a single string
                    msg_dict['content'] = '\n'.join(part['text'] for part in content)
            req_messages.append(msg_dict)

        try:
            msg = await self._closure(
                query=query,
                req_messages=req_messages,
                use_model=model,
                use_funcs=funcs,
                extra_args=extra_args,
            )
            return msg
        except asyncio.TimeoutError:
            raise errors.RequesterError('Request timed out')
        except openai.BadRequestError as e:
            if 'context_length_exceeded' in e.message:
                raise errors.RequesterError(f'Context too long, please reset the session: {e.message}')
            else:
                raise errors.RequesterError(f'Invalid request parameters: {e.message}')
        except openai.AuthenticationError as e:
            raise errors.RequesterError(f'Invalid api-key: {e.message}')
        except openai.NotFoundError as e:
            raise errors.RequesterError(f'Invalid request path: {e.message}')
        except openai.RateLimitError as e:
            raise errors.RequesterError(f'Requests too frequent or insufficient balance: {e.message}')
        except openai.APIError as e:
            raise errors.RequesterError(f'Request error: {e.message}')
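
    # Caller-side sketch: provider failures surface uniformly as RequesterError.
    #
    #   try:
    #       msg = await requester.invoke_llm(query, model, messages)
    #   except errors.RequesterError as e:
    #       ...  # report e to the user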

    async def invoke_embedding(
        self,
        model: requester.RuntimeEmbeddingModel,
        input_text: list[str],
        extra_args: dict[str, typing.Any] = {},
    ) -> list[list[float]]:
        """Call the Embedding API"""
        self.client.api_key = model.token_mgr.get_token()

        args = {
            'model': model.model_entity.name,
            'input': input_text,
        }

        if model.model_entity.extra_args:
            args.update(model.model_entity.extra_args)

        args.update(extra_args)

        try:
            resp = await self.client.embeddings.create(**args)

            return [d.embedding for d in resp.data]
        except asyncio.TimeoutError:
            raise errors.RequesterError('Request timed out')
        except openai.BadRequestError as e:
            raise errors.RequesterError(f'Invalid request parameters: {e.message}')
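
    # Usage sketch (hedged: `model` is a configured RuntimeEmbeddingModel):
    #
    #   vectors = await requester.invoke_embedding(model, ['hello', 'world'])
    #   assert len(vectors) == 2  # one embedding per input string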

    async def invoke_llm_stream(
        self,
        query: core_entities.Query,
        model: requester.RuntimeLLMModel,
        messages: typing.List[llm_entities.Message],
        funcs: typing.List[tools_entities.LLMFunction] = None,
        stream: bool = False,
        extra_args: dict[str, typing.Any] = {},
    ) -> typing.AsyncGenerator[llm_entities.MessageChunk, None]:
        req_messages = []  # req_messages is only used within this class; external state is synced via query.messages
        for m in messages:
            msg_dict = m.dict(exclude_none=True)
            content = msg_dict.get('content')
            if isinstance(content, list):
                # Check whether every part of the content list is text
                if all(isinstance(part, dict) and part.get('type') == 'text' for part in content):
                    # Merge all text parts into a single string
                    msg_dict['content'] = '\n'.join(part['text'] for part in content)
            req_messages.append(msg_dict)

        try:
            async for item in self._closure_stream(
                query=query,
                req_messages=req_messages,
                use_model=model,
                use_funcs=funcs,
                stream=stream,
                extra_args=extra_args,
            ):
                yield item
        except asyncio.TimeoutError:
            raise errors.RequesterError('Request timed out')
        except openai.BadRequestError as e:
            if 'context_length_exceeded' in e.message:
                raise errors.RequesterError(f'Context too long, please reset the session: {e.message}')
            else:
                raise errors.RequesterError(f'Invalid request parameters: {e.message}')
        except openai.AuthenticationError as e:
            raise errors.RequesterError(f'Invalid api-key: {e.message}')
        except openai.NotFoundError as e:
            raise errors.RequesterError(f'Invalid request path: {e.message}')
        except openai.RateLimitError as e:
            raise errors.RequesterError(f'Requests too frequent or insufficient balance: {e.message}')
        except openai.APIError as e:
            raise errors.RequesterError(f'Request error: {e.message}')