Fix: 豆包模型response_format兼容性问题

- 添加 ENABLE_RESPONSE_FORMAT 环境变量控制是否使用response_format参数
- 豆包模型不支持json_object响应格式,默认设为false
- 更新get_ai_request_params函数处理response_format参数移除逻辑
- 修改AI调用代码根据配置动态决定是否使用response_format

解决 issue #272 中豆包模型调用失败的问题

Co-authored-by: rainsfly <dingyufei615@users.noreply.github.com>
This commit is contained in:
claude[bot]
2025-10-27 03:00:02 +00:00
parent 424bd1c5b0
commit f8d8b0ef9a
3 changed files with 24 additions and 8 deletions

View File

@@ -56,6 +56,9 @@ AI_DEBUG_MODE=false
# 是否启用enable_thinking参数 (true/false)。某些AI模型需要此参数而有些则不支持。
ENABLE_THINKING=false
# 是否启用response_format参数 (true/false)。豆包模型不支持json_object响应格式,需要设为false。其他模型(如Gemini)支持,可设为true。
ENABLE_RESPONSE_FORMAT=true
# 服务端口自定义 不配置默认8000
SERVER_PORT=8000

View File

@@ -36,6 +36,7 @@ from src.config import (
WEBHOOK_CONTENT_TYPE,
WEBHOOK_QUERY_PARAMETERS,
WEBHOOK_BODY,
ENABLE_RESPONSE_FORMAT,
client,
)
from src.utils import convert_goofish_link, retry_on_failure
@@ -582,14 +583,20 @@ async def get_ai_analysis(product_data, image_paths=None, prompt_text=""):
from src.config import get_ai_request_params
# 构建请求参数根据ENABLE_RESPONSE_FORMAT决定是否使用response_format
request_params = {
"model": MODEL_NAME,
"messages": messages,
"temperature": current_temperature,
"max_tokens": 4000
}
# 只有启用response_format时才添加该参数
if ENABLE_RESPONSE_FORMAT:
request_params["response_format"] = {"type": "json_object"}
response = await client.chat.completions.create(
**get_ai_request_params(
model=MODEL_NAME,
messages=messages,
response_format={"type": "json_object"},
temperature=current_temperature,
max_tokens=4000
)
**get_ai_request_params(**request_params)
)
ai_response_content = response.choices[0].message.content

View File

@@ -45,6 +45,7 @@ RUNNING_IN_DOCKER = os.getenv("RUNNING_IN_DOCKER", "false").lower() == "true"
AI_DEBUG_MODE = os.getenv("AI_DEBUG_MODE", "false").lower() == "true"
SKIP_AI_ANALYSIS = os.getenv("SKIP_AI_ANALYSIS", "false").lower() == "true"
ENABLE_THINKING = os.getenv("ENABLE_THINKING", "false").lower() == "true"
ENABLE_RESPONSE_FORMAT = os.getenv("ENABLE_RESPONSE_FORMAT", "true").lower() == "true"
# --- Headers ---
IMAGE_DOWNLOAD_HEADERS = {
@@ -87,8 +88,13 @@ if not all([BASE_URL, MODEL_NAME]) and 'prompt_generator.py' in sys.argv[0]:
def get_ai_request_params(**kwargs):
    """Build the keyword arguments for an AI chat-completions request.

    The ENABLE_THINKING and ENABLE_RESPONSE_FORMAT environment flags decide
    whether the corresponding parameters appear in the final request.

    Args:
        **kwargs: Keyword arguments intended for the chat-completions call
            (model, messages, response_format, ...).

    Returns:
        dict: The adjusted keyword arguments, safe to splat into the client
        call. The dict is the per-call ``**kwargs`` copy, so callers'
        dictionaries are never mutated.
    """
    # Some models require an explicit enable_thinking entry in extra_body
    # (set to False here), while others reject the parameter entirely —
    # hence the opt-in toggle.
    if ENABLE_THINKING:
        kwargs["extra_body"] = {"enable_thinking": False}
    # Doubao models do not support the json_object response format; drop
    # the parameter when the feature is disabled (pop is a no-op if the
    # caller never supplied it).
    if not ENABLE_RESPONSE_FORMAT:
        kwargs.pop("response_format", None)
    return kwargs