Merge branch 'feature_vlmparam' into 'master' (merge request !25)

feat: 新增多模态模型配置及 VLM 模型认证
This commit is contained in:
wizardchen
2025-08-08 09:09:03 +00:00
3 changed files with 16 additions and 0 deletions

View File

@@ -170,3 +170,15 @@ INIT_EMBEDDING_MODEL_ID=builtin:nomic-embed-text:768
# Rerank模型的API密钥，如果需要身份验证可以设置
# 支持第三方模型服务的API密钥
# INIT_RERANK_MODEL_API_KEY=your_rerank_model_api_key
# VLM_MODEL_NAME 使用的多模态模型名称
# 用于解析图片数据
# VLM_MODEL_NAME=your_vlm_model_name
# VLM_MODEL_BASE_URL 使用的多模态模型访问地址
# 支持第三方模型服务的URL
# VLM_MODEL_BASE_URL=your_vlm_model_base_url
# VLM_MODEL_API_KEY 使用的多模态模型API密钥
# 支持第三方模型服务的API密钥
# VLM_MODEL_API_KEY=your_vlm_model_api_key

View File

@@ -100,6 +100,7 @@ services:
- COS_ENABLE_OLD_DOMAIN=${COS_ENABLE_OLD_DOMAIN}
- VLM_MODEL_BASE_URL=${VLM_MODEL_BASE_URL}
- VLM_MODEL_NAME=${VLM_MODEL_NAME}
- VLM_MODEL_API_KEY=${VLM_MODEL_API_KEY}
networks:
- WeKnora-network
restart: unless-stopped

View File

@@ -177,6 +177,7 @@ class Caption:
return
self.completion_url = os.getenv("VLM_MODEL_BASE_URL") + "/v1/chat/completions"
self.model = os.getenv("VLM_MODEL_NAME")
self.api_key = os.getenv("VLM_MODEL_API_KEY")
logger.info(
f"Service configured with model: {self.model}, endpoint: {self.completion_url}"
)
@@ -218,6 +219,8 @@ class Caption:
"Cache-Control": "no-cache",
"Connection": "keep-alive",
}
if self.api_key:
headers["Authorization"] = f"Bearer {self.api_key}"
try:
logger.info(f"Sending request to Caption API with model: {self.model}")