mirror of
https://github.com/langbot-app/LangBot.git
synced 2025-11-25 11:29:39 +08:00
Merge pull request #971 from RockChinQ/feat/zhipuai
feat: add support for zhipuai (chatglm)
This commit is contained in:
25
pkg/core/migrations/m019_zhipuai_config.py
Normal file
25
pkg/core/migrations/m019_zhipuai_config.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .. import migration
|
||||
|
||||
|
||||
@migration.migration_class("zhipuai-config", 19)
class ZhipuaiConfigMigration(migration.Migration):
    """Add the ZhipuAI (ChatGLM) requester section to the provider config.

    Fills in the default `zhipuai-chat-completions` requester settings and a
    placeholder key list, then persists the updated configuration.
    """

    async def need_migrate(self) -> bool:
        """Return True when this migration still needs to run.

        The requester section's absence is the trigger; the key list is not
        checked here, so `run()` must not blindly overwrite it.
        """
        return 'zhipuai-chat-completions' not in self.ap.provider_cfg.data['requester']

    async def run(self):
        """Apply the migration and dump the provider config to disk."""
        self.ap.provider_cfg.data['requester']['zhipuai-chat-completions'] = {
            "base-url": "https://open.bigmodel.cn/api/paas/v4",
            "args": {},
            "timeout": 120
        }
        # setdefault: never clobber a key list the user already filled in —
        # need_migrate() only looked at the requester section, so real keys
        # may already exist here.
        self.ap.provider_cfg.data['keys'].setdefault('zhipuai', [
            "xxxxxxx"
        ])

        await self.ap.provider_cfg.dump_config()
|
||||
@@ -7,7 +7,7 @@ from .. import migration
|
||||
from ..migrations import m001_sensitive_word_migration, m002_openai_config_migration, m003_anthropic_requester_cfg_completion, m004_moonshot_cfg_completion
|
||||
from ..migrations import m005_deepseek_cfg_completion, m006_vision_config, m007_qcg_center_url, m008_ad_fixwin_config_migrate, m009_msg_truncator_cfg
|
||||
from ..migrations import m010_ollama_requester_config, m011_command_prefix_config, m012_runner_config, m013_http_api_config, m014_force_delay_config
|
||||
from ..migrations import m015_gitee_ai_config, m016_dify_service_api, m017_dify_api_timeout_params, m018_xai_config
|
||||
from ..migrations import m015_gitee_ai_config, m016_dify_service_api, m017_dify_api_timeout_params, m018_xai_config, m019_zhipuai_config
|
||||
|
||||
|
||||
@stage.stage_class("MigrationStage")
|
||||
|
||||
@@ -6,7 +6,7 @@ from . import entities, requester
|
||||
from ...core import app
|
||||
|
||||
from . import token
|
||||
from .requesters import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, xaichatcmpl
|
||||
from .requesters import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, xaichatcmpl, zhipuaichatcmpl
|
||||
|
||||
FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"
|
||||
|
||||
|
||||
@@ -1,23 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import typing
|
||||
import json
|
||||
import base64
|
||||
from typing import AsyncGenerator
|
||||
|
||||
import openai
|
||||
import openai.types.chat.chat_completion as chat_completion
|
||||
import httpx
|
||||
import aiohttp
|
||||
import async_lru
|
||||
|
||||
from . import chatcmpl
|
||||
from .. import entities, errors, requester
|
||||
from ....core import entities as core_entities, app
|
||||
from ... import entities as llm_entities
|
||||
from ...tools import entities as tools_entities
|
||||
from ....utils import image
|
||||
from .. import requester
|
||||
from ....core import app
|
||||
|
||||
|
||||
@requester.requester_class("xai-chat-completions")
|
||||
@@ -32,114 +19,3 @@ class XaiChatCompletions(chatcmpl.OpenAIChatCompletions):
|
||||
self.ap = ap
|
||||
|
||||
self.requester_cfg = self.ap.provider_cfg.data['requester']['xai-chat-completions']
|
||||
|
||||
# async def initialize(self):
|
||||
|
||||
# self.client = openai.AsyncClient(
|
||||
# api_key="",
|
||||
# base_url=self.requester_cfg['base-url'],
|
||||
# timeout=self.requester_cfg['timeout'],
|
||||
# http_client=httpx.AsyncClient(
|
||||
# proxies=self.ap.proxy_mgr.get_forward_proxies()
|
||||
# )
|
||||
# )
|
||||
|
||||
# async def _req(
|
||||
# self,
|
||||
# args: dict,
|
||||
# ) -> chat_completion.ChatCompletion:
|
||||
# return await self.client.chat.completions.create(**args)
|
||||
|
||||
# async def _make_msg(
|
||||
# self,
|
||||
# chat_completion: chat_completion.ChatCompletion,
|
||||
# ) -> llm_entities.Message:
|
||||
# chatcmpl_message = chat_completion.choices[0].message.dict()
|
||||
|
||||
# # 确保 role 字段存在且不为 None
|
||||
# if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
|
||||
# chatcmpl_message['role'] = 'assistant'
|
||||
|
||||
# message = llm_entities.Message(**chatcmpl_message)
|
||||
|
||||
# return message
|
||||
|
||||
# async def _closure(
|
||||
# self,
|
||||
# req_messages: list[dict],
|
||||
# use_model: entities.LLMModelInfo,
|
||||
# use_funcs: list[tools_entities.LLMFunction] = None,
|
||||
# ) -> llm_entities.Message:
|
||||
# self.client.api_key = use_model.token_mgr.get_token()
|
||||
|
||||
# args = self.requester_cfg['args'].copy()
|
||||
# args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
|
||||
|
||||
# if use_funcs:
|
||||
# tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
|
||||
|
||||
# if tools:
|
||||
# args["tools"] = tools
|
||||
|
||||
# # 设置此次请求中的messages
|
||||
# messages = req_messages.copy()
|
||||
|
||||
# # 检查vision
|
||||
# for msg in messages:
|
||||
# if 'content' in msg and isinstance(msg["content"], list):
|
||||
# for me in msg["content"]:
|
||||
# if me["type"] == "image_url":
|
||||
# me["image_url"]['url'] = await self.get_base64_str(me["image_url"]['url'])
|
||||
|
||||
# args["messages"] = messages
|
||||
|
||||
# # 发送请求
|
||||
# resp = await self._req(args)
|
||||
|
||||
# # 处理请求结果
|
||||
# message = await self._make_msg(resp)
|
||||
|
||||
# return message
|
||||
|
||||
# async def call(
|
||||
# self,
|
||||
# model: entities.LLMModelInfo,
|
||||
# messages: typing.List[llm_entities.Message],
|
||||
# funcs: typing.List[tools_entities.LLMFunction] = None,
|
||||
# ) -> llm_entities.Message:
|
||||
# req_messages = [] # req_messages 仅用于类内,外部同步由 query.messages 进行
|
||||
# for m in messages:
|
||||
# msg_dict = m.dict(exclude_none=True)
|
||||
# content = msg_dict.get("content")
|
||||
# if isinstance(content, list):
|
||||
# # 检查 content 列表中是否每个部分都是文本
|
||||
# if all(isinstance(part, dict) and part.get("type") == "text" for part in content):
|
||||
# # 将所有文本部分合并为一个字符串
|
||||
# msg_dict["content"] = "\n".join(part["text"] for part in content)
|
||||
# req_messages.append(msg_dict)
|
||||
|
||||
# try:
|
||||
# return await self._closure(req_messages, model, funcs)
|
||||
# except asyncio.TimeoutError:
|
||||
# raise errors.RequesterError('请求超时')
|
||||
# except openai.BadRequestError as e:
|
||||
# if 'context_length_exceeded' in e.message:
|
||||
# raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
|
||||
# else:
|
||||
# raise errors.RequesterError(f'请求参数错误: {e.message}')
|
||||
# except openai.AuthenticationError as e:
|
||||
# raise errors.RequesterError(f'无效的 api-key: {e.message}')
|
||||
# except openai.NotFoundError as e:
|
||||
# raise errors.RequesterError(f'请求路径错误: {e.message}')
|
||||
# except openai.RateLimitError as e:
|
||||
# raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
|
||||
# except openai.APIError as e:
|
||||
# raise errors.RequesterError(f'请求错误: {e.message}')
|
||||
|
||||
# @async_lru.alru_cache(maxsize=128)
|
||||
# async def get_base64_str(
|
||||
# self,
|
||||
# original_url: str,
|
||||
# ) -> str:
|
||||
# base64_image, image_format = await image.qq_image_url_to_base64(original_url)
|
||||
# return f"data:image/{image_format};base64,{base64_image}"
|
||||
|
||||
21
pkg/provider/modelmgr/requesters/zhipuaichatcmpl.py
Normal file
21
pkg/provider/modelmgr/requesters/zhipuaichatcmpl.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import openai
|
||||
|
||||
from ....core import app
|
||||
from . import chatcmpl
|
||||
from .. import requester
|
||||
|
||||
|
||||
@requester.requester_class("zhipuai-chat-completions")
class ZhipuAIChatCompletions(chatcmpl.OpenAIChatCompletions):
    """Requester for the ZhipuAI (ChatGLM) ChatCompletion API.

    Inherits the OpenAI-compatible request flow from OpenAIChatCompletions;
    only the configuration section read at construction time differs.
    """

    # Async client for the OpenAI-compatible endpoint (set up by the base class).
    client: openai.AsyncClient

    # This requester's section of the provider configuration.
    requester_cfg: dict

    def __init__(self, ap: app.Application):
        self.ap = ap
        # Pull our own config section; the base class consumes base-url,
        # args and timeout from it.
        self.requester_cfg = ap.provider_cfg.data['requester']['zhipuai-chat-completions']
|
||||
@@ -147,6 +147,65 @@
|
||||
"name": "grok-beta",
|
||||
"requester": "xai-chat-completions",
|
||||
"token_mgr": "xai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4-plus",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4-0520",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4-air",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4-airx",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4-long",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4-flashx",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4-flash",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai"
|
||||
},
|
||||
{
|
||||
"name": "glm-4v-plus",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai",
|
||||
"vision_supported": true
|
||||
},
|
||||
{
|
||||
"name": "glm-4v",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai",
|
||||
"vision_supported": true
|
||||
},
|
||||
{
|
||||
"name": "glm-4v-flash",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai",
|
||||
"vision_supported": true
|
||||
},
|
||||
{
|
||||
"name": "glm-zero-preview",
|
||||
"requester": "zhipuai-chat-completions",
|
||||
"token_mgr": "zhipuai",
|
||||
"vision_supported": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -19,6 +19,9 @@
|
||||
],
|
||||
"xai": [
|
||||
"xai-1234567890"
|
||||
],
|
||||
"zhipuai": [
|
||||
"xxxxxxx"
|
||||
]
|
||||
},
|
||||
"requester": {
|
||||
@@ -58,6 +61,11 @@
|
||||
"base-url": "https://api.x.ai/v1",
|
||||
"args": {},
|
||||
"timeout": 120
|
||||
},
|
||||
"zhipuai-chat-completions": {
|
||||
"base-url": "https://open.bigmodel.cn/api/paas/v4",
|
||||
"args": {},
|
||||
"timeout": 120
|
||||
}
|
||||
},
|
||||
"model": "gpt-4o",
|
||||
|
||||
@@ -66,6 +66,14 @@
|
||||
"type": "string"
|
||||
},
|
||||
"default": []
|
||||
},
|
||||
"zhipuai": {
|
||||
"type": "array",
|
||||
"title": "智谱AI API 密钥",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"default": []
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -210,6 +218,23 @@
|
||||
"default": 120
|
||||
}
|
||||
}
|
||||
},
|
||||
"zhipuai-chat-completions": {
|
||||
"type": "object",
|
||||
"title": "智谱AI API 请求配置",
|
||||
"description": "仅可编辑 URL 和 超时时间,额外请求参数不支持可视化编辑,请到编辑器编辑",
|
||||
"properties": {
|
||||
"base-url": {
|
||||
"type": "string",
|
||||
"title": "API URL"
|
||||
},
|
||||
"args": {
|
||||
"type": "object"
|
||||
},
|
||||
"timeout": {
|
||||
"type": "number"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
Reference in New Issue
Block a user