From 9eefbcb6f2790397fb7df9c9925d993c0cea15c6 Mon Sep 17 00:00:00 2001
From: wanjiaju <邮箱>
Date: Sat, 8 Feb 2025 10:27:19 +0800
Subject: [PATCH 1/4] =?UTF-8?q?=E9=98=BF=E9=87=8C=E4=BA=91=E7=99=BE?=
 =?UTF-8?q?=E7=82=BC=E9=80=82=E9=85=8D?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

新增阿里云请求器配置、阿里云模型配置、阿里云令牌配置
新增硅基模型配置
---
 pkg/provider/modelmgr/modelmgr.py             |  2 +-
 .../modelmgr/requesters/aliyunchatcmpl.py     | 21 ++++++++++++++++
 templates/metadata/llm-models.json            | 24 +++++++++++++++++++
 templates/provider.json                       |  2 +-
 4 files changed, 47 insertions(+), 2 deletions(-)
 create mode 100644 pkg/provider/modelmgr/requesters/aliyunchatcmpl.py

diff --git a/pkg/provider/modelmgr/modelmgr.py b/pkg/provider/modelmgr/modelmgr.py
index 33a65ff3..6748291c 100644
--- a/pkg/provider/modelmgr/modelmgr.py
+++ b/pkg/provider/modelmgr/modelmgr.py
@@ -6,7 +6,7 @@ from . import entities, requester
 from ...core import app
 from . import token
 
-from .requesters import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, xaichatcmpl, zhipuaichatcmpl, lmstudiochatcmpl, siliconflowchatcmpl
+from .requesters import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, xaichatcmpl, zhipuaichatcmpl, lmstudiochatcmpl, siliconflowchatcmpl, aliyunchatcmpl
 
 FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"
 
diff --git a/pkg/provider/modelmgr/requesters/aliyunchatcmpl.py b/pkg/provider/modelmgr/requesters/aliyunchatcmpl.py
new file mode 100644
index 00000000..4f4900d7
--- /dev/null
+++ b/pkg/provider/modelmgr/requesters/aliyunchatcmpl.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+import openai
+
+from . import chatcmpl
+from .. import requester
+from ....core import app
+
+
+@requester.requester_class("aliyun-chat-completions")
+class AliyunChatCompletions(chatcmpl.OpenAIChatCompletions):
+    """Aliyun ChatCompletion API 请求器"""
+
+    client: openai.AsyncClient
+
+    requester_cfg: dict
+
+    def __init__(self, ap: app.Application):
+        self.ap = ap
+
+        self.requester_cfg = self.ap.provider_cfg.data['requester']['aliyun-chat-completions']
diff --git a/templates/metadata/llm-models.json b/templates/metadata/llm-models.json
index b5c29cf3..110c18da 100644
--- a/templates/metadata/llm-models.json
+++ b/templates/metadata/llm-models.json
@@ -211,6 +211,30 @@
             "requester": "zhipuai-chat-completions",
             "token_mgr": "zhipuai",
             "vision_supported": true
+        },
+        {
+            "name": "siliconflow-r1",
+            "model_name": "deepseek-ai/DeepSeek-R1",
+            "requester": "siliconflow-chat-completions",
+            "token_mgr": "siliconflow"
+        },
+        {
+            "name": "siliconflow-v3",
+            "model_name": "deepseek-ai/DeepSeek-V3",
+            "requester": "siliconflow-chat-completions",
+            "token_mgr": "siliconflow"
+        },
+        {
+            "name": "aliyun-r1",
+            "model_name": "deepseek-r1",
+            "requester": "aliyun-chat-completions",
+            "token_mgr": "aliyun"
+        },
+        {
+            "name": "aliyun-v3",
+            "model_name": "deepseek-v3",
+            "requester": "aliyun-chat-completions",
+            "token_mgr": "aliyun"
+        }
         }
     ]
 }
\ No newline at end of file
diff --git a/templates/provider.json b/templates/provider.json
index 5ab5cd64..0d6e6ea0 100644
--- a/templates/provider.json
+++ b/templates/provider.json
@@ -84,7 +84,7 @@
     "model": "gpt-4o",
     "prompt-mode": "normal",
     "prompt": {
-        "default": ""
+        "default": ""
     },
     "runner": "local-agent",
     "dify-service-api": {

From 8e9f43885a46c44dcff942acf4b2e7b923540990 Mon Sep 17 00:00:00 2001
From: wanjiaju <邮箱>
Date: Sat, 8 Feb 2025 10:30:19 +0800
Subject: [PATCH 2/4] =?UTF-8?q?=E9=98=BF=E9=87=8C=E4=BA=91=E7=99=BE?=
 =?UTF-8?q?=E7=82=BC=E9=80=82=E9=85=8D?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

新增阿里云请求器配置、阿里云模型配置、阿里云令牌配置
新增硅基模型配置
---
 templates/provider.json | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/templates/provider.json b/templates/provider.json
index 0d6e6ea0..c22ba42c 100644
--- a/templates/provider.json
+++ b/templates/provider.json
@@ -25,6 +25,9 @@
         ],
         "siliconflow": [
             "xxxxxxx"
+        ],
+        "aliyun": [
+            "sk-aliyun_token"
         ]
     },
     "requester": {
@@ -79,6 +82,11 @@
             "base-url": "https://api.siliconflow.cn/v1",
             "args": {},
             "timeout": 120
+        },
+        "aliyun-chat-completions": {
+            "args": {},
+            "base-url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
+            "timeout": 120
         }
     },
     "model": "gpt-4o",

From e17da4e2ee2074e156080b5004cdec965724ba24 Mon Sep 17 00:00:00 2001
From: Junyan Qin
Date: Wed, 12 Feb 2025 11:11:07 +0800
Subject: [PATCH 3/4] chore: remove models of MaaS from `llm-models.json`

---
 templates/metadata/llm-models.json | 24 ------------------------
 1 file changed, 24 deletions(-)

diff --git a/templates/metadata/llm-models.json b/templates/metadata/llm-models.json
index 110c18da..b5c29cf3 100644
--- a/templates/metadata/llm-models.json
+++ b/templates/metadata/llm-models.json
@@ -211,30 +211,6 @@
             "requester": "zhipuai-chat-completions",
             "token_mgr": "zhipuai",
             "vision_supported": true
-        },
-        {
-            "name": "siliconflow-r1",
-            "model_name": "deepseek-ai/DeepSeek-R1",
-            "requester": "siliconflow-chat-completions",
-            "token_mgr": "siliconflow"
-        },
-        {
-            "name": "siliconflow-v3",
-            "model_name": "deepseek-ai/DeepSeek-V3",
-            "requester": "siliconflow-chat-completions",
-            "token_mgr": "siliconflow"
-        },
-        {
-            "name": "aliyun-r1",
-            "model_name": "deepseek-r1",
-            "requester": "aliyun-chat-completions",
-            "token_mgr": "aliyun"
-        },
-        {
-            "name": "aliyun-v3",
-            "model_name": "deepseek-v3",
-            "requester": "aliyun-chat-completions",
-            "token_mgr": "aliyun"
-        }
         }
     ]
 }
\ No newline at end of file

From 191f8866ae11a3896271229e47782a7298155eae Mon Sep 17 00:00:00 2001
From: Junyan Qin
Date: Wed, 12 Feb 2025 11:25:28 +0800
Subject: [PATCH 4/4] chore(bailian): related configuration

---
 .../m028_aliyun_requester_config.py           | 27 ++++++++++++++++++
 pkg/core/stages/migrate.py                    |  2 +-
 pkg/provider/modelmgr/modelmgr.py             |  2 +-
 .../{aliyunchatcmpl.py => bailianchatcmpl.py} |  8 +++---
 templates/provider.json                       |  6 ++--
 templates/schema/provider.json                | 28 +++++++++++++++++++
 6 files changed, 64 insertions(+), 9 deletions(-)
 create mode 100644 pkg/core/migrations/m028_aliyun_requester_config.py
 rename pkg/provider/modelmgr/requesters/{aliyunchatcmpl.py => bailianchatcmpl.py} (58%)

diff --git a/pkg/core/migrations/m028_aliyun_requester_config.py b/pkg/core/migrations/m028_aliyun_requester_config.py
new file mode 100644
index 00000000..f28bc04f
--- /dev/null
+++ b/pkg/core/migrations/m028_aliyun_requester_config.py
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+from .. import migration
+
+
+@migration.migration_class("bailian-requester-config", 28)
+class BailianRequesterConfigMigration(migration.Migration):
+    """迁移"""
+
+    async def need_migrate(self) -> bool:
+        """判断当前环境是否需要运行此迁移"""
+
+        return 'bailian-chat-completions' not in self.ap.provider_cfg.data['requester']
+
+    async def run(self):
+        """执行迁移"""
+        self.ap.provider_cfg.data['keys']['bailian'] = [
+            "sk-xxxxxxx"
+        ]
+
+        self.ap.provider_cfg.data['requester']['bailian-chat-completions'] = {
+            "base-url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
+            "args": {},
+            "timeout": 120
+        }
+
+        await self.ap.provider_cfg.dump_config()
diff --git a/pkg/core/stages/migrate.py b/pkg/core/stages/migrate.py
index 16faa53a..e9b0b4d7 100644
--- a/pkg/core/stages/migrate.py
+++ b/pkg/core/stages/migrate.py
@@ -9,7 +9,7 @@ from ..migrations import m005_deepseek_cfg_completion, m006_vision_config, m007_
 from ..migrations import m010_ollama_requester_config, m011_command_prefix_config, m012_runner_config, m013_http_api_config, m014_force_delay_config
 from ..migrations import m015_gitee_ai_config, m016_dify_service_api, m017_dify_api_timeout_params, m018_xai_config, m019_zhipuai_config
 from ..migrations import m020_wecom_config, m021_lark_config, m022_lmstudio_config, m023_siliconflow_config, m024_discord_config, m025_gewechat_config
-from ..migrations import m026_qqofficial_config
+from ..migrations import m026_qqofficial_config, m028_aliyun_requester_config
 
 @stage.stage_class("MigrationStage")
 class MigrationStage(stage.BootingStage):
diff --git a/pkg/provider/modelmgr/modelmgr.py b/pkg/provider/modelmgr/modelmgr.py
index 6748291c..489a322f 100644
--- a/pkg/provider/modelmgr/modelmgr.py
+++ b/pkg/provider/modelmgr/modelmgr.py
@@ -6,7 +6,7 @@ from . import entities, requester
 from ...core import app
 from . import token
 
-from .requesters import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, xaichatcmpl, zhipuaichatcmpl, lmstudiochatcmpl, siliconflowchatcmpl, aliyunchatcmpl
+from .requesters import bailianchatcmpl, chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, xaichatcmpl, zhipuaichatcmpl, lmstudiochatcmpl, siliconflowchatcmpl
 
 FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"
 
diff --git a/pkg/provider/modelmgr/requesters/aliyunchatcmpl.py b/pkg/provider/modelmgr/requesters/bailianchatcmpl.py
similarity index 58%
rename from pkg/provider/modelmgr/requesters/aliyunchatcmpl.py
rename to pkg/provider/modelmgr/requesters/bailianchatcmpl.py
index 4f4900d7..cce003bd 100644
--- a/pkg/provider/modelmgr/requesters/aliyunchatcmpl.py
+++ b/pkg/provider/modelmgr/requesters/bailianchatcmpl.py
@@ -7,9 +7,9 @@ from .. import requester
 from ....core import app
 
 
-@requester.requester_class("aliyun-chat-completions")
-class AliyunChatCompletions(chatcmpl.OpenAIChatCompletions):
-    """Aliyun ChatCompletion API 请求器"""
+@requester.requester_class("bailian-chat-completions")
+class BailianChatCompletions(chatcmpl.OpenAIChatCompletions):
+    """阿里云百炼大模型平台 ChatCompletion API 请求器"""
 
     client: openai.AsyncClient
 
@@ -18,4 +18,4 @@ class AliyunChatCompletions(chatcmpl.OpenAIChatCompletions):
     def __init__(self, ap: app.Application):
         self.ap = ap
 
-        self.requester_cfg = self.ap.provider_cfg.data['requester']['aliyun-chat-completions']
+        self.requester_cfg = self.ap.provider_cfg.data['requester']['bailian-chat-completions']
diff --git a/templates/provider.json b/templates/provider.json
index c22ba42c..551c7d88 100644
--- a/templates/provider.json
+++ b/templates/provider.json
@@ -26,8 +26,8 @@
         "siliconflow": [
             "xxxxxxx"
         ],
-        "aliyun": [
-            "sk-aliyun_token"
+        "bailian": [
+            "sk-xxxxxxx"
         ]
     },
     "requester": {
@@ -83,7 +83,7 @@
             "args": {},
             "timeout": 120
         },
-        "aliyun-chat-completions": {
+        "bailian-chat-completions": {
             "args": {},
             "base-url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
             "timeout": 120
diff --git a/templates/schema/provider.json b/templates/schema/provider.json
index bd2084b8..e6f806c5 100644
--- a/templates/schema/provider.json
+++ b/templates/schema/provider.json
@@ -82,6 +82,14 @@
                         "type": "string"
                     },
                     "default": []
+                },
+                "bailian": {
+                    "type": "array",
+                    "title": "阿里云百炼大模型平台 API 密钥",
+                    "items": {
+                        "type": "string"
+                    },
+                    "default": []
                 }
             }
         },
@@ -288,6 +296,26 @@
                             "default": 120
                         }
                     }
+                },
+                "bailian-chat-completions": {
+                    "type": "object",
+                    "title": "阿里云百炼大模型平台 API 请求配置",
+                    "description": "仅可编辑 URL 和 超时时间,额外请求参数不支持可视化编辑,请到编辑器编辑",
+                    "properties": {
+                        "base-url": {
+                            "type": "string",
+                            "title": "API URL"
+                        },
+                        "args": {
+                            "type": "object",
+                            "default": {}
+                        },
+                        "timeout": {
+                            "type": "number",
+                            "title": "API 请求超时时间",
+                            "default": 120
+                        }
+                    }
                 }
             }
         },
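
Note: after the rename in PATCH 4/4, `bailian-chat-completions` is the stock OpenAI-compatible requester pointed at DashScope's compatible-mode endpoint. `BailianChatCompletions` inherits its request logic from `chatcmpl.OpenAIChatCompletions` and only changes the registered name, the config key it reads, and the default `base-url`, while the m028 migration seeds `keys["bailian"]` and the `bailian-chat-completions` requester block into existing installs before `dump_config()` persists them. A minimal standalone sketch of the call this configuration ends up making is shown below; the script itself, the `sk-xxxxxxx` placeholder key, and the `deepseek-v3` model name are illustrative assumptions, not part of the patch.

# Illustrative sketch only: exercises the same OpenAI-compatible endpoint that
# the "bailian-chat-completions" requester points at in templates/provider.json.
import asyncio

import openai


async def main():
    # openai.AsyncClient is the client type the requester declares; the api_key
    # would come from keys["bailian"], the base_url from the requester config.
    client = openai.AsyncClient(
        api_key="sk-xxxxxxx",  # placeholder, substitute a real Bailian key
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    resp = await client.chat.completions.create(
        model="deepseek-v3",  # assumed model name; pick one enabled for your account
        messages=[{"role": "user", "content": "ping"}],
    )
    print(resp.choices[0].message.content)


asyncio.run(main())

Everything beyond endpoint and key selection comes from the inherited `OpenAIChatCompletions` implementation.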