Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-25 19:37:36 +08:00)

Compare commits: 44 commits
Commit SHA1s: a31b450f54, 97bb24c5b9, 5e5a3639d1, 0a68a77e28, 11a0c4142e, d214d80579, ed719fd44e, 5dc6bed0d1, b1244a4d4e, 6aa325a4b1, 88a11561f9, fd30022065, 9486312737, e37070a985, ffb98ecca2, 29bd69ef97, e46c9530cc, 7ddd303e2d, 66798a1d0f, bd05afdf14, 136e48f7ee, facb5f177a, 10ce31cc46, 3b4f3c516b, a1e3981ce4, 89f26781fe, 914292a80b, 8227e3299b, 07ca48d652, 243f45c7db, 12cfce3622, 535c4a8a11, 6606c671b2, 242f24840d, 486f636b2d, b293d7a7cd, f4fa0b42a6, 209e89712d, 3314a7a9e9, 793d64303e, 6642498f00, 32b400dcb1, 0dcd2d8179, 736f8b613c
.github/ISSUE_TEMPLATE/bug-report.yml (vendored, 23 lines changed)

@@ -6,26 +6,19 @@ body:
   - type: dropdown
     attributes:
       label: 消息平台适配器
-      description: "连接QQ使用的框架"
+      description: "接入的消息平台类型"
       options:
+        - 其他(或暂未使用)
-        - Nakuru(go-cqhttp)
         - aiocqhttp(使用 OneBot 协议接入的)
         - qq-botpy(QQ官方API)
-        - 其他
     validations:
       required: false
   - type: input
     attributes:
-      label: 运行环境
-      description: 操作系统、系统架构、**Python版本**、**主机地理位置**
-      placeholder: 例如: CentOS x64 Python 3.10.3、Docker 的直接写 Docker 就行
-    validations:
-      required: true
-  - type: input
-    attributes:
-      label: LangBot 版本
-      description: LangBot (QChatGPT) 版本号
-      placeholder: 例如:v3.3.0,可以使用`!version`命令查看,或者到 pkg/utils/constants.py 查看
+      label: 运行环境
+      description: LangBot 版本、操作系统、系统架构、**Python版本**、**主机地理位置**
+      placeholder: 例如:v3.3.0、CentOS x64 Python 3.10.3、Docker 的系统直接写 Docker 就行
     validations:
       required: true
   - type: textarea
@@ -34,6 +27,12 @@ body:
       description: 完整描述异常情况,什么时候发生的、发生了什么。**请附带日志信息。**
     validations:
       required: true
+  - type: textarea
+    attributes:
+      label: 复现步骤
+      description: 如何重现这个问题,越详细越好
+    validations:
+      required: false
   - type: textarea
     attributes:
       label: 启用的插件
.github/workflows/build-dev-image.yaml (vendored, 5 lines changed)

@@ -7,9 +7,14 @@ on:
 jobs:
   build-dev-image:
     runs-on: ubuntu-latest
+    # 如果是tag则跳过
+    if: ${{ !startsWith(github.ref, 'refs/tags/') }}
     steps:
       - name: Checkout
         uses: actions/checkout@v2
+        with:
+          persist-credentials: false
+
       - name: Generate Tag
         id: generate_tag
         run: |
.github/workflows/build-docker-image.yml (vendored, 3 lines changed)

@@ -13,6 +13,9 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v2
+        with:
+          persist-credentials: false
+
       - name: judge has env GITHUB_REF # 如果没有GITHUB_REF环境变量,则把github.ref变量赋值给GITHUB_REF
         run: |
           if [ -z "$GITHUB_REF" ]; then
.github/workflows/build-release-artifacts.yaml (vendored, 10 lines changed)

@@ -12,6 +12,8 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v2
+        with:
+          persist-credentials: false
 
       - name: Check version
         id: check_version
@@ -50,3 +52,11 @@ jobs:
       with:
         name: langbot-${{ steps.check_version.outputs.version }}-all
         path: .
+
+    - name: Upload To Release
+      env:
+        GH_TOKEN: ${{ secrets.RELEASE_UPLOAD_GITHUB_TOKEN }}
+      run: |
+        # 本目录下所有文件打包成zip
+        zip -r langbot-${{ steps.check_version.outputs.version }}-all.zip .
+        gh release upload ${{ github.event.release.tag_name }} langbot-${{ steps.check_version.outputs.version }}-all.zip
.github/workflows/test-pr.yml (vendored, 80 lines changed; file deleted)

@@ -1,80 +0,0 @@
-name: Test Pull Request
-
-on:
-  pull_request:
-    types: [ready_for_review]
-    paths:
-      # 任何py文件改动都会触发
-      - '**.py'
-  pull_request_review:
-    types: [submitted]
-  issue_comment:
-    types: [created]
-  # 允许手动触发
-  workflow_dispatch:
-
-jobs:
-  perform-test:
-    runs-on: ubuntu-latest
-    # 如果事件为pull_request_review且review状态为approved,则执行
-    if: >
-      github.event_name == 'pull_request' ||
-      (github.event_name == 'pull_request_review' && github.event.review.state == 'APPROVED') ||
-      github.event_name == 'workflow_dispatch' ||
-      (github.event_name == 'issue_comment' && github.event.issue.pull_request != '' && contains(github.event.comment.body, '/test') && github.event.comment.user.login == 'RockChinQ')
-    steps:
-      # 签出测试工程仓库代码
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          # 仓库地址
-          repository: RockChinQ/qcg-tester
-          # 仓库路径
-          path: qcg-tester
-      - name: Setup Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.10'
-
-      - name: Install dependencies
-        run: |
-          cd qcg-tester
-          python -m pip install --upgrade pip
-          pip install -r requirements.txt
-
-      - name: Get PR details
-        id: get-pr
-        if: github.event_name == 'issue_comment'
-        uses: octokit/request-action@v2.x
-        with:
-          route: GET /repos/${{ github.repository }}/pulls/${{ github.event.issue.number }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Set PR source branch as env variable
-        if: github.event_name == 'issue_comment'
-        run: |
-          PR_SOURCE_BRANCH=$(echo '${{ steps.get-pr.outputs.data }}' | jq -r '.head.ref')
-          echo "BRANCH=$PR_SOURCE_BRANCH" >> $GITHUB_ENV
-
-      - name: Set PR Branch as bash env
-        if: github.event_name != 'issue_comment'
-        run: |
-          echo "BRANCH=${{ github.head_ref }}" >> $GITHUB_ENV
-      - name: Set OpenAI API Key from Secrets
-        run: |
-          echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> $GITHUB_ENV
-      - name: Set OpenAI Reverse Proxy URL from Secrets
-        run: |
-          echo "OPENAI_REVERSE_PROXY=${{ secrets.OPENAI_REVERSE_PROXY }}" >> $GITHUB_ENV
-      - name: Run test
-        run: |
-          cd qcg-tester
-          python main.py
-
-      - name: Upload coverage reports to Codecov
-        run: |
-          cd qcg-tester/resource/QChatGPT
-          curl -Os https://uploader.codecov.io/latest/linux/codecov
-          chmod +x codecov
-          ./codecov -t ${{ secrets.CODECOV_TOKEN }}
.gitignore (vendored, 6 lines changed)

@@ -2,7 +2,6 @@
 .idea/
 __pycache__/
 database.db
-qchatgpt.log
 langbot.log
 /banlist.py
 /plugins/
@@ -17,8 +16,7 @@ scenario/
 !scenario/default-template.json
 override.json
 cookies.json
-res/announcement_saved
-res/announcement_saved.json
+data/labels/announcement_saved.json
 cmdpriv.json
 tips.py
 .venv
@@ -32,7 +30,7 @@ claude.json
 bard.json
 /*yaml
 !/docker-compose.yaml
-res/instance_id.json
+data/labels/instance_id.json
 .DS_Store
 /data
 botpy.log*
README.md (28 lines changed)

@@ -37,25 +37,35 @@
 
 ## ✨ Features
 
-- 💬 大模型对话、Agent:支持多种大模型,适配群聊和私聊;具有多轮对话、工具调用、多模态能力,并支持接入 Dify。目前支持 QQ、QQ频道,后续还将支持微信、WhatsApp、Discord等平台。
+- 💬 大模型对话、Agent:支持多种大模型,适配群聊和私聊;具有多轮对话、工具调用、多模态能力,并深度适配 [Dify](https://dify.ai)。目前支持 QQ、QQ频道,后续还将支持微信、WhatsApp、Discord等平台。
 - 🛠️ 高稳定性、功能完备:原生支持访问控制、限速、敏感词过滤等机制;配置简单,支持多种部署方式。
 - 🧩 插件扩展、活跃社区:支持事件驱动、组件扩展等插件机制;丰富生态,目前已有数十个[插件](https://docs.langbot.app/plugin/plugin-intro.html)
 - 😻 [New] Web 管理面板:支持通过浏览器管理 LangBot 实例,具体支持功能,查看[文档](https://docs.langbot.app/webui/intro.html)
 
 ## 📦 开始使用
 
-> **INFO**
+> [!IMPORTANT]
 >
 > 在您开始任何方式部署之前,请务必阅读[新手指引](https://docs.langbot.app/insight/guide.html)。
 
-#### 宝塔面板部署
-
-LangBot 已上架宝塔面板,若您已安装宝塔面板,可以根据[文档](https://docs.langbot.app/deploy/langbot/one-click/bt.html)使用。
-
-#### Docker 部署
+#### Docker Compose 部署
 
 适合熟悉 Docker 的用户,查看文档[Docker 部署](https://docs.langbot.app/deploy/langbot/docker.html)。
 
+#### 宝塔面板部署
+
+已上架宝塔面板,若您已安装宝塔面板,可以根据[文档](https://docs.langbot.app/deploy/langbot/one-click/bt.html)使用。
+
 #### Zeabur 云部署
 
+社区贡献的 Zeabur 模板。
+
 [](https://zeabur.com/zh-CN/templates/ZKTBDH)
 
+#### Railway 云部署
+
+[](https://railway.app/template/yRrAyL?referralCode=vogKPF)
+
 #### 手动部署
 
 直接使用发行版运行,查看文档[手动部署](https://docs.langbot.app/deploy/langbot/manual.html)。
@@ -63,3 +73,7 @@ LangBot 已上架宝塔面板,若您已安装宝塔面板,可以根据[文
 ## 📸 效果展示
 
 <img alt="回复效果(带有联网插件)" src="https://docs.langbot.app/QChatGPT-0516.png" width="500px"/>
+
+- WebUI Demo: https://demo.langbot.dev/
+  - 登录信息:邮箱:`demo@langbot.app` 密码:`langbot123456`
+  - 注意:仅展示webui效果,公开环境,请不要在其中填入您的任何敏感信息。
@@ -10,8 +10,8 @@ class TestDifyClient:
     async def test_chat_messages(self):
         cln = client.AsyncDifyServiceClient(api_key=os.getenv("DIFY_API_KEY"), base_url=os.getenv("DIFY_BASE_URL"))
 
-        resp = await cln.chat_messages(inputs={}, query="Who are you?", user="test")
-        print(json.dumps(resp, ensure_ascii=False, indent=4))
+        async for chunk in cln.chat_messages(inputs={}, query="调用工具查看现在几点?", user="test"):
+            print(json.dumps(chunk, ensure_ascii=False, indent=4))
 
     async def test_upload_file(self):
         cln = client.AsyncDifyServiceClient(api_key=os.getenv("DIFY_API_KEY"), base_url=os.getenv("DIFY_BASE_URL"))
@@ -41,4 +41,4 @@ class TestDifyClient:
         print(json.dumps(chunks, ensure_ascii=False, indent=4))
 
 if __name__ == "__main__":
-    asyncio.run(TestDifyClient().test_workflow_run())
+    asyncio.run(TestDifyClient().test_chat_messages())
@@ -26,21 +26,22 @@ class AsyncDifyServiceClient:
         inputs: dict[str, typing.Any],
         query: str,
         user: str,
-        response_mode: str = "blocking",  # 当前不支持 streaming
+        response_mode: str = "streaming",  # 当前不支持 blocking
         conversation_id: str = "",
         files: list[dict[str, typing.Any]] = [],
         timeout: float = 30.0,
-    ) -> dict[str, typing.Any]:
+    ) -> typing.AsyncGenerator[dict[str, typing.Any], None]:
         """发送消息"""
-        if response_mode != "blocking":
-            raise DifyAPIError("当前仅支持 blocking 模式")
+        if response_mode != "streaming":
+            raise DifyAPIError("当前仅支持 streaming 模式")
 
         async with httpx.AsyncClient(
             base_url=self.base_url,
             trust_env=True,
             timeout=timeout,
         ) as client:
-            response = await client.post(
+            async with client.stream(
+                "POST",
                 "/chat-messages",
                 headers={"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"},
                 json={
@@ -51,12 +52,14 @@ class AsyncDifyServiceClient:
                     "conversation_id": conversation_id,
                     "files": files,
                 },
-            )
-
-            if response.status_code != 200:
-                raise DifyAPIError(f"{response.status_code} {response.text}")
-
-            return response.json()
+            ) as r:
+                async for chunk in r.aiter_lines():
+                    if r.status_code != 200:
+                        raise DifyAPIError(f"{r.status_code} {chunk}")
+                    if chunk.strip() == "":
+                        continue
+                    if chunk.startswith("data:"):
+                        yield json.loads(chunk[5:])
 
     async def workflow_run(
         self,
@@ -88,6 +91,8 @@ class AsyncDifyServiceClient:
             },
         ) as r:
             async for chunk in r.aiter_lines():
+                if r.status_code != 200:
+                    raise DifyAPIError(f"{r.status_code} {chunk}")
                 if chunk.strip() == "":
                     continue
                 if chunk.startswith("data:"):
@@ -100,10 +105,6 @@ class AsyncDifyServiceClient:
         timeout: float = 30.0,
     ) -> str:
         """上传文件"""
-        # curl -X POST 'http://dify.rockchin.top/v1/files/upload' \
-        # --header 'Authorization: Bearer {api_key}' \
-        # --form 'file=@localfile;type=image/[png|jpeg|jpg|webp|gif] \
-        # --form 'user=abc-123'
        async with httpx.AsyncClient(
            base_url=self.base_url,
            trust_env=True,
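With this change, `AsyncDifyServiceClient.chat_messages` no longer returns a single parsed response body: it is an async generator that opens a streaming POST and yields one parsed JSON object per SSE `data:` line. A minimal consumption sketch; the import path is an assumption (the updated test above refers to the module simply as `client`):

```python
import asyncio
import json
import os

# Import path assumed, adjust to wherever AsyncDifyServiceClient lives in the tree.
from libs.dify_service_api.v1 import client


async def main():
    cln = client.AsyncDifyServiceClient(
        api_key=os.getenv("DIFY_API_KEY"),
        base_url=os.getenv("DIFY_BASE_URL"),
    )
    # chat_messages is now an async generator: iterate, do not await a dict.
    async for chunk in cln.chat_messages(inputs={}, query="Who are you?", user="test"):
        # Each chunk is one parsed SSE event, e.g. {"event": "message", "answer": "..."}.
        print(json.dumps(chunk, ensure_ascii=False))


asyncio.run(main())
```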
main.py (4 lines changed)

@@ -49,12 +49,10 @@ async def main_entry(loop: asyncio.AbstractEventLoop):
     generated_files = await files.generate_files()
 
     if generated_files:
-        print("以下文件不存在,已自动生成,请按需修改配置文件后重启:")
+        print("以下文件不存在,已自动生成:")
         for file in generated_files:
             print("-", file)
 
-        sys.exit(0)
-
     from pkg.core import boot
     await boot.main(loop)
@@ -12,7 +12,7 @@ from .. import group
 class LogsRouterGroup(group.RouterGroup):
 
     async def initialize(self) -> None:
-        @self.route('', methods=['GET'])
+        @self.route('', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
 
             start_page_number = int(quart.request.args.get('start_page_number', 0))
@@ -13,7 +13,7 @@ from .. import group
 class PluginsRouterGroup(group.RouterGroup):
 
     async def initialize(self) -> None:
-        @self.route('', methods=['GET'])
+        @self.route('', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
             plugins = self.ap.plugin_mgr.plugins()
 
@@ -23,14 +23,14 @@ class PluginsRouterGroup(group.RouterGroup):
                 'plugins': plugins_data
             })
 
-        @self.route('/<author>/<plugin_name>/toggle', methods=['PUT'])
+        @self.route('/<author>/<plugin_name>/toggle', methods=['PUT'], auth_type=group.AuthType.USER_TOKEN)
         async def _(author: str, plugin_name: str) -> str:
             data = await quart.request.json
             target_enabled = data.get('target_enabled')
             await self.ap.plugin_mgr.update_plugin_switch(plugin_name, target_enabled)
             return self.success()
 
-        @self.route('/<author>/<plugin_name>/update', methods=['POST'])
+        @self.route('/<author>/<plugin_name>/update', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
         async def _(author: str, plugin_name: str) -> str:
             ctx = taskmgr.TaskContext.new()
             wrapper = self.ap.task_mgr.create_user_task(
@@ -44,7 +44,7 @@ class PluginsRouterGroup(group.RouterGroup):
                 'task_id': wrapper.id
             })
 
-        @self.route('/<author>/<plugin_name>', methods=['DELETE'])
+        @self.route('/<author>/<plugin_name>', methods=['DELETE'], auth_type=group.AuthType.USER_TOKEN)
         async def _(author: str, plugin_name: str) -> str:
             ctx = taskmgr.TaskContext.new()
             wrapper = self.ap.task_mgr.create_user_task(
@@ -59,13 +59,13 @@ class PluginsRouterGroup(group.RouterGroup):
                 'task_id': wrapper.id
             })
 
-        @self.route('/reorder', methods=['PUT'])
+        @self.route('/reorder', methods=['PUT'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
             data = await quart.request.json
             await self.ap.plugin_mgr.reorder_plugins(data.get('plugins'))
             return self.success()
 
-        @self.route('/install/github', methods=['POST'])
+        @self.route('/install/github', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
             data = await quart.request.json
@@ -9,7 +9,7 @@ class SettingsRouterGroup(group.RouterGroup):
 
     async def initialize(self) -> None:
 
-        @self.route('', methods=['GET'])
+        @self.route('', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
             return self.success(
                 data={
@@ -23,7 +23,7 @@ class SettingsRouterGroup(group.RouterGroup):
                 }
             )
 
-        @self.route('/<manager_name>', methods=['GET'])
+        @self.route('/<manager_name>', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
         async def _(manager_name: str) -> str:
 
             manager = self.ap.settings_mgr.get_manager(manager_name)
@@ -44,7 +44,7 @@ class SettingsRouterGroup(group.RouterGroup):
                 }
             )
 
-        @self.route('/<manager_name>/data', methods=['PUT'])
+        @self.route('/<manager_name>/data', methods=['PUT'], auth_type=group.AuthType.USER_TOKEN)
         async def _(manager_name: str) -> str:
             data = await quart.request.json
             manager = self.ap.settings_mgr.get_manager(manager_name)
@@ -9,7 +9,7 @@ from .. import group
 class StatsRouterGroup(group.RouterGroup):
 
     async def initialize(self) -> None:
-        @self.route('/basic', methods=['GET'])
+        @self.route('/basic', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
 
             conv_count = 0
@@ -20,7 +20,7 @@ class SystemRouterGroup(group.RouterGroup):
             }
         )
 
-        @self.route('/tasks', methods=['GET'])
+        @self.route('/tasks', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
             task_type = quart.request.args.get("type")
 
@@ -31,7 +31,7 @@ class SystemRouterGroup(group.RouterGroup):
                 data=self.ap.task_mgr.get_tasks_dict(task_type)
             )
 
-        @self.route('/tasks/<task_id>', methods=['GET'])
+        @self.route('/tasks/<task_id>', methods=['GET'], auth_type=group.AuthType.USER_TOKEN)
         async def _(task_id: str) -> str:
             task = self.ap.task_mgr.get_task_by_id(int(task_id))
 
@@ -40,7 +40,7 @@ class SystemRouterGroup(group.RouterGroup):
 
             return self.success(data=task.to_dict())
 
-        @self.route('/reload', methods=['POST'])
+        @self.route('/reload', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
             json_data = await quart.request.json
 
@@ -51,7 +51,7 @@ class SystemRouterGroup(group.RouterGroup):
             )
             return self.success()
 
-        @self.route('/_debug/exec', methods=['POST'])
+        @self.route('/_debug/exec', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
         async def _() -> str:
             if not constants.debug_mode:
                 return self.http_status(403, 403, "Forbidden")
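The router hunks above all make the same change: every HTTP API route now passes `auth_type=group.AuthType.USER_TOKEN` explicitly rather than relying on the decorator's default. A standalone sketch of the decorator-with-auth idea; this illustrates the pattern, it is not LangBot's actual `group.RouterGroup` implementation:

```python
import enum
from typing import Awaitable, Callable


class AuthType(enum.Enum):
    NONE = 'none'
    USER_TOKEN = 'user-token'


# (path, methods, auth requirement, handler) tuples collected at import time.
registered: list[tuple[str, list[str], AuthType, Callable[..., Awaitable[str]]]] = []


def route(path: str, methods: list[str], auth_type: AuthType = AuthType.NONE):
    def decorator(fn: Callable[..., Awaitable[str]]):
        registered.append((path, methods, auth_type, fn))
        return fn
    return decorator


# Hypothetical endpoint: requiring the user token must now be stated per route.
@route('/tasks', methods=['GET'], auth_type=AuthType.USER_TOKEN)
async def list_tasks() -> str:
    return '{"tasks": []}'
```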
@@ -13,14 +13,14 @@ identifier = {
     'instance_create_ts': 0,
 }
 
-HOST_ID_FILE = os.path.expanduser('~/.qchatgpt/host_id.json')
-INSTANCE_ID_FILE = 'res/instance_id.json'
+HOST_ID_FILE = os.path.expanduser('~/.langbot/host_id.json')
+INSTANCE_ID_FILE = 'data/labels/instance_id.json'
 
 def init():
     global identifier
 
-    if not os.path.exists(os.path.expanduser('~/.qchatgpt')):
-        os.mkdir(os.path.expanduser('~/.qchatgpt'))
+    if not os.path.exists(os.path.expanduser('~/.langbot')):
+        os.mkdir(os.path.expanduser('~/.langbot'))
 
     if not os.path.exists(HOST_ID_FILE):
         new_host_id = 'host_'+str(uuid.uuid4())
@@ -143,9 +143,7 @@ class Application:
             self.logger.warning("WebUI 文件缺失,请根据文档获取:https://docs.langbot.app/webui/intro.html")
             return
 
-        import socket
-
-        host_ip = socket.gethostbyname(socket.gethostname())
+        host_ip = "127.0.0.1"
 
         public_ip = await ip.get_myip()
 
@@ -199,5 +197,27 @@ class Application:
 
                 await self.plugin_mgr.load_plugins()
                 await self.plugin_mgr.initialize_plugins()
+            case core_entities.LifecycleControlScope.PROVIDER.value:
+                self.logger.info("执行热重载 scope="+scope)
+
+                llm_model_mgr_inst = llm_model_mgr.ModelManager(self)
+                await llm_model_mgr_inst.initialize()
+                self.model_mgr = llm_model_mgr_inst
+
+                llm_session_mgr_inst = llm_session_mgr.SessionManager(self)
+                await llm_session_mgr_inst.initialize()
+                self.sess_mgr = llm_session_mgr_inst
+
+                llm_prompt_mgr_inst = llm_prompt_mgr.PromptManager(self)
+                await llm_prompt_mgr_inst.initialize()
+                self.prompt_mgr = llm_prompt_mgr_inst
+
+                llm_tool_mgr_inst = llm_tool_mgr.ToolManager(self)
+                await llm_tool_mgr_inst.initialize()
+                self.tool_mgr = llm_tool_mgr_inst
+
+                runner_mgr_inst = runnermgr.RunnerManager(self)
+                await runner_mgr_inst.initialize()
+                self.runner_mgr = runner_mgr_inst
             case _:
                 pass
@@ -24,6 +24,7 @@ required_paths = [
     "data/scenario",
     "data/logs",
     "data/config",
+    "data/labels",
     "plugins"
 ]
@@ -23,6 +23,7 @@ class LifecycleControlScope(enum.Enum):
     APPLICATION = "application"
     PLATFORM = "platform"
     PLUGIN = "plugin"
+    PROVIDER = "provider"
 
 
 class LauncherTypes(enum.Enum):
pkg/core/migrations/m017_dify_api_timeout_params.py (new file, 24 lines)

@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from .. import migration
+
+
+@migration.migration_class("dify-api-timeout-params", 17)
+class DifyAPITimeoutParamsMigration(migration.Migration):
+    """迁移"""
+
+    async def need_migrate(self) -> bool:
+        """判断当前环境是否需要运行此迁移"""
+        return 'timeout' not in self.ap.provider_cfg.data['dify-service-api']['chat'] or 'timeout' not in self.ap.provider_cfg.data['dify-service-api']['workflow'] \
+            or 'agent' not in self.ap.provider_cfg.data['dify-service-api']
+
+    async def run(self):
+        """执行迁移"""
+        self.ap.provider_cfg.data['dify-service-api']['chat']['timeout'] = 120
+        self.ap.provider_cfg.data['dify-service-api']['workflow']['timeout'] = 120
+        self.ap.provider_cfg.data['dify-service-api']['agent'] = {
+            "api-key": "app-1234567890",
+            "timeout": 120
+        }
+
+        await self.ap.provider_cfg.dump_config()
pkg/core/migrations/m018_xai_config.py (new file, 25 lines)

@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from .. import migration
+
+
+@migration.migration_class("xai-config", 18)
+class XaiConfigMigration(migration.Migration):
+    """迁移"""
+
+    async def need_migrate(self) -> bool:
+        """判断当前环境是否需要运行此迁移"""
+        return 'xai-chat-completions' not in self.ap.provider_cfg.data['requester']
+
+    async def run(self):
+        """执行迁移"""
+        self.ap.provider_cfg.data['requester']['xai-chat-completions'] = {
+            "base-url": "https://api.x.ai/v1",
+            "args": {},
+            "timeout": 120
+        }
+        self.ap.provider_cfg.data['keys']['xai'] = [
+            "xai-1234567890"
+        ]
+
+        await self.ap.provider_cfg.dump_config()
@@ -7,7 +7,7 @@ from .. import migration
 from ..migrations import m001_sensitive_word_migration, m002_openai_config_migration, m003_anthropic_requester_cfg_completion, m004_moonshot_cfg_completion
 from ..migrations import m005_deepseek_cfg_completion, m006_vision_config, m007_qcg_center_url, m008_ad_fixwin_config_migrate, m009_msg_truncator_cfg
 from ..migrations import m010_ollama_requester_config, m011_command_prefix_config, m012_runner_config, m013_http_api_config, m014_force_delay_config
-from ..migrations import m015_gitee_ai_config, m016_dify_service_api
+from ..migrations import m015_gitee_ai_config, m016_dify_service_api, m017_dify_api_timeout_params, m018_xai_config
 
 
 @stage.stage_class("MigrationStage")
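The import line above is the only change the migration stage needs: importing `m017_dify_api_timeout_params` and `m018_xai_config` executes their `@migration.migration_class(...)` decorators, which is what registers them. A standalone sketch of that registration pattern; this is an assumption about how the registry works, not the actual `pkg/core/migration` code:

```python
import abc

_registry: dict[int, type] = {}


def migration_class(name: str, number: int):
    """Register a Migration subclass under a sequence number at import time."""
    def decorator(cls: type) -> type:
        cls.name, cls.number = name, number
        _registry[number] = cls  # later replayed in ascending numeric order
        return cls
    return decorator


class Migration(abc.ABC):
    @abc.abstractmethod
    async def need_migrate(self) -> bool: ...

    @abc.abstractmethod
    async def run(self) -> None: ...


@migration_class("example-migration", 99)  # hypothetical entry
class ExampleMigration(Migration):
    async def need_migrate(self) -> bool:
        return False

    async def run(self) -> None:
        pass
```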
@@ -45,7 +45,7 @@ class PreProcessor(stage.PipelineStage):
 
         # 检查vision是否启用,没启用就删除所有图片
-        if not self.ap.provider_cfg.data['enable-vision'] or not query.use_model.vision_supported:
+        if not self.ap.provider_cfg.data['enable-vision'] or (self.ap.provider_cfg.data['runner'] == 'local-agent' and not query.use_model.vision_supported):
             for msg in query.messages:
                 if isinstance(msg.content, list):
                     for me in msg.content:
@@ -60,13 +60,13 @@ class PreProcessor(stage.PipelineStage):
                             llm_entities.ContentElement.from_text(me.text)
                         )
                     elif isinstance(me, platform_message.Image):
-                        if self.ap.provider_cfg.data['enable-vision'] and query.use_model.vision_supported:
-                            if me.url is not None:
+                        if self.ap.provider_cfg.data['enable-vision'] and (self.ap.provider_cfg.data['runner'] != 'local-agent' or query.use_model.vision_supported):
+                            if me.base64 is not None:
                                 content_list.append(
-                                    llm_entities.ContentElement.from_image_url(str(me.url))
+                                    llm_entities.ContentElement.from_image_base64(me.base64)
                                 )
 
-        query.user_message = llm_entities.Message(  # TODO 适配多模态输入
+        query.user_message = llm_entities.Message(
             role='user',
             content=content_list
         )
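The rewritten condition narrows when images get stripped: only when vision is disabled globally, or when the `local-agent` runner is paired with a model that lacks vision support. Other runners (Dify, for example) now keep images regardless of the local model's vision flag. The predicate, pulled out for clarity:

```python
def should_strip_images(enable_vision: bool, runner: str, model_vision_supported: bool) -> bool:
    # Mirrors the hunk above: the model's vision flag only matters for local-agent.
    return (not enable_vision) or (runner == 'local-agent' and not model_vision_supported)


assert should_strip_images(True, 'dify-service-api', False) is False  # kept for Dify
assert should_strip_images(True, 'local-agent', False) is True        # stripped
assert should_strip_images(False, 'local-agent', True) is True        # vision disabled
```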
@@ -91,7 +91,7 @@ class ChatMessageHandler(handler.MessageHandler):
                 query.session.using_conversation.messages.extend(query.resp_messages)
             except Exception as e:
 
-                self.ap.logger.error(f'对话({query.query_id})请求失败: {str(e)}')
+                self.ap.logger.error(f'对话({query.query_id})请求失败: {type(e).__name__} {str(e)}')
 
                 yield entities.StageProcessResult(
                     result_type=entities.ResultType.INTERRUPT,
@@ -105,7 +105,7 @@ class ChatMessageHandler(handler.MessageHandler):
             await self.ap.ctr_mgr.usage.post_query_record(
                 session_type=query.session.launcher_type.value,
                 session_id=str(query.session.launcher_id),
-                query_ability_provider="QChatGPT.Chat",
+                query_ability_provider="LangBot.Chat",
                 usage=text_length,
                 model_name=query.use_model.name,
                 response_seconds=int(time.time() - start_time),
@@ -50,17 +50,6 @@ class PlatformManager:
                 adapter=adapter
             )
 
-        async def on_stranger_message(event: platform_events.StrangerMessage, adapter: msadapter.MessageSourceAdapter):
-
-            await self.ap.query_pool.add_query(
-                launcher_type=core_entities.LauncherTypes.PERSON,
-                launcher_id=event.sender.id,
-                sender_id=event.sender.id,
-                message_event=event,
-                message_chain=event.message_chain,
-                adapter=adapter
-            )
-
         async def on_group_message(event: platform_events.GroupMessage, adapter: msadapter.MessageSourceAdapter):
 
             await self.ap.query_pool.add_query(
@@ -96,12 +85,6 @@ class PlatformManager:
             )
             self.adapters.append(adapter_inst)
 
-            if adapter_name == 'yiri-mirai':
-                adapter_inst.register_listener(
-                    platform_events.StrangerMessage,
-                    on_stranger_message
-                )
-
             adapter_inst.register_listener(
                 platform_events.FriendMessage,
                 on_friend_message
@@ -6,6 +6,7 @@ import time
 import datetime
 
 import aiocqhttp
+import aiohttp
 
 from .. import adapter
 from ...pipeline.longtext.strategies import forward
@@ -13,12 +14,12 @@ from ...core import app
 from ..types import message as platform_message
 from ..types import events as platform_events
 from ..types import entities as platform_entities
+from ...utils import image
 
 class AiocqhttpMessageConverter(adapter.MessageConverter):
 
     @staticmethod
-    def yiri2target(message_chain: platform_message.MessageChain) -> typing.Tuple[list, int, datetime.datetime]:
+    async def yiri2target(message_chain: platform_message.MessageChain) -> typing.Tuple[list, int, datetime.datetime]:
         msg_list = aiocqhttp.Message()
 
         msg_id = 0
@@ -59,7 +60,7 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
         elif type(msg) is forward.Forward:
 
             for node in msg.node_list:
-                msg_list.extend(AiocqhttpMessageConverter.yiri2target(node.message_chain)[0])
+                msg_list.extend((await AiocqhttpMessageConverter.yiri2target(node.message_chain))[0])
 
         else:
             msg_list.append(aiocqhttp.MessageSegment.text(str(msg)))
@@ -67,7 +68,7 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
         return msg_list, msg_id, msg_time
 
     @staticmethod
-    def target2yiri(message: str, message_id: int = -1):
+    async def target2yiri(message: str, message_id: int = -1):
         message = aiocqhttp.Message(message)
 
         yiri_msg_list = []
@@ -89,7 +90,8 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
             elif msg.type == "text":
                 yiri_msg_list.append(platform_message.Plain(text=msg.data["text"]))
             elif msg.type == "image":
-                yiri_msg_list.append(platform_message.Image(url=msg.data["url"]))
+                image_base64, image_format = await image.qq_image_url_to_base64(msg.data['url'])
+                yiri_msg_list.append(platform_message.Image(base64=f"data:image/{image_format};base64,{image_base64}"))
 
         chain = platform_message.MessageChain(yiri_msg_list)
@@ -99,9 +101,9 @@ class AiocqhttpMessageConverter(adapter.MessageConverter):
 class AiocqhttpEventConverter(adapter.EventConverter):
 
     @staticmethod
-    def yiri2target(event: platform_events.Event, bot_account_id: int):
+    async def yiri2target(event: platform_events.Event, bot_account_id: int):
 
-        msg, msg_id, msg_time = AiocqhttpMessageConverter.yiri2target(event.message_chain)
+        msg, msg_id, msg_time = await AiocqhttpMessageConverter.yiri2target(event.message_chain)
 
         if type(event) is platform_events.GroupMessage:
             role = "member"
@@ -164,8 +166,8 @@ class AiocqhttpEventConverter(adapter.EventConverter):
         return aiocqhttp.Event.from_payload(payload)
 
     @staticmethod
-    def target2yiri(event: aiocqhttp.Event):
-        yiri_chain = AiocqhttpMessageConverter.target2yiri(
+    async def target2yiri(event: aiocqhttp.Event):
+        yiri_chain = await AiocqhttpMessageConverter.target2yiri(
             event.message, event.message_id
         )
@@ -242,7 +244,7 @@ class AiocqhttpAdapter(adapter.MessageSourceAdapter):
     async def send_message(
         self, target_type: str, target_id: str, message: platform_message.MessageChain
     ):
-        aiocq_msg = AiocqhttpMessageConverter.yiri2target(message)[0]
+        aiocq_msg = (await AiocqhttpMessageConverter.yiri2target(message))[0]
 
         if target_type == "group":
             await self.bot.send_group_msg(group_id=int(target_id), message=aiocq_msg)
@@ -255,8 +257,8 @@ class AiocqhttpAdapter(adapter.MessageSourceAdapter):
         message: platform_message.MessageChain,
         quote_origin: bool = False,
     ):
-        aiocq_event = AiocqhttpEventConverter.yiri2target(message_source, self.bot_account_id)
-        aiocq_msg = AiocqhttpMessageConverter.yiri2target(message)[0]
+        aiocq_event = await AiocqhttpEventConverter.yiri2target(message_source, self.bot_account_id)
+        aiocq_msg = (await AiocqhttpMessageConverter.yiri2target(message))[0]
         if quote_origin:
             aiocq_msg = aiocqhttp.MessageSegment.reply(aiocq_event.message_id) + aiocq_msg
@@ -276,7 +278,7 @@ class AiocqhttpAdapter(adapter.MessageSourceAdapter):
         async def on_message(event: aiocqhttp.Event):
             self.bot_account_id = event.self_id
             try:
-                return await callback(self.event_converter.target2yiri(event), self)
+                return await callback(await self.event_converter.target2yiri(event), self)
             except:
                 traceback.print_exc()
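Every converter in this adapter becomes a coroutine because incoming image segments are now downloaded and re-encoded via the awaited `image.qq_image_url_to_base64`, so each call site gains an `await`. A hedged sketch of what that helper is assumed to do, inferred purely from its call sites above (not the actual `pkg/utils/image.py`):

```python
import base64

import httpx  # the project already uses httpx elsewhere in this changeset


async def qq_image_url_to_base64(url: str) -> tuple[str, str]:
    """Fetch an image URL and return (base64_payload, image_format) -- assumed shape."""
    async with httpx.AsyncClient(trust_env=True) as client:
        resp = await client.get(url)
        resp.raise_for_status()
    content_type = resp.headers.get("content-type", "image/jpeg")
    image_format = content_type.split("/")[-1]  # e.g. "jpeg", "png", "gif"
    return base64.b64encode(resp.content).decode(), image_format
```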
@@ -127,16 +127,16 @@ class APIHost:
         Returns:
             bool: 是否满足要求, False时为无法获取版本号,True时为满足要求,报错为不满足要求
         """
-        qchatgpt_version = ""
+        langbot_version = ""
 
         try:
-            qchatgpt_version = self.ap.ver_mgr.get_current_version()  # 从updater模块获取版本号
+            langbot_version = self.ap.ver_mgr.get_current_version()  # 从updater模块获取版本号
         except:
             return False
 
-        if self.ap.ver_mgr.compare_version_str(qchatgpt_version, ge) < 0 or \
-            (self.ap.ver_mgr.compare_version_str(qchatgpt_version, le) > 0):
-            raise Exception("LangBot 版本不满足要求,某些功能(可能是由插件提供的)无法正常使用。(要求版本:{}-{},但当前版本:{})".format(ge, le, qchatgpt_version))
+        if self.ap.ver_mgr.compare_version_str(langbot_version, ge) < 0 or \
+            (self.ap.ver_mgr.compare_version_str(langbot_version, le) > 0):
+            raise Exception("LangBot 版本不满足要求,某些功能(可能是由插件提供的)无法正常使用。(要求版本:{}-{},但当前版本:{})".format(ge, le, langbot_version))
 
         return True
@@ -38,6 +38,8 @@ class ContentElement(pydantic.BaseModel):
 
     image_url: typing.Optional[ImageURLContentObject] = None
 
+    image_base64: typing.Optional[str] = None
+
     def __str__(self):
         if self.type == 'text':
             return self.text
@@ -53,6 +55,10 @@ class ContentElement(pydantic.BaseModel):
     @classmethod
     def from_image_url(cls, image_url: str):
         return cls(type='image_url', image_url=ImageURLContentObject(url=image_url))
+
+    @classmethod
+    def from_image_base64(cls, image_base64: str):
+        return cls(type='image_base64', image_base64=image_base64)
 
 
 class Message(pydantic.BaseModel):
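`ContentElement` gains an `image_base64` variant alongside `image_url`, so multimodal content can carry a data URI end to end. A usage sketch grounded in the classmethods above; the import path follows the `llm_entities` alias used throughout this changeset and is otherwise an assumption:

```python
from pkg.provider import entities as llm_entities  # aliased as llm_entities in the diffs above

text_ele = llm_entities.ContentElement.from_text("What is in this picture?")
img_ele = llm_entities.ContentElement.from_image_base64(
    "data:image/png;base64,iVBORw0KGgo..."  # truncated placeholder payload
)

# Matches how the preprocessor above builds the user message.
msg = llm_entities.Message(role="user", content=[text_ele, img_ele])
```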
@@ -6,7 +6,7 @@ from . import entities, requester
 from ...core import app
 
 from . import token
-from .requesters import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl
+from .requesters import chatcmpl, anthropicmsgs, moonshotchatcmpl, deepseekchatcmpl, ollamachat, giteeaichatcmpl, xaichatcmpl
 
 FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list"
@@ -48,6 +48,7 @@ class LLMAPIRequester(metaclass=abc.ABCMeta):
     @abc.abstractmethod
     async def call(
         self,
+        query: core_entities.Query,
         model: modelmgr_entities.LLMModelInfo,
         messages: typing.List[llm_entities.Message],
         funcs: typing.List[tools_entities.LLMFunction] = None,
@@ -2,8 +2,10 @@ from __future__ import annotations
 
 import typing
 import traceback
+import base64
 
 import anthropic
+import httpx
 
 from .. import entities, errors, requester
 
@@ -21,15 +23,24 @@ class AnthropicMessages(requester.LLMAPIRequester):
     client: anthropic.AsyncAnthropic
 
     async def initialize(self):
+
+        httpx_client = anthropic._base_client.AsyncHttpxClientWrapper(
+            base_url=self.ap.provider_cfg.data['requester']['anthropic-messages']['base-url'],
+            # cast to a valid type because mypy doesn't understand our type narrowing
+            timeout=typing.cast(httpx.Timeout, self.ap.provider_cfg.data['requester']['anthropic-messages']['timeout']),
+            limits=anthropic._constants.DEFAULT_CONNECTION_LIMITS,
+            follow_redirects=True,
+            proxies=self.ap.proxy_mgr.get_forward_proxies()
+        )
+
         self.client = anthropic.AsyncAnthropic(
             api_key="",
             base_url=self.ap.provider_cfg.data['requester']['anthropic-messages']['base-url'],
-            timeout=self.ap.provider_cfg.data['requester']['anthropic-messages']['timeout'],
-            proxies=self.ap.proxy_mgr.get_forward_proxies()
+            http_client=httpx_client,
         )
 
     async def call(
         self,
+        query: core_entities.Query,
         model: entities.LLMModelInfo,
         messages: typing.List[llm_entities.Message],
         funcs: typing.List[tools_entities.LLMFunction] = None,
@@ -61,24 +72,20 @@ class AnthropicMessages(requester.LLMAPIRequester):
             if isinstance(m.content, str) and m.content.strip() != "":
                 req_messages.append(m.dict(exclude_none=True))
             elif isinstance(m.content, list):
-                # m.content = [
-                #     c for c in m.content if c.type == "text"
-                # ]
-
-                # if len(m.content) > 0:
-                #     req_messages.append(m.dict(exclude_none=True))
 
                 msg_dict = m.dict(exclude_none=True)
 
                 for i, ce in enumerate(m.content):
-                    if ce.type == "image_url":
-                        base64_image, image_format = await image.qq_image_url_to_base64(ce.image_url.url)
-
+                    if ce.type == "image_base64":
+                        image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
+
                         alter_image_ele = {
                             "type": "image",
                             "source": {
                                 "type": "base64",
                                 "media_type": f"image/{image_format}",
-                                "data": base64_image
+                                "data": image_b64
                             }
                         }
                         msg_dict["content"][i] = alter_image_ele
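The Anthropic requester now consumes the `image_base64` element directly instead of re-downloading from a URL, splitting the data URI with `image.extract_b64_and_format`. A hedged sketch of that helper's assumed behavior, inferred from this call site (it is awaited above, so it is sketched as a coroutine; not the actual implementation):

```python
async def extract_b64_and_format(data_uri: str) -> tuple[str, str]:
    """Split 'data:image/png;base64,<payload>' into ('<payload>', 'png') -- assumed shape."""
    header, payload = data_uri.split(",", 1)
    image_format = header.split("/", 1)[1].split(";", 1)[0]
    return payload, image_format
```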
@@ -65,6 +65,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
 
     async def _closure(
         self,
+        query: core_entities.Query,
         req_messages: list[dict],
         use_model: entities.LLMModelInfo,
         use_funcs: list[tools_entities.LLMFunction] = None,
@@ -87,8 +88,12 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
         for msg in messages:
             if 'content' in msg and isinstance(msg["content"], list):
                 for me in msg["content"]:
-                    if me["type"] == "image_url":
-                        me["image_url"]['url'] = await self.get_base64_str(me["image_url"]['url'])
+                    if me["type"] == "image_base64":
+                        me["image_url"] = {
+                            "url": me["image_base64"]
+                        }
+                        me["type"] = "image_url"
+                        del me["image_base64"]
 
         args["messages"] = messages
 
@@ -102,6 +107,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
 
     async def call(
         self,
+        query: core_entities.Query,
         model: entities.LLMModelInfo,
         messages: typing.List[llm_entities.Message],
         funcs: typing.List[tools_entities.LLMFunction] = None,
@@ -118,7 +124,7 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
             req_messages.append(msg_dict)
 
         try:
-            return await self._closure(req_messages, model, funcs)
+            return await self._closure(query=query, req_messages=req_messages, use_model=model, use_funcs=funcs)
         except asyncio.TimeoutError:
             raise errors.RequesterError('请求超时')
         except openai.BadRequestError as e:
@@ -134,11 +140,3 @@ class OpenAIChatCompletions(requester.LLMAPIRequester):
             raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
         except openai.APIError as e:
             raise errors.RequesterError(f'请求错误: {e.message}')
-
-    @async_lru.alru_cache(maxsize=128)
-    async def get_base64_str(
-        self,
-        original_url: str,
-    ) -> str:
-        base64_image, image_format = await image.qq_image_url_to_base64(original_url)
-        return f"data:image/{image_format};base64,{base64_image}"
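For the OpenAI-style requester, the internal `image_base64` part is rewritten in place into the `image_url` shape the Chat Completions API expects, with the data URI serving as the URL; the old URL-download helper `get_base64_str` is deleted because the preprocessor already produced base64. The rewrite, isolated as a runnable snippet:

```python
def to_openai_image_part(me: dict) -> dict:
    """In-place rewrite from the hunk above: image_base64 -> image_url with a data URI."""
    if me["type"] == "image_base64":
        me["image_url"] = {"url": me["image_base64"]}
        me["type"] = "image_url"
        del me["image_base64"]
    return me


part = to_openai_image_part({"type": "image_base64", "image_base64": "data:image/png;base64,AAAA"})
assert part == {"type": "image_url", "image_url": {"url": "data:image/png;base64,AAAA"}}
```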
@@ -1,7 +1,5 @@
 from __future__ import annotations
 
-from ....core import app
-
 from . import chatcmpl
 from .. import entities, errors, requester
 from ....core import entities as core_entities, app
@@ -19,6 +17,7 @@ class DeepseekChatCompletions(chatcmpl.OpenAIChatCompletions):
 
     async def _closure(
         self,
+        query: core_entities.Query,
         req_messages: list[dict],
         use_model: entities.LLMModelInfo,
         use_funcs: list[tools_entities.LLMFunction] = None,
@@ -8,7 +8,7 @@ import typing
 
 from . import chatcmpl
 from .. import entities, errors, requester
-from ....core import app
+from ....core import app, entities as core_entities
 from ... import entities as llm_entities
 from ...tools import entities as tools_entities
 from .. import entities as modelmgr_entities
@@ -24,6 +24,7 @@ class GiteeAIChatCompletions(chatcmpl.OpenAIChatCompletions):
 
     async def _closure(
         self,
+        query: core_entities.Query,
         req_messages: list[dict],
         use_model: entities.LLMModelInfo,
         use_funcs: list[tools_entities.LLMFunction] = None,
@@ -19,6 +19,7 @@ class MoonshotChatCompletions(chatcmpl.OpenAIChatCompletions):
 
     async def _closure(
         self,
+        query: core_entities.Query,
         req_messages: list[dict],
         use_model: entities.LLMModelInfo,
         use_funcs: list[tools_entities.LLMFunction] = None,
@@ -4,6 +4,9 @@ import asyncio
 import os
 import typing
 from typing import Union, Mapping, Any, AsyncIterator
+import uuid
+import json
+import base64
 
 import async_lru
 import ollama
@@ -11,7 +14,7 @@ import ollama
 from .. import entities, errors, requester
 from ... import entities as llm_entities
 from ...tools import entities as tools_entities
-from ....core import app
+from ....core import app, entities as core_entities
 from ....utils import image
 
 REQUESTER_NAME: str = "ollama-chat"
@@ -41,7 +44,7 @@ class OllamaChatCompletions(requester.LLMAPIRequester):
             **args
         )
 
-    async def _closure(self, req_messages: list[dict], use_model: entities.LLMModelInfo,
+    async def _closure(self, query: core_entities.Query, req_messages: list[dict], use_model: entities.LLMModelInfo,
                        user_funcs: list[tools_entities.LLMFunction] = None) -> (
             llm_entities.Message):
         args: Any = self.request_cfg['args'].copy()
@@ -55,30 +58,59 @@ class OllamaChatCompletions(requester.LLMAPIRequester):
                 for me in msg["content"]:
                     if me["type"] == "text":
                         text_content.append(me["text"])
-                    elif me["type"] == "image_url":
-                        image_url = await self.get_base64_str(me["image_url"]['url'])
-                        image_urls.append(image_url)
+                    elif me["type"] == "image_base64":
+                        image_urls.append(me["image_base64"])
 
                 msg["content"] = "\n".join(text_content)
                 msg["images"] = [url.split(',')[1] for url in image_urls]
+            if 'tool_calls' in msg:  # LangBot 内部以 str 存储 tool_calls 的参数,这里需要转换为 dict
+                for tool_call in msg['tool_calls']:
+                    tool_call['function']['arguments'] = json.loads(tool_call['function']['arguments'])
         args["messages"] = messages
 
-        resp: Mapping[str, Any] | AsyncIterator[Mapping[str, Any]] = await self._req(args)
+        args["tools"] = []
+        if user_funcs:
+            tools = await self.ap.tool_mgr.generate_tools_for_openai(user_funcs)
+            if tools:
+                args["tools"] = tools
+
+        resp = await self._req(args)
         message: llm_entities.Message = await self._make_msg(resp)
         return message
 
     async def _make_msg(
             self,
-            chat_completions: Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]) -> llm_entities.Message:
-        message: Any = chat_completions.pop('message', None)
+            chat_completions: ollama.ChatResponse) -> llm_entities.Message:
+        message: ollama.Message = chat_completions.message
         if message is None:
             raise ValueError("chat_completions must contain a 'message' field")
 
-        message.update(chat_completions)
-        ret_msg: llm_entities.Message = llm_entities.Message(**message)
+        ret_msg: llm_entities.Message = None
+
+        if message.content is not None:
+            ret_msg = llm_entities.Message(
+                role="assistant",
+                content=message.content
+            )
+        if message.tool_calls is not None and len(message.tool_calls) > 0:
+            tool_calls: list[llm_entities.ToolCall] = []
+
+            for tool_call in message.tool_calls:
+                tool_calls.append(llm_entities.ToolCall(
+                    id=uuid.uuid4().hex,
+                    type="function",
+                    function=llm_entities.FunctionCall(
+                        name=tool_call.function.name,
+                        arguments=json.dumps(tool_call.function.arguments)
+                    )
+                ))
+            ret_msg.tool_calls = tool_calls
 
         return ret_msg
 
     async def call(
         self,
+        query: core_entities.Query,
         model: entities.LLMModelInfo,
         messages: typing.List[llm_entities.Message],
         funcs: typing.List[tools_entities.LLMFunction] = None,
@@ -92,14 +124,6 @@ class OllamaChatCompletions(requester.LLMAPIRequester):
                 msg_dict["content"] = "\n".join(part["text"] for part in content)
             req_messages.append(msg_dict)
         try:
-            return await self._closure(req_messages, model)
+            return await self._closure(query, req_messages, model, funcs)
         except asyncio.TimeoutError:
             raise errors.RequesterError('请求超时')
-
-    @async_lru.alru_cache(maxsize=128)
-    async def get_base64_str(
-        self,
-        original_url: str,
-    ) -> str:
-        base64_image, image_format = await image.qq_image_url_to_base64(original_url)
-        return f"data:image/{image_format};base64,{base64_image}"
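Two representation gaps drive the ollama changes: LangBot stores tool-call arguments as a JSON string while the ollama SDK exchanges them as a dict, and the SDK now returns a typed `ChatResponse` rather than a plain mapping. The string/dict round trip, isolated:

```python
import json

# Outbound (per the _closure hunk): stored string -> dict before handing to ollama.
stored_arguments = '{"city": "Shanghai"}'  # hypothetical stored tool-call arguments
outbound = json.loads(stored_arguments)

# Inbound (per the _make_msg hunk): ollama's dict -> string for llm_entities.FunctionCall.
inbound = json.dumps(outbound)
assert inbound == '{"city": "Shanghai"}'
```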
pkg/provider/modelmgr/requesters/xaichatcmpl.py (new file, 145 lines)

@@ -0,0 +1,145 @@
+from __future__ import annotations
+
+import asyncio
+import typing
+import json
+import base64
+from typing import AsyncGenerator
+
+import openai
+import openai.types.chat.chat_completion as chat_completion
+import httpx
+import aiohttp
+import async_lru
+
+from . import chatcmpl
+from .. import entities, errors, requester
+from ....core import entities as core_entities, app
+from ... import entities as llm_entities
+from ...tools import entities as tools_entities
+from ....utils import image
+
+
+@requester.requester_class("xai-chat-completions")
+class XaiChatCompletions(chatcmpl.OpenAIChatCompletions):
+    """xAI ChatCompletion API 请求器"""
+
+    client: openai.AsyncClient
+
+    requester_cfg: dict
+
+    def __init__(self, ap: app.Application):
+        self.ap = ap
+
+        self.requester_cfg = self.ap.provider_cfg.data['requester']['xai-chat-completions']
+
+    # async def initialize(self):
+
+    #     self.client = openai.AsyncClient(
+    #         api_key="",
+    #         base_url=self.requester_cfg['base-url'],
+    #         timeout=self.requester_cfg['timeout'],
+    #         http_client=httpx.AsyncClient(
+    #             proxies=self.ap.proxy_mgr.get_forward_proxies()
+    #         )
+    #     )
+
+    # async def _req(
+    #     self,
+    #     args: dict,
+    # ) -> chat_completion.ChatCompletion:
+    #     return await self.client.chat.completions.create(**args)
+
+    # async def _make_msg(
+    #     self,
+    #     chat_completion: chat_completion.ChatCompletion,
+    # ) -> llm_entities.Message:
+    #     chatcmpl_message = chat_completion.choices[0].message.dict()
+
+    #     # 确保 role 字段存在且不为 None
+    #     if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
+    #         chatcmpl_message['role'] = 'assistant'
+
+    #     message = llm_entities.Message(**chatcmpl_message)
+
+    #     return message
+
+    # async def _closure(
+    #     self,
+    #     req_messages: list[dict],
+    #     use_model: entities.LLMModelInfo,
+    #     use_funcs: list[tools_entities.LLMFunction] = None,
+    # ) -> llm_entities.Message:
+    #     self.client.api_key = use_model.token_mgr.get_token()
+
+    #     args = self.requester_cfg['args'].copy()
+    #     args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
+
+    #     if use_funcs:
+    #         tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
+
+    #         if tools:
+    #             args["tools"] = tools
+
+    #     # 设置此次请求中的messages
+    #     messages = req_messages.copy()
+
+    #     # 检查vision
+    #     for msg in messages:
+    #         if 'content' in msg and isinstance(msg["content"], list):
+    #             for me in msg["content"]:
+    #                 if me["type"] == "image_url":
+    #                     me["image_url"]['url'] = await self.get_base64_str(me["image_url"]['url'])
+
+    #     args["messages"] = messages
+
+    #     # 发送请求
+    #     resp = await self._req(args)
+
+    #     # 处理请求结果
+    #     message = await self._make_msg(resp)
+
+    #     return message
+
+    # async def call(
+    #     self,
+    #     model: entities.LLMModelInfo,
+    #     messages: typing.List[llm_entities.Message],
+    #     funcs: typing.List[tools_entities.LLMFunction] = None,
+    # ) -> llm_entities.Message:
+    #     req_messages = []  # req_messages 仅用于类内,外部同步由 query.messages 进行
+    #     for m in messages:
+    #         msg_dict = m.dict(exclude_none=True)
+    #         content = msg_dict.get("content")
+    #         if isinstance(content, list):
+    #             # 检查 content 列表中是否每个部分都是文本
+    #             if all(isinstance(part, dict) and part.get("type") == "text" for part in content):
+    #                 # 将所有文本部分合并为一个字符串
+    #                 msg_dict["content"] = "\n".join(part["text"] for part in content)
+    #         req_messages.append(msg_dict)
+
+    #     try:
+    #         return await self._closure(req_messages, model, funcs)
+    #     except asyncio.TimeoutError:
+    #         raise errors.RequesterError('请求超时')
+    #     except openai.BadRequestError as e:
+    #         if 'context_length_exceeded' in e.message:
+    #             raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
+    #         else:
+    #             raise errors.RequesterError(f'请求参数错误: {e.message}')
+    #     except openai.AuthenticationError as e:
+    #         raise errors.RequesterError(f'无效的 api-key: {e.message}')
+    #     except openai.NotFoundError as e:
+    #         raise errors.RequesterError(f'请求路径错误: {e.message}')
+    #     except openai.RateLimitError as e:
+    #         raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
+    #     except openai.APIError as e:
+    #         raise errors.RequesterError(f'请求错误: {e.message}')
+
+    # @async_lru.alru_cache(maxsize=128)
+    # async def get_base64_str(
+    #     self,
+    #     original_url: str,
+    # ) -> str:
+    #     base64_image, image_format = await image.qq_image_url_to_base64(original_url)
+    #     return f"data:image/{image_format};base64,{base64_image}"
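The new requester's only active code is `__init__`, which binds the `xai-chat-completions` config section; everything else is inherited from `OpenAIChatCompletions` (the commented-out body mirrors the parent implementation, apparently kept for reference), which works because xAI exposes an OpenAI-compatible Chat Completions API. The config keys it reads are exactly what migration m018 above writes, sketched here with the migration's placeholder values, not real credentials:

```python
provider_cfg = {
    "requester": {
        "xai-chat-completions": {
            "base-url": "https://api.x.ai/v1",
            "args": {},
            "timeout": 120,
        },
    },
    "keys": {
        "xai": ["xai-1234567890"],  # placeholder written by m018; replace with a real key
    },
}
```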
@@ -3,6 +3,7 @@ from __future__ import annotations
|
||||
import typing
|
||||
import json
|
||||
import uuid
|
||||
import base64
|
||||
|
||||
from .. import runner
|
||||
from ...core import entities as core_entities
|
||||
@@ -20,125 +21,258 @@ class DifyServiceAPIRunner(runner.RequestRunner):
|
||||
|
||||
async def initialize(self):
|
||||
"""初始化"""
|
||||
valid_app_types = ['chat', 'workflow']
|
||||
if self.ap.provider_cfg.data['dify-service-api']['app-type'] not in valid_app_types:
|
||||
raise errors.DifyAPIError(f"不支持的 Dify 应用类型: {self.ap.provider_cfg.data['dify-service-api']['app-type']}")
|
||||
valid_app_types = ["chat", "agent", "workflow"]
|
||||
if (
|
||||
self.ap.provider_cfg.data["dify-service-api"]["app-type"]
|
||||
not in valid_app_types
|
||||
):
|
||||
raise errors.DifyAPIError(
|
||||
f"不支持的 Dify 应用类型: {self.ap.provider_cfg.data['dify-service-api']['app-type']}"
|
||||
)
|
||||
|
||||
api_key = self.ap.provider_cfg.data['dify-service-api'][self.ap.provider_cfg.data['dify-service-api']['app-type']]['api-key']
|
||||
api_key = self.ap.provider_cfg.data["dify-service-api"][
|
||||
self.ap.provider_cfg.data["dify-service-api"]["app-type"]
|
||||
]["api-key"]
|
||||
|
||||
self.dify_client = client.AsyncDifyServiceClient(
|
||||
api_key=api_key,
|
||||
base_url=self.ap.provider_cfg.data['dify-service-api']['base-url']
|
||||
base_url=self.ap.provider_cfg.data["dify-service-api"]["base-url"],
|
||||
)
|
||||
|
||||
async def _preprocess_user_message(self, query: core_entities.Query) -> tuple[str, list[str]]:
|
||||
async def _preprocess_user_message(
|
||||
self, query: core_entities.Query
|
||||
) -> tuple[str, list[str]]:
|
||||
"""预处理用户消息,提取纯文本,并将图片上传到 Dify 服务
|
||||
|
||||
|
||||
Returns:
|
||||
tuple[str, list[str]]: 纯文本和图片的 Dify 服务图片 ID
|
||||
"""
|
||||
plain_text = ''
|
||||
plain_text = ""
|
||||
image_ids = []
|
||||
if isinstance(query.user_message.content, list):
|
||||
for ce in query.user_message.content:
|
||||
if ce.type == 'text':
|
||||
if ce.type == "text":
|
||||
plain_text += ce.text
|
||||
elif ce.type == 'image_url':
|
||||
file_bytes, image_format = await image.get_qq_image_bytes(ce.image_url.url)
|
||||
elif ce.type == "image_base64":
|
||||
image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
|
||||
file_bytes = base64.b64decode(image_b64)
|
||||
file = ("img.png", file_bytes, f"image/{image_format}")
|
||||
file_upload_resp = await self.dify_client.upload_file(file, f"{query.session.launcher_type.value}_{query.session.launcher_id}")
|
||||
image_id = file_upload_resp['id']
|
||||
file_upload_resp = await self.dify_client.upload_file(
|
||||
file,
|
||||
f"{query.session.launcher_type.value}_{query.session.launcher_id}",
|
||||
)
|
||||
image_id = file_upload_resp["id"]
|
||||
image_ids.append(image_id)
|
||||
elif isinstance(query.user_message.content, str):
|
||||
plain_text = query.user_message.content
|
||||
|
||||
return plain_text, image_ids
|
||||
|
||||
async def _chat_messages(self, query: core_entities.Query) -> typing.AsyncGenerator[llm_entities.Message, None]:
|
||||
async def _chat_messages(
|
||||
self, query: core_entities.Query
|
||||
) -> typing.AsyncGenerator[llm_entities.Message, None]:
|
||||
"""调用聊天助手"""
|
||||
cov_id = query.session.using_conversation.uuid or ""
|
||||
|
||||
plain_text, image_ids = await self._preprocess_user_message(query)
|
||||
|
||||
files = [{
|
||||
'type': 'image',
|
||||
'transfer_method': 'local_file',
|
||||
'upload_file_id': image_id,
|
||||
} for image_id in image_ids]
|
||||
files = [
|
||||
{
|
||||
"type": "image",
|
||||
"transfer_method": "local_file",
|
||||
"upload_file_id": image_id,
|
||||
}
|
||||
for image_id in image_ids
|
||||
]
|
||||
|
||||
resp = await self.dify_client.chat_messages(inputs={}, query=plain_text, user=f"{query.session.launcher_type.value}_{query.session.launcher_id}", conversation_id=cov_id, files=files)
|
||||
mode = "basic" # 标记是基础编排还是工作流编排
|
||||
|
||||
msg = llm_entities.Message(
|
||||
role='assistant',
|
||||
content=resp['answer'],
|
||||
)
|
||||
basic_mode_pending_chunk = ''
|
||||
|
||||
yield msg
|
||||
async for chunk in self.dify_client.chat_messages(
|
||||
inputs={},
|
||||
query=plain_text,
|
||||
user=f"{query.session.launcher_type.value}_{query.session.launcher_id}",
|
||||
conversation_id=cov_id,
|
||||
files=files,
|
||||
timeout=self.ap.provider_cfg.data["dify-service-api"]["chat"]["timeout"],
|
||||
):
|
||||
self.ap.logger.debug("dify-chat-chunk: ", chunk)
|
||||
|
||||
query.session.using_conversation.uuid = resp['conversation_id']
|
||||
if chunk['event'] == 'workflow_started':
|
||||
mode = "workflow"
|
||||
|
||||
async def _workflow_messages(self, query: core_entities.Query) -> typing.AsyncGenerator[llm_entities.Message, None]:
|
||||
if mode == "workflow":
|
||||
if chunk['event'] == 'node_finished':
|
||||
if chunk['data']['node_type'] == 'answer':
|
||||
yield llm_entities.Message(
|
||||
role="assistant",
|
||||
content=chunk['data']['outputs']['answer'],
|
||||
)
|
||||
elif mode == "basic":
|
||||
if chunk['event'] == 'message':
|
||||
basic_mode_pending_chunk += chunk['answer']
|
||||
elif chunk['event'] == 'message_end':
|
||||
yield llm_entities.Message(
|
||||
role="assistant",
|
||||
content=basic_mode_pending_chunk,
|
||||
)
|
||||
basic_mode_pending_chunk = ''
|
||||
|
||||
query.session.using_conversation.uuid = chunk["conversation_id"]
|
||||
|
||||
    async def _agent_chat_messages(
        self, query: core_entities.Query
    ) -> typing.AsyncGenerator[llm_entities.Message, None]:
        """Call the chat assistant (agent mode)"""
        cov_id = query.session.using_conversation.uuid or ""

        plain_text, image_ids = await self._preprocess_user_message(query)

        files = [
            {
                "type": "image",
                "transfer_method": "local_file",
                "upload_file_id": image_id,
            }
            for image_id in image_ids
        ]

        ignored_events = ["agent_message"]

        async for chunk in self.dify_client.chat_messages(
            inputs={},
            query=plain_text,
            user=f"{query.session.launcher_type.value}_{query.session.launcher_id}",
            response_mode="streaming",
            conversation_id=cov_id,
            files=files,
            timeout=self.ap.provider_cfg.data["dify-service-api"]["chat"]["timeout"],
        ):
            self.ap.logger.debug("dify-agent-chunk: ", chunk)
            if chunk["event"] in ignored_events:
                continue
            if chunk["event"] == "agent_thought":

                if chunk['tool'] != '' and chunk['observation'] != '':  # tool call result, skip
                    continue

                if chunk['thought'].strip() != '':  # text reply content
                    msg = llm_entities.Message(
                        role="assistant",
                        content=chunk["thought"],
                    )
                    yield msg

                if chunk['tool']:
                    msg = llm_entities.Message(
                        role="assistant",
                        tool_calls=[
                            llm_entities.ToolCall(
                                id=chunk['id'],
                                type="function",
                                function=llm_entities.FunctionCall(
                                    name=chunk["tool"],
                                    arguments=json.dumps({}),
                                ),
                            )
                        ],
                    )
                    yield msg

            query.session.using_conversation.uuid = chunk["conversation_id"]
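For orientation, a hedged walk-through of the agent_thought branching above, using assumed chunk shapes (only the keys the handler actually reads are included):

# Sketch: agent_thought routing. A non-empty thought becomes text content,
# a tool name becomes a tool call, and a chunk that already carries both a
# tool and its observation is a finished tool result and is skipped.
chunks = [
    {"event": "agent_thought", "id": "t1", "thought": "Look up weather", "tool": "", "observation": ""},
    {"event": "agent_thought", "id": "t2", "thought": "", "tool": "weather", "observation": ""},
    {"event": "agent_thought", "id": "t2", "thought": "", "tool": "weather", "observation": "sunny"},
]

for chunk in chunks:
    if chunk["tool"] != "" and chunk["observation"] != "":
        continue                              # tool result, skipped
    if chunk["thought"].strip() != "":
        print("text reply:", chunk["thought"])
    if chunk["tool"]:
        print("tool call:", chunk["tool"])    # emitted as a ToolCall message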
    async def _workflow_messages(
        self, query: core_entities.Query
    ) -> typing.AsyncGenerator[llm_entities.Message, None]:
        """Call the workflow"""

        if not query.session.using_conversation.uuid:
            query.session.using_conversation.uuid = str(uuid.uuid4())

        cov_id = query.session.using_conversation.uuid

        plain_text, image_ids = await self._preprocess_user_message(query)

        files = [{
            'type': 'image',
            'transfer_method': 'local_file',
            'upload_file_id': image_id,
        } for image_id in image_ids]
        files = [
            {
                "type": "image",
                "transfer_method": "local_file",
                "upload_file_id": image_id,
            }
            for image_id in image_ids
        ]

        ignored_events = ['text_chunk', 'workflow_started']
        ignored_events = ["text_chunk", "workflow_started"]

        async for chunk in self.dify_client.workflow_run(inputs={
            "langbot_user_message_text": plain_text,
            "langbot_session_id": f"{query.session.launcher_type.value}_{query.session.launcher_id}",
            "langbot_conversation_id": cov_id,
        }, user=f"{query.session.launcher_type.value}_{query.session.launcher_id}", files=files):
            if chunk['event'] in ignored_events:
        async for chunk in self.dify_client.workflow_run(
            inputs={
                "langbot_user_message_text": plain_text,
                "langbot_session_id": f"{query.session.launcher_type.value}_{query.session.launcher_id}",
                "langbot_conversation_id": cov_id,
            },
            user=f"{query.session.launcher_type.value}_{query.session.launcher_id}",
            files=files,
            timeout=self.ap.provider_cfg.data["dify-service-api"]["workflow"]["timeout"],
        ):
            self.ap.logger.debug("dify-workflow-chunk: ", chunk)
            if chunk["event"] in ignored_events:
                continue

            if chunk['event'] == 'node_started':

                if chunk['data']['node_type'] == 'start' or chunk['data']['node_type'] == 'end':
            if chunk["event"] == "node_started":

                if (
                    chunk["data"]["node_type"] == "start"
                    or chunk["data"]["node_type"] == "end"
                ):
                    continue

                msg = llm_entities.Message(
                    role='assistant',
                    role="assistant",
                    content=None,
                    tool_calls=[llm_entities.ToolCall(
                        id=chunk['data']['node_id'],
                        type='function',
                        function=llm_entities.FunctionCall(
                            name=chunk['data']['title'],
                            arguments=json.dumps({}),
                        ),
                    )],
                    tool_calls=[
                        llm_entities.ToolCall(
                            id=chunk["data"]["node_id"],
                            type="function",
                            function=llm_entities.FunctionCall(
                                name=chunk["data"]["title"],
                                arguments=json.dumps({}),
                            ),
                        )
                    ],
                )

                yield msg

            elif chunk['event'] == 'workflow_finished':
            elif chunk["event"] == "workflow_finished":
                if chunk['data']['error']:
                    raise errors.DifyAPIError(chunk['data']['error'])

                msg = llm_entities.Message(
                    role='assistant',
                    content=chunk['data']['outputs'][self.ap.provider_cfg.data['dify-service-api']['workflow']['output-key']],
                    role="assistant",
                    content=chunk["data"]["outputs"][
                        self.ap.provider_cfg.data["dify-service-api"]["workflow"][
                            "output-key"
                        ]
                    ],
                )

                yield msg
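The output-key lookup at the end of _workflow_messages reduces to the following sketch. The workflow_finished payload is an assumed shape mirroring only the keys accessed above; the output key comes from provider.json under dify-service-api.workflow.

# Sketch: extracting the configured output key from a finished workflow run.
chunk = {
    "event": "workflow_finished",
    "data": {"error": None, "outputs": {"summary": "All nodes completed."}},
}
output_key = "summary"  # provider.json: dify-service-api.workflow.output-key

if chunk["data"]["error"]:
    raise RuntimeError(chunk["data"]["error"])    # the runner raises DifyAPIError here
print(chunk["data"]["outputs"][output_key])       # -> "All nodes completed."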
    async def run(self, query: core_entities.Query) -> typing.AsyncGenerator[llm_entities.Message, None]:
    async def run(
        self, query: core_entities.Query
    ) -> typing.AsyncGenerator[llm_entities.Message, None]:
        """Run the request"""
        if self.ap.provider_cfg.data['dify-service-api']['app-type'] == 'chat':
        if self.ap.provider_cfg.data["dify-service-api"]["app-type"] == "chat":
            async for msg in self._chat_messages(query):
                yield msg
        elif self.ap.provider_cfg.data['dify-service-api']['app-type'] == 'workflow':
        elif self.ap.provider_cfg.data["dify-service-api"]["app-type"] == "agent":
            async for msg in self._agent_chat_messages(query):
                yield msg
        elif self.ap.provider_cfg.data["dify-service-api"]["app-type"] == "workflow":
            async for msg in self._workflow_messages(query):
                yield msg
        else:
            raise errors.DifyAPIError(f"不支持的 Dify 应用类型: {self.ap.provider_cfg.data['dify-service-api']['app-type']}")
            raise errors.DifyAPIError(
                f"不支持的 Dify 应用类型: {self.ap.provider_cfg.data['dify-service-api']['app-type']}"
            )
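run() is an async generator, so a caller drains it with async for; a minimal consumption sketch (the call site below is assumed, not part of this diff):

# Sketch: consuming the runner. run() dispatches to chat/agent/workflow
# based on provider.json's dify-service-api.app-type and yields Messages.
async def collect_replies(runner, query) -> list:
    replies = []
    async for msg in runner.run(query):
        replies.append(msg)   # presumably forwarded downstream as replies
    return replies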
@@ -23,7 +23,7 @@ class LocalAgentRunner(runner.RequestRunner):
        req_messages = query.prompt.messages.copy() + query.messages.copy() + [query.user_message]

        # first request
        msg = await query.use_model.requester.call(query.use_model, req_messages, query.use_funcs)
        msg = await query.use_model.requester.call(query, query.use_model, req_messages, query.use_funcs)

        yield msg

@@ -61,7 +61,7 @@ class LocalAgentRunner(runner.RequestRunner):
            req_messages.append(err_msg)

        # after all tool calls are handled, request again
        msg = await query.use_model.requester.call(query.use_model, req_messages, query.use_funcs)
        msg = await query.use_model.requester.call(query, query.use_model, req_messages, query.use_funcs)

        yield msg
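Both call sites now pass query as the first argument, which implies the requester interface gained a leading query parameter. A hedged sketch of the assumed shape (the actual base class lives elsewhere and is not part of this hunk; names are illustrative):

import typing

class AssumedRequester(typing.Protocol):
    # Assumed updated signature: `query` is now threaded through so the
    # requester can read per-request context. Types are stand-ins.
    async def call(
        self,
        query: typing.Any,                      # core_entities.Query
        model: typing.Any,                      # the runtime model entry
        messages: typing.Sequence[typing.Any],  # llm_entities.Message list
        funcs: typing.Sequence[typing.Any],     # available tool functions
    ) -> typing.Any: ...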
@@ -62,11 +62,11 @@ class AnnouncementManager:
    async def fetch_saved(
        self
    ) -> list[Announcement]:
        if not os.path.exists("res/announcement_saved.json"):
            with open("res/announcement_saved.json", "w", encoding="utf-8") as f:
        if not os.path.exists("data/labels/announcement_saved.json"):
            with open("data/labels/announcement_saved.json", "w", encoding="utf-8") as f:
                f.write("[]")

        with open("res/announcement_saved.json", "r", encoding="utf-8") as f:
        with open("data/labels/announcement_saved.json", "r", encoding="utf-8") as f:
            content = f.read()

        if not content:
@@ -79,7 +79,7 @@ class AnnouncementManager:
        content: list[Announcement]
    ):

        with open("res/announcement_saved.json", "w", encoding="utf-8") as f:
        with open("data/labels/announcement_saved.json", "w", encoding="utf-8") as f:
            f.write(json.dumps([
                item.to_dict() for item in content
            ], indent=4, ensure_ascii=False))
@@ -1,4 +1,4 @@
semantic_version = "v3.4.1.1"
semantic_version = "v3.4.2"

debug_mode = False
@@ -1,9 +1,11 @@
import base64
import typing
import io
from urllib.parse import urlparse, parse_qs
import ssl

import aiohttp
import PIL.Image


def get_qq_image_downloadable_url(image_url: str) -> tuple[str, dict]:
@@ -13,9 +15,10 @@ def get_qq_image_downloadable_url(image_url: str) -> tuple[str, dict]:
    return f"http://{parsed.netloc}{parsed.path}", query


async def get_qq_image_bytes(image_url: str) -> tuple[bytes, str]:
    """Fetch the bytes of a QQ image"""
    image_url, query = get_qq_image_downloadable_url(image_url)
async def get_qq_image_bytes(image_url: str, query: dict={}) -> tuple[bytes, str]:
    """[Deprecated] Fetch the bytes of a QQ image"""
    image_url, query_in_url = get_qq_image_downloadable_url(image_url)
    query = {**query, **query_in_url}
    ssl_context = ssl.create_default_context()
    ssl_context.check_hostname = False
    ssl_context.verify_mode = ssl.CERT_NONE
@@ -24,8 +27,11 @@ async def get_qq_image_bytes(image_url: str) -> tuple[bytes, str]:
        resp.raise_for_status()
        file_bytes = await resp.read()
        content_type = resp.headers.get('Content-Type')
        if not content_type or not content_type.startswith('image/'):
        if not content_type:
            image_format = 'jpeg'
        elif not content_type.startswith('image/'):
            pil_img = PIL.Image.open(io.BytesIO(file_bytes))
            image_format = pil_img.format.lower()
        else:
            image_format = content_type.split('/')[-1]
        return file_bytes, image_format
@@ -34,7 +40,7 @@ async def get_qq_image_bytes(image_url: str) -> tuple[bytes, str]:
async def qq_image_url_to_base64(
    image_url: str
) -> typing.Tuple[str, str]:
    """Convert a QQ image URL to base64 and return the image format
    """[Deprecated] Convert a QQ image URL to base64 and return the image format

    Args:
        image_url (str): the QQ image URL
@@ -47,8 +53,18 @@ async def qq_image_url_to_base64(
    # Flatten the query dictionary
    query = {k: v[0] for k, v in query.items()}

    file_bytes, image_format = await get_qq_image_bytes(image_url)
    file_bytes, image_format = await get_qq_image_bytes(image_url, query)

    base64_str = base64.b64encode(file_bytes).decode()

    return base64_str, image_format

async def extract_b64_and_format(image_base64_data: str) -> typing.Tuple[str, str]:
    """Extract the base64 payload and the image format

    e.g. data:image/jpeg;base64,xxx
    yields the base64 payload and the image format
    """
    base64_str = image_base64_data.split(',')[-1]
    image_format = image_base64_data.split(':')[-1].split(';')[0].split('/')[-1]
    return base64_str, image_format
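A quick usage sketch of extract_b64_and_format, using the dummy data URI from its docstring; the helper body is copied inline so the demo is self-contained:

import asyncio

async def extract_b64_and_format(image_base64_data: str):
    # copied from the helper above for a self-contained demo
    base64_str = image_base64_data.split(',')[-1]
    image_format = image_base64_data.split(':')[-1].split(';')[0].split('/')[-1]
    return base64_str, image_format

print(asyncio.run(extract_b64_and_format("data:image/jpeg;base64,xxx")))
# -> ('xxx', 'jpeg')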
@@ -2,7 +2,7 @@ import aiohttp

async def get_myip() -> str:
    try:
        async with aiohttp.ClientSession() as session:
        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10)) as session:
            async with session.get("https://ip.useragentinfo.com/myip") as response:
                return await response.text()
    except Exception as e:
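The ClientTimeout addition caps the whole request at ten seconds; a minimal sketch of the same pattern, using a generic URL:

import aiohttp

async def fetch_with_timeout(url: str) -> str:
    # Same pattern as get_myip above: total=10 bounds the whole request;
    # on expiry aiohttp raises asyncio.TimeoutError instead of hanging.
    timeout = aiohttp.ClientTimeout(total=10)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url) as resp:
            return await resp.text()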
@@ -22,13 +22,3 @@ def install_requirements(file):
    pipmain(['install', '-r', file, "-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
             "--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
    # log.reset_logging()


if __name__ == "__main__":
    try:
        install("openai11")
    except Exception as e:
        print(111)
        print(e)

    print(222)
@@ -115,6 +115,38 @@
            "name": "deepseek-coder",
            "requester": "deepseek-chat-completions",
            "token_mgr": "deepseek"
        },
        {
            "name": "grok-2-latest",
            "requester": "xai-chat-completions",
            "token_mgr": "xai"
        },
        {
            "name": "grok-2",
            "requester": "xai-chat-completions",
            "token_mgr": "xai"
        },
        {
            "name": "grok-2-vision-1212",
            "requester": "xai-chat-completions",
            "token_mgr": "xai",
            "vision_supported": true
        },
        {
            "name": "grok-2-1212",
            "requester": "xai-chat-completions",
            "token_mgr": "xai"
        },
        {
            "name": "grok-vision-beta",
            "requester": "xai-chat-completions",
            "token_mgr": "xai",
            "vision_supported": true
        },
        {
            "name": "grok-beta",
            "requester": "xai-chat-completions",
            "token_mgr": "xai"
        }
    ]
}
@@ -16,6 +16,9 @@
        ],
        "gitee-ai": [
            "XXXXX"
        ],
        "xai": [
            "xai-1234567890"
        ]
    },
    "requester": {
@@ -50,6 +53,11 @@
            "base-url": "https://ai.gitee.com/v1",
            "args": {},
            "timeout": 120
        },
        "xai-chat-completions": {
            "base-url": "https://api.x.ai/v1",
            "args": {},
            "timeout": 120
        }
    },
    "model": "gpt-4o",
@@ -62,11 +70,17 @@
        "base-url": "https://api.dify.ai/v1",
        "app-type": "chat",
        "chat": {
            "api-key": "app-1234567890"
            "api-key": "app-1234567890",
            "timeout": 120
        },
        "agent": {
            "api-key": "app-1234567890",
            "timeout": 120
        },
        "workflow": {
            "api-key": "app-1234567890",
            "output-key": "summary"
            "output-key": "summary",
            "timeout": 120
        }
    }
}
@@ -22,7 +22,6 @@
        "openai": {
            "type": "array",
            "title": "OpenAI API 密钥",
            "description": "OpenAI API 密钥",
            "items": {
                "type": "string"
            },
@@ -31,7 +30,6 @@
        "anthropic": {
            "type": "array",
            "title": "Anthropic API 密钥",
            "description": "Anthropic API 密钥",
            "items": {
                "type": "string"
            },
@@ -40,7 +38,6 @@
        "moonshot": {
            "type": "array",
            "title": "Moonshot API 密钥",
            "description": "Moonshot API 密钥",
            "items": {
                "type": "string"
            },
@@ -49,7 +46,6 @@
        "deepseek": {
            "type": "array",
            "title": "DeepSeek API 密钥",
            "description": "DeepSeek API 密钥",
            "items": {
                "type": "string"
            },
@@ -57,8 +53,15 @@
        },
        "gitee": {
            "type": "array",
            "title": "Gitee API 密钥",
            "description": "Gitee API 密钥",
            "title": "Gitee AI API 密钥",
            "items": {
                "type": "string"
            },
            "default": []
        },
        "xai": {
            "type": "array",
            "title": "xAI API 密钥",
            "items": {
                "type": "string"
            },
@@ -188,6 +191,25 @@
                "default": 120
            }
        }
    },
    "xai-chat-completions": {
        "type": "object",
        "title": "xAI API 请求配置",
        "description": "仅可编辑 URL 和 超时时间,额外请求参数不支持可视化编辑,请到编辑器编辑",
        "properties": {
            "base-url": {
                "type": "string",
                "title": "API URL"
            },
            "args": {
                "type": "object"
            },
            "timeout": {
                "type": "number",
                "title": "API 请求超时时间",
                "default": 120
            }
        }
    }
},
@@ -255,6 +277,24 @@
            "api-key": {
                "type": "string",
                "title": "API 密钥"
            },
            "timeout": {
                "type": "number",
                "title": "API 请求超时时间"
            }
        }
    },
    "agent": {
        "type": "object",
        "title": "Agent API 参数",
        "properties": {
            "api-key": {
                "type": "string",
                "title": "API 密钥"
            },
            "timeout": {
                "type": "number",
                "title": "API 请求超时时间"
            }
        }
    },
@@ -271,6 +311,10 @@
            "title": "工作流输出键",
            "description": "设置工作流输出键,用于从 Dify Workflow 结束节点返回的 JSON 数据中提取输出内容",
            "default": "summary"
        },
        "timeout": {
            "type": "number",
            "title": "API 请求超时时间"
        }
    }
}
@@ -79,6 +79,12 @@
              重载插件
            </v-list-item-title>
          </v-list-item>

          <v-list-item @click="reload('provider')">
            <v-list-item-title>
              重载 LLM 管理器
            </v-list-item-title>
          </v-list-item>
        </v-list>
      </v-menu>
    </v-list-item>
@@ -169,7 +175,8 @@ function openDocs() {

const reloadScopeLabel = {
  'platform': "消息平台",
  'plugin': "插件"
  'plugin': "插件",
  'provider': "LLM 管理器"
}

function reload(scope) {