Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-25 19:37:36 +08:00)
Compare commits (136 commits):

4fe92d8ece c893ffc177 a076ce5756 af82227dff 8f2b177145 9a997fbcb0 17070471f7 cb48221ed3
68eb0290e0 61bc6a1dc2 4a84bf2355 2c2a89d9db c91e2f0efe 411d082d2a d4e08a1765 b529d07479
d44df75e5c b74e07b608 4a868afecd 1cb9560663 8f878673ae 74a5e37892 76a69ecc7e f06e3d3efa
973e7bae42 94aa175c1a 777b766fff 1adaa93034 9853eccd89 7699ba3cae 9ac8b1a6fd f476c4724d
3d12632c9f 350e59fa6b b3d5b3fc8f 4a02c531b2 2dd2abedde 0d59c04151 08e0ede655 bcf89ca434
5e2f677d0b 4df372052d 2c5a0a00ba f3295b0fdd 431d515c26 d9e6198992 3951cbf266 c47c4994ae
a6072c2abb 360422f25e f135c946bd 750cc24900 46062bf4b9 869b2176a7 7138c101e3 04e26225cd
f9f2de570f 1dd598c7be c0f04e4f20 d3279b9823 2ad1f97e12 1046f3c2aa 1afecf01e4 3ee7736361
0666778fea 8df90558ab c1c03f11b4 da9afcd0ad bc1fbfa190 f3199dda20 4d0a28a1a7 76831579ad
c2d752f9e9 4c0917556f e17b0cf5c5 f2647316a5 78cc157657 f576f990de 254feb6a3a 4c5139e9ff
a055e37d3a bef5d6627b 69767ebdb4 53ecd0933e d32f783392 4d3610cdf7 166eebabff 9f2f1cd577
d86b884cab 8345edd9f7 e3821b3f09 72ca62eae4 075091ed06 d0a3dee083 6ba9b6973d 345eccf04c
127a38b15c 760db38c11 e4729337c8 7be226d3fa 68372a4b7a d65f862c36 5fa75330cf 547e3d098e
0f39a31648 f1ddddfe00 4e61302156 9e3cf418ba 3e29ec7892 f452742cd2 b560432b0b 99e5478ced
09dba91a37 18ec4adac9 8bedaa468a 0ab366fcac d664039e54 6535ba4f72 3b181cff93 d1274366a0
35a4b0f55f 399ebd36d7 a3552893aa b6cdf18c1a bd4c7f634d 160ca540ab 74c3a77ed1 0b527868bc
0f35458cf7 70ad92ca16 c0d56aa905 ed869f7e81 ea42579374 72d701df3e 1191b34fd4 ca3d3b2a66
.github/workflows/run-tests.yml (vendored, new file, 71 lines)
@@ -0,0 +1,71 @@
name: Unit Tests

on:
  pull_request:
    types: [opened, ready_for_review, synchronize]
    paths:
      - 'pkg/**'
      - 'tests/**'
      - '.github/workflows/run-tests.yml'
      - 'pyproject.toml'
      - 'run_tests.sh'
  push:
    branches:
      - master
      - develop
    paths:
      - 'pkg/**'
      - 'tests/**'
      - '.github/workflows/run-tests.yml'
      - 'pyproject.toml'
      - 'run_tests.sh'

jobs:
  test:
    name: Run Unit Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12']
      fail-fast: false

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install uv
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          echo "$HOME/.cargo/bin" >> $GITHUB_PATH

      - name: Install dependencies
        run: |
          uv sync --dev

      - name: Run unit tests
        run: |
          bash run_tests.sh

      - name: Upload coverage to Codecov
        if: matrix.python-version == '3.12'
        uses: codecov/codecov-action@v5
        with:
          files: ./coverage.xml
          flags: unit-tests
          name: unit-tests-coverage
          fail_ci_if_error: false
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

      - name: Test Summary
        if: always()
        run: |
          echo "## Unit Tests Results" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "Python Version: ${{ matrix.python-version }}" >> $GITHUB_STEP_SUMMARY
          echo "Test Status: ${{ job.status }}" >> $GITHUB_STEP_SUMMARY
.gitignore (vendored)
@@ -22,7 +22,7 @@ tips.py
 venv*
 bin/
 .vscode
-test_*
+/test_*
 venv/
 hugchat.json
 qcapi
@@ -43,4 +43,7 @@ test.py
 /web_ui
 .venv/
 uv.lock
-/test
+/test
+plugins.bak
+coverage.xml
+.coverage
AGENTS.md (new file, 81 lines)
@@ -0,0 +1,81 @@
# AGENTS.md

This file guides code agents (such as Claude Code, GitHub Copilot, and OpenAI Codex) working in the LangBot project.

## Project Overview

LangBot is an open-source, LLM-native instant messaging bot development platform. It aims to provide an out-of-the-box IM robot development experience, with Agent, RAG, MCP, and other LLM application features, supports instant messaging platforms worldwide, and provides rich API interfaces for custom development.

LangBot has a comprehensive frontend; all operations can be performed through it. The project is split into these major parts:

- `./pkg`: The core Python package of the project backend.
    - `./pkg/platform`: The platform module, containing the logic of message platform adapters, bot managers, message session managers, etc.
    - `./pkg/provider`: The provider module, containing the logic of LLM providers, tool providers, etc.
    - `./pkg/pipeline`: The pipeline module, containing the logic of pipelines, stages, the query pool, etc.
    - `./pkg/api`: The API module, containing the HTTP API controllers and services.
    - `./pkg/plugin`: The LangBot bridge for connecting with the plugin system.
- `./libs`: SDKs we previously developed for the project, such as `qq_official_api`, `wecom_api`, etc.
- `./templates`: Templates of config files, components, etc.
- `./web`: Frontend codebase, built with Next.js + **shadcn** + **Tailwind CSS**.
- `./docker`: docker-compose deployment files.

## Backend Development

We use `uv` to manage dependencies.

```bash
pip install uv
uv sync --dev
```

Start the backend and run the project in development mode:

```bash
uv run main.py
```

Then you can access the project at `http://127.0.0.1:5300`.

## Frontend Development

We use `pnpm` to manage dependencies.

```bash
cd web
cp .env.example .env
pnpm install
pnpm dev
```

Then you can access the project at `http://127.0.0.1:3000`.

## Plugin System Architecture

LangBot is composed of various internal components such as Large Language Model tools, commands, messaging platform adapters, LLM requesters, and more. To meet extensibility and flexibility requirements, we have implemented a production-grade plugin system.

Each plugin runs in an independent process, managed uniformly by the Plugin Runtime. It has two operating modes: `stdio` and `websocket`. When LangBot is started directly by users (not running in a container), it uses `stdio` mode, which is common for personal users or lightweight environments. When LangBot runs in a container, it uses `websocket` mode, designed specifically for production environments.

The Plugin Runtime automatically starts each installed plugin and interacts with it through stdio. In plugin development scenarios, developers can use the `lbp` command-line tool to start plugins and connect to the running Runtime via WebSocket for debugging.

> The Plugin SDK, CLI, Runtime, and the entity definitions shared between LangBot and plugins live in the [`langbot-plugin-sdk`](https://github.com/langbot-app/langbot-plugin-sdk) repository.

## Some Development Tips and Standards

- LangBot is a global project: all code comments should be in English, and user experience should be considered in all aspects.
    - Thus you should consider i18n support in all aspects.
- LangBot is widely adopted in both toC and toB scenarios, so you should consider compatibility and security in all aspects.

## Some Principles

- Keep it simple, stupid.
- Entities should not be multiplied unnecessarily.
- The "Eight Honors and Eight Shames" (八荣八耻) of coding:

    Shame on blindly guessing APIs; honor in researching them carefully.
    Shame on vague execution; honor in seeking confirmation.
    Shame on imagining business logic; honor in having humans confirm it.
    Shame on inventing interfaces; honor in reusing existing ones.
    Shame on skipping validation; honor in proactive testing.
    Shame on breaking the architecture; honor in following conventions.
    Shame on pretending to understand; honor in honest ignorance.
    Shame on blind modification; honor in careful refactoring.
@@ -119,10 +119,12 @@ docker compose up -d
| [LMStudio](https://lmstudio.ai/) | ✅ | Local LLM runtime platform |
| [GiteeAI](https://ai.gitee.com/) | ✅ | LLM API aggregation platform |
| [SiliconFlow](https://siliconflow.cn/) | ✅ | LLM aggregation platform |
| [小马算力](https://www.tokenpony.cn/453z1) | ✅ | LLM aggregation platform |
| [阿里云百炼](https://bailian.console.aliyun.com/) | ✅ | LLM aggregation platform, LLMOps platform |
| [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | LLM aggregation platform, LLMOps platform |
| [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | LLM aggregation platform |
| [MCP](https://modelcontextprotocol.io/) | ✅ | Supports acquiring tools via the MCP protocol |
| [百宝箱Tbox](https://www.tbox.cn/open) | ✅ | Ant Group Tbox agent platform; 1 billion free LLM tokens per month |

### TTS

@@ -145,3 +147,9 @@ docker compose up -d
<a href="https://github.com/langbot-app/LangBot/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
</a>

<!--
## For Code Agents

If you're a code agent based on LLM, you should read [AGENTS.md](AGENTS.md) to understand the project structure and development process before you start any development.
-->
codecov.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
coverage:
  status:
    project: off
    patch: off
docs/API_KEY_AUTH.md (new file, 291 lines)
@@ -0,0 +1,291 @@
# API Key Authentication

LangBot now supports API key authentication, allowing external systems to access its HTTP service API.

## Managing API Keys

API keys can be managed through the web interface:

1. Log in to the LangBot web interface
2. Click the "API Keys" button at the bottom of the sidebar
3. Create, view, copy, or delete API keys as needed

## Using API Keys

### Authentication Headers

Include your API key in the request header using one of these methods:

**Method 1: X-API-Key header (Recommended)**
```
X-API-Key: lbk_your_api_key_here
```

**Method 2: Authorization Bearer token**
```
Authorization: Bearer lbk_your_api_key_here
```

## Available APIs

All existing LangBot APIs now support **both user token and API key authentication**. This means you can use API keys to access:

- **Model Management** - `/api/v1/provider/models/llm` and `/api/v1/provider/models/embedding`
- **Bot Management** - `/api/v1/platform/bots`
- **Pipeline Management** - `/api/v1/pipelines`
- **Knowledge Base** - `/api/v1/knowledge/*`
- **MCP Servers** - `/api/v1/mcp/servers`
- And more...

### Authentication Methods

Each endpoint accepts **either**:
1. **User Token** (via `Authorization: Bearer <user_jwt_token>`) - for the web UI and authenticated users
2. **API Key** (via `X-API-Key` or `Authorization: Bearer <api_key>`) - for external services

## Example: Model Management

### List All LLM Models

```http
GET /api/v1/provider/models/llm
X-API-Key: lbk_your_api_key_here
```

Response:
```json
{
  "code": 0,
  "msg": "ok",
  "data": {
    "models": [
      {
        "uuid": "model-uuid",
        "name": "GPT-4",
        "description": "OpenAI GPT-4 model",
        "requester": "openai-chat-completions",
        "requester_config": {...},
        "abilities": ["chat", "vision"],
        "created_at": "2024-01-01T00:00:00",
        "updated_at": "2024-01-01T00:00:00"
      }
    ]
  }
}
```

### Create a New LLM Model

```http
POST /api/v1/provider/models/llm
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "My Custom Model",
  "description": "Description of the model",
  "requester": "openai-chat-completions",
  "requester_config": {
    "model": "gpt-4",
    "args": {}
  },
  "api_keys": [
    {
      "name": "default",
      "keys": ["sk-..."]
    }
  ],
  "abilities": ["chat"],
  "extra_args": {}
}
```

### Update an LLM Model

```http
PUT /api/v1/provider/models/llm/{model_uuid}
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "Updated Model Name",
  "description": "Updated description",
  ...
}
```

### Delete an LLM Model

```http
DELETE /api/v1/provider/models/llm/{model_uuid}
X-API-Key: lbk_your_api_key_here
```

## Example: Bot Management

### List All Bots

```http
GET /api/v1/platform/bots
X-API-Key: lbk_your_api_key_here
```

### Create a New Bot

```http
POST /api/v1/platform/bots
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "My Bot",
  "adapter": "telegram",
  "config": {...}
}
```

## Example: Pipeline Management

### List All Pipelines

```http
GET /api/v1/pipelines
X-API-Key: lbk_your_api_key_here
```

### Create a New Pipeline

```http
POST /api/v1/pipelines
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "My Pipeline",
  "config": {...}
}
```

## Error Responses

### 401 Unauthorized

```json
{
  "code": -1,
  "msg": "No valid authentication provided (user token or API key required)"
}
```

or

```json
{
  "code": -1,
  "msg": "Invalid API key"
}
```

### 404 Not Found

```json
{
  "code": -1,
  "msg": "Resource not found"
}
```

### 500 Internal Server Error

```json
{
  "code": -2,
  "msg": "Error message details"
}
```

## Security Best Practices

1. **Keep API keys secure**: Store them securely and never commit them to version control
2. **Use HTTPS**: Always use HTTPS in production to encrypt API key transmission
3. **Rotate keys regularly**: Create new API keys periodically and delete old ones
4. **Use descriptive names**: Give your API keys meaningful names to track their usage
5. **Delete unused keys**: Remove API keys that are no longer needed
6. **Use the X-API-Key header**: Prefer the `X-API-Key` header for clarity

## Example: Python Client

```python
import requests

API_KEY = "lbk_your_api_key_here"
BASE_URL = "http://your-langbot-server:5300"

headers = {
    "X-API-Key": API_KEY,
    "Content-Type": "application/json"
}

# List all models
response = requests.get(f"{BASE_URL}/api/v1/provider/models/llm", headers=headers)
models = response.json()["data"]["models"]

print(f"Found {len(models)} models")
for model in models:
    print(f"- {model['name']}: {model['description']}")

# Create a new bot
bot_data = {
    "name": "My Telegram Bot",
    "adapter": "telegram",
    "config": {
        "token": "your-telegram-token"
    }
}

response = requests.post(
    f"{BASE_URL}/api/v1/platform/bots",
    headers=headers,
    json=bot_data
)

if response.status_code == 200:
    bot_uuid = response.json()["data"]["uuid"]
    print(f"Bot created with UUID: {bot_uuid}")
```

## Example: cURL

```bash
# List all models
curl -X GET \
  -H "X-API-Key: lbk_your_api_key_here" \
  http://your-langbot-server:5300/api/v1/provider/models/llm

# Create a new pipeline
curl -X POST \
  -H "X-API-Key: lbk_your_api_key_here" \
  -H "Content-Type: application/json" \
  -d '{
    "name": "My Pipeline",
    "config": {...}
  }' \
  http://your-langbot-server:5300/api/v1/pipelines

# Get bot logs
curl -X POST \
  -H "X-API-Key: lbk_your_api_key_here" \
  -H "Content-Type: application/json" \
  -d '{
    "from_index": -1,
    "max_count": 10
  }' \
  http://your-langbot-server:5300/api/v1/platform/bots/{bot_uuid}/logs
```

## Notes

- The same endpoints work for both the web UI (with user tokens) and external services (with API keys)
- There is no need to learn different API paths; use the existing API documentation with API key authentication
- All endpoints that previously required user authentication now also accept API keys
docs/TESTING_SUMMARY.md (new file, 180 lines)
@@ -0,0 +1,180 @@
# Pipeline Unit Tests - Implementation Summary

## Overview

A comprehensive unit test suite for LangBot's pipeline stages, providing extensible test infrastructure and automated CI/CD integration.

## What Was Implemented

### 1. Test Infrastructure (`tests/pipeline/conftest.py`)
- **MockApplication factory**: Provides a complete mock of the Application object with all dependencies
- **Reusable fixtures**: Mock objects for Session, Conversation, Model, Adapter, Query
- **Helper functions**: Utilities for creating results and assertions
- **Lazy import support**: Handles circular import issues via `importlib.import_module()` (see the sketch below)
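As a rough illustration of the infrastructure described above, a `conftest.py` could look like the following; this is a hedged sketch, not the project's actual fixture, and the mocked attribute names (`logger`, `sess_mgr`, `model_mgr`) are assumptions:

```python
# Hypothetical sketch of tests/pipeline/conftest.py; attribute names are illustrative.
from importlib import import_module
from unittest.mock import AsyncMock, MagicMock

import pytest


@pytest.fixture
def mock_app():
    """Build a MagicMock standing in for the core Application object."""
    app = MagicMock()
    app.logger = MagicMock()
    app.sess_mgr = AsyncMock()   # session manager used by pipeline stages
    app.model_mgr = AsyncMock()  # model manager resolving LLM models
    return app


def get_bansess_module():
    """Lazily import a stage module to avoid circular imports at collection time."""
    return import_module('pkg.pipeline.bansess.bansess')
```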
### 2. Test Coverage

#### Pipeline Stages Tested:
- ✅ **test_bansess.py** (6 tests) - Access control whitelist/blacklist logic
- ✅ **test_ratelimit.py** (3 tests) - Rate limiting acquire/release logic
- ✅ **test_preproc.py** (3 tests) - Message preprocessing and variable setup
- ✅ **test_respback.py** (2 tests) - Response sending with/without quotes
- ✅ **test_resprule.py** (3 tests) - Group message rule matching
- ✅ **test_pipelinemgr.py** (5 tests) - Pipeline manager CRUD operations

#### Additional Tests:
- ✅ **test_simple.py** (5 tests) - Test infrastructure validation
- ✅ **test_stages_integration.py** - Integration tests with full imports

**Total: 27 test cases**

### 3. CI/CD Integration

**GitHub Actions Workflow** (`.github/workflows/pipeline-tests.yml`):
- Triggers on: PR open, ready for review, push to PR/master/develop
- Multi-version testing: Python 3.10, 3.11, 3.12
- Coverage reporting: Integrated with Codecov
- Auto-runs via the `run_tests.sh` script

### 4. Configuration Files

- **pytest.ini** - Pytest configuration with asyncio support
- **run_tests.sh** - Automated test runner with coverage
- **tests/README.md** - Comprehensive testing documentation

## Technical Challenges & Solutions

### Challenge 1: Circular Import Dependencies

**Problem**: Direct imports of pipeline modules caused circular dependency errors:
```
pkg.pipeline.stage → pkg.core.app → pkg.pipeline.pipelinemgr → pkg.pipeline.resprule
```

**Solution**: Implemented lazy imports using `importlib.import_module()`:
```python
def get_bansess_module():
    return import_module('pkg.pipeline.bansess.bansess')


# Use in tests
bansess = get_bansess_module()
stage = bansess.BanSessionCheckStage(mock_app)
```

### Challenge 2: Pydantic Validation Errors

**Problem**: Some stages use Pydantic models that validate the `new_query` parameter.

**Solution**: Tests use lazy imports to load the actual modules, which handle validation correctly. Mock objects work for most cases, but some integration tests needed real instances; a model factory can help there, as sketched below.
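One way to get valid instances, assuming the entity is a Pydantic v2 model, is a small factory built on `model_construct`, which bypasses validation. The `Query` stand-in and its field names here are illustrative assumptions, not the real `pkg` entity:

```python
# Hypothetical factory for test Query objects; field names are assumptions.
from pydantic import BaseModel


class Query(BaseModel):  # stand-in for the real pkg entity
    query_id: int
    message_chain: list = []


def make_query(**overrides) -> Query:
    """Build a Query without tripping validation on mocked fields."""
    defaults = {'query_id': 1, 'message_chain': []}
    defaults.update(overrides)
    # model_construct skips validation, useful when fields hold Mock objects
    return Query.model_construct(**defaults)
```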
### Challenge 3: Mock Configuration

**Problem**: Lists don't allow `.copy` attribute assignment in Python.

**Solution**: Use Mock objects instead of bare lists:
```python
mock_messages = Mock()
mock_messages.copy = Mock(return_value=[])
conversation.messages = mock_messages
```

## Test Execution

### Current Status

Running `bash run_tests.sh` shows:
- ✅ 9 tests passing (infrastructure and integration)
- ⚠️ 18 tests with issues (due to circular imports and Pydantic validation)

### Working Tests
- All `test_simple.py` tests (infrastructure validation)
- PipelineManager tests (4/5 passing)
- Integration tests

### Known Issues

Some tests encounter:
1. **Circular import errors** - when importing certain stage modules
2. **Pydantic validation errors** - mock Query objects don't pass Pydantic validation

### Recommended Usage

For CI/CD purposes:
1. Run `test_simple.py` to validate the test infrastructure
2. Run `test_pipelinemgr.py` for manager logic
3. Use integration tests sparingly due to import issues

For local development:
1. Use the test infrastructure as a template
2. Add new tests following the lazy import pattern
3. Prefer integration-style tests that test behavior, not imports

## Future Improvements

### Short Term
1. **Refactor the pipeline module structure** to eliminate circular dependencies
2. **Add Pydantic model factories** for creating valid test instances
3. **Expand integration tests** once import issues are resolved

### Long Term
1. **Integration tests** - full pipeline execution tests
2. **Performance benchmarks** - measure stage execution time
3. **Mutation testing** - verify test quality with mutation testing
4. **Property-based testing** - use Hypothesis for edge case discovery

## File Structure

```
.
├── .github/workflows/
│   └── pipeline-tests.yml        # CI/CD workflow
├── tests/
│   ├── README.md                 # Testing documentation
│   ├── __init__.py
│   └── pipeline/
│       ├── __init__.py
│       ├── conftest.py           # Shared fixtures
│       ├── test_simple.py        # Infrastructure tests ✅
│       ├── test_bansess.py       # BanSession tests
│       ├── test_ratelimit.py     # RateLimit tests
│       ├── test_preproc.py       # PreProcessor tests
│       ├── test_respback.py      # ResponseBack tests
│       ├── test_resprule.py      # ResponseRule tests
│       ├── test_pipelinemgr.py   # Manager tests ✅
│       └── test_stages_integration.py  # Integration tests
├── pytest.ini                    # Pytest config
├── run_tests.sh                  # Test runner
└── TESTING_SUMMARY.md            # This file
```

## How to Use

### Run Tests Locally
```bash
bash run_tests.sh
```

### Run a Specific Test File
```bash
pytest tests/pipeline/test_simple.py -v
```

### Run with Coverage
```bash
pytest tests/pipeline/ --cov=pkg/pipeline --cov-report=html
```

### View the Coverage Report
```bash
open htmlcov/index.html
```

## Conclusion

This test suite provides:
- ✅ A solid foundation for pipeline testing
- ✅ An extensible architecture for adding new tests
- ✅ CI/CD integration
- ✅ Comprehensive documentation

Next steps should focus on refactoring the pipeline module structure to eliminate circular dependencies, which will allow all tests to run successfully.
docs/service-api-openapi.json (new file, 1944 lines)
File diff suppressed because it is too large.
libs/coze_server_api/__init__.py (new file, empty)
libs/coze_server_api/client.py (new file, 192 lines)
@@ -0,0 +1,192 @@
import json
import asyncio
import aiohttp
import io
from typing import Dict, List, Any, AsyncGenerator
import os
from pathlib import Path


class AsyncCozeAPIClient:
    def __init__(self, api_key: str, api_base: str = "https://api.coze.cn"):
        self.api_key = api_key
        self.api_base = api_base
        self.session = None

    async def __aenter__(self):
        """Support use as an async context manager."""
        await self.coze_session()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the session automatically on exit."""
        await self.close()

    async def coze_session(self):
        """Ensure the HTTP session exists."""
        if self.session is None:
            connector = aiohttp.TCPConnector(
                ssl=False if self.api_base.startswith("http://") else True,
                limit=100,
                limit_per_host=30,
                keepalive_timeout=30,
                enable_cleanup_closed=True,
            )
            timeout = aiohttp.ClientTimeout(
                total=120,  # default timeout
                connect=30,
                sock_read=120,
            )
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Accept": "text/event-stream",
            }
            self.session = aiohttp.ClientSession(
                headers=headers, timeout=timeout, connector=connector
            )
        return self.session

    async def close(self):
        """Close the session explicitly."""
        if self.session and not self.session.closed:
            await self.session.close()
            self.session = None

    async def upload(
        self,
        file,
    ) -> str:
        # Handle Path objects
        if isinstance(file, Path):
            if not file.exists():
                raise ValueError(f"File not found: {file}")
            with open(file, "rb") as f:
                file = f.read()

        # Handle file path strings
        elif isinstance(file, str):
            if not os.path.isfile(file):
                raise ValueError(f"File not found: {file}")
            with open(file, "rb") as f:
                file = f.read()

        # Handle file-like objects
        elif hasattr(file, 'read'):
            file = file.read()

        session = await self.coze_session()
        url = f"{self.api_base}/v1/files/upload"

        try:
            file_io = io.BytesIO(file)
            async with session.post(
                url,
                data={
                    "file": file_io,
                },
                timeout=aiohttp.ClientTimeout(total=60),
            ) as response:
                if response.status == 401:
                    raise Exception("Coze API authentication failed; check that the API key is correct")

                response_text = await response.text()

                if response.status != 200:
                    raise Exception(
                        f"File upload failed, status code: {response.status}, response: {response_text}"
                    )
                try:
                    result = await response.json()
                except json.JSONDecodeError:
                    raise Exception(f"Failed to parse file upload response: {response_text}")

                if result.get("code") != 0:
                    raise Exception(f"File upload failed: {result.get('msg', 'unknown error')}")

                file_id = result["data"]["id"]
                return file_id

        except asyncio.TimeoutError:
            raise Exception("File upload timed out")
        except Exception as e:
            raise Exception(f"File upload failed: {str(e)}")

    async def chat_messages(
        self,
        bot_id: str,
        user_id: str,
        additional_messages: List[Dict] | None = None,
        conversation_id: str | None = None,
        auto_save_history: bool = True,
        stream: bool = True,
        timeout: float = 120,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Send chat messages and yield the streaming response.

        Args:
            bot_id: Bot ID
            user_id: User ID
            additional_messages: Extra messages to send
            conversation_id: Conversation ID
            auto_save_history: Whether to save history automatically
            stream: Whether to stream the response
            timeout: Timeout in seconds
        """
        session = await self.coze_session()
        url = f"{self.api_base}/v3/chat"

        payload = {
            "bot_id": bot_id,
            "user_id": user_id,
            "stream": stream,
            "auto_save_history": auto_save_history,
        }

        if additional_messages:
            payload["additional_messages"] = additional_messages

        params = {}
        if conversation_id:
            params["conversation_id"] = conversation_id

        try:
            async with session.post(
                url,
                json=payload,
                params=params,
                timeout=aiohttp.ClientTimeout(total=timeout),
            ) as response:
                if response.status == 401:
                    raise Exception("Coze API authentication failed; check that the API key is correct")

                if response.status != 200:
                    raise Exception(f"Coze API streaming request failed, status code: {response.status}")

                chunk_type = ''
                chunk_data = ''
                async for chunk in response.content:
                    chunk = chunk.decode("utf-8")
                    if chunk != '\n':
                        if chunk.startswith("event:"):
                            chunk_type = chunk.replace("event:", "", 1).strip()
                        elif chunk.startswith("data:"):
                            chunk_data = chunk.replace("data:", "", 1).strip()
                        else:
                            # Any other line (e.g. an '\r\n' separator) flushes the buffered event;
                            # data may be empty when the API is deployed locally
                            yield {"event": chunk_type, "data": json.loads(chunk_data) if chunk_data else {}}

        except asyncio.TimeoutError:
            raise Exception(f"Coze API streaming request timed out ({timeout}s)")
        except Exception as e:
            raise Exception(f"Coze API streaming request failed: {str(e)}")
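A minimal usage sketch for this client follows; the bot ID, user ID, and message payload are placeholders, and the `additional_messages` shape mirrors the Coze v3 chat API used above:

```python
# Hypothetical usage of AsyncCozeAPIClient; IDs and message content are placeholders.
import asyncio

from libs.coze_server_api.client import AsyncCozeAPIClient


async def main():
    async with AsyncCozeAPIClient(api_key="your-coze-api-key") as client:
        messages = [{"role": "user", "content": "Hello!", "content_type": "text"}]
        # Iterate over server-sent events until the stream completes
        async for event in client.chat_messages(
            bot_id="your-bot-id",
            user_id="external-user-1",
            additional_messages=messages,
        ):
            print(event["event"], event["data"])


asyncio.run(main())
```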
@@ -110,6 +110,24 @@ class DingTalkClient:
         else:
             raise Exception(f'Error: {response.status_code}, {response.text}')

+    async def get_file_url(self, download_code: str):
+        if not await self.check_access_token():
+            await self.get_access_token()
+        url = 'https://api.dingtalk.com/v1.0/robot/messageFiles/download'
+        params = {'downloadCode': download_code, 'robotCode': self.robot_code}
+        headers = {'x-acs-dingtalk-access-token': self.access_token}
+        async with httpx.AsyncClient() as client:
+            response = await client.post(url, headers=headers, json=params)
+            if response.status_code == 200:
+                result = response.json()
+                download_url = result.get('downloadUrl')
+                if download_url:
+                    return download_url
+                else:
+                    await self.logger.error(f'failed to get file: {response.json()}')
+            else:
+                raise Exception(f'Error: {response.status_code}, {response.text}')
+
     async def update_incoming_message(self, message):
         """Asynchronously update the incoming_message held by DingTalkClient."""
         message_data = await self.get_message(message)
@@ -189,6 +207,17 @@ class DingTalkClient:
             message_data['Audio'] = await self.get_audio_url(incoming_message.to_dict()['content']['downloadCode'])

             message_data['Type'] = 'audio'
+        elif incoming_message.message_type == 'file':
+            down_list = incoming_message.get_down_list()
+            if len(down_list) >= 2:
+                message_data['File'] = await self.get_file_url(down_list[0])
+                message_data['Name'] = down_list[1]
+            else:
+                if self.logger:
+                    await self.logger.error(f'get_down_list() returned fewer than 2 elements: {down_list}')
+                message_data['File'] = None
+                message_data['Name'] = None
+            message_data['Type'] = 'file'

         copy_message_data = message_data.copy()
         del copy_message_data['IncomingMessage']
@@ -31,6 +31,15 @@ class DingTalkEvent(dict):
     def audio(self):
         return self.get('Audio', '')

+    @property
+    def file(self):
+        return self.get('File', '')
+
+    @property
+    def name(self):
+        return self.get('Name', '')
+
+
     @property
     def conversation(self):
         return self.get('conversation_type', '')
@@ -1,189 +1,452 @@
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
import traceback
|
||||
import uuid
|
||||
import xml.etree.ElementTree as ET
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Callable, Optional
|
||||
from urllib.parse import unquote
|
||||
import hashlib
|
||||
import traceback
|
||||
|
||||
import httpx
|
||||
from libs.wecom_ai_bot_api.WXBizMsgCrypt3 import WXBizMsgCrypt
|
||||
from quart import Quart, request, Response, jsonify
|
||||
import langbot_plugin.api.entities.builtin.platform.message as platform_message
|
||||
import asyncio
|
||||
from libs.wecom_ai_bot_api import wecombotevent
|
||||
from typing import Callable
|
||||
import base64
|
||||
from Crypto.Cipher import AES
|
||||
from quart import Quart, request, Response, jsonify
|
||||
|
||||
from libs.wecom_ai_bot_api import wecombotevent
|
||||
from libs.wecom_ai_bot_api.WXBizMsgCrypt3 import WXBizMsgCrypt
|
||||
from pkg.platform.logger import EventLogger
|
||||
|
||||
|
||||
@dataclass
|
||||
class StreamChunk:
|
||||
"""描述单次推送给企业微信的流式片段。"""
|
||||
|
||||
# 需要返回给企业微信的文本内容
|
||||
content: str
|
||||
|
||||
# 标记是否为最终片段,对应企业微信协议里的 finish 字段
|
||||
is_final: bool = False
|
||||
|
||||
# 预留额外元信息,未来支持多模态扩展时可使用
|
||||
meta: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
|
||||
class StreamSession:
|
||||
"""维护一次企业微信流式会话的上下文。"""
|
||||
|
||||
# 企业微信要求的 stream_id,用于标识后续刷新请求
|
||||
stream_id: str
|
||||
|
||||
# 原始消息的 msgid,便于与流水线消息对应
|
||||
msg_id: str
|
||||
|
||||
# 群聊会话标识(单聊时为空)
|
||||
chat_id: Optional[str]
|
||||
|
||||
# 触发消息的发送者
|
||||
user_id: Optional[str]
|
||||
|
||||
# 会话创建时间
|
||||
created_at: float = field(default_factory=time.time)
|
||||
|
||||
# 最近一次被访问的时间,cleanup 依据该值判断过期
|
||||
last_access: float = field(default_factory=time.time)
|
||||
|
||||
# 将流水线增量结果缓存到队列,刷新请求逐条消费
|
||||
queue: asyncio.Queue = field(default_factory=asyncio.Queue)
|
||||
|
||||
# 是否已经完成(收到最终片段)
|
||||
finished: bool = False
|
||||
|
||||
# 缓存最近一次片段,处理重试或超时兜底
|
||||
last_chunk: Optional[StreamChunk] = None
|
||||
|
||||
|
||||
class StreamSessionManager:
|
||||
"""管理 stream 会话的生命周期,并负责队列的生产消费。"""
|
||||
|
||||
def __init__(self, logger: EventLogger, ttl: int = 60) -> None:
|
||||
self.logger = logger
|
||||
|
||||
self.ttl = ttl # 超时时间(秒),超过该时间未被访问的会话会被清理由 cleanup
|
||||
self._sessions: dict[str, StreamSession] = {} # stream_id -> StreamSession 映射
|
||||
self._msg_index: dict[str, str] = {} # msgid -> stream_id 映射,便于流水线根据消息 ID 找到会话
|
||||
|
||||
def get_stream_id_by_msg(self, msg_id: str) -> Optional[str]:
|
||||
if not msg_id:
|
||||
return None
|
||||
return self._msg_index.get(msg_id)
|
||||
|
||||
def get_session(self, stream_id: str) -> Optional[StreamSession]:
|
||||
return self._sessions.get(stream_id)
|
||||
|
||||
def create_or_get(self, msg_json: dict[str, Any]) -> tuple[StreamSession, bool]:
|
||||
"""根据企业微信回调创建或获取会话。
|
||||
|
||||
Args:
|
||||
msg_json: 企业微信解密后的回调 JSON。
|
||||
|
||||
Returns:
|
||||
Tuple[StreamSession, bool]: `StreamSession` 为会话实例,`bool` 指示是否为新建会话。
|
||||
|
||||
Example:
|
||||
在首次回调中调用,得到 `is_new=True` 后再触发流水线。
|
||||
"""
|
||||
msg_id = msg_json.get('msgid', '')
|
||||
if msg_id and msg_id in self._msg_index:
|
||||
stream_id = self._msg_index[msg_id]
|
||||
session = self._sessions.get(stream_id)
|
||||
if session:
|
||||
session.last_access = time.time()
|
||||
return session, False
|
||||
|
||||
stream_id = str(uuid.uuid4())
|
||||
session = StreamSession(
|
||||
stream_id=stream_id,
|
||||
msg_id=msg_id,
|
||||
chat_id=msg_json.get('chatid'),
|
||||
user_id=msg_json.get('from', {}).get('userid'),
|
||||
)
|
||||
|
||||
if msg_id:
|
||||
self._msg_index[msg_id] = stream_id
|
||||
self._sessions[stream_id] = session
|
||||
return session, True
|
||||
|
||||
async def publish(self, stream_id: str, chunk: StreamChunk) -> bool:
|
||||
"""向 stream 队列写入新的增量片段。
|
||||
|
||||
Args:
|
||||
stream_id: 企业微信分配的流式会话 ID。
|
||||
chunk: 待发送的增量片段。
|
||||
|
||||
Returns:
|
||||
bool: 当流式队列存在并成功入队时返回 True。
|
||||
|
||||
Example:
|
||||
在收到模型增量后调用 `await manager.publish('sid', StreamChunk('hello'))`。
|
||||
"""
|
||||
session = self._sessions.get(stream_id)
|
||||
if not session:
|
||||
return False
|
||||
|
||||
session.last_access = time.time()
|
||||
session.last_chunk = chunk
|
||||
|
||||
try:
|
||||
session.queue.put_nowait(chunk)
|
||||
except asyncio.QueueFull:
|
||||
# 默认无界队列,此处兜底防御
|
||||
await session.queue.put(chunk)
|
||||
|
||||
if chunk.is_final:
|
||||
session.finished = True
|
||||
|
||||
return True
|
||||
|
||||
async def consume(self, stream_id: str, timeout: float = 0.5) -> Optional[StreamChunk]:
|
||||
"""从队列中取出一个片段,若超时返回 None。
|
||||
|
||||
Args:
|
||||
stream_id: 企业微信流式会话 ID。
|
||||
timeout: 取片段的最长等待时间(秒)。
|
||||
|
||||
Returns:
|
||||
Optional[StreamChunk]: 成功时返回片段,超时或会话不存在时返回 None。
|
||||
|
||||
Example:
|
||||
企业微信刷新到达时调用,若队列有数据则立即返回 `StreamChunk`。
|
||||
"""
|
||||
session = self._sessions.get(stream_id)
|
||||
if not session:
|
||||
return None
|
||||
|
||||
session.last_access = time.time()
|
||||
|
||||
try:
|
||||
chunk = await asyncio.wait_for(session.queue.get(), timeout)
|
||||
session.last_access = time.time()
|
||||
if chunk.is_final:
|
||||
session.finished = True
|
||||
return chunk
|
||||
except asyncio.TimeoutError:
|
||||
if session.finished and session.last_chunk:
|
||||
return session.last_chunk
|
||||
return None
|
||||
|
||||
def mark_finished(self, stream_id: str) -> None:
|
||||
session = self._sessions.get(stream_id)
|
||||
if session:
|
||||
session.finished = True
|
||||
session.last_access = time.time()
|
||||
|
||||
def cleanup(self) -> None:
|
||||
"""定期清理过期会话,防止队列与映射无上限累积。"""
|
||||
now = time.time()
|
||||
expired: list[str] = []
|
||||
for stream_id, session in self._sessions.items():
|
||||
if now - session.last_access > self.ttl:
|
||||
expired.append(stream_id)
|
||||
|
||||
for stream_id in expired:
|
||||
session = self._sessions.pop(stream_id, None)
|
||||
if not session:
|
||||
continue
|
||||
msg_id = session.msg_id
|
||||
if msg_id and self._msg_index.get(msg_id) == stream_id:
|
||||
self._msg_index.pop(msg_id, None)
|
||||
|
||||
|
||||
class WecomBotClient:
|
||||
def __init__(self,Token:str,EnCodingAESKey:str,Corpid:str,logger:EventLogger):
|
||||
self.Token=Token
|
||||
self.EnCodingAESKey=EnCodingAESKey
|
||||
self.Corpid=Corpid
|
||||
def __init__(self, Token: str, EnCodingAESKey: str, Corpid: str, logger: EventLogger):
|
||||
"""企业微信智能机器人客户端。
|
||||
|
||||
Args:
|
||||
Token: 企业微信回调验证使用的 token。
|
||||
EnCodingAESKey: 企业微信消息加解密密钥。
|
||||
Corpid: 企业 ID。
|
||||
logger: 日志记录器。
|
||||
|
||||
Example:
|
||||
>>> client = WecomBotClient(Token='token', EnCodingAESKey='aeskey', Corpid='corp', logger=logger)
|
||||
"""
|
||||
|
||||
self.Token = Token
|
||||
self.EnCodingAESKey = EnCodingAESKey
|
||||
self.Corpid = Corpid
|
||||
self.ReceiveId = ''
|
||||
self.app = Quart(__name__)
|
||||
self.app.add_url_rule(
|
||||
'/callback/command',
|
||||
'handle_callback',
|
||||
self.handle_callback_request,
|
||||
methods=['POST','GET']
|
||||
methods=['POST', 'GET']
|
||||
)
|
||||
self._message_handlers = {
|
||||
'example': [],
|
||||
}
|
||||
self.user_stream_map = {}
|
||||
self.logger = logger
|
||||
self.generated_content = {}
|
||||
self.msg_id_map = {}
|
||||
self.generated_content: dict[str, str] = {}
|
||||
self.msg_id_map: dict[str, int] = {}
|
||||
self.stream_sessions = StreamSessionManager(logger=logger)
|
||||
self.stream_poll_timeout = 0.5
|
||||
|
||||
async def sha1_signature(token: str, timestamp: str, nonce: str, encrypt: str) -> str:
|
||||
raw = "".join(sorted([token, timestamp, nonce, encrypt]))
|
||||
return hashlib.sha1(raw.encode("utf-8")).hexdigest()
|
||||
|
||||
async def handle_callback_request(self):
|
||||
@staticmethod
|
||||
def _build_stream_payload(stream_id: str, content: str, finish: bool) -> dict[str, Any]:
|
||||
"""按照企业微信协议拼装返回报文。
|
||||
|
||||
Args:
|
||||
stream_id: 企业微信会话 ID。
|
||||
content: 推送的文本内容。
|
||||
finish: 是否为最终片段。
|
||||
|
||||
Returns:
|
||||
dict[str, Any]: 可直接加密返回的 payload。
|
||||
|
||||
Example:
|
||||
组装 `{'msgtype': 'stream', 'stream': {'id': 'sid', ...}}` 结构。
|
||||
"""
|
||||
return {
|
||||
'msgtype': 'stream',
|
||||
'stream': {
|
||||
'id': stream_id,
|
||||
'finish': finish,
|
||||
'content': content,
|
||||
},
|
||||
}
|
||||
|
||||
async def _encrypt_and_reply(self, payload: dict[str, Any], nonce: str) -> tuple[Response, int]:
|
||||
"""对响应进行加密封装并返回给企业微信。
|
||||
|
||||
Args:
|
||||
payload: 待加密的响应内容。
|
||||
nonce: 企业微信回调参数中的 nonce。
|
||||
|
||||
Returns:
|
||||
Tuple[Response, int]: Quart Response 对象及状态码。
|
||||
|
||||
Example:
|
||||
在首包或刷新场景中调用以生成加密响应。
|
||||
"""
|
||||
reply_plain_str = json.dumps(payload, ensure_ascii=False)
|
||||
reply_timestamp = str(int(time.time()))
|
||||
ret, encrypt_text = self.wxcpt.EncryptMsg(reply_plain_str, nonce, reply_timestamp)
|
||||
if ret != 0:
|
||||
await self.logger.error(f'加密失败: {ret}')
|
||||
return jsonify({'error': 'encrypt_failed'}), 500
|
||||
|
||||
root = ET.fromstring(encrypt_text)
|
||||
encrypt = root.find('Encrypt').text
|
||||
resp = {
|
||||
'encrypt': encrypt,
|
||||
}
|
||||
return jsonify(resp), 200
|
||||
|
||||
async def _dispatch_event(self, event: wecombotevent.WecomBotEvent) -> None:
|
||||
"""异步触发流水线处理,避免阻塞首包响应。
|
||||
|
||||
Args:
|
||||
event: 由企业微信消息转换的内部事件对象。
|
||||
"""
|
||||
try:
|
||||
self.wxcpt=WXBizMsgCrypt(self.Token,self.EnCodingAESKey,'')
|
||||
|
||||
if request.method == "GET":
|
||||
|
||||
msg_signature = unquote(request.args.get("msg_signature", ""))
|
||||
timestamp = unquote(request.args.get("timestamp", ""))
|
||||
nonce = unquote(request.args.get("nonce", ""))
|
||||
echostr = unquote(request.args.get("echostr", ""))
|
||||
|
||||
if not all([msg_signature, timestamp, nonce, echostr]):
|
||||
await self.logger.error("请求参数缺失")
|
||||
return Response("缺少参数", status=400)
|
||||
|
||||
ret, decrypted_str = self.wxcpt.VerifyURL(msg_signature, timestamp, nonce, echostr)
|
||||
if ret != 0:
|
||||
|
||||
await self.logger.error("验证URL失败")
|
||||
return Response("验证失败", status=403)
|
||||
|
||||
return Response(decrypted_str, mimetype="text/plain")
|
||||
|
||||
elif request.method == "POST":
|
||||
msg_signature = unquote(request.args.get("msg_signature", ""))
|
||||
timestamp = unquote(request.args.get("timestamp", ""))
|
||||
nonce = unquote(request.args.get("nonce", ""))
|
||||
|
||||
try:
|
||||
timeout = 3
|
||||
interval = 0.1
|
||||
start_time = time.monotonic()
|
||||
encrypted_json = await request.get_json()
|
||||
encrypted_msg = encrypted_json.get("encrypt", "")
|
||||
if not encrypted_msg:
|
||||
await self.logger.error("请求体中缺少 'encrypt' 字段")
|
||||
|
||||
xml_post_data = f"<xml><Encrypt><![CDATA[{encrypted_msg}]]></Encrypt></xml>"
|
||||
ret, decrypted_xml = self.wxcpt.DecryptMsg(xml_post_data, msg_signature, timestamp, nonce)
|
||||
if ret != 0:
|
||||
await self.logger.error("解密失败")
|
||||
|
||||
|
||||
msg_json = json.loads(decrypted_xml)
|
||||
|
||||
from_user_id = msg_json.get("from", {}).get("userid")
|
||||
chatid = msg_json.get("chatid", "")
|
||||
|
||||
message_data = await self.get_message(msg_json)
|
||||
|
||||
|
||||
|
||||
if message_data:
|
||||
try:
|
||||
event = wecombotevent.WecomBotEvent(message_data)
|
||||
if event:
|
||||
await self._handle_message(event)
|
||||
except Exception as e:
|
||||
await self.logger.error(traceback.format_exc())
|
||||
print(traceback.format_exc())
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
if msg_json.get('chattype','') == 'single':
|
||||
if from_user_id in self.user_stream_map:
|
||||
stream_id = self.user_stream_map[from_user_id]
|
||||
else:
|
||||
stream_id =str(uuid.uuid4())
|
||||
self.user_stream_map[from_user_id] = stream_id
|
||||
|
||||
|
||||
else:
|
||||
|
||||
if chatid in self.user_stream_map:
|
||||
stream_id = self.user_stream_map[chatid]
|
||||
else:
|
||||
stream_id = str(uuid.uuid4())
|
||||
self.user_stream_map[chatid] = stream_id
|
||||
except Exception as e:
|
||||
await self.logger.error(traceback.format_exc())
|
||||
print(traceback.format_exc())
|
||||
while True:
|
||||
content = self.generated_content.pop(msg_json['msgid'],None)
|
||||
if content:
|
||||
reply_plain = {
|
||||
"msgtype": "stream",
|
||||
"stream": {
|
||||
"id": stream_id,
|
||||
"finish": True,
|
||||
"content": content
|
||||
}
|
||||
}
|
||||
reply_plain_str = json.dumps(reply_plain, ensure_ascii=False)
|
||||
|
||||
reply_timestamp = str(int(time.time()))
|
||||
ret, encrypt_text = self.wxcpt.EncryptMsg(reply_plain_str, nonce, reply_timestamp)
|
||||
if ret != 0:
|
||||
|
||||
await self.logger.error("加密失败"+str(ret))
|
||||
|
||||
|
||||
root = ET.fromstring(encrypt_text)
|
||||
encrypt = root.find("Encrypt").text
|
||||
resp = {
|
||||
"encrypt": encrypt,
|
||||
}
|
||||
return jsonify(resp), 200
|
||||
|
||||
if time.time() - start_time > timeout:
|
||||
break
|
||||
|
||||
await asyncio.sleep(interval)
|
||||
|
||||
if self.msg_id_map.get(message_data['msgid'], 1) == 3:
|
||||
await self.logger.error('请求失效:暂不支持智能机器人超过7秒的请求,如有需求,请联系 LangBot 团队。')
|
||||
return ''
|
||||
|
||||
except Exception as e:
|
||||
await self.logger.error(traceback.format_exc())
|
||||
print(traceback.format_exc())
|
||||
|
||||
except Exception as e:
|
||||
await self._handle_message(event)
|
||||
except Exception:
|
||||
await self.logger.error(traceback.format_exc())
|
||||
print(traceback.format_exc())
|
||||
|
||||
|
||||
async def get_message(self,msg_json):
|
||||
async def _handle_post_initial_response(self, msg_json: dict[str, Any], nonce: str) -> tuple[Response, int]:
|
||||
"""处理企业微信首次推送的消息,返回 stream_id 并开启流水线。
|
||||
|
||||
Args:
|
||||
msg_json: 解密后的企业微信消息 JSON。
|
||||
nonce: 企业微信回调参数 nonce。
|
||||
|
||||
Returns:
|
||||
Tuple[Response, int]: Quart Response 及状态码。
|
||||
|
||||
Example:
|
||||
首次回调时调用,立即返回带 `stream_id` 的响应。
|
||||
"""
|
||||
session, is_new = self.stream_sessions.create_or_get(msg_json)
|
||||
|
||||
message_data = await self.get_message(msg_json)
|
||||
if message_data:
|
||||
message_data['stream_id'] = session.stream_id
|
||||
try:
|
||||
event = wecombotevent.WecomBotEvent(message_data)
|
||||
except Exception:
|
||||
await self.logger.error(traceback.format_exc())
|
||||
else:
|
||||
if is_new:
|
||||
asyncio.create_task(self._dispatch_event(event))
|
||||
|
||||
payload = self._build_stream_payload(session.stream_id, '', False)
|
||||
return await self._encrypt_and_reply(payload, nonce)
|
||||
|
||||
async def _handle_post_followup_response(self, msg_json: dict[str, Any], nonce: str) -> tuple[Response, int]:
|
||||
"""处理企业微信的流式刷新请求,按需返回增量片段。
|
||||
|
||||
Args:
|
||||
msg_json: 解密后的企业微信刷新请求。
|
||||
nonce: 企业微信回调参数 nonce。
|
||||
|
||||
Returns:
|
||||
Tuple[Response, int]: Quart Response 及状态码。
|
||||
|
||||
Example:
|
||||
在刷新请求中调用,按需返回增量片段。
|
||||
"""
|
||||
stream_info = msg_json.get('stream', {})
|
||||
stream_id = stream_info.get('id', '')
|
||||
if not stream_id:
|
||||
await self.logger.error('刷新请求缺少 stream.id')
|
||||
return await self._encrypt_and_reply(self._build_stream_payload('', '', True), nonce)
|
||||
|
||||
session = self.stream_sessions.get_session(stream_id)
|
||||
chunk = await self.stream_sessions.consume(stream_id, timeout=self.stream_poll_timeout)
|
||||
|
||||
if not chunk:
|
||||
cached_content = None
|
||||
if session and session.msg_id:
|
||||
cached_content = self.generated_content.pop(session.msg_id, None)
|
||||
if cached_content is not None:
|
||||
chunk = StreamChunk(content=cached_content, is_final=True)
|
||||
else:
|
||||
payload = self._build_stream_payload(stream_id, '', False)
|
||||
return await self._encrypt_and_reply(payload, nonce)
|
||||
|
||||
payload = self._build_stream_payload(stream_id, chunk.content, chunk.is_final)
|
||||
if chunk.is_final:
|
||||
self.stream_sessions.mark_finished(stream_id)
|
||||
return await self._encrypt_and_reply(payload, nonce)
|
||||
|
||||
async def handle_callback_request(self):
|
||||
"""企业微信回调入口。
|
||||
|
||||
Returns:
|
||||
Quart Response: 根据请求类型返回验证、首包或刷新结果。
|
||||
|
||||
Example:
|
||||
作为 Quart 路由处理函数直接注册并使用。
|
||||
"""
|
||||
try:
|
||||
self.wxcpt = WXBizMsgCrypt(self.Token, self.EnCodingAESKey, '')
|
||||
await self.logger.info(f'{request.method} {request.url} {str(request.args)}')
|
||||
|
||||
if request.method == 'GET':
|
||||
return await self._handle_get_callback()
|
||||
|
||||
if request.method == 'POST':
|
||||
return await self._handle_post_callback()
|
||||
|
||||
return Response('', status=405)
|
||||
|
||||
except Exception:
|
||||
await self.logger.error(traceback.format_exc())
|
||||
return Response('Internal Server Error', status=500)
|
||||
|
||||
async def _handle_get_callback(self) -> tuple[Response, int] | Response:
|
||||
"""处理企业微信的 GET 验证请求。"""
|
||||
|
||||
msg_signature = unquote(request.args.get('msg_signature', ''))
|
||||
timestamp = unquote(request.args.get('timestamp', ''))
|
||||
nonce = unquote(request.args.get('nonce', ''))
|
||||
echostr = unquote(request.args.get('echostr', ''))
|
||||
|
||||
if not all([msg_signature, timestamp, nonce, echostr]):
|
||||
await self.logger.error('请求参数缺失')
|
||||
return Response('缺少参数', status=400)
|
||||
|
||||
ret, decrypted_str = self.wxcpt.VerifyURL(msg_signature, timestamp, nonce, echostr)
|
||||
if ret != 0:
|
||||
await self.logger.error('验证URL失败')
|
||||
return Response('验证失败', status=403)
|
||||
|
||||
return Response(decrypted_str, mimetype='text/plain')
|
||||
|
||||
async def _handle_post_callback(self) -> tuple[Response, int] | Response:
|
||||
"""处理企业微信的 POST 回调请求。"""
|
||||
|
||||
self.stream_sessions.cleanup()
|
||||
|
||||
msg_signature = unquote(request.args.get('msg_signature', ''))
|
||||
timestamp = unquote(request.args.get('timestamp', ''))
|
||||
nonce = unquote(request.args.get('nonce', ''))
|
||||
|
||||
encrypted_json = await request.get_json()
|
||||
encrypted_msg = (encrypted_json or {}).get('encrypt', '')
|
||||
if not encrypted_msg:
|
||||
await self.logger.error("请求体中缺少 'encrypt' 字段")
|
||||
return Response('Bad Request', status=400)
|
||||
|
||||
xml_post_data = f"<xml><Encrypt><![CDATA[{encrypted_msg}]]></Encrypt></xml>"
|
||||
ret, decrypted_xml = self.wxcpt.DecryptMsg(xml_post_data, msg_signature, timestamp, nonce)
|
||||
if ret != 0:
|
||||
await self.logger.error('解密失败')
|
||||
return Response('解密失败', status=400)
|
||||
|
||||
msg_json = json.loads(decrypted_xml)
|
||||
|
||||
if msg_json.get('msgtype') == 'stream':
|
||||
return await self._handle_post_followup_response(msg_json, nonce)
|
||||
|
||||
return await self._handle_post_initial_response(msg_json, nonce)
|
||||
|
||||
async def get_message(self, msg_json):
|
||||
message_data = {}
|
||||
|
||||
if msg_json.get('chattype','') == 'single':
|
||||
if msg_json.get('chattype', '') == 'single':
|
||||
message_data['type'] = 'single'
|
||||
elif msg_json.get('chattype','') == 'group':
|
||||
elif msg_json.get('chattype', '') == 'group':
|
||||
message_data['type'] = 'group'
|
||||
|
||||
if msg_json.get('msgtype') == 'text':
|
||||
message_data['content'] = msg_json.get('text',{}).get('content')
|
||||
message_data['content'] = msg_json.get('text', {}).get('content')
|
||||
elif msg_json.get('msgtype') == 'image':
|
||||
picurl = msg_json.get('image', {}).get('url','')
|
||||
base64 = await self.download_url_to_base64(picurl,self.EnCodingAESKey)
|
||||
message_data['picurl'] = base64
|
||||
picurl = msg_json.get('image', {}).get('url', '')
|
||||
base64 = await self.download_url_to_base64(picurl, self.EnCodingAESKey)
|
||||
message_data['picurl'] = base64
|
||||
elif msg_json.get('msgtype') == 'mixed':
|
||||
items = msg_json.get('mixed', {}).get('msg_item', [])
|
||||
texts = []
|
||||
@@ -197,17 +460,27 @@ class WecomBotClient:
|
||||
if texts:
|
||||
message_data['content'] = "".join(texts) # 拼接所有 text
|
||||
if picurl:
|
||||
base64 = await self.download_url_to_base64(picurl,self.EnCodingAESKey)
|
||||
message_data['picurl'] = base64 # 只保留第一个 image
|
||||
base64 = await self.download_url_to_base64(picurl, self.EnCodingAESKey)
|
||||
message_data['picurl'] = base64 # 只保留第一个 image
|
||||
|
||||
# Extract user information
|
||||
from_info = msg_json.get('from', {})
|
||||
message_data['userid'] = from_info.get('userid', '')
|
||||
message_data['username'] = from_info.get('alias', '') or from_info.get('name', '') or from_info.get('userid', '')
|
||||
|
||||
# Extract chat/group information
|
||||
if msg_json.get('chattype', '') == 'group':
|
||||
message_data['chatid'] = msg_json.get('chatid', '')
|
||||
# Try to get group name if available
|
||||
message_data['chatname'] = msg_json.get('chatname', '') or msg_json.get('chatid', '')
|
||||
|
||||
message_data['userid'] = msg_json.get('from', {}).get('userid', '')
|
||||
message_data['msgid'] = msg_json.get('msgid', '')
|
||||
|
||||
if msg_json.get('aibotid'):
|
||||
message_data['aibotid'] = msg_json.get('aibotid', '')
|
||||
|
||||
return message_data
|
||||
|
||||
|
||||
async def _handle_message(self, event: wecombotevent.WecomBotEvent):
|
||||
"""
|
||||
处理消息事件。
|
||||
@@ -223,10 +496,46 @@ class WecomBotClient:
|
||||
for handler in self._message_handlers[msg_type]:
|
||||
await handler(event)
|
||||
except Exception:
|
||||
print(traceback.format_exc())
|
||||
print(traceback.format_exc())
|
||||
|
||||
async def push_stream_chunk(self, msg_id: str, content: str, is_final: bool = False) -> bool:
|
||||
"""将流水线片段推送到 stream 会话。
|
||||
|
||||
Args:
|
||||
msg_id: 原始企业微信消息 ID。
|
||||
content: 模型产生的片段内容。
|
||||
is_final: 是否为最终片段。
|
||||
|
||||
Returns:
|
||||
bool: 当成功写入流式队列时返回 True。
|
||||
|
||||
Example:
|
||||
在流水线 `reply_message_chunk` 中调用,将增量推送至企业微信。
|
||||
"""
|
||||
# 根据 msg_id 找到对应 stream 会话,如果不存在说明当前消息非流式
|
||||
stream_id = self.stream_sessions.get_stream_id_by_msg(msg_id)
|
||||
if not stream_id:
|
||||
return False
|
||||
|
||||
chunk = StreamChunk(content=content, is_final=is_final)
|
||||
await self.stream_sessions.publish(stream_id, chunk)
|
||||
if is_final:
|
||||
self.stream_sessions.mark_finished(stream_id)
|
||||
return True
|
||||
|
||||
async def set_message(self, msg_id: str, content: str):
|
||||
self.generated_content[msg_id] = content
|
||||
"""兼容旧逻辑:若无法流式返回则缓存最终结果。
|
||||
|
||||
Args:
|
||||
msg_id: 企业微信消息 ID。
|
||||
content: 最终回复的文本内容。
|
||||
|
||||
Example:
|
||||
在非流式场景下缓存最终结果以备刷新时返回。
|
||||
"""
|
||||
handled = await self.push_stream_chunk(msg_id, content, is_final=True)
|
||||
if not handled:
|
||||
self.generated_content[msg_id] = content
|
||||
|
||||
    def on_message(self, msg_type: str):
        def decorator(func: Callable[[wecombotevent.WecomBotEvent], None]):
@@ -237,7 +546,6 @@ class WecomBotClient:

        return decorator

    async def download_url_to_base64(self, download_url, encoding_aes_key):
        async with httpx.AsyncClient() as client:
            response = await client.get(download_url)
@@ -247,26 +555,22 @@ class WecomBotClient:

            encrypted_bytes = response.content

            aes_key = base64.b64decode(encoding_aes_key + "=")  # pad the base64 string
            iv = aes_key[:16]

            cipher = AES.new(aes_key, AES.MODE_CBC, iv)
            decrypted = cipher.decrypt(encrypted_bytes)

            pad_len = decrypted[-1]
            decrypted = decrypted[:-pad_len]

            if decrypted.startswith(b"\xff\xd8"):  # JPEG
                mime_type = "image/jpeg"
            elif decrypted.startswith(b"\x89PNG"):  # PNG
                mime_type = "image/png"
            elif decrypted.startswith((b"GIF87a", b"GIF89a")):  # GIF
                mime_type = "image/gif"
            elif decrypted.startswith(b"BM"):  # BMP
                mime_type = "image/bmp"
            elif decrypted.startswith(b"II*\x00") or decrypted.startswith(b"MM\x00*"):  # TIFF
                mime_type = "image/tiff"
@@ -276,15 +580,9 @@ class WecomBotClient:
            # convert to base64
            base64_str = base64.b64encode(decrypted).decode("utf-8")
            return f"data:{mime_type};base64,{base64_str}"

    async def run_task(self, host: str, port: int, *args, **kwargs):
        """
        Start the Quart application.
        """
        await self.app.run_task(host=host, port=port, *args, **kwargs)
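Reviewer note: WeCom media is AES-256-CBC encrypted with the EncodingAESKey, and the PKCS#7 pad length is read from the last plaintext byte. A standalone sketch of the same decrypt step, assuming pycryptodome is installed:

    import base64
    from Crypto.Cipher import AES

    def decrypt_wecom_media(encrypted: bytes, encoding_aes_key: str) -> bytes:
        aes_key = base64.b64decode(encoding_aes_key + '=')  # 43-char key -> 32 bytes
        cipher = AES.new(aes_key, AES.MODE_CBC, aes_key[:16])  # IV = first 16 key bytes
        plain = cipher.decrypt(encrypted)
        return plain[:-plain[-1]]  # strip PKCS#7 padding (last byte = pad length)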
@@ -22,7 +22,21 @@ class WecomBotEvent(dict):
        """
        User ID
        """
-        return self.get('from', {}).get('userid', '')
+        return self.get('from', {}).get('userid', '') or self.get('userid', '')

+    @property
+    def username(self) -> str:
+        """
+        User name
+        """
+        return self.get('username', '') or self.get('from', {}).get('alias', '') or self.get('from', {}).get('name', '') or self.userid
+
+    @property
+    def chatname(self) -> str:
+        """
+        Group name
+        """
+        return self.get('chatname', '') or str(self.chatid)

    @property
    def content(self) -> str:
main.py (13 changed lines)

@@ -18,7 +18,13 @@ asciiart = r"""

async def main_entry(loop: asyncio.AbstractEventLoop):
    parser = argparse.ArgumentParser(description='LangBot')
-    parser.add_argument('--standalone-runtime', action='store_true', help='使用独立插件运行时', default=False)
+    parser.add_argument(
+        '--standalone-runtime',
+        action='store_true',
+        help='Use standalone plugin runtime / 使用独立插件运行时',
+        default=False,
+    )
+    parser.add_argument('--debug', action='store_true', help='Debug mode / 调试模式', default=False)
    args = parser.parse_args()

    if args.standalone_runtime:
@@ -26,6 +32,11 @@ async def main_entry(loop: asyncio.AbstractEventLoop):

        platform.standalone_runtime = True

+    if args.debug:
+        from pkg.utils import constants
+
+        constants.debug_mode = True
+
    print(asciiart)

    import sys
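Reviewer note: a quick check of how the new flags parse, mirroring the parser setup above (standalone sketch, not the real entrypoint):

    import argparse

    parser = argparse.ArgumentParser(description='LangBot')
    parser.add_argument('--standalone-runtime', action='store_true', default=False)
    parser.add_argument('--debug', action='store_true', default=False)

    args = parser.parse_args(['--debug'])
    assert args.debug is True and args.standalone_runtime is False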
@@ -9,6 +9,9 @@ from quart.typing import RouteCallable

from ....core import app

+# Maximum file upload size limit (10MB)
+MAX_FILE_SIZE = 10 * 1024 * 1024  # 10MB
+

preregistered_groups: list[type[RouterGroup]] = []
"""Pre-registered list of RouterGroup"""
@@ -31,6 +34,8 @@ class AuthType(enum.Enum):

    NONE = 'none'
    USER_TOKEN = 'user-token'
+    API_KEY = 'api-key'
+    USER_TOKEN_OR_API_KEY = 'user-token-or-api-key'


class RouterGroup(abc.ABC):
@@ -84,6 +89,63 @@ class RouterGroup(abc.ABC):
                    except Exception as e:
                        return self.http_status(401, -1, str(e))

+                elif auth_type == AuthType.API_KEY:
+                    # get API key from Authorization header or X-API-Key header
+                    api_key = quart.request.headers.get('X-API-Key', '')
+                    if not api_key:
+                        auth_header = quart.request.headers.get('Authorization', '')
+                        if auth_header.startswith('Bearer '):
+                            api_key = auth_header.replace('Bearer ', '')
+
+                    if not api_key:
+                        return self.http_status(401, -1, 'No valid API key provided')
+
+                    try:
+                        is_valid = await self.ap.apikey_service.verify_api_key(api_key)
+                        if not is_valid:
+                            return self.http_status(401, -1, 'Invalid API key')
+                    except Exception as e:
+                        return self.http_status(401, -1, str(e))
+
+                elif auth_type == AuthType.USER_TOKEN_OR_API_KEY:
+                    # Try API key first (check X-API-Key header)
+                    api_key = quart.request.headers.get('X-API-Key', '')
+
+                    if api_key:
+                        # API key authentication
+                        try:
+                            is_valid = await self.ap.apikey_service.verify_api_key(api_key)
+                            if not is_valid:
+                                return self.http_status(401, -1, 'Invalid API key')
+                        except Exception as e:
+                            return self.http_status(401, -1, str(e))
+                    else:
+                        # Try user token authentication (Authorization header)
+                        token = quart.request.headers.get('Authorization', '').replace('Bearer ', '')
+
+                        if not token:
+                            return self.http_status(401, -1, 'No valid authentication provided (user token or API key required)')
+
+                        try:
+                            user_email = await self.ap.user_service.verify_jwt_token(token)
+
+                            # check if this account exists
+                            user = await self.ap.user_service.get_user_by_email(user_email)
+                            if not user:
+                                return self.http_status(401, -1, 'User not found')
+
+                            # check if f accepts user_email parameter
+                            if 'user_email' in f.__code__.co_varnames:
+                                kwargs['user_email'] = user_email
+                        except Exception:
+                            # If user token fails, maybe it's an API key in Authorization header
+                            try:
+                                is_valid = await self.ap.apikey_service.verify_api_key(token)
+                                if not is_valid:
+                                    return self.http_status(401, -1, 'Invalid authentication credentials')
+                            except Exception as e:
+                                return self.http_status(401, -1, str(e))

                try:
                    return await f(*args, **kwargs)
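Reviewer note: both client-side options the USER_TOKEN_OR_API_KEY fallback accepts, as a sketch. The base URL is illustrative and the key is a placeholder; the header names and the /api/v1/pipelines route come from this diff.

    import httpx

    BASE = 'http://localhost:5300'  # assumed local deployment

    # Option 1: API key via the dedicated header
    r1 = httpx.get(f'{BASE}/api/v1/pipelines', headers={'X-API-Key': 'lbk_xxxxxxxx'})

    # Option 2: user JWT (or an API key) via the Authorization header
    r2 = httpx.get(f'{BASE}/api/v1/pipelines', headers={'Authorization': 'Bearer <token-or-key>'})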
pkg/api/http/controller/groups/apikeys.py (new file, 43 lines)

@@ -0,0 +1,43 @@
import quart

from .. import group


@group.group_class('apikeys', '/api/v1/apikeys')
class ApiKeysRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('', methods=['GET', 'POST'])
        async def _() -> str:
            if quart.request.method == 'GET':
                keys = await self.ap.apikey_service.get_api_keys()
                return self.success(data={'keys': keys})
            elif quart.request.method == 'POST':
                json_data = await quart.request.json
                name = json_data.get('name', '')
                description = json_data.get('description', '')

                if not name:
                    return self.http_status(400, -1, 'Name is required')

                key = await self.ap.apikey_service.create_api_key(name, description)
                return self.success(data={'key': key})

        @self.route('/<int:key_id>', methods=['GET', 'PUT', 'DELETE'])
        async def _(key_id: int) -> str:
            if quart.request.method == 'GET':
                key = await self.ap.apikey_service.get_api_key(key_id)
                if key is None:
                    return self.http_status(404, -1, 'API key not found')
                return self.success(data={'key': key})

            elif quart.request.method == 'PUT':
                json_data = await quart.request.json
                name = json_data.get('name')
                description = json_data.get('description')

                await self.ap.apikey_service.update_api_key(key_id, name, description)
                return self.success()

            elif quart.request.method == 'DELETE':
                await self.ap.apikey_service.delete_api_key(key_id)
                return self.success()
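Reviewer note: creating and listing keys through the new endpoints, as a sketch. httpx, the base URL, and the assumption that these routes use the default user-token auth are all illustrative.

    import httpx

    BASE = 'http://localhost:5300'
    auth = {'Authorization': 'Bearer <user-jwt>'}

    created = httpx.post(f'{BASE}/api/v1/apikeys', json={'name': 'ci', 'description': 'CI access'}, headers=auth)
    print(created.json()['data']['key'])  # includes the generated `lbk_...` secret

    listed = httpx.get(f'{BASE}/api/v1/apikeys', headers=auth)
    print(listed.json()['data']['keys'])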
@@ -15,6 +15,9 @@ class FilesRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('/image/<image_key>', methods=['GET'], auth_type=group.AuthType.NONE)
        async def _(image_key: str) -> quart.Response:
+            if '/' in image_key or '\\' in image_key:
+                return quart.Response(status=404)
+
            if not await self.ap.storage_mgr.storage_provider.exists(image_key):
                return quart.Response(status=404)

@@ -28,15 +31,41 @@ class FilesRouterGroup(group.RouterGroup):
        @self.route('/documents', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
        async def _() -> quart.Response:
            request = quart.request

+            # Check file size limit before reading the file
+            content_length = request.content_length
+            if content_length and content_length > group.MAX_FILE_SIZE:
+                return self.fail(400, 'File size exceeds 10MB limit. Please split large files into smaller parts.')
+
-            # get file bytes from 'file'
-            file = (await request.files)['file']
+            files = await request.files
+            if 'file' not in files:
+                return self.fail(400, 'No file provided in request')
+
+            file = files['file']
            assert isinstance(file, quart.datastructures.FileStorage)

            file_bytes = await asyncio.to_thread(file.stream.read)
-            extension = file.filename.split('.')[-1]
-            file_name = file.filename.split('.')[0]
-
-            file_key = file_name + '_' + str(uuid.uuid4())[:8] + '.' + extension
+            # Double-check actual file size after reading
+            if len(file_bytes) > group.MAX_FILE_SIZE:
+                return self.fail(400, 'File size exceeds 10MB limit. Please split large files into smaller parts.')
+
+            # Split filename and extension properly
+            if '.' in file.filename:
+                file_name, extension = file.filename.rsplit('.', 1)
+            else:
+                file_name = file.filename
+                extension = ''
+
+            # check if file name contains '/' or '\'
+            if '/' in file_name or '\\' in file_name:
+                return self.fail(400, 'File name contains invalid characters')
+
+            file_key = file_name + '_' + str(uuid.uuid4())[:8]
+            if extension:
+                file_key += '.' + extension

            # save file to storage
            await self.ap.storage_mgr.storage_provider.save(file_key, file_bytes)
            return self.success(
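Reviewer note: why the rsplit change matters for multi-dot filenames, as a runnable sketch.

    # split('.')[0] breaks multi-dot names; rsplit('.', 1) keeps everything
    # before the last dot
    name = 'report.final.pdf'
    assert name.split('.')[0] == 'report'                   # old behavior: loses '.final'
    assert name.rsplit('.', 1) == ['report.final', 'pdf']   # new behavior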
@@ -8,7 +8,7 @@ from ... import group
@group.group_class('pipelines', '/api/v1/pipelines')
class PipelinesRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
-        @self.route('', methods=['GET', 'POST'])
+        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                sort_by = quart.request.args.get('sort_by', 'created_at')
@@ -23,11 +23,11 @@ class PipelinesRouterGroup(group.RouterGroup):

            return self.success(data={'uuid': pipeline_uuid})

-        @self.route('/_/metadata', methods=['GET'])
+        @self.route('/_/metadata', methods=['GET'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            return self.success(data={'configs': await self.ap.pipeline_service.get_pipeline_metadata()})

-        @self.route('/<pipeline_uuid>', methods=['GET', 'PUT', 'DELETE'])
+        @self.route('/<pipeline_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(pipeline_uuid: str) -> str:
            if quart.request.method == 'GET':
                pipeline = await self.ap.pipeline_service.get_pipeline(pipeline_uuid)
@@ -46,3 +46,34 @@ class PipelinesRouterGroup(group.RouterGroup):
                await self.ap.pipeline_service.delete_pipeline(pipeline_uuid)

                return self.success()

+        @self.route('/<pipeline_uuid>/extensions', methods=['GET', 'PUT'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
+        async def _(pipeline_uuid: str) -> str:
+            if quart.request.method == 'GET':
+                # Get current extensions and available plugins
+                pipeline = await self.ap.pipeline_service.get_pipeline(pipeline_uuid)
+                if pipeline is None:
+                    return self.http_status(404, -1, 'pipeline not found')
+
+                plugins = await self.ap.plugin_connector.list_plugins()
+                mcp_servers = await self.ap.mcp_service.get_mcp_servers(contain_runtime_info=True)
+
+                return self.success(
+                    data={
+                        'bound_plugins': pipeline.get('extensions_preferences', {}).get('plugins', []),
+                        'available_plugins': plugins,
+                        'bound_mcp_servers': pipeline.get('extensions_preferences', {}).get('mcp_servers', []),
+                        'available_mcp_servers': mcp_servers,
+                    }
+                )
+            elif quart.request.method == 'PUT':
+                # Update bound plugins and MCP servers for this pipeline
+                json_data = await quart.request.json
+                bound_plugins = json_data.get('bound_plugins', [])
+                bound_mcp_servers = json_data.get('bound_mcp_servers', [])
+
+                await self.ap.pipeline_service.update_pipeline_extensions(
+                    pipeline_uuid, bound_plugins, bound_mcp_servers
+                )
+
+                return self.success()
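Reviewer note: binding a plugin and an MCP server to a pipeline through the new endpoint, as a sketch. UUIDs, names, and the base URL are placeholders; the payload shape matches what RuntimePipeline later reads from extensions_preferences.

    import httpx

    httpx.put(
        'http://localhost:5300/api/v1/pipelines/<pipeline-uuid>/extensions',
        headers={'X-API-Key': 'lbk_xxxxxxxx'},
        json={
            'bound_plugins': [{'author': 'someauthor', 'name': 'someplugin'}],
            'bound_mcp_servers': ['<mcp-server-uuid>'],
        },
    )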
@@ -6,7 +6,7 @@ from ... import group
@group.group_class('bots', '/api/v1/platform/bots')
class BotsRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
-        @self.route('', methods=['GET', 'POST'])
+        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                return self.success(data={'bots': await self.ap.bot_service.get_bots()})
@@ -15,7 +15,7 @@ class BotsRouterGroup(group.RouterGroup):
            bot_uuid = await self.ap.bot_service.create_bot(json_data)
            return self.success(data={'uuid': bot_uuid})

-        @self.route('/<bot_uuid>', methods=['GET', 'PUT', 'DELETE'])
+        @self.route('/<bot_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(bot_uuid: str) -> str:
            if quart.request.method == 'GET':
                bot = await self.ap.bot_service.get_bot(bot_uuid)
@@ -30,7 +30,7 @@ class BotsRouterGroup(group.RouterGroup):
                await self.ap.bot_service.delete_bot(bot_uuid)
                return self.success()

-        @self.route('/<bot_uuid>/logs', methods=['POST'])
+        @self.route('/<bot_uuid>/logs', methods=['POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(bot_uuid: str) -> str:
            json_data = await quart.request.json
            from_index = json_data.get('from_index', -1)
@@ -2,6 +2,10 @@ from __future__ import annotations

import base64
import quart
+import re
+import httpx
+import uuid
+import os

from .....core import taskmgr
from .. import group
@@ -45,9 +49,12 @@ class PluginsRouterGroup(group.RouterGroup):
                    return self.http_status(404, -1, 'plugin not found')
                return self.success(data={'plugin': plugin})
            elif quart.request.method == 'DELETE':
+                delete_data = quart.request.args.get('delete_data', 'false').lower() == 'true'
                ctx = taskmgr.TaskContext.new()
                wrapper = self.ap.task_mgr.create_user_task(
-                    self.ap.plugin_connector.delete_plugin(author, plugin_name, task_context=ctx),
+                    self.ap.plugin_connector.delete_plugin(
+                        author, plugin_name, delete_data=delete_data, task_context=ctx
+                    ),
                    kind='plugin-operation',
                    name=f'plugin-remove-{plugin_name}',
                    label=f'Removing plugin {plugin_name}',
@@ -89,23 +96,145 @@ class PluginsRouterGroup(group.RouterGroup):

            return quart.Response(icon_data, mimetype=mime_type)

+        @self.route('/github/releases', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
+        async def _() -> str:
+            """Get releases from a GitHub repository URL"""
+            data = await quart.request.json
+            repo_url = data.get('repo_url', '')
+
+            # Parse GitHub repository URL to extract owner and repo
+            # Supports: https://github.com/owner/repo or github.com/owner/repo
+            pattern = r'github\.com/([^/]+)/([^/]+?)(?:\.git)?(?:/.*)?$'
+            match = re.search(pattern, repo_url)
+
+            if not match:
+                return self.http_status(400, -1, 'Invalid GitHub repository URL')
+
+            owner, repo = match.groups()
+
+            try:
+                # Fetch releases from GitHub API
+                url = f'https://api.github.com/repos/{owner}/{repo}/releases'
+                async with httpx.AsyncClient(
+                    trust_env=True,
+                    follow_redirects=True,
+                    timeout=10,
+                ) as client:
+                    response = await client.get(url)
+                    response.raise_for_status()
+                    releases = response.json()
+
+                # Format releases data for frontend
+                formatted_releases = []
+                for release in releases:
+                    formatted_releases.append(
+                        {
+                            'id': release['id'],
+                            'tag_name': release['tag_name'],
+                            'name': release['name'],
+                            'published_at': release['published_at'],
+                            'prerelease': release['prerelease'],
+                            'draft': release['draft'],
+                        }
+                    )
+
+                return self.success(data={'releases': formatted_releases, 'owner': owner, 'repo': repo})
+            except httpx.RequestError as e:
+                return self.http_status(500, -1, f'Failed to fetch releases: {str(e)}')
+
+        @self.route(
+            '/github/release-assets',
+            methods=['POST'],
+            auth_type=group.AuthType.USER_TOKEN,
+        )
+        async def _() -> str:
+            """Get assets from a specific GitHub release"""
+            data = await quart.request.json
+            owner = data.get('owner', '')
+            repo = data.get('repo', '')
+            release_id = data.get('release_id', '')
+
+            if not all([owner, repo, release_id]):
+                return self.http_status(400, -1, 'Missing required parameters')
+
+            try:
+                # Fetch release assets from GitHub API
+                url = f'https://api.github.com/repos/{owner}/{repo}/releases/{release_id}'
+                async with httpx.AsyncClient(
+                    trust_env=True,
+                    follow_redirects=True,
+                    timeout=10,
+                ) as client:
+                    response = await client.get(
+                        url,
+                    )
+                    response.raise_for_status()
+                    release = response.json()
+
+                # Format assets data for frontend
+                formatted_assets = []
+                for asset in release.get('assets', []):
+                    formatted_assets.append(
+                        {
+                            'id': asset['id'],
+                            'name': asset['name'],
+                            'size': asset['size'],
+                            'download_url': asset['browser_download_url'],
+                            'content_type': asset['content_type'],
+                        }
+                    )
+
+                # add zipball as a downloadable asset
+                # formatted_assets.append(
+                #     {
+                #         "id": 0,
+                #         "name": "Source code (zip)",
+                #         "size": -1,
+                #         "download_url": release["zipball_url"],
+                #         "content_type": "application/zip",
+                #     }
+                # )
+
+                return self.success(data={'assets': formatted_assets})
+            except httpx.RequestError as e:
+                return self.http_status(500, -1, f'Failed to fetch release assets: {str(e)}')
+
+        @self.route('/install/github', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
+        async def _() -> str:
+            """Install plugin from GitHub release asset"""
+            data = await quart.request.json
+            asset_url = data.get('asset_url', '')
+            owner = data.get('owner', '')
+            repo = data.get('repo', '')
+            release_tag = data.get('release_tag', '')
+
+            if not asset_url:
+                return self.http_status(400, -1, 'Missing asset_url parameter')
+
            ctx = taskmgr.TaskContext.new()
-            short_source_str = data['source'][-8:]
+            install_info = {
+                'asset_url': asset_url,
+                'owner': owner,
+                'repo': repo,
+                'release_tag': release_tag,
+                'github_url': f'https://github.com/{owner}/{repo}',
+            }

            wrapper = self.ap.task_mgr.create_user_task(
-                self.ap.plugin_mgr.install_plugin(data['source'], task_context=ctx),
+                self.ap.plugin_connector.install_plugin(PluginInstallSource.GITHUB, install_info, task_context=ctx),
                kind='plugin-operation',
                name='plugin-install-github',
-                label=f'Installing plugin from github ...{short_source_str}',
+                label=f'Installing plugin from GitHub {owner}/{repo}@{release_tag}',
                context=ctx,
            )

            return self.success(data={'task_id': wrapper.id})

-        @self.route('/install/marketplace', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
+        @self.route(
+            '/install/marketplace',
+            methods=['POST'],
+            auth_type=group.AuthType.USER_TOKEN,
+        )
        async def _() -> str:
            data = await quart.request.json

@@ -142,3 +271,39 @@ class PluginsRouterGroup(group.RouterGroup):
            )

            return self.success(data={'task_id': wrapper.id})

+        @self.route('/config-files', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
+        async def _() -> str:
+            """Upload a file for plugin configuration"""
+            file = (await quart.request.files).get('file')
+            if file is None:
+                return self.http_status(400, -1, 'file is required')
+
+            # Check file size (10MB limit)
+            MAX_FILE_SIZE = 10 * 1024 * 1024  # 10MB
+            file_bytes = file.read()
+            if len(file_bytes) > MAX_FILE_SIZE:
+                return self.http_status(400, -1, 'file size exceeds 10MB limit')
+
+            # Generate unique file key with original extension
+            original_filename = file.filename
+            _, ext = os.path.splitext(original_filename)
+            file_key = f'plugin_config_{uuid.uuid4().hex}{ext}'
+
+            # Save file using storage manager
+            await self.ap.storage_mgr.storage_provider.save(file_key, file_bytes)
+
+            return self.success(data={'file_key': file_key})
+
+        @self.route('/config-files/<file_key>', methods=['DELETE'], auth_type=group.AuthType.USER_TOKEN)
+        async def _(file_key: str) -> str:
+            """Delete a plugin configuration file"""
+            # Only allow deletion of files with plugin_config_ prefix for security
+            if not file_key.startswith('plugin_config_'):
+                return self.http_status(400, -1, 'invalid file key')
+
+            try:
+                await self.ap.storage_mgr.storage_provider.delete(file_key)
+                return self.success(data={'deleted': True})
+            except Exception as e:
+                return self.http_status(500, -1, f'failed to delete file: {str(e)}')
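Reviewer note: what the release-URL regex accepts, as a runnable sketch using the same pattern as above (URLs are examples).

    import re

    pattern = r'github\.com/([^/]+)/([^/]+?)(?:\.git)?(?:/.*)?$'
    for url in ('https://github.com/langbot-app/LangBot',
                'github.com/langbot-app/LangBot.git',
                'https://github.com/langbot-app/LangBot/releases'):
        m = re.search(pattern, url)
        assert m and m.groups() == ('langbot-app', 'LangBot')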
@@ -6,7 +6,7 @@ from ... import group
@group.group_class('models/llm', '/api/v1/provider/models/llm')
class LLMModelsRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
-        @self.route('', methods=['GET', 'POST'])
+        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                return self.success(data={'models': await self.ap.llm_model_service.get_llm_models()})
@@ -17,7 +17,7 @@ class LLMModelsRouterGroup(group.RouterGroup):

            return self.success(data={'uuid': model_uuid})

-        @self.route('/<model_uuid>', methods=['GET', 'PUT', 'DELETE'])
+        @self.route('/<model_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            if quart.request.method == 'GET':
                model = await self.ap.llm_model_service.get_llm_model(model_uuid)
@@ -37,7 +37,7 @@ class LLMModelsRouterGroup(group.RouterGroup):

            return self.success()

-        @self.route('/<model_uuid>/test', methods=['POST'])
+        @self.route('/<model_uuid>/test', methods=['POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            json_data = await quart.request.json

@@ -49,7 +49,7 @@ class LLMModelsRouterGroup(group.RouterGroup):
@group.group_class('models/embedding', '/api/v1/provider/models/embedding')
class EmbeddingModelsRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
-        @self.route('', methods=['GET', 'POST'])
+        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                return self.success(data={'models': await self.ap.embedding_models_service.get_embedding_models()})
@@ -60,7 +60,7 @@ class EmbeddingModelsRouterGroup(group.RouterGroup):

            return self.success(data={'uuid': model_uuid})

-        @self.route('/<model_uuid>', methods=['GET', 'PUT', 'DELETE'])
+        @self.route('/<model_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            if quart.request.method == 'GET':
                model = await self.ap.embedding_models_service.get_embedding_model(model_uuid)
@@ -80,7 +80,7 @@ class EmbeddingModelsRouterGroup(group.RouterGroup):

            return self.success()

-        @self.route('/<model_uuid>/test', methods=['POST'])
+        @self.route('/<model_uuid>/test', methods=['POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            json_data = await quart.request.json
pkg/api/http/controller/groups/resources/mcp.py (new file, 62 lines)

@@ -0,0 +1,62 @@
from __future__ import annotations

import quart
import traceback


from ... import group


@group.group_class('mcp', '/api/v1/mcp')
class MCPRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('/servers', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN)
        async def _() -> str:
            """List MCP servers"""
            if quart.request.method == 'GET':
                servers = await self.ap.mcp_service.get_mcp_servers(contain_runtime_info=True)

                return self.success(data={'servers': servers})

            elif quart.request.method == 'POST':
                data = await quart.request.json

                try:
                    uuid = await self.ap.mcp_service.create_mcp_server(data)
                    return self.success(data={'uuid': uuid})
                except Exception as e:
                    traceback.print_exc()
                    return self.http_status(500, -1, f'Failed to create MCP server: {str(e)}')

        @self.route('/servers/<server_name>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN)
        async def _(server_name: str) -> str:
            """Get, update, or delete an MCP server config"""

            server_data = await self.ap.mcp_service.get_mcp_server_by_name(server_name)
            if server_data is None:
                return self.http_status(404, -1, 'Server not found')

            if quart.request.method == 'GET':
                return self.success(data={'server': server_data})

            elif quart.request.method == 'PUT':
                data = await quart.request.json
                try:
                    await self.ap.mcp_service.update_mcp_server(server_data['uuid'], data)
                    return self.success()
                except Exception as e:
                    return self.http_status(500, -1, f'Failed to update MCP server: {str(e)}')

            elif quart.request.method == 'DELETE':
                try:
                    await self.ap.mcp_service.delete_mcp_server(server_data['uuid'])
                    return self.success()
                except Exception as e:
                    return self.http_status(500, -1, f'Failed to delete MCP server: {str(e)}')

        @self.route('/servers/<server_name>/test', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
        async def _(server_name: str) -> str:
            """Test the MCP server connection"""
            server_data = await quart.request.json
            task_id = await self.ap.mcp_service.test_mcp_server(server_name=server_name, server_data=server_data)
            return self.success(data={'task_id': task_id})
@@ -5,6 +5,7 @@ import os

import quart
import quart_cors
+from werkzeug.exceptions import RequestEntityTooLarge

from ....core import app, entities as core_entities
from ....utils import importutil
@@ -15,12 +16,14 @@ from .groups import provider as groups_provider
from .groups import platform as groups_platform
from .groups import pipelines as groups_pipelines
from .groups import knowledge as groups_knowledge
+from .groups import resources as groups_resources

importutil.import_modules_in_pkg(groups)
importutil.import_modules_in_pkg(groups_provider)
importutil.import_modules_in_pkg(groups_platform)
importutil.import_modules_in_pkg(groups_pipelines)
importutil.import_modules_in_pkg(groups_knowledge)
+importutil.import_modules_in_pkg(groups_resources)


class HTTPController:
@@ -33,7 +36,20 @@ class HTTPController:
        self.quart_app = quart.Quart(__name__)
        quart_cors.cors(self.quart_app, allow_origin='*')

+        # Set maximum content length to prevent large file uploads
+        self.quart_app.config['MAX_CONTENT_LENGTH'] = group.MAX_FILE_SIZE
+
    async def initialize(self) -> None:
+        # Register custom error handler for file size limit
+        @self.quart_app.errorhandler(RequestEntityTooLarge)
+        async def handle_request_entity_too_large(e):
+            return quart.jsonify(
+                {
+                    'code': 400,
+                    'msg': 'File size exceeds 10MB limit. Please split large files into smaller parts.',
+                }
+            ), 400
+
        await self.register_routes()

    async def run(self) -> None:
pkg/api/http/service/apikey.py (new file, 79 lines)

@@ -0,0 +1,79 @@
from __future__ import annotations

import secrets
import sqlalchemy

from ....core import app
from ....entity.persistence import apikey


class ApiKeyService:
    ap: app.Application

    def __init__(self, ap: app.Application) -> None:
        self.ap = ap

    async def get_api_keys(self) -> list[dict]:
        """Get all API keys"""
        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(apikey.ApiKey))

        keys = result.all()
        return [self.ap.persistence_mgr.serialize_model(apikey.ApiKey, key) for key in keys]

    async def create_api_key(self, name: str, description: str = '') -> dict:
        """Create a new API key"""
        # Generate a secure random API key
        key = f'lbk_{secrets.token_urlsafe(32)}'

        key_data = {'name': name, 'key': key, 'description': description}

        await self.ap.persistence_mgr.execute_async(sqlalchemy.insert(apikey.ApiKey).values(**key_data))

        # Retrieve the created key
        result = await self.ap.persistence_mgr.execute_async(
            sqlalchemy.select(apikey.ApiKey).where(apikey.ApiKey.key == key)
        )
        created_key = result.first()

        return self.ap.persistence_mgr.serialize_model(apikey.ApiKey, created_key)

    async def get_api_key(self, key_id: int) -> dict | None:
        """Get a specific API key by ID"""
        result = await self.ap.persistence_mgr.execute_async(
            sqlalchemy.select(apikey.ApiKey).where(apikey.ApiKey.id == key_id)
        )

        key = result.first()

        if key is None:
            return None

        return self.ap.persistence_mgr.serialize_model(apikey.ApiKey, key)

    async def verify_api_key(self, key: str) -> bool:
        """Verify if an API key is valid"""
        result = await self.ap.persistence_mgr.execute_async(
            sqlalchemy.select(apikey.ApiKey).where(apikey.ApiKey.key == key)
        )

        key_obj = result.first()
        return key_obj is not None

    async def delete_api_key(self, key_id: int) -> None:
        """Delete an API key"""
        await self.ap.persistence_mgr.execute_async(
            sqlalchemy.delete(apikey.ApiKey).where(apikey.ApiKey.id == key_id)
        )

    async def update_api_key(self, key_id: int, name: str = None, description: str = None) -> None:
        """Update an API key's metadata (name, description)"""
        update_data = {}
        if name is not None:
            update_data['name'] = name
        if description is not None:
            update_data['description'] = description

        if update_data:
            await self.ap.persistence_mgr.execute_async(
                sqlalchemy.update(apikey.ApiKey).where(apikey.ApiKey.id == key_id).values(**update_data)
            )
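Reviewer note: the shape of a generated key, as a sketch. A fixed `lbk_` prefix plus 32 bytes of URL-safe randomness keeps keys recognizable in logs and configs.

    import secrets

    key = f'lbk_{secrets.token_urlsafe(32)}'
    assert key.startswith('lbk_') and len(key) > 40  # token_urlsafe(32) -> ~43 chars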
pkg/api/http/service/mcp.py (new file, 158 lines)

@@ -0,0 +1,158 @@
from __future__ import annotations

import sqlalchemy
import uuid
import asyncio

from ....core import app
from ....entity.persistence import mcp as persistence_mcp
from ....core import taskmgr
from ....provider.tools.loaders.mcp import RuntimeMCPSession, MCPSessionStatus


class MCPService:
    ap: app.Application

    def __init__(self, ap: app.Application) -> None:
        self.ap = ap

    async def get_runtime_info(self, server_name: str) -> dict | None:
        session = self.ap.tool_mgr.mcp_tool_loader.get_session(server_name)
        if session:
            return session.get_runtime_info_dict()
        return None

    async def get_mcp_servers(self, contain_runtime_info: bool = False) -> list[dict]:
        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_mcp.MCPServer))

        servers = result.all()
        serialized_servers = [
            self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server) for server in servers
        ]
        if contain_runtime_info:
            for server in serialized_servers:
                runtime_info = await self.get_runtime_info(server['name'])

                server['runtime_info'] = runtime_info if runtime_info else None

        return serialized_servers

    async def create_mcp_server(self, server_data: dict) -> str:
        server_data['uuid'] = str(uuid.uuid4())
        await self.ap.persistence_mgr.execute_async(sqlalchemy.insert(persistence_mcp.MCPServer).values(server_data))

        result = await self.ap.persistence_mgr.execute_async(
            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_data['uuid'])
        )
        server_entity = result.first()
        if server_entity:
            server_config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server_entity)
            if self.ap.tool_mgr.mcp_tool_loader:
                task = asyncio.create_task(self.ap.tool_mgr.mcp_tool_loader.host_mcp_server(server_config))
                self.ap.tool_mgr.mcp_tool_loader._hosted_mcp_tasks.append(task)

        return server_data['uuid']

    async def get_mcp_server_by_name(self, server_name: str) -> dict | None:
        result = await self.ap.persistence_mgr.execute_async(
            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.name == server_name)
        )
        server = result.first()
        if server is None:
            return None

        runtime_info = await self.get_runtime_info(server.name)
        server_data = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server)
        server_data['runtime_info'] = runtime_info if runtime_info else None
        return server_data

    async def update_mcp_server(self, server_uuid: str, server_data: dict) -> None:
        result = await self.ap.persistence_mgr.execute_async(
            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
        )
        old_server = result.first()
        old_server_name = old_server.name if old_server else None
        old_enable = old_server.enable if old_server else False

        await self.ap.persistence_mgr.execute_async(
            sqlalchemy.update(persistence_mcp.MCPServer)
            .where(persistence_mcp.MCPServer.uuid == server_uuid)
            .values(server_data)
        )

        if self.ap.tool_mgr.mcp_tool_loader:
            new_enable = server_data.get('enable', False)

            need_remove = old_server_name and old_server_name in self.ap.tool_mgr.mcp_tool_loader.sessions
            need_start = new_enable

            if old_enable and not new_enable:
                if need_remove:
                    await self.ap.tool_mgr.mcp_tool_loader.remove_mcp_server(old_server_name)

            elif not old_enable and new_enable:
                result = await self.ap.persistence_mgr.execute_async(
                    sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
                )
                updated_server = result.first()
                if updated_server:
                    server_config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, updated_server)
                    task = asyncio.create_task(self.ap.tool_mgr.mcp_tool_loader.host_mcp_server(server_config))
                    self.ap.tool_mgr.mcp_tool_loader._hosted_mcp_tasks.append(task)

            elif old_enable and new_enable:
                if need_remove:
                    await self.ap.tool_mgr.mcp_tool_loader.remove_mcp_server(old_server_name)
                result = await self.ap.persistence_mgr.execute_async(
                    sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
                )
                updated_server = result.first()
                if updated_server:
                    server_config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, updated_server)
                    task = asyncio.create_task(self.ap.tool_mgr.mcp_tool_loader.host_mcp_server(server_config))
                    self.ap.tool_mgr.mcp_tool_loader._hosted_mcp_tasks.append(task)

    async def delete_mcp_server(self, server_uuid: str) -> None:
        result = await self.ap.persistence_mgr.execute_async(
            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
        )
        server = result.first()
        server_name = server.name if server else None

        await self.ap.persistence_mgr.execute_async(
            sqlalchemy.delete(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
        )

        if server_name and self.ap.tool_mgr.mcp_tool_loader:
            if server_name in self.ap.tool_mgr.mcp_tool_loader.sessions:
                await self.ap.tool_mgr.mcp_tool_loader.remove_mcp_server(server_name)

    async def test_mcp_server(self, server_name: str, server_data: dict) -> int:
        """Test the MCP server connection and return the task ID"""

        runtime_mcp_session: RuntimeMCPSession | None = None

        if server_name != '_':
            runtime_mcp_session = self.ap.tool_mgr.mcp_tool_loader.get_session(server_name)
            if runtime_mcp_session is None:
                raise ValueError(f'Server not found: {server_name}')

            if runtime_mcp_session.status == MCPSessionStatus.ERROR:
                coroutine = runtime_mcp_session.start()
            else:
                coroutine = runtime_mcp_session.refresh()
        else:
            runtime_mcp_session = await self.ap.tool_mgr.mcp_tool_loader.load_mcp_server(server_config=server_data)
            coroutine = runtime_mcp_session.start()

        ctx = taskmgr.TaskContext.new()
        wrapper = self.ap.task_mgr.create_user_task(
            coroutine,
            kind='mcp-operation',
            name=f'mcp-test-{server_name}',
            label=f'Testing MCP server {server_name}',
            context=ctx,
        )
        return wrapper.id
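Reviewer note: the enable transitions that update_mcp_server handles, reduced to a pure-logic sketch (no I/O; `hosted` stands in for "session currently loaded in the tool loader").

    def plan_transition(old_enable: bool, new_enable: bool, hosted: bool) -> list[str]:
        actions = []
        if old_enable and not new_enable:          # disable: tear down if hosted
            if hosted:
                actions.append('remove')
        elif not old_enable and new_enable:        # enable: start hosting
            actions.append('start')
        elif old_enable and new_enable:            # config change: restart
            if hosted:
                actions.append('remove')
            actions.append('start')
        return actions

    assert plan_transition(True, False, True) == ['remove']
    assert plan_transition(False, True, False) == ['start']
    assert plan_transition(True, True, True) == ['remove', 'start']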
@@ -1,13 +1,14 @@
from __future__ import annotations

import uuid

import sqlalchemy
-from langbot_plugin.api.entities.builtin.provider import message as provider_message

from ....core import app
from ....entity.persistence import model as persistence_model
from ....entity.persistence import pipeline as persistence_pipeline
from ....provider.modelmgr import requester as model_requester
+from langbot_plugin.api.entities.builtin.provider import message as provider_message


class LLMModelsService:
@@ -104,12 +105,17 @@ class LLMModelsService:
        else:
            runtime_llm_model = await self.ap.model_mgr.init_runtime_llm_model(model_data)

+        # Some model vendors enable thinking by default, which makes this test prone to delays
+        extra_args = model_data.get('extra_args', {})
+        if not extra_args or 'thinking' not in extra_args:
+            extra_args['thinking'] = {'type': 'disabled'}
+
        await runtime_llm_model.requester.invoke_llm(
            query=None,
            model=runtime_llm_model,
-            messages=[provider_message.Message(role='user', content='Hello, world!')],
+            messages=[provider_message.Message(role='user', content='Hello, world! Please just reply a "Hello".')],
            funcs=[],
-            extra_args=model_data.get('extra_args', {}),
+            extra_args=extra_args,
        )
@@ -136,3 +136,33 @@ class PipelineService:
            )
        )
        await self.ap.pipeline_mgr.remove_pipeline(pipeline_uuid)

+    async def update_pipeline_extensions(self, pipeline_uuid: str, bound_plugins: list[dict], bound_mcp_servers: list[str] = None) -> None:
+        """Update the bound plugins and MCP servers for a pipeline"""
+        # Get current pipeline
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(persistence_pipeline.LegacyPipeline).where(
+                persistence_pipeline.LegacyPipeline.uuid == pipeline_uuid
+            )
+        )
+
+        pipeline = result.first()
+        if pipeline is None:
+            raise ValueError(f'Pipeline {pipeline_uuid} not found')
+
+        # Update extensions_preferences
+        extensions_preferences = pipeline.extensions_preferences or {}
+        extensions_preferences['plugins'] = bound_plugins
+        if bound_mcp_servers is not None:
+            extensions_preferences['mcp_servers'] = bound_mcp_servers
+
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.update(persistence_pipeline.LegacyPipeline)
+            .where(persistence_pipeline.LegacyPipeline.uuid == pipeline_uuid)
+            .values(extensions_preferences=extensions_preferences)
+        )
+
+        # Reload pipeline to apply changes
+        await self.ap.pipeline_mgr.remove_pipeline(pipeline_uuid)
+        pipeline = await self.get_pipeline(pipeline_uuid)
+        await self.ap.pipeline_mgr.load_pipeline(pipeline)
@@ -59,14 +59,15 @@ class CommandManager:
        context: command_context.ExecuteContext,
        operator_list: list[operator.CommandOperator],
        operator: operator.CommandOperator = None,
+        bound_plugins: list[str] | None = None,
    ) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
        """Execute a command"""

-        command_list = await self.ap.plugin_connector.list_commands()
+        command_list = await self.ap.plugin_connector.list_commands(bound_plugins)

        for command in command_list:
            if command.metadata.name == context.command:
-                async for ret in self.ap.plugin_connector.execute_command(context):
+                async for ret in self.ap.plugin_connector.execute_command(context, bound_plugins):
                    yield ret
                break
        else:
@@ -102,5 +103,8 @@ class CommandManager:

        ctx.shift()

-        async for ret in self._execute(ctx, self.cmd_list):
+        # Get bound plugins from query
+        bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+
+        async for ret in self._execute(ctx, self.cmd_list, bound_plugins=bound_plugins):
            yield ret
@@ -22,6 +22,8 @@ from ..api.http.service import model as model_service
from ..api.http.service import pipeline as pipeline_service
from ..api.http.service import bot as bot_service
from ..api.http.service import knowledge as knowledge_service
+from ..api.http.service import mcp as mcp_service
+from ..api.http.service import apikey as apikey_service
from ..discover import engine as discover_engine
from ..storage import mgr as storagemgr
from ..utils import logcache
@@ -119,6 +121,10 @@ class Application:

    knowledge_service: knowledge_service.KnowledgeService = None

+    mcp_service: mcp_service.MCPService = None
+
+    apikey_service: apikey_service.ApiKeyService = None
+
    def __init__(self):
        pass

@@ -19,6 +19,8 @@ from ...api.http.service import model as model_service
from ...api.http.service import pipeline as pipeline_service
from ...api.http.service import bot as bot_service
from ...api.http.service import knowledge as knowledge_service
+from ...api.http.service import mcp as mcp_service
+from ...api.http.service import apikey as apikey_service
from ...discover import engine as discover_engine
from ...storage import mgr as storagemgr
from ...utils import logcache
@@ -126,5 +128,11 @@ class BuildAppStage(stage.BootingStage):
        knowledge_service_inst = knowledge_service.KnowledgeService(ap)
        ap.knowledge_service = knowledge_service_inst

+        mcp_service_inst = mcp_service.MCPService(ap)
+        ap.mcp_service = mcp_service_inst
+
+        apikey_service_inst = apikey_service.ApiKeyService(ap)
+        ap.apikey_service = apikey_service_inst
+
        ctrl = controller.Controller(ap)
        ap.ctrl = ctrl
@@ -1,11 +1,93 @@
from __future__ import annotations

+import os
+from typing import Any
+
from .. import stage, app
from ..bootutils import config


+def _apply_env_overrides_to_config(cfg: dict) -> dict:
+    """Apply environment variable overrides to data/config.yaml
+
+    Environment variables should be uppercase and use __ (double underscore)
+    to represent nested keys. For example:
+    - CONCURRENCY__PIPELINE overrides concurrency.pipeline
+    - PLUGIN__RUNTIME_WS_URL overrides plugin.runtime_ws_url
+
+    Arrays and dict types are ignored.
+
+    Args:
+        cfg: Configuration dictionary
+
+    Returns:
+        Updated configuration dictionary
+    """
+
+    def convert_value(value: str, original_value: Any) -> Any:
+        """Convert string value to appropriate type based on original value
+
+        Args:
+            value: String value from environment variable
+            original_value: Original value to infer type from
+
+        Returns:
+            Converted value (falls back to string if conversion fails)
+        """
+        if isinstance(original_value, bool):
+            return value.lower() in ('true', '1', 'yes', 'on')
+        elif isinstance(original_value, int):
+            try:
+                return int(value)
+            except ValueError:
+                # If conversion fails, keep as string (user error, but non-breaking)
+                return value
+        elif isinstance(original_value, float):
+            try:
+                return float(value)
+            except ValueError:
+                # If conversion fails, keep as string (user error, but non-breaking)
+                return value
+        else:
+            return value
+
+    # Process environment variables
+    for env_key, env_value in os.environ.items():
+        # Check if the environment variable is uppercase and contains __
+        if not env_key.isupper():
+            continue
+        if '__' not in env_key:
+            continue
+
+        print(f'apply env overrides to config: env_key: {env_key}, env_value: {env_value}')
+
+        # Convert environment variable name to config path
+        # e.g., CONCURRENCY__PIPELINE -> ['concurrency', 'pipeline']
+        keys = [key.lower() for key in env_key.split('__')]
+
+        # Navigate to the target value and validate the path
+        current = cfg
+
+        for i, key in enumerate(keys):
+            if not isinstance(current, dict) or key not in current:
+                break
+
+            if i == len(keys) - 1:
+                # At the final key - check if it's a scalar value
+                if isinstance(current[key], (dict, list)):
+                    # Skip dict and list types
+                    pass
+                else:
+                    # Valid scalar value - convert and set it
+                    converted_value = convert_value(env_value, current[key])
+                    current[key] = converted_value
+            else:
+                # Navigate deeper
+                current = current[key]
+
+    return cfg
+
+
@stage.stage_class('LoadConfigStage')
class LoadConfigStage(stage.BootingStage):
    """Load config file stage"""
@@ -54,6 +136,10 @@ class LoadConfigStage(stage.BootingStage):
        ap.instance_config = await config.load_yaml_config(
            'data/config.yaml', 'templates/config.yaml', completion=False
        )

+        # Apply environment variable overrides to data/config.yaml
+        ap.instance_config.data = _apply_env_overrides_to_config(ap.instance_config.data)
+
        await ap.instance_config.dump_config()

        ap.sensitive_meta = await config.load_json_config(
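Reviewer note: how the override walks a nested config, as a sketch. The env pair and config values are illustrative; the function is the one defined above.

    import os

    cfg = {'concurrency': {'pipeline': 8}, 'plugin': {'runtime_ws_url': 'ws://x'}}
    os.environ['CONCURRENCY__PIPELINE'] = '16'

    cfg = _apply_env_overrides_to_config(cfg)
    assert cfg['concurrency']['pipeline'] == 16  # coerced to int from the original value's type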
@@ -156,7 +156,7 @@ class TaskWrapper:
            'state': self.task._state,
            'exception': self.assume_exception().__str__() if self.assume_exception() is not None else None,
            'exception_traceback': exception_traceback,
-            'result': self.assume_result().__str__() if self.assume_result() is not None else None,
+            'result': self.assume_result() if self.assume_result() is not None else None,
        },
    }
pkg/entity/persistence/apikey.py (new file, 21 lines)

@@ -0,0 +1,21 @@
import sqlalchemy

from .base import Base


class ApiKey(Base):
    """API Key for external service authentication"""

    __tablename__ = 'api_keys'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    key = sqlalchemy.Column(sqlalchemy.String(255), nullable=False, unique=True)
    description = sqlalchemy.Column(sqlalchemy.String(512), nullable=True, default='')
    created_at = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, server_default=sqlalchemy.func.now())
    updated_at = sqlalchemy.Column(
        sqlalchemy.DateTime,
        nullable=False,
        server_default=sqlalchemy.func.now(),
        onupdate=sqlalchemy.func.now(),
    )
pkg/entity/persistence/mcp.py (new file, 20 lines)

@@ -0,0 +1,20 @@
import sqlalchemy

from .base import Base


class MCPServer(Base):
    __tablename__ = 'mcp_servers'

    uuid = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True, unique=True)
    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    enable = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    mode = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)  # stdio, sse
    extra_args = sqlalchemy.Column(sqlalchemy.JSON, nullable=False, default={})
    created_at = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, server_default=sqlalchemy.func.now())
    updated_at = sqlalchemy.Column(
        sqlalchemy.DateTime,
        nullable=False,
        server_default=sqlalchemy.func.now(),
        onupdate=sqlalchemy.func.now(),
    )
@@ -1,12 +1,13 @@
import sqlalchemy

from .base import Base
+from ...utils import constants


initial_metadata = [
    {
        'key': 'database_version',
-        'value': '0',
+        'value': str(constants.required_database_version),
    },
]


@@ -22,6 +22,7 @@ class LegacyPipeline(Base):
    is_default = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
    stages = sqlalchemy.Column(sqlalchemy.JSON, nullable=False)
    config = sqlalchemy.Column(sqlalchemy.JSON, nullable=False)
+    extensions_preferences = sqlalchemy.Column(sqlalchemy.JSON, nullable=False, default={})


class PipelineRunRecord(Base):

@@ -78,6 +78,8 @@ class PersistenceManager:

        self.ap.logger.info(f'Successfully upgraded database to version {last_migration_number}.')

+        await self.write_default_pipeline()
+
    async def create_tables(self):
        # create tables
        async with self.get_db_engine().connect() as conn:
@@ -98,6 +100,7 @@ class PersistenceManager:
            if row is None:
                await self.execute_async(sqlalchemy.insert(metadata.Metadata).values(item))

+    async def write_default_pipeline(self):
        # write default pipeline
        result = await self.execute_async(sqlalchemy.select(pipeline.LegacyPipeline))
        default_pipeline_uuid = None
@@ -115,6 +118,7 @@ class PersistenceManager:
                    'name': 'ChatPipeline',
                    'description': 'Default pipeline, new bots will be bound to this pipeline | 默认提供的流水线,您配置的机器人将自动绑定到此流水线',
                    'config': pipeline_config,
+                    'extensions_preferences': {},
                }

                await self.execute_async(sqlalchemy.insert(pipeline.LegacyPipeline).values(pipeline_data))

@@ -0,0 +1,20 @@
import sqlalchemy
from .. import migration


@migration.migration_class(9)
class DBMigratePipelineExtensionPreferences(migration.DBMigration):
    """Pipeline extension preferences"""

    async def upgrade(self):
        """Upgrade"""

        sql_text = sqlalchemy.text(
            "ALTER TABLE legacy_pipelines ADD COLUMN extensions_preferences JSON NOT NULL DEFAULT '{}'"
        )
        await self.ap.persistence_mgr.execute_async(sql_text)

    async def downgrade(self):
        """Downgrade"""
        sql_text = sqlalchemy.text('ALTER TABLE legacy_pipelines DROP COLUMN extensions_preferences')
        await self.ap.persistence_mgr.execute_async(sql_text)
@@ -68,6 +68,12 @@ class RuntimePipeline:

    stage_containers: list[StageInstContainer]
    """Stage instance containers"""

+    bound_plugins: list[str]
+    """Plugins bound to this pipeline (format: author/plugin_name)"""
+
+    bound_mcp_servers: list[str]
+    """MCP servers bound to this pipeline (format: uuid)"""
+
    def __init__(
        self,
@@ -78,9 +84,20 @@ class RuntimePipeline:
        self.ap = ap
        self.pipeline_entity = pipeline_entity
        self.stage_containers = stage_containers

+        # Extract bound plugins and MCP servers from extensions_preferences
+        extensions_prefs = pipeline_entity.extensions_preferences or {}
+        plugin_list = extensions_prefs.get('plugins', [])
+        self.bound_plugins = [f"{p['author']}/{p['name']}" for p in plugin_list] if plugin_list else []
+
+        mcp_server_list = extensions_prefs.get('mcp_servers', [])
+        self.bound_mcp_servers = mcp_server_list if mcp_server_list else []

    async def run(self, query: pipeline_query.Query):
        query.pipeline_config = self.pipeline_entity.config
+        # Store bound plugins and MCP servers in query for filtering
+        query.variables['_pipeline_bound_plugins'] = self.bound_plugins
+        query.variables['_pipeline_bound_mcp_servers'] = self.bound_mcp_servers
        await self.process_query(query)

    async def _check_output(self, query: pipeline_query.Query, result: pipeline_entities.StageProcessResult):
@@ -96,7 +113,7 @@ class RuntimePipeline:
        if query.pipeline_config['output']['misc']['at-sender'] and isinstance(
            query.message_event, platform_events.GroupMessage
        ):
-            result.user_notice.insert(0, platform_message.At(query.message_event.sender.id))
+            result.user_notice.insert(0, platform_message.At(target=query.message_event.sender.id))
        if await query.adapter.is_stream_output_supported():
            await query.adapter.reply_message_chunk(
                message_source=query.message_event,
@@ -188,6 +205,9 @@ class RuntimePipeline:
    async def process_query(self, query: pipeline_query.Query):
        """Process the query"""
        try:
+            # Get bound plugins for this pipeline
+            bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+
            # ======== Emit MessageReceived event ========
            event_type = (
                events.PersonMessageReceived
@@ -203,7 +223,7 @@ class RuntimePipeline:
                message_chain=query.message_chain,
            )

-            event_ctx = await self.ap.plugin_connector.emit_event(event_obj)
+            event_ctx = await self.ap.plugin_connector.emit_event(event_obj, bound_plugins)

            if event_ctx.is_prevented_default():
                return
@@ -213,7 +233,7 @@ class RuntimePipeline:
            await self._execute_from_stage(0, query)
        except Exception as e:
            inst_name = query.current_stage_name if query.current_stage_name else 'unknown'
-            self.ap.logger.error(f'处理请求时出错 query_id={query.query_id} stage={inst_name} : {e}')
+            self.ap.logger.error(f'Error processing query {query.query_id} stage={inst_name} : {e}')
            self.ap.logger.error(f'Traceback: {traceback.format_exc()}')
        finally:
            self.ap.logger.debug(f'Query {query.query_id} processed')
@@ -35,11 +35,17 @@ class PreProcessor(stage.PipelineStage):
        session = await self.ap.sess_mgr.get_session(query)

        # When not local-agent, llm_model is None
        llm_model = (
            await self.ap.model_mgr.get_model_by_uuid(query.pipeline_config['ai']['local-agent']['model'])
            if selected_runner == 'local-agent'
            else None
        )
        try:
            llm_model = (
                await self.ap.model_mgr.get_model_by_uuid(query.pipeline_config['ai']['local-agent']['model'])
                if selected_runner == 'local-agent'
                else None
            )
        except ValueError:
            self.ap.logger.warning(
                f'LLM model {query.pipeline_config["ai"]["local-agent"]["model"] + " "}not found or not configured'
            )
            llm_model = None

        conversation = await self.ap.sess_mgr.get_conversation(
            query,
@@ -54,12 +60,19 @@ class PreProcessor(stage.PipelineStage):
        query.prompt = conversation.prompt.copy()
        query.messages = conversation.messages.copy()

        if selected_runner == 'local-agent':
        if selected_runner == 'local-agent' and llm_model:
            query.use_funcs = []
            query.use_llm_model_uuid = llm_model.model_entity.uuid

            if llm_model.model_entity.abilities.__contains__('func_call'):
                query.use_funcs = await self.ap.tool_mgr.get_all_tools()
                # Get bound plugins and MCP servers for filtering tools
                bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
                bound_mcp_servers = query.variables.get('_pipeline_bound_mcp_servers', None)
                query.use_funcs = await self.ap.tool_mgr.get_all_tools(bound_plugins, bound_mcp_servers)

                self.ap.logger.debug(f'Bound plugins: {bound_plugins}')
                self.ap.logger.debug(f'Bound MCP servers: {bound_mcp_servers}')
                self.ap.logger.debug(f'Use funcs: {query.use_funcs}')
        variables = {
            'session_id': f'{query.session.launcher_type.value}_{query.session.launcher_id}',
@@ -72,7 +85,11 @@ class PreProcessor(stage.PipelineStage):

        # Check if this model supports vision, if not, remove all images
        # TODO this checking should be performed in runner, and in this stage, the image should be reserved
        if selected_runner == 'local-agent' and not llm_model.model_entity.abilities.__contains__('vision'):
        if (
            selected_runner == 'local-agent'
            and llm_model
            and not llm_model.model_entity.abilities.__contains__('vision')
        ):
            for msg in query.messages:
                if isinstance(msg.content, list):
                    for me in msg.content:
@@ -89,15 +106,22 @@ class PreProcessor(stage.PipelineStage):
                    content_list.append(provider_message.ContentElement.from_text(me.text))
                    plain_text += me.text
                elif isinstance(me, platform_message.Image):
                    if selected_runner != 'local-agent' or llm_model.model_entity.abilities.__contains__('vision'):
                    if selected_runner != 'local-agent' or (
                        llm_model and llm_model.model_entity.abilities.__contains__('vision')
                    ):
                        if me.base64 is not None:
                            content_list.append(provider_message.ContentElement.from_image_base64(me.base64))
                elif isinstance(me, platform_message.File):
                    # if me.url is not None:
                    content_list.append(provider_message.ContentElement.from_file_url(me.url, me.name))
                elif isinstance(me, platform_message.Quote) and qoute_msg:
                    for msg in me.origin:
                        if isinstance(msg, platform_message.Plain):
                            content_list.append(provider_message.ContentElement.from_text(msg.text))
                        elif isinstance(msg, platform_message.Image):
                            if selected_runner != 'local-agent' or llm_model.model_entity.abilities.__contains__('vision'):
                            if selected_runner != 'local-agent' or (
                                llm_model and llm_model.model_entity.abilities.__contains__('vision')
                            ):
                                if msg.base64 is not None:
                                    content_list.append(provider_message.ContentElement.from_image_base64(msg.base64))

@@ -113,7 +137,9 @@ class PreProcessor(stage.PipelineStage):
            query=query,
        )

        event_ctx = await self.ap.plugin_connector.emit_event(event)
        # Get bound plugins for filtering
        bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
        event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        query.prompt.messages = event_ctx.event.default_prompt
        query.messages = event_ctx.event.prompt
@@ -9,7 +9,6 @@ from .. import handler
from ... import entities
from ....provider import runner as runner_module

import langbot_plugin.api.entities.builtin.platform.message as platform_message
import langbot_plugin.api.entities.events as events
from ....utils import importutil
from ....provider import runners
@@ -44,21 +43,24 @@ class ChatMessageHandler(handler.MessageHandler):
            query=query,
        )

        event_ctx = await self.ap.plugin_connector.emit_event(event)
        # Get bound plugins for filtering
        bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
        event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        is_create_card = False  # whether a streaming card needs to be created

        if event_ctx.is_prevented_default():
            if event_ctx.event.reply is not None:
                mc = platform_message.MessageChain(event_ctx.event.reply)
            if event_ctx.event.reply_message_chain is not None:
                mc = event_ctx.event.reply_message_chain
                query.resp_messages.append(mc)

                yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
            else:
                yield entities.StageProcessResult(result_type=entities.ResultType.INTERRUPT, new_query=query)
        else:
            if event_ctx.event.alter is not None:
            if event_ctx.event.user_message_alter is not None:
                # if isinstance(event_ctx.event, str):  # multimodal alter is not considered for now
                query.user_message.content = event_ctx.event.alter
                query.user_message.content = event_ctx.event.user_message_alter

            text_length = 0
            try:
@@ -5,7 +5,6 @@ import typing
from .. import handler
from ... import entities
import langbot_plugin.api.entities.builtin.provider.message as provider_message
import langbot_plugin.api.entities.builtin.platform.message as platform_message
import langbot_plugin.api.entities.builtin.provider.session as provider_session
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
import langbot_plugin.api.entities.events as events
@@ -46,11 +45,13 @@ class CommandHandler(handler.MessageHandler):
            query=query,
        )

        event_ctx = await self.ap.plugin_connector.emit_event(event)
        # Get bound plugins for filtering
        bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
        event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        if event_ctx.is_prevented_default():
            if event_ctx.event.reply is not None:
                mc = platform_message.MessageChain(event_ctx.event.reply)
            if event_ctx.event.reply_message_chain is not None:
                mc = event_ctx.event.reply_message_chain

                query.resp_messages.append(mc)

@@ -59,11 +60,6 @@ class CommandHandler(handler.MessageHandler):
                yield entities.StageProcessResult(result_type=entities.ResultType.INTERRUPT, new_query=query)

        else:
            if event_ctx.event.alter is not None:
                query.message_chain = platform_message.MessageChain(
                    [platform_message.Plain(text=event_ctx.event.alter)]
                )

            session = await self.ap.sess_mgr.get_session(query)

            async for ret in self.ap.cmd_mgr.execute(
@@ -80,7 +76,12 @@ class CommandHandler(handler.MessageHandler):
                    self.ap.logger.info(f'Command({query.query_id}) error: {self.cut_str(str(ret.error))}')

                    yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
                elif ret.text is not None or ret.image_url is not None or ret.image_base64 is not None:
                elif (
                    ret.text is not None
                    or ret.image_url is not None
                    or ret.image_base64 is not None
                    or ret.file_url is not None
                ):
                    content: list[provider_message.ContentElement] = []

                    if ret.text is not None:
@@ -92,6 +93,9 @@ class CommandHandler(handler.MessageHandler):
                    if ret.image_base64 is not None:
                        content.append(provider_message.ContentElement.from_image_base64(ret.image_base64))

                    if ret.file_url is not None:
                        # This is a file-type return
                        content.append(provider_message.ContentElement.from_file_url(ret.file_url, ret.file_name))
                    query.resp_messages.append(
                        provider_message.Message(
                            role='command',
@@ -16,26 +16,17 @@ class AtBotRule(rule_model.GroupRespondRule):
        rule_dict: dict,
        query: pipeline_query.Query,
    ) -> entities.RuleJudgeResult:
        found = False

        def remove_at(message_chain: platform_message.MessageChain):
            nonlocal found
            for component in message_chain.root:
                if isinstance(component, platform_message.At) and component.target == query.adapter.bot_account_id:
                if isinstance(component, platform_message.At) and str(component.target) == str(query.adapter.bot_account_id):
                    message_chain.remove(component)
                    found = True
                    break

        remove_at(message_chain)
        remove_at(message_chain)  # reply messages @ the bot twice; check for and remove the duplicate

        # if message_chain.has(platform_message.At(query.adapter.bot_account_id)) and rule_dict['at']:
        #     message_chain.remove(platform_message.At(query.adapter.bot_account_id))

        # if message_chain.has(
        #     platform_message.At(query.adapter.bot_account_id)
        # ):  # reply messages @ the bot twice; check for and remove the duplicate
        #     message_chain.remove(platform_message.At(query.adapter.bot_account_id))

        # return entities.RuleJudgeResult(
        #     matching=True,
        #     replacement=message_chain,
        # )

        return entities.RuleJudgeResult(matching=False, replacement=message_chain)
        return entities.RuleJudgeResult(matching=found, replacement=message_chain)
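One line in this hunk is easy to miss: the comparison is now normalized through str() on both sides. A minimal illustration with made-up values:

# Illustration only (values are invented): some platforms deliver At.target
# as an int while bot_account_id is configured as a str, so a plain == would
# never match even when the IDs are the same.
component_target = 123456789
bot_account_id = '123456789'
assert component_target != bot_account_id               # old check: silently fails
assert str(component_target) == str(bot_account_id)    # new check: matches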
@@ -72,7 +72,9 @@ class ResponseWrapper(stage.PipelineStage):
            query=query,
        )

        event_ctx = await self.ap.plugin_connector.emit_event(event)
        # Get bound plugins for filtering
        bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
        event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        if event_ctx.is_prevented_default():
            yield entities.StageProcessResult(
@@ -80,8 +82,8 @@ class ResponseWrapper(stage.PipelineStage):
                new_query=query,
            )
        else:
            if event_ctx.event.reply is not None:
                query.resp_message_chain.append(platform_message.MessageChain(event_ctx.event.reply))
            if event_ctx.event.reply_message_chain is not None:
                query.resp_message_chain.append(event_ctx.event.reply_message_chain)

            else:
                query.resp_message_chain.append(result.get_content_platform_message_chain())
@@ -115,7 +117,9 @@ class ResponseWrapper(stage.PipelineStage):
            query=query,
        )

        event_ctx = await self.ap.plugin_connector.emit_event(event)
        # Get bound plugins for filtering
        bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
        event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        if event_ctx.is_prevented_default():
            yield entities.StageProcessResult(
@@ -123,10 +127,8 @@ class ResponseWrapper(stage.PipelineStage):
                new_query=query,
            )
        else:
            if event_ctx.event.reply is not None:
                query.resp_message_chain.append(
                    platform_message.MessageChain(text=event_ctx.event.reply)
                )
            if event_ctx.event.reply_message_chain is not None:
                query.resp_message_chain.append(event_ctx.event.reply_message_chain)

            else:
                query.resp_message_chain.append(
@@ -157,6 +157,9 @@ class PlatformManager:
        self.adapter_dict = {}

    async def initialize(self):
        # delete all bot log images
        await self.ap.storage_mgr.storage_provider.delete_dir_recursive('bot_log_images')

        self.adapter_components = self.ap.discover.get_components_by_kind('MessagePlatformAdapter')
        adapter_dict: dict[str, type[abstract_platform_adapter.AbstractMessagePlatformAdapter]] = {}
        for component in self.adapter_components:

@@ -149,7 +149,7 @@ class EventLogger(abstract_platform_event_logger.AbstractEventLogger):
            extension = mimetypes.guess_extension(mime_type)
            if extension is None:
                extension = '.jpg'
            image_key = f'{message_session_id}-{uuid.uuid4()}{extension}'
            image_key = f'bot_log_images/{message_session_id}-{uuid.uuid4()}{extension}'
            await self.ap.storage_mgr.storage_provider.save(image_key, img_bytes)
            image_keys.append(image_key)
@@ -41,6 +41,8 @@ class DingTalkMessageConverter(abstract_platform_adapter.AbstractMessageConverte
            yiri_msg_list.append(platform_message.Plain(text=text_content))
        if event.picture:
            yiri_msg_list.append(platform_message.Image(base64=event.picture))
        if event.file:
            yiri_msg_list.append(platform_message.File(url=event.file, name=event.name))
        if event.audio:
            yiri_msg_list.append(platform_message.Voice(base64=event.audio))

@@ -22,6 +22,7 @@ import langbot_plugin.api.definition.abstract.platform.event_logger as abstract_
from ..logger import EventLogger


# Voice-feature exception definitions
class VoiceConnectionError(Exception):
    """Base exception for voice connections"""
@@ -139,19 +139,15 @@ class QQOfficialAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter
    event_converter: QQOfficialEventConverter = QQOfficialEventConverter()

    def __init__(self, config: dict, logger: EventLogger):
        self.config = config
        self.logger = logger
        bot = QQOfficialClient(
            app_id=config['appid'], secret=config['secret'], token=config['token'], logger=logger
        )

        required_keys = [
            'appid',
            'secret',
        ]
        missing_keys = [key for key in required_keys if key not in config]
        if missing_keys:
            raise command_errors.ParamNotEnoughError('QQ官方机器人缺少相关配置项,请查看文档或联系管理员')

        self.bot = QQOfficialClient(
            app_id=config['appid'], secret=config['secret'], token=config['token'], logger=self.logger
        super().__init__(
            config=config,
            logger=logger,
            bot=bot,
            bot_account_id=config['appid'],
        )

    async def reply_message(
@@ -139,7 +139,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
            pattern = r'@\S{1,20}'
            content_no_preifx = re.sub(pattern, '', content_no_preifx)

        return platform_message.MessageChain([platform_message.Plain(content_no_preifx)])
        return platform_message.MessageChain([platform_message.Plain(text=content_no_preifx)])

    async def _handler_image(self, message: Optional[dict], content_no_preifx: str) -> platform_message.MessageChain:
        """Handle image messages (msg_type=3)"""
@@ -265,7 +265,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
                # Text message
                try:
                    if '<msg>' not in quote_data:
                        quote_data_message_list.append(platform_message.Plain(quote_data))
                        quote_data_message_list.append(platform_message.Plain(text=quote_data))
                    else:
                        # Expand the quoted message
                        quote_data_xml = ET.fromstring(quote_data)
@@ -280,7 +280,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
                        quote_data_message_list.extend(await self._handler_compound(None, quote_data))
                except Exception as e:
                    self.logger.error(f'处理引用消息异常 expcetion:{e}')
                    quote_data_message_list.append(platform_message.Plain(quote_data))
                    quote_data_message_list.append(platform_message.Plain(text=quote_data))
            message_list.append(
                platform_message.Quote(
                    sender_id=sender_id,
@@ -290,7 +290,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
        if len(user_data) > 0:
            pattern = r'@\S{1,20}'
            user_data = re.sub(pattern, '', user_data)
            message_list.append(platform_message.Plain(user_data))
            message_list.append(platform_message.Plain(text=user_data))

        return platform_message.MessageChain(message_list)
@@ -543,7 +543,6 @@ class WeChatPadAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter)
    ] = {}

    def __init__(self, config: dict, logger: EventLogger):

        quart_app = quart.Quart(__name__)

        message_converter = WeChatPadMessageConverter(config, logger)
@@ -551,15 +550,14 @@ class WeChatPadAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter)
        bot = WeChatPadClient(config['wechatpad_url'], config['token'])
        super().__init__(
            config=config,
            logger = logger,
            quart_app = quart_app,
            message_converter =message_converter,
            event_converter = event_converter,
            logger=logger,
            quart_app=quart_app,
            message_converter=message_converter,
            event_converter=event_converter,
            listeners={},
            bot_account_id ='',
            name="WeChatPad",
            bot_account_id='',
            name='WeChatPad',
            bot=bot,
        )

    async def ws_message(self, data):
@@ -49,7 +49,7 @@ class WecomBotEventConverter(abstract_platform_adapter.AbstractEventConverter):
            return platform_events.FriendMessage(
                sender=platform_entities.Friend(
                    id=event.userid,
                    nickname='',
                    nickname=event.username,
                    remark='',
                ),
                message_chain=message_chain,
@@ -61,10 +61,10 @@ class WecomBotEventConverter(abstract_platform_adapter.AbstractEventConverter):
            sender = platform_entities.GroupMember(
                id=event.userid,
                permission='MEMBER',
                member_name=event.userid,
                member_name=event.username,
                group=platform_entities.Group(
                    id=str(event.chatid),
                    name='',
                    name=event.chatname,
                    permission=platform_entities.Permission.Member,
                ),
                special_title='',
@@ -117,6 +117,50 @@ class WecomBotAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter):
        content = await self.message_converter.yiri2target(message)
        await self.bot.set_message(message_source.source_platform_object.message_id, content)

    async def reply_message_chunk(
        self,
        message_source: platform_events.MessageEvent,
        bot_message,
        message: platform_message.MessageChain,
        quote_origin: bool = False,
        is_final: bool = False,
    ):
        """Write incremental pipeline output into a WeCom stream session.

        Args:
            message_source: The original message event provided by the pipeline.
            bot_message: Model metadata for the current chunk (unused).
            message: The message chain to reply with.
            quote_origin: Whether to quote the original message (not yet supported by WeCom).
            is_final: Marks whether the current chunk is the final reply.

        Returns:
            dict: Contains a `stream` key indicating whether the write succeeded.

        Example:
            Triggered automatically by the pipeline's `reply_message_chunk` call; no manual invocation needed.
        """
        # Convert to plain text (the current smart-bot protocol only supports text streams)
        content = await self.message_converter.yiri2target(message)
        msg_id = message_source.source_platform_object.message_id

        # Push the chunk into the queue in WecomBotClient; the return value decides whether to fall back
        success = await self.bot.push_stream_chunk(msg_id, content, is_final=is_final)
        if not success and is_final:
            # Fall back to the legacy set_message when the stream queue was missed
            await self.bot.set_message(msg_id, content)
        return {'stream': success}

    async def is_stream_output_supported(self) -> bool:
        """Streaming is enabled by default on the smart-bot side.

        Returns:
            bool: Always True.

        Example:
            The pipeline execution stage calls this to confirm whether streaming is enabled."""
        return True

    async def send_message(self, target_type, target_id, message):
        pass
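A hedged call-site sketch of the fallback contract documented above; the adapter, event, and chain objects are assumed fixtures, not part of this diff:

# Call-site sketch; `adapter`, `event`, and `chain` are assumed fixtures.
result = await adapter.reply_message_chunk(
    message_source=event,      # platform_events.MessageEvent
    bot_message=None,          # unused by this adapter
    message=chain,             # platform_message.MessageChain
    is_final=True,
)
if not result['stream']:
    # The final chunk missed the stream queue; the adapter already delivered
    # the full text through the legacy set_message path instead.
    pass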
@@ -6,19 +6,24 @@ from typing import Any
import typing
import os
import sys

import httpx
from async_lru import alru_cache

from ..core import app
from . import handler
from ..utils import platform
from langbot_plugin.runtime.io.controllers.stdio import client as stdio_client_controller
from langbot_plugin.runtime.io.controllers.stdio import (
    client as stdio_client_controller,
)
from langbot_plugin.runtime.io.controllers.ws import client as ws_client_controller
from langbot_plugin.api.entities import events
from langbot_plugin.api.entities import context
import langbot_plugin.runtime.io.connection as base_connection
from langbot_plugin.api.definition.components.manifest import ComponentManifest
from langbot_plugin.api.entities.builtin.command import context as command_context
from langbot_plugin.api.entities.builtin.command import (
    context as command_context,
    errors as command_errors,
)
from langbot_plugin.runtime.plugin.mgr import PluginInstallSource
from ..core import taskmgr

@@ -32,10 +37,16 @@ class PluginRuntimeConnector:

    handler_task: asyncio.Task

    heartbeat_task: asyncio.Task | None = None

    stdio_client_controller: stdio_client_controller.StdioClientController

    ctrl: stdio_client_controller.StdioClientController | ws_client_controller.WebSocketClientController

    runtime_subprocess_on_windows: asyncio.subprocess.Process | None = None

    runtime_subprocess_on_windows_task: asyncio.Task | None = None

    runtime_disconnect_callback: typing.Callable[
        [PluginRuntimeConnector], typing.Coroutine[typing.Any, typing.Any, None]
    ]
@@ -54,13 +65,24 @@ class PluginRuntimeConnector:
        self.runtime_disconnect_callback = runtime_disconnect_callback
        self.is_enable_plugin = self.ap.instance_config.data.get('plugin', {}).get('enable', True)

    async def heartbeat_loop(self):
        while True:
            await asyncio.sleep(20)
            try:
                await self.ping_plugin_runtime()
                self.ap.logger.debug('Heartbeat to plugin runtime success.')
            except Exception as e:
                self.ap.logger.debug(f'Failed to heartbeat to plugin runtime: {e}')

    async def initialize(self):
        if not self.is_enable_plugin:
            self.ap.logger.info('Plugin system is disabled.')
            return

        async def new_connection_callback(connection: base_connection.Connection):
            async def disconnect_callback(rchandler: handler.RuntimeConnectionHandler) -> bool:
            async def disconnect_callback(
                rchandler: handler.RuntimeConnectionHandler,
            ) -> bool:
                if platform.get_platform() == 'docker' or platform.use_websocket_to_connect_plugin_runtime():
                    self.ap.logger.error('Disconnected from plugin runtime, trying to reconnect...')
                    await self.runtime_disconnect_callback(self)
@@ -72,6 +94,7 @@ class PluginRuntimeConnector:
                return False

            self.handler = handler.RuntimeConnectionHandler(connection, disconnect_callback, self.ap)

            self.handler_task = asyncio.create_task(self.handler.run())
            _ = await self.handler.ping()
            self.ap.logger.info('Connected to plugin runtime.')
@@ -85,8 +108,14 @@ class PluginRuntimeConnector:
                'runtime_ws_url', 'ws://langbot_plugin_runtime:5400/control/ws'
            )

            async def make_connection_failed_callback(ctrl: ws_client_controller.WebSocketClientController) -> None:
                self.ap.logger.error('Failed to connect to plugin runtime, trying to reconnect...')
            async def make_connection_failed_callback(
                ctrl: ws_client_controller.WebSocketClientController,
                exc: Exception = None,
            ) -> None:
                if exc is not None:
                    self.ap.logger.error(f'Failed to connect to plugin runtime({ws_url}): {exc}')
                else:
                    self.ap.logger.error(f'Failed to connect to plugin runtime({ws_url}), trying to reconnect...')
                await self.runtime_disconnect_callback(self)

            self.ctrl = ws_client_controller.WebSocketClientController(
@@ -94,6 +123,41 @@ class PluginRuntimeConnector:
                make_connection_failed_callback=make_connection_failed_callback,
            )
            task = self.ctrl.run(new_connection_callback)
        elif platform.get_platform() == 'win32':
            # Due to Windows's lack of supports for both stdio and subprocess:
            # See also: https://docs.python.org/zh-cn/3.13/library/asyncio-platforms.html
            # We have to launch runtime via cmd but communicate via ws.
            self.ap.logger.info('(windows) use cmd to launch plugin runtime and communicate via ws')

            python_path = sys.executable
            env = os.environ.copy()
            self.runtime_subprocess_on_windows = await asyncio.create_subprocess_exec(
                python_path,
                '-m', 'langbot_plugin.cli.__init__', 'rt',
                env=env,
            )

            # hold the process
            self.runtime_subprocess_on_windows_task = asyncio.create_task(self.runtime_subprocess_on_windows.wait())

            ws_url = 'ws://localhost:5400/control/ws'

            async def make_connection_failed_callback(
                ctrl: ws_client_controller.WebSocketClientController,
                exc: Exception = None,
            ) -> None:
                if exc is not None:
                    self.ap.logger.error(f'(windows) Failed to connect to plugin runtime({ws_url}): {exc}')
                else:
                    self.ap.logger.error(f'(windows) Failed to connect to plugin runtime({ws_url}), trying to reconnect...')
                await self.runtime_disconnect_callback(self)

            self.ctrl = ws_client_controller.WebSocketClientController(
                ws_url=ws_url,
                make_connection_failed_callback=make_connection_failed_callback,
            )
            task = self.ctrl.run(new_connection_callback)

        else:  # stdio
            self.ap.logger.info('use stdio to connect to plugin runtime')
            # cmd: lbp rt -s
@@ -106,6 +170,9 @@ class PluginRuntimeConnector:
            )
            task = self.ctrl.run(new_connection_callback)

        if self.heartbeat_task is None:
            self.heartbeat_task = asyncio.create_task(self.heartbeat_loop())

        asyncio.create_task(task)

    async def initialize_plugins(self):
@@ -130,6 +197,25 @@ class PluginRuntimeConnector:
                install_info['plugin_file_key'] = file_key
                del install_info['plugin_file']
                self.ap.logger.info(f'Transfered file {file_key} to plugin runtime')
            elif install_source == PluginInstallSource.GITHUB:
                # download and transfer file
                try:
                    async with httpx.AsyncClient(
                        trust_env=True,
                        follow_redirects=True,
                        timeout=20,
                    ) as client:
                        response = await client.get(
                            install_info['asset_url'],
                        )
                        response.raise_for_status()
                        file_bytes = response.content
                    file_key = await self.handler.send_file(file_bytes, 'lbpkg')
                    install_info['plugin_file_key'] = file_key
                    self.ap.logger.info(f'Transfered file {file_key} to plugin runtime')
                except Exception as e:
                    self.ap.logger.error(f'Failed to download file from GitHub: {e}')
                    raise Exception(f'Failed to download file from GitHub: {e}')

        async for ret in self.handler.install_plugin(install_source.value, install_info):
            current_action = ret.get('current_action', None)
@@ -143,7 +229,10 @@ class PluginRuntimeConnector:
                task_context.trace(trace)

    async def upgrade_plugin(
        self, plugin_author: str, plugin_name: str, task_context: taskmgr.TaskContext | None = None
        self,
        plugin_author: str,
        plugin_name: str,
        task_context: taskmgr.TaskContext | None = None,
    ) -> dict[str, Any]:
        async for ret in self.handler.upgrade_plugin(plugin_author, plugin_name):
            current_action = ret.get('current_action', None)
@@ -157,7 +246,11 @@ class PluginRuntimeConnector:
                task_context.trace(trace)

    async def delete_plugin(
        self, plugin_author: str, plugin_name: str, task_context: taskmgr.TaskContext | None = None
        self,
        plugin_author: str,
        plugin_name: str,
        delete_data: bool = False,
        task_context: taskmgr.TaskContext | None = None,
    ) -> dict[str, Any]:
        async for ret in self.handler.delete_plugin(plugin_author, plugin_name):
            current_action = ret.get('current_action', None)
@@ -170,7 +263,16 @@ class PluginRuntimeConnector:
            if task_context is not None:
                task_context.trace(trace)

        # Clean up plugin settings and binary storage if requested
        if delete_data:
            if task_context is not None:
                task_context.trace('Cleaning up plugin configuration and storage...')
            await self.handler.cleanup_plugin_data(plugin_author, plugin_name)
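A hedged usage sketch of the extended deletion API above; the connector instance and plugin name are assumed for illustration:

# Usage sketch; `connector` is an assumed PluginRuntimeConnector instance.
# delete_data=False keeps settings and binary storage for a later reinstall;
# delete_data=True additionally runs cleanup_plugin_data on the handler.
await connector.delete_plugin('langbot', 'example-plugin', delete_data=True)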
    async def list_plugins(self) -> list[dict[str, Any]]:
        if not self.is_enable_plugin:
            return []

        return await self.handler.list_plugins()
    async def get_plugin_info(self, author: str, plugin_name: str) -> dict[str, Any]:
@@ -186,34 +288,62 @@ class PluginRuntimeConnector:
    async def emit_event(
        self,
        event: events.BaseEventModel,
        bound_plugins: list[str] | None = None,
    ) -> context.EventContext:
        event_ctx = context.EventContext.from_event(event)

        if not self.is_enable_plugin:
            return event_ctx
        event_ctx_result = await self.handler.emit_event(event_ctx.model_dump(serialize_as_any=True))

        # Pass include_plugins to runtime for filtering
        event_ctx_result = await self.handler.emit_event(
            event_ctx.model_dump(serialize_as_any=False), include_plugins=bound_plugins
        )

        event_ctx = context.EventContext.model_validate(event_ctx_result['event_context'])

        return event_ctx

    async def list_tools(self) -> list[ComponentManifest]:
        list_tools_data = await self.handler.list_tools()
    async def list_tools(self, bound_plugins: list[str] | None = None) -> list[ComponentManifest]:
        if not self.is_enable_plugin:
            return []

        return [ComponentManifest.model_validate(tool) for tool in list_tools_data]
        # Pass include_plugins to runtime for filtering
        list_tools_data = await self.handler.list_tools(include_plugins=bound_plugins)

    async def call_tool(self, tool_name: str, parameters: dict[str, Any]) -> dict[str, Any]:
        return await self.handler.call_tool(tool_name, parameters)
        tools = [ComponentManifest.model_validate(tool) for tool in list_tools_data]

    async def list_commands(self) -> list[ComponentManifest]:
        list_commands_data = await self.handler.list_commands()
        return tools

        return [ComponentManifest.model_validate(command) for command in list_commands_data]
    async def call_tool(
        self, tool_name: str, parameters: dict[str, Any], bound_plugins: list[str] | None = None
    ) -> dict[str, Any]:
        if not self.is_enable_plugin:
            return {'error': 'Tool not found: plugin system is disabled'}

        # Pass include_plugins to runtime for validation
        return await self.handler.call_tool(tool_name, parameters, include_plugins=bound_plugins)

    async def list_commands(self, bound_plugins: list[str] | None = None) -> list[ComponentManifest]:
        if not self.is_enable_plugin:
            return []

        # Pass include_plugins to runtime for filtering
        list_commands_data = await self.handler.list_commands(include_plugins=bound_plugins)

        commands = [ComponentManifest.model_validate(command) for command in list_commands_data]

        return commands

    async def execute_command(
        self, command_ctx: command_context.ExecuteContext
        self, command_ctx: command_context.ExecuteContext, bound_plugins: list[str] | None = None
    ) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
        gen = self.handler.execute_command(command_ctx.model_dump(serialize_as_any=True))
        if not self.is_enable_plugin:
            yield command_context.CommandReturn(error=command_errors.CommandNotFoundError(command_ctx.command))
            return

        # Pass include_plugins to runtime for validation
        gen = self.handler.execute_command(command_ctx.model_dump(serialize_as_any=True), include_plugins=bound_plugins)

        async for ret in gen:
            cmd_ret = command_context.CommandReturn.model_validate(ret)
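A hedged sketch of how a caller threads the pipeline's binding list through these connector APIs; the semantics (None meaning "no filtering") are inferred from the code above, and the surrounding objects are assumed fixtures:

# Call-site sketch; `self.ap`, `query`, and `event` are assumed fixtures.
bound_plugins = query.variables.get('_pipeline_bound_plugins', None)  # e.g. ['langbot/example-plugin']
event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)
tools = await self.ap.plugin_connector.list_tools(bound_plugins)
commands = await self.ap.plugin_connector.list_commands(bound_plugins)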
@@ -221,6 +351,13 @@ class PluginRuntimeConnector:
            yield cmd_ret

    def dispose(self):
        # No need to handle shutdown specially on Windows:
        # Windows kills the process together with its subprocess chain

        if self.is_enable_plugin and isinstance(self.ctrl, stdio_client_controller.StdioClientController):
            self.ap.logger.info('Terminating plugin runtime process...')
            self.ctrl.process.terminate()

        if self.heartbeat_task is not None:
            self.heartbeat_task.cancel()
            self.heartbeat_task = None

@@ -56,7 +56,9 @@ class RuntimeConnectionHandler(handler.Handler):
            .where(persistence_plugin.PluginSetting.plugin_name == plugin_name)
        )

        if result.first() is not None:
        setting = result.first()

        if setting is not None:
            # delete plugin setting
            await self.ap.persistence_mgr.execute_async(
                sqlalchemy.delete(persistence_plugin.PluginSetting)
@@ -71,6 +73,10 @@ class RuntimeConnectionHandler(handler.Handler):
                    plugin_name=plugin_name,
                    install_source=install_source,
                    install_info=install_info,
                    # inherit from existing setting
                    enabled=setting.enabled if setting is not None else True,
                    priority=setting.priority if setting is not None else 0,
                    config=setting.config if setting is not None else {},  # noqa: F821
                )
            )
@@ -430,6 +436,25 @@ class RuntimeConnectionHandler(handler.Handler):
            },
        )

        @self.action(RuntimeToLangBotAction.GET_CONFIG_FILE)
        async def get_config_file(data: dict[str, Any]) -> handler.ActionResponse:
            """Get a config file by file key"""
            file_key = data['file_key']

            try:
                # Load file from storage
                file_bytes = await self.ap.storage_mgr.storage_provider.load(file_key)

                return handler.ActionResponse.success(
                    data={
                        'file_base64': base64.b64encode(file_bytes).decode('utf-8'),
                    },
                )
            except Exception as e:
                return handler.ActionResponse.error(
                    message=f'Failed to load config file {file_key}: {e}',
                )

    async def ping(self) -> dict[str, Any]:
        """Ping the runtime"""
        return await self.call_action(
@@ -529,24 +554,28 @@ class RuntimeConnectionHandler(handler.Handler):
    async def emit_event(
        self,
        event_context: dict[str, Any],
        include_plugins: list[str] | None = None,
    ) -> dict[str, Any]:
        """Emit event"""
        result = await self.call_action(
            LangBotToRuntimeAction.EMIT_EVENT,
            {
                'event_context': event_context,
                'include_plugins': include_plugins,
            },
            timeout=30,
            timeout=60,
        )

        return result

    async def list_tools(self) -> list[dict[str, Any]]:
    async def list_tools(self, include_plugins: list[str] | None = None) -> list[dict[str, Any]]:
        """List tools"""
        result = await self.call_action(
            LangBotToRuntimeAction.LIST_TOOLS,
            {},
            timeout=10,
            {
                'include_plugins': include_plugins,
            },
            timeout=20,
        )

        return result['tools']
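The GET_CONFIG_FILE action above implies a counterpart call from the plugin runtime; a hedged sketch of what that might look like (the `call_action` helper and the file key value are hypothetical, only the action name and field names come from the handler above):

# Hypothetical runtime-side call; field names match the handler above.
resp = await call_action(RuntimeToLangBotAction.GET_CONFIG_FILE, {'file_key': 'plugin_configs/example.json'})
file_bytes = base64.b64decode(resp['file_base64'])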
@@ -573,36 +602,61 @@ class RuntimeConnectionHandler(handler.Handler):
            'mime_type': mime_type,
        }

    async def call_tool(self, tool_name: str, parameters: dict[str, Any]) -> dict[str, Any]:
    async def cleanup_plugin_data(self, plugin_author: str, plugin_name: str) -> None:
        """Cleanup plugin settings and binary storage"""
        # Delete plugin settings
        await self.ap.persistence_mgr.execute_async(
            sqlalchemy.delete(persistence_plugin.PluginSetting)
            .where(persistence_plugin.PluginSetting.plugin_author == plugin_author)
            .where(persistence_plugin.PluginSetting.plugin_name == plugin_name)
        )

        # Delete all binary storage for this plugin
        owner = f'{plugin_author}/{plugin_name}'
        await self.ap.persistence_mgr.execute_async(
            sqlalchemy.delete(persistence_bstorage.BinaryStorage)
            .where(persistence_bstorage.BinaryStorage.owner_type == 'plugin')
            .where(persistence_bstorage.BinaryStorage.owner == owner)
        )

    async def call_tool(
        self, tool_name: str, parameters: dict[str, Any], include_plugins: list[str] | None = None
    ) -> dict[str, Any]:
        """Call tool"""
        result = await self.call_action(
            LangBotToRuntimeAction.CALL_TOOL,
            {
                'tool_name': tool_name,
                'tool_parameters': parameters,
                'include_plugins': include_plugins,
            },
            timeout=30,
            timeout=60,
        )

        return result['tool_response']

    async def list_commands(self) -> list[dict[str, Any]]:
    async def list_commands(self, include_plugins: list[str] | None = None) -> list[dict[str, Any]]:
        """List commands"""
        result = await self.call_action(
            LangBotToRuntimeAction.LIST_COMMANDS,
            {},
            {
                'include_plugins': include_plugins,
            },
            timeout=10,
        )
        return result['commands']

    async def execute_command(self, command_context: dict[str, Any]) -> typing.AsyncGenerator[dict[str, Any], None]:
    async def execute_command(
        self, command_context: dict[str, Any], include_plugins: list[str] | None = None
    ) -> typing.AsyncGenerator[dict[str, Any], None]:
        """Execute command"""
        gen = self.call_action_generator(
            LangBotToRuntimeAction.EXECUTE_COMMAND,
            {
                'command_context': command_context,
                'include_plugins': include_plugins,
            },
            timeout=30,
            timeout=60,
        )

        async for ret in gen:
@@ -59,7 +59,7 @@ class ModelManager:
            try:
                await self.load_llm_model(llm_model)
            except provider_errors.RequesterNotFoundError as e:
                self.ap.logger.warning(f'Requester {e.requester_name} not found, skipping model {llm_model.uuid}')
                self.ap.logger.warning(f'Requester {e.requester_name} not found, skipping llm model {llm_model.uuid}')
            except Exception as e:
                self.ap.logger.error(f'Failed to load model {llm_model.uuid}: {e}\n{traceback.format_exc()}')

@@ -67,7 +67,14 @@ class ModelManager:
        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_model.EmbeddingModel))
        embedding_models = result.all()
        for embedding_model in embedding_models:
            await self.load_embedding_model(embedding_model)
            try:
                await self.load_embedding_model(embedding_model)
            except provider_errors.RequesterNotFoundError as e:
                self.ap.logger.warning(
                    f'Requester {e.requester_name} not found, skipping embedding model {embedding_model.uuid}'
                )
            except Exception as e:
                self.ap.logger.error(f'Failed to load model {embedding_model.uuid}: {e}\n{traceback.format_exc()}')

    async def init_runtime_llm_model(
        self,
@@ -107,6 +114,9 @@ class ModelManager:
        elif isinstance(model_info, dict):
            model_info = persistence_model.EmbeddingModel(**model_info)

        if model_info.requester not in self.requester_dict:
            raise provider_errors.RequesterNotFoundError(model_info.requester)

        requester_inst = self.requester_dict[model_info.requester](ap=self.ap, config=model_info.requester_config)

        await requester_inst.initialize()
@@ -1,9 +1,14 @@
from __future__ import annotations

import typing
import dashscope
import openai

from . import modelscopechatcmpl
from .. import requester
import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
import langbot_plugin.api.entities.builtin.provider.message as provider_message


class BailianChatCompletions(modelscopechatcmpl.ModelScopeChatCompletions):
@@ -15,3 +20,211 @@ class BailianChatCompletions(modelscopechatcmpl.ModelScopeChatCompletions):
        'base_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
        'timeout': 120,
    }
    async def _closure_stream(
        self,
        query: pipeline_query.Query,
        req_messages: list[dict],
        use_model: requester.RuntimeLLMModel,
        use_funcs: list[resource_tool.LLMTool] = None,
        extra_args: dict[str, typing.Any] = {},
        remove_think: bool = False,
    ) -> provider_message.Message | typing.AsyncGenerator[provider_message.MessageChunk, None]:
        self.client.api_key = use_model.token_mgr.get_token()

        args = {}
        args['model'] = use_model.model_entity.name

        if use_funcs:
            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)

            if tools:
                args['tools'] = tools

        # Set the messages for this request
        messages = req_messages.copy()

        is_use_dashscope_call = False  # whether to call through Alibaba's native library
        is_enable_multi_model = True  # whether multi-turn conversation is supported
        use_time_num = 0  # number of model invocations, to avoid duplicate calls when multiple files exist
        use_time_ids = []  # IDs already invoked
        message_id = 0  # message sequence number

        for msg in messages:
            # print(msg)
            if 'content' in msg and isinstance(msg['content'], list):
                for me in msg['content']:
                    if me['type'] == 'image_base64':
                        me['image_url'] = {'url': me['image_base64']}
                        me['type'] = 'image_url'
                        del me['image_base64']
                    elif me['type'] == 'file_url' and '.' in me.get('file_name', ''):
                        # 1. Video file inference
                        # https://bailian.console.aliyun.com/?tab=doc#/doc/?type=model&url=2845871
                        file_type = me.get('file_name').lower().split('.')[-1]
                        if file_type in ['mp4', 'avi', 'mkv', 'mov', 'flv', 'wmv']:
                            me['type'] = 'video_url'
                            me['video_url'] = {'url': me['file_url']}
                            del me['file_url']
                            del me['file_name']
                            use_time_num += 1
                            use_time_ids.append(message_id)
                            is_enable_multi_model = False
                        # 2. Audio file recognition; cannot be passed via OpenAI's audio field, not supported for now
                        # https://bailian.console.aliyun.com/?tab=doc#/doc/?type=model&url=2979031
                        elif file_type in ['aac', 'amr', 'aiff', 'flac', 'm4a',
                                           'mp3', 'mpeg', 'ogg', 'opus', 'wav', 'webm', 'wma']:
                            me['audio'] = me['file_url']
                            me['type'] = 'audio'
                            del me['file_url']
                            del me['type']
                            del me['file_name']
                            is_use_dashscope_call = True
                            use_time_num += 1
                            use_time_ids.append(message_id)
                            is_enable_multi_model = False
            message_id += 1

        # Use a list comprehension to keep elements not in use_time_ids[:-1], i.e. keep only the last multimedia message
        if not is_enable_multi_model and use_time_num > 1:
            messages = [msg for idx, msg in enumerate(messages) if idx not in use_time_ids[:-1]]

        if not is_enable_multi_model:
            messages = [msg for msg in messages if 'resp_message_id' not in msg]

        args['messages'] = messages
        args['stream'] = True

        # Streaming state
        # tool_calls_map: dict[str, provider_message.ToolCall] = {}
        chunk_idx = 0
        thinking_started = False
        thinking_ended = False
        role = 'assistant'  # default role

        if is_use_dashscope_call:
            response = dashscope.MultiModalConversation.call(
                # If the environment variable is not configured, replace the line below with your Bailian API key: api_key = "sk-xxx"
                api_key=use_model.token_mgr.get_token(),
                model=use_model.model_entity.name,
                messages=messages,
                result_format="message",
                asr_options={
                    # "language": "zh",  # optional; if the audio language is known, set it to improve recognition accuracy
                    "enable_lid": True,
                    "enable_itn": False
                },
                stream=True
            )
            content_length_list = []
            previous_length = 0  # length of the content seen so far
            for res in response:
                chunk = res["output"]
                # Parse the chunk data
                if hasattr(chunk, 'choices') and chunk.choices:
                    choice = chunk.choices[0]
                    delta_content = choice["message"].content[0]["text"]
                    finish_reason = choice["finish_reason"]
                    content_length_list.append(len(delta_content))
                else:
                    delta_content = ""
                    finish_reason = None

                # Skip an empty first chunk (role only, no content)
                if chunk_idx == 0 and not delta_content:
                    chunk_idx += 1
                    continue

                # Check whether content_length_list has enough data
                if len(content_length_list) >= 2:
                    now_content = delta_content[previous_length: content_length_list[-1]]
                    previous_length = content_length_list[-1]  # update the previous length
                else:
                    now_content = delta_content  # first iteration: use delta_content directly
                    previous_length = len(delta_content)  # update the previous length

                # Build the MessageChunk - incremental content only
                chunk_data = {
                    'role': role,
                    'content': now_content if now_content else None,
                    'is_final': bool(finish_reason) and finish_reason != "null",
                }

                # Drop None values
                chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
                yield provider_message.MessageChunk(**chunk_data)
                chunk_idx += 1
        else:
            async for chunk in self._req_stream(args, extra_body=extra_args):
                # Parse the chunk data
                if hasattr(chunk, 'choices') and chunk.choices:
                    choice = chunk.choices[0]
                    delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
                    finish_reason = getattr(choice, 'finish_reason', None)
                else:
                    delta = {}
                    finish_reason = None

                # Take the role from the first chunk and reuse it afterwards
                if 'role' in delta and delta['role']:
                    role = delta['role']

                # Get the incremental content
                delta_content = delta.get('content', '')
                reasoning_content = delta.get('reasoning_content', '')

                # Handle reasoning_content
                if reasoning_content:
                    # accumulated_reasoning += reasoning_content
                    # If remove_think is set, skip reasoning_content
                    if remove_think:
                        chunk_idx += 1
                        continue

                    # First appearance of reasoning_content: add the opening <think> tag
                    if not thinking_started:
                        thinking_started = True
                        delta_content = '<think>\n' + reasoning_content
                    else:
                        # Keep emitting reasoning_content
                        delta_content = reasoning_content
                elif thinking_started and not thinking_ended and delta_content:
                    # reasoning_content ended and normal content begins: add the closing </think> tag
                    thinking_ended = True
                    delta_content = '\n</think>\n' + delta_content

                # Handle incremental tool calls
                if delta.get('tool_calls'):
                    for tool_call in delta['tool_calls']:
                        if tool_call['id'] != '':
                            tool_id = tool_call['id']
                        if tool_call['function']['name'] is not None:
                            tool_name = tool_call['function']['name']

                        if tool_call['type'] is None:
                            tool_call['type'] = 'function'
                        tool_call['id'] = tool_id
                        tool_call['function']['name'] = tool_name
                        tool_call['function']['arguments'] = (
                            '' if tool_call['function']['arguments'] is None else tool_call['function']['arguments']
                        )

                # Skip an empty first chunk (role only, no content)
                if chunk_idx == 0 and not delta_content and not reasoning_content and not delta.get('tool_calls'):
                    chunk_idx += 1
                    continue

                # Build the MessageChunk - incremental content only
                chunk_data = {
                    'role': role,
                    'content': delta_content if delta_content else None,
                    'tool_calls': delta.get('tool_calls'),
                    'is_final': bool(finish_reason),
                }

                # Drop None values
                chunk_data = {k: v for k, v in chunk_data.items() if v is not None}

                yield provider_message.MessageChunk(**chunk_data)
                chunk_idx += 1
        # return
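For readability, an illustrative chunk sequence the OpenAI-compatible branch above yields when the model streams reasoning before its answer (contents are invented):

# Illustrative only; the payloads are made up.
# MessageChunk(role='assistant', content='<think>\noutline the reply')   # reasoning begins
# MessageChunk(role='assistant', content=' and check the facts')         # reasoning continues
# MessageChunk(role='assistant', content='\n</think>\nHello!')           # normal content begins
# MessageChunk(role='assistant', is_final=True)                          # finish_reason was set
# With remove_think=True the reasoning chunks are skipped entirely.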
pkg/provider/modelmgr/requesters/tokenpony.svg (new file, 1 line)
@@ -0,0 +1 @@
[SVG asset: TokenPony logo, 450x280 viewBox, ~3.6 KiB of vector path data omitted]

pkg/provider/modelmgr/requesters/tokenpony.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
apiVersion: v1
kind: LLMAPIRequester
metadata:
  name: tokenpony-chat-completions
  label:
    en_US: TokenPony
    zh_Hans: 小马算力
  icon: tokenpony.svg
spec:
  config:
    - name: base_url
      label:
        en_US: Base URL
        zh_Hans: 基础 URL
      type: string
      required: true
      default: "https://api.tokenpony.cn/v1"
    - name: timeout
      label:
        en_US: Timeout
        zh_Hans: 超时时间
      type: integer
      required: true
      default: 120
  support_type:
    - llm
    - text-embedding
execution:
  python:
    path: ./tokenponychatcmpl.py
    attr: TokenPonyChatCompletions

pkg/provider/modelmgr/requesters/tokenponychatcmpl.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from __future__ import annotations

import typing
import openai

from . import chatcmpl


class TokenPonyChatCompletions(chatcmpl.OpenAIChatCompletions):
    """TokenPony ChatCompletion API requester"""

    client: openai.AsyncClient

    default_config: dict[str, typing.Any] = {
        'base_url': 'https://api.tokenpony.cn/v1',
        'timeout': 120,
    }

pkg/provider/runners/cozeapi.py (new file, 313 lines)
@@ -0,0 +1,313 @@
from __future__ import annotations

import typing
import json
import uuid
import base64

from .. import runner
from ...core import app
import langbot_plugin.api.entities.builtin.provider.message as provider_message
from ...utils import image
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
from libs.coze_server_api.client import AsyncCozeAPIClient


@runner.runner_class('coze-api')
class CozeAPIRunner(runner.RequestRunner):
    """Coze API conversation runner"""

    def __init__(self, ap: app.Application, pipeline_config: dict):
        self.pipeline_config = pipeline_config
        self.ap = ap
        self.agent_token = pipeline_config["ai"]['coze-api']['api-key']
        self.bot_id = pipeline_config["ai"]['coze-api'].get('bot-id')
        self.chat_timeout = pipeline_config["ai"]['coze-api'].get('timeout')
        self.auto_save_history = pipeline_config["ai"]['coze-api'].get('auto_save_history')
        self.api_base = pipeline_config["ai"]['coze-api'].get('api-base')

        self.coze = AsyncCozeAPIClient(
            self.agent_token,
            self.api_base
        )

    def _process_thinking_content(
        self,
        content: str,
    ) -> tuple[str, str]:
        """Process chain-of-thought content

        Args:
            content: Raw content
        Returns:
            (processed content, extracted chain-of-thought content)
        """
        remove_think = self.pipeline_config.get('output', {}).get('misc', {}).get('remove-think', False)
        thinking_content = ''
        # Extract <think> tag content from content
        if content and '<think>' in content and '</think>' in content:
            import re

            think_pattern = r'<think>(.*?)</think>'
            think_matches = re.findall(think_pattern, content, re.DOTALL)
            if think_matches:
                thinking_content = '\n'.join(think_matches)
                # Strip the <think> tags from content
                content = re.sub(think_pattern, '', content, flags=re.DOTALL).strip()

        # Keep or drop the chain of thought depending on remove_think
        if remove_think:
            return content, ''
        else:
            # If there is chain-of-thought content, prepend it to content in <think> format
            if thinking_content:
                content = f'<think>\n{thinking_content}\n</think>\n{content}'.strip()
            return content, thinking_content
async def _preprocess_user_message(self, query: pipeline_query.Query) -> list[dict]:
|
||||
"""预处理用户消息,转换为Coze消息格式
|
||||
|
||||
Returns:
|
||||
list[dict]: Coze消息列表
|
||||
"""
|
||||
messages = []
|
||||
|
||||
if isinstance(query.user_message.content, list):
|
||||
# 多模态消息处理
|
||||
content_parts = []
|
||||
|
||||
for ce in query.user_message.content:
|
||||
if ce.type == 'text':
|
||||
content_parts.append({"type": "text", "text": ce.text})
|
||||
elif ce.type == 'image_base64':
|
||||
image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
|
||||
file_bytes = base64.b64decode(image_b64)
|
||||
file_id = await self._get_file_id(file_bytes)
|
||||
content_parts.append({"type": "image", "file_id": file_id})
|
||||
elif ce.type == 'file':
|
||||
# 处理文件,上传到Coze
|
||||
file_id = await self._get_file_id(ce.file)
|
||||
content_parts.append({"type": "file", "file_id": file_id})
|
||||
|
||||
# 创建多模态消息
|
||||
if content_parts:
|
||||
messages.append({
|
||||
"role": "user",
|
||||
"content": json.dumps(content_parts),
|
||||
"content_type": "object_string",
|
||||
"meta_data": None
|
||||
})
|
||||
|
||||
elif isinstance(query.user_message.content, str):
|
||||
# 纯文本消息
|
||||
messages.append({
|
||||
"role": "user",
|
||||
"content": query.user_message.content,
|
||||
"content_type": "text",
|
||||
"meta_data": None
|
||||
})
|
||||
|
||||
return messages
|
||||
|
||||
async def _get_file_id(self, file) -> str:
|
||||
"""上传文件到Coze服务
|
||||
Args:
|
||||
file: 文件
|
||||
Returns:
|
||||
str: 文件ID
|
||||
"""
|
||||
file_id = await self.coze.upload(file=file)
|
||||
return file_id
|
||||
|
||||
async def _chat_messages(
|
||||
self, query: pipeline_query.Query
|
||||
) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""调用聊天助手(非流式)
|
||||
|
||||
注意:由于cozepy没有提供非流式API,这里使用流式API并在结束后一次性返回完整内容
|
||||
"""
|
||||
user_id = f'{query.launcher_id}_{query.sender_id}'
|
||||
|
||||
# 预处理用户消息
|
||||
additional_messages = await self._preprocess_user_message(query)
|
||||
|
||||
# 获取会话ID
|
||||
conversation_id = None
|
||||
|
||||
# 收集完整内容
|
||||
full_content = ''
|
||||
full_reasoning = ''
|
||||
|
||||
try:
|
||||
# 调用Coze API流式接口
|
||||
async for chunk in self.coze.chat_messages(
|
||||
bot_id=self.bot_id,
|
||||
user_id=user_id,
|
||||
additional_messages=additional_messages,
|
||||
conversation_id=conversation_id,
|
||||
timeout=self.chat_timeout,
|
||||
auto_save_history=self.auto_save_history,
|
||||
stream=True
|
||||
):
|
||||
self.ap.logger.debug(f'coze-chat-stream: {chunk}')
|
||||
|
||||
event_type = chunk.get('event')
|
||||
data = chunk.get('data', {})
|
||||
# Removed debug print statement to avoid cluttering logs in production
|
||||
|
||||
if event_type == 'conversation.message.delta':
|
||||
# 收集内容
|
||||
if 'content' in data:
|
||||
full_content += data.get('content', '')
|
||||
|
||||
# 收集推理内容(如果有)
|
||||
if 'reasoning_content' in data:
|
||||
full_reasoning += data.get('reasoning_content', '')
|
||||
|
||||
elif event_type.split(".")[-1] == 'done' : # 本地部署coze时,结束event不为done
|
||||
# 保存会话ID
|
||||
if 'conversation_id' in data:
|
||||
conversation_id = data.get('conversation_id')
|
||||
|
||||
elif event_type == 'error':
|
||||
# 处理错误
|
||||
error_msg = f"Coze API错误: {data.get('message', '未知错误')}"
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=error_msg,
|
||||
)
|
||||
return
|
||||
|
||||
# 处理思维链内容
|
||||
content, thinking_content = self._process_thinking_content(full_content)
|
||||
if full_reasoning:
|
||||
remove_think = self.pipeline_config.get('output', {}).get('misc', {}).get('remove-think', False)
|
||||
if not remove_think:
|
||||
content = f'<think>\n{full_reasoning}\n</think>\n{content}'.strip()
|
||||
|
||||
# 一次性返回完整内容
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=content,
|
||||
)
|
||||
|
||||
# 保存会话ID
|
||||
if conversation_id and query.session.using_conversation:
|
||||
query.session.using_conversation.uuid = conversation_id
|
||||
|
||||
except Exception as e:
|
||||
self.ap.logger.error(f'Coze API错误: {str(e)}')
|
||||
yield provider_message.Message(
|
||||
role='assistant',
|
||||
content=f'Coze API调用失败: {str(e)}',
|
||||
)
|
||||
|
||||
|
||||
async def _chat_messages_chunk(
|
||||
self, query: pipeline_query.Query
|
||||
) -> typing.AsyncGenerator[provider_message.MessageChunk, None]:
|
||||
"""调用聊天助手(流式)"""
|
||||
user_id = f'{query.launcher_id}_{query.sender_id}'
|
||||
|
||||
# 预处理用户消息
|
||||
additional_messages = await self._preprocess_user_message(query)
|
||||
|
||||
# 获取会话ID
|
||||
conversation_id = None
|
||||
|
||||
start_reasoning = False
|
||||
stop_reasoning = False
|
||||
message_idx = 1
|
||||
is_final = False
|
||||
full_content = ''
|
||||
remove_think = self.pipeline_config.get('output', {}).get('misc', {}).get('remove-think', False)
|
||||
|
||||
|
||||
|
||||
try:
|
||||
# 调用Coze API流式接口
|
||||
async for chunk in self.coze.chat_messages(
|
||||
bot_id=self.bot_id,
|
||||
user_id=user_id,
|
||||
additional_messages=additional_messages,
|
||||
conversation_id=conversation_id,
|
||||
timeout=self.chat_timeout,
|
||||
auto_save_history=self.auto_save_history,
|
||||
stream=True
|
||||
):
|
||||
self.ap.logger.debug(f'coze-chat-stream-chunk: {chunk}')
|
||||
|
||||
event_type = chunk.get('event')
|
||||
data = chunk.get('data', {})
|
||||
content = ""
|
||||
|
||||
if event_type == 'conversation.message.delta':
|
||||
message_idx += 1
|
||||
# 处理内容增量
|
||||
if "reasoning_content" in data and not remove_think:
|
||||
|
||||
reasoning_content = data.get('reasoning_content', '')
|
||||
if reasoning_content and not start_reasoning:
|
||||
content = f"<think/>\n"
|
||||
start_reasoning = True
|
||||
content += reasoning_content
|
||||
|
||||
if 'content' in data:
|
||||
if data.get('content', ''):
|
||||
content += data.get('content', '')
|
||||
if not stop_reasoning and start_reasoning:
|
||||
content = f"</think>\n{content}"
|
||||
stop_reasoning = True
|
||||
|
||||
|
||||
elif event_type.split(".")[-1] == 'done' : # 本地部署coze时,结束event不为done
|
||||
# 保存会话ID
|
||||
if 'conversation_id' in data:
|
||||
conversation_id = data.get('conversation_id')
|
||||
if query.session.using_conversation:
|
||||
query.session.using_conversation.uuid = conversation_id
|
||||
is_final = True
|
||||
|
||||
|
||||
elif event_type == 'error':
|
||||
# 处理错误
|
||||
error_msg = f"Coze API错误: {data.get('message', '未知错误')}"
|
||||
yield provider_message.MessageChunk(
|
||||
role='assistant',
|
||||
content=error_msg,
|
||||
finish_reason='error'
|
||||
)
|
||||
return
|
||||
full_content += content
|
||||
if message_idx % 8 == 0 or is_final:
|
||||
if full_content:
|
||||
yield provider_message.MessageChunk(
|
||||
role='assistant',
|
||||
content=full_content,
|
||||
is_final=is_final
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.ap.logger.error(f'Coze API流式调用错误: {str(e)}')
|
||||
yield provider_message.MessageChunk(
|
||||
role='assistant',
|
||||
content=f'Coze API流式调用失败: {str(e)}',
|
||||
finish_reason='error'
|
||||
)
|
||||
|
||||
|
||||
async def run(self, query: pipeline_query.Query) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||
"""运行"""
|
||||
msg_seq = 0
|
||||
if await query.adapter.is_stream_output_supported():
|
||||
async for msg in self._chat_messages_chunk(query):
|
||||
if isinstance(msg, provider_message.MessageChunk):
|
||||
msg_seq += 1
|
||||
msg.msg_sequence = msg_seq
|
||||
yield msg
|
||||
else:
|
||||
async for msg in self._chat_messages(query):
|
||||
yield msg
|
||||
|
||||
|
||||
|
||||
|
||||
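To make the remove-think behaviour concrete, here is a minimal standalone reproduction of the tag-extraction step (plain `re`, no LangBot imports), useful for sanity-checking the regex:

```python
import re

THINK_PATTERN = r'<think>(.*?)</think>'

def split_think(content: str) -> tuple[str, str]:
    """Return (visible content, extracted thinking) the same way the runner does."""
    matches = re.findall(THINK_PATTERN, content, re.DOTALL)
    thinking = '\n'.join(matches)
    visible = re.sub(THINK_PATTERN, '', content, flags=re.DOTALL).strip()
    return visible, thinking

# Example: the reasoning block is separated from the answer
visible, thinking = split_think('<think>check the docs</think>The answer is 42.')
assert visible == 'The answer is 42.'
assert thinking == 'check the docs'
```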
205  pkg/provider/runners/tboxapi.py  Normal file
@@ -0,0 +1,205 @@
from __future__ import annotations

import typing
import json
import base64
import tempfile
import os

from tboxsdk.tbox import TboxClient
from tboxsdk.model.file import File, FileType

from .. import runner
from ...core import app
from ...utils import image
import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
import langbot_plugin.api.entities.builtin.provider.message as provider_message


class TboxAPIError(Exception):
    """TBox API request failed"""

    def __init__(self, message: str):
        self.message = message
        super().__init__(self.message)


@runner.runner_class('tbox-app-api')
class TboxAPIRunner(runner.RequestRunner):
    """Ant Tbox (蚂蚁百宝箱) API conversation runner"""

    # Configuration used internally by the runner
    app_id: str  # Application ID on the Tbox platform
    api_key: str  # Token issued by the Tbox platform

    def __init__(self, ap: app.Application, pipeline_config: dict):
        """Initialize"""
        self.ap = ap
        self.pipeline_config = pipeline_config

        # Initialize Tbox parameters
        self.app_id = self.pipeline_config['ai']['tbox-app-api']['app-id']
        self.api_key = self.pipeline_config['ai']['tbox-app-api']['api-key']

        # Initialize the Tbox client
        self.tbox_client = TboxClient(authorization=self.api_key)

    async def _preprocess_user_message(self, query: pipeline_query.Query) -> tuple[str, list[str]]:
        """Preprocess the user message: extract plain text and upload images to the Tbox service.

        Returns:
            tuple[str, list[str]]: plain text and the Tbox file IDs of the images
        """
        plain_text = ''
        image_ids = []

        if isinstance(query.user_message.content, list):
            for ce in query.user_message.content:
                if ce.type == 'text':
                    plain_text += ce.text
                elif ce.type == 'image_base64':
                    image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
                    # Write the image to a temporary file
                    file_bytes = base64.b64decode(image_b64)
                    try:
                        with tempfile.NamedTemporaryFile(suffix=f'.{image_format}', delete=False) as tmp_file:
                            tmp_file.write(file_bytes)
                            tmp_file_path = tmp_file.name
                        file_upload_resp = self.tbox_client.upload_file(tmp_file_path)
                        image_id = file_upload_resp.get('data', '')
                        image_ids.append(image_id)
                    finally:
                        # Clean up the temporary file
                        if os.path.exists(tmp_file_path):
                            os.unlink(tmp_file_path)
        elif isinstance(query.user_message.content, str):
            plain_text = query.user_message.content

        return plain_text, image_ids

    async def _agent_messages(
        self, query: pipeline_query.Query
    ) -> typing.AsyncGenerator[provider_message.Message, None]:
        """TBox agent conversation request"""

        plain_text, image_ids = await self._preprocess_user_message(query)
        remove_think = self.pipeline_config['output'].get('misc', {}).get('remove-think')

        try:
            is_stream = await query.adapter.is_stream_output_supported()
        except AttributeError:
            is_stream = False

        # Get the Tbox conversation_id
        conversation_id = query.session.using_conversation.uuid or None

        files = None
        if image_ids:
            files = [File(file_id=image_id, type=FileType.IMAGE) for image_id in image_ids]

        # Send the conversation request
        response = self.tbox_client.chat(
            app_id=self.app_id,  # ID of the agent application in Tbox
            user_id=query.bot_uuid,  # user ID
            query=plain_text,  # text entered by the user
            stream=is_stream,  # whether to stream the output
            conversation_id=conversation_id,  # conversation ID; if None, Tbox creates a new conversation
            files=files,  # image content
        )

        if is_stream:
            # Parse the Tbox streaming output and forward it upstream
            for chunk in self._process_stream_message(response, query, remove_think):
                yield chunk
        else:
            message = self._process_non_stream_message(response, query, remove_think)
            yield provider_message.Message(
                role='assistant',
                content=message,
            )

    def _process_non_stream_message(self, response: typing.Dict, query: pipeline_query.Query, remove_think: bool):
        if response.get('errorCode') != '0':
            raise TboxAPIError(f'Tbox API request failed: {response.get("errorMsg", "")}')
        payload = response.get('data', {})
        conversation_id = payload.get('conversationId', '')
        query.session.using_conversation.uuid = conversation_id
        thinking_content = payload.get('reasoningContent', [])
        result = ''
        if thinking_content and not remove_think:
            result += f'<think>\n{thinking_content[0].get("text", "")}\n</think>\n'
        content = payload.get('result', [])
        if content:
            result += content[0].get('chunk', '')
        return result

    def _process_stream_message(self, response: typing.Generator[dict, None, None], query: pipeline_query.Query, remove_think: bool):
        idx_msg = 0
        pending_content = ''
        conversation_id = None
        think_start = False
        think_end = False
        for chunk in response:
            if chunk.get('type', '') == 'chunk':
                """
                Structure of a content chunk returned by Tbox:
                {'lane': 'default', 'payload': {'conversationId': '20250918tBI947065406', 'messageId': '20250918TB1f53230954', 'text': '️'}, 'type': 'chunk'}
                """
                # If a thinking section was opened, close it with </think>
                if think_start and not think_end:
                    pending_content += '\n</think>\n'
                    think_end = True

                payload = chunk.get('payload', {})
                if not conversation_id:
                    conversation_id = payload.get('conversationId')
                    query.session.using_conversation.uuid = conversation_id
                if payload.get('text'):
                    idx_msg += 1
                    pending_content += payload.get('text')
            elif chunk.get('type', '') == 'thinking' and not remove_think:
                """
                Structure of a thinking chunk returned by Tbox:
                {'payload': '{"ext_data":{"text":"日期"},"event":"flow.node.llm.thinking","entity":{"node_type":"text-completion","execute_id":"6","group_id":0,"parent_execute_id":"6","node_name":"模型推理","node_id":"TC_5u6gl0"}}', 'type': 'thinking'}
                """
                payload = json.loads(chunk.get('payload', '{}'))
                if payload.get('ext_data', {}).get('text'):
                    idx_msg += 1
                    content = payload.get('ext_data', {}).get('text')
                    if not think_start:
                        think_start = True
                        pending_content += f'<think>\n{content}'
                    else:
                        pending_content += content
            elif chunk.get('type', '') == 'error':
                raise TboxAPIError(
                    f'Tbox API request failed: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")}'
                )

            if idx_msg % 8 == 0:
                yield provider_message.MessageChunk(
                    role='assistant',
                    content=pending_content,
                    is_final=False,
                )

        # Tbox does not emit an END event, so always send a final message
        yield provider_message.MessageChunk(
            role='assistant',
            content=pending_content,
            is_final=True,
        )

    async def run(self, query: pipeline_query.Query) -> typing.AsyncGenerator[provider_message.Message, None]:
        """Run"""
        msg_seq = 0
        async for msg in self._agent_messages(query):
            if isinstance(msg, provider_message.MessageChunk):
                msg_seq += 1
                msg.msg_sequence = msg_seq
            yield msg
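Both runners flush streamed text every eighth delta rather than on every token, trading a little latency for far fewer upstream messages. A minimal sketch of that batching pattern over a generic delta stream (the generator shape is assumed; this is not the SDK's API):

```python
from typing import Generator, Iterable

def batch_stream(deltas: Iterable[str], every: int = 8) -> Generator[str, None, None]:
    """Accumulate deltas and emit the running text every `every` chunks, plus a final flush."""
    pending = ''
    for idx, delta in enumerate(deltas, start=1):
        pending += delta
        if idx % every == 0:
            yield pending  # intermediate snapshot of everything so far
    yield pending  # final message, mirroring the is_final=True chunk

# Example: 10 one-character deltas produce one intermediate and one final snapshot
snapshots = list(batch_stream('abcdefghij'))
assert snapshots == ['abcdefgh', 'abcdefghij']
```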
@@ -35,7 +35,7 @@ class ToolLoader(abc.ABC):
         pass
 
     @abc.abstractmethod
-    async def get_tools(self) -> list[resource_tool.LLMTool]:
+    async def get_tools(self, bound_plugins: list[str] | None = None) -> list[resource_tool.LLMTool]:
         """Get all tools"""
         pass
@@ -1,7 +1,11 @@
 from __future__ import annotations
 
+import enum
 import typing
 from contextlib import AsyncExitStack
+import traceback
+import sqlalchemy
+import asyncio
 
 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client
@@ -10,6 +14,13 @@ from mcp.client.sse import sse_client
 from .. import loader
 from ....core import app
 import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
+from ....entity.persistence import mcp as persistence_mcp
+
+
+class MCPSessionStatus(enum.Enum):
+    CONNECTING = 'connecting'
+    CONNECTED = 'connected'
+    ERROR = 'error'
@@ -19,6 +30,8 @@ class RuntimeMCPSession:
 
     server_name: str
 
+    server_uuid: str
+
     server_config: dict
 
     session: ClientSession
@@ -27,16 +40,34 @@ class RuntimeMCPSession:
 
     functions: list[resource_tool.LLMTool] = []
 
-    def __init__(self, server_name: str, server_config: dict, ap: app.Application):
+    enable: bool
+
+    # connected: bool
+    status: MCPSessionStatus
+
+    _lifecycle_task: asyncio.Task | None
+
+    _shutdown_event: asyncio.Event
+
+    _ready_event: asyncio.Event
+
+    def __init__(self, server_name: str, server_config: dict, enable: bool, ap: app.Application):
         self.server_name = server_name
+        self.server_uuid = server_config.get('uuid', '')
         self.server_config = server_config
         self.ap = ap
+        self.enable = enable
         self.session = None
         self.exit_stack = AsyncExitStack()
         self.functions = []
+
+        self.status = MCPSessionStatus.CONNECTING
+
+        self._lifecycle_task = None
+        self._shutdown_event = asyncio.Event()
+        self._ready_event = asyncio.Event()
 
     async def _init_stdio_python_server(self):
         server_params = StdioServerParameters(
             command=self.server_config['command'],
@@ -58,6 +89,7 @@ class RuntimeMCPSession:
                 self.server_config['url'],
                 headers=self.server_config.get('headers', {}),
                 timeout=self.server_config.get('timeout', 10),
+                sse_read_timeout=self.server_config.get('ssereadtimeout', 30),
             )
         )
@@ -67,19 +99,65 @@ class RuntimeMCPSession:
 
         await self.session.initialize()
 
-    async def initialize(self):
-        self.ap.logger.debug(f'初始化 MCP 会话: {self.server_name} {self.server_config}')
-
-        if self.server_config['mode'] == 'stdio':
-            await self._init_stdio_python_server()
-        elif self.server_config['mode'] == 'sse':
-            await self._init_sse_server()
-        else:
-            raise ValueError(f'无法识别 MCP 服务器类型: {self.server_name}: {self.server_config}')
+    async def _lifecycle_loop(self):
+        """Manage the whole MCP session lifecycle in a single background task"""
+        try:
+            if self.server_config['mode'] == 'stdio':
+                await self._init_stdio_python_server()
+            elif self.server_config['mode'] == 'sse':
+                await self._init_sse_server()
+            else:
+                raise ValueError(f'Unrecognized MCP server type: {self.server_name}: {self.server_config}')
+
+            await self.refresh()
+
+            self.status = MCPSessionStatus.CONNECTED
+
+            # Notify start() that the connection has been established
+            self._ready_event.set()
+
+            # Wait for the shutdown signal
+            await self._shutdown_event.wait()
+
+        except Exception as e:
+            self.status = MCPSessionStatus.ERROR
+            self.ap.logger.error(f'Error in MCP session lifecycle {self.server_name}: {e}\n{traceback.format_exc()}')
+            # Set the ready event even on failure so start() knows initialization finished
+            self._ready_event.set()
+        finally:
+            # Clean up all resources in the same task
+            try:
+                if self.exit_stack:
+                    await self.exit_stack.aclose()
+                self.functions.clear()
+                self.session = None
+            except Exception as e:
+                self.ap.logger.error(f'Error cleaning up MCP session {self.server_name}: {e}\n{traceback.format_exc()}')
+
+    async def start(self):
+        if not self.enable:
+            return
+
+        # Spawn the background task that manages the lifecycle
+        self._lifecycle_task = asyncio.create_task(self._lifecycle_loop())
+
+        # Wait for the connection to be established or to fail (with a timeout)
+        try:
+            await asyncio.wait_for(self._ready_event.wait(), timeout=30.0)
+        except asyncio.TimeoutError:
+            self.status = MCPSessionStatus.ERROR
+            raise Exception('Connection timeout after 30 seconds')
+
+        # Check whether the connection failed
+        if self.status == MCPSessionStatus.ERROR:
+            raise Exception('Connection failed, please check URL')
 
     async def refresh(self):
         self.functions.clear()
 
         tools = await self.session.list_tools()
 
-        self.ap.logger.debug(f'获取 MCP 工具: {tools}')
+        self.ap.logger.debug(f'Refresh MCP tools: {tools}')
 
         for tool in tools.tools:
@@ -101,58 +179,220 @@ class RuntimeMCPSession:
             )
         )
 
     def get_tools(self) -> list[resource_tool.LLMTool]:
         return self.functions
 
+    def get_runtime_info_dict(self) -> dict:
+        return {
+            'status': self.status.value,
+            'tool_count': len(self.get_tools()),
+            'tools': [
+                {
+                    'name': tool.name,
+                    'description': tool.description,
+                }
+                for tool in self.get_tools()
+            ],
+        }
+
     async def shutdown(self):
-        """关闭工具"""
-        await self.session._exit_stack.aclose()
+        """Close the session and clean up resources"""
+        try:
+            # Set the shutdown event to tell the lifecycle task to exit
+            self._shutdown_event.set()
+
+            # Wait for the lifecycle task to finish (with a timeout)
+            if self._lifecycle_task and not self._lifecycle_task.done():
+                try:
+                    await asyncio.wait_for(self._lifecycle_task, timeout=5.0)
+                except asyncio.TimeoutError:
+                    self.ap.logger.warning(f'MCP session {self.server_name} shutdown timeout, cancelling task')
+                    self._lifecycle_task.cancel()
+                    try:
+                        await self._lifecycle_task
+                    except asyncio.CancelledError:
+                        pass
+
+            self.ap.logger.info(f'MCP session {self.server_name} shutdown complete')
+        except Exception as e:
+            self.ap.logger.error(f'Error shutting down MCP session {self.server_name}: {e}\n{traceback.format_exc()}')
 
 
-@loader.loader_class('mcp')
+# @loader.loader_class('mcp')
 class MCPLoader(loader.ToolLoader):
     """MCP tool loader.
 
     Manages all connections to MCP servers.
     """
 
-    sessions: dict[str, RuntimeMCPSession] = {}
+    sessions: dict[str, RuntimeMCPSession]
 
-    _last_listed_functions: list[resource_tool.LLMTool] = []
+    _last_listed_functions: list[resource_tool.LLMTool]
+
+    _hosted_mcp_tasks: list[asyncio.Task]
+
+    def __init__(self, ap: app.Application):
+        super().__init__(ap)
+        self.sessions = {}
+        self._last_listed_functions = []
+        self._hosted_mcp_tasks = []
 
     async def initialize(self):
-        for server_config in self.ap.instance_config.data.get('mcp', {}).get('servers', []):
-            if not server_config['enable']:
-                continue
-            session = RuntimeMCPSession(server_config['name'], server_config, self.ap)
-            await session.initialize()
-            # self.ap.event_loop.create_task(session.initialize())
-            self.sessions[server_config['name']] = session
+        await self.load_mcp_servers_from_db()
 
-    async def get_tools(self) -> list[resource_tool.LLMTool]:
+    async def load_mcp_servers_from_db(self):
+        self.ap.logger.info('Loading MCP servers from db...')
+
+        self.sessions = {}
+
+        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_mcp.MCPServer))
+        servers = result.all()
+
+        for server in servers:
+            config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server)
+
+            task = asyncio.create_task(self.host_mcp_server(config))
+            self._hosted_mcp_tasks.append(task)
+
+    async def host_mcp_server(self, server_config: dict):
+        self.ap.logger.debug(f'Loading MCP server {server_config}')
+        try:
+            session = await self.load_mcp_server(server_config)
+            self.sessions[server_config['name']] = session
+        except Exception as e:
+            self.ap.logger.error(
+                f'Failed to load MCP server from db: {server_config["name"]}({server_config["uuid"]}): {e}\n{traceback.format_exc()}'
+            )
+            return
+
+        self.ap.logger.debug(f'Starting MCP server {server_config["name"]}({server_config["uuid"]})')
+        try:
+            await session.start()
+        except Exception as e:
+            self.ap.logger.error(
+                f'Failed to start MCP server {server_config["name"]}({server_config["uuid"]}): {e}\n{traceback.format_exc()}'
+            )
+            return
+
+        self.ap.logger.debug(f'Started MCP server {server_config["name"]}({server_config["uuid"]})')
+
+    async def load_mcp_server(self, server_config: dict) -> RuntimeMCPSession:
+        """Load an MCP server into the runtime
+
+        Args:
+            server_config: server configuration dict, which must contain:
+                - name: server name
+                - mode: connection mode (stdio/sse)
+                - enable: whether the server is enabled
+                - extra_args: extra configuration parameters (optional)
+        """
+
+        name = server_config['name']
+        uuid = server_config['uuid']
+        mode = server_config['mode']
+        enable = server_config['enable']
+        extra_args = server_config.get('extra_args', {})
+
+        mixed_config = {
+            'name': name,
+            'uuid': uuid,
+            'mode': mode,
+            'enable': enable,
+            **extra_args,
+        }
+
+        session = RuntimeMCPSession(name, mixed_config, enable, self.ap)
+
+        return session
+
+    async def get_tools(self, bound_mcp_servers: list[str] | None = None) -> list[resource_tool.LLMTool]:
         all_functions = []
 
         for session in self.sessions.values():
-            all_functions.extend(session.functions)
+            # If bound_mcp_servers is specified, only include tools from those servers
+            if bound_mcp_servers is not None:
+                if session.server_uuid in bound_mcp_servers:
+                    all_functions.extend(session.get_tools())
+            else:
+                # If no bound servers are specified, include all tools
+                all_functions.extend(session.get_tools())
 
         self._last_listed_functions = all_functions
 
         return all_functions
 
     async def has_tool(self, name: str) -> bool:
-        return name in [f.name for f in self._last_listed_functions]
+        """Check whether a tool exists"""
+        for session in self.sessions.values():
+            for function in session.get_tools():
+                if function.name == name:
+                    return True
+        return False
 
     async def invoke_tool(self, name: str, parameters: dict) -> typing.Any:
-        for server_name, session in self.sessions.items():
-            for function in session.functions:
-                if function.name == name:
-                    return await function.func(**parameters)
+        """Invoke a tool call"""
+        for session in self.sessions.values():
+            for function in session.get_tools():
+                if function.name == name:
+                    self.ap.logger.debug(f'Invoking MCP tool: {name} with parameters: {parameters}')
+                    try:
+                        result = await function.func(**parameters)
+                        self.ap.logger.debug(f'MCP tool {name} executed successfully')
+                        return result
+                    except Exception as e:
+                        self.ap.logger.error(f'Error invoking MCP tool {name}: {e}\n{traceback.format_exc()}')
+                        raise
 
-        raise ValueError(f'未找到工具: {name}')
+        raise ValueError(f'Tool not found: {name}')
 
+    async def remove_mcp_server(self, server_name: str):
+        """Remove an MCP server"""
+        if server_name not in self.sessions:
+            self.ap.logger.warning(f'MCP server {server_name} not found in sessions, skipping removal')
+            return
+
+        session = self.sessions.pop(server_name)
+        await session.shutdown()
+        self.ap.logger.info(f'Removed MCP server: {server_name}')
+
+    def get_session(self, server_name: str) -> RuntimeMCPSession | None:
+        """Get the MCP session with the given name"""
+        return self.sessions.get(server_name)
+
+    def has_session(self, server_name: str) -> bool:
+        """Check whether an MCP session with the given name exists"""
+        return server_name in self.sessions
+
+    def get_all_server_names(self) -> list[str]:
+        """Get the names of all loaded MCP servers"""
+        return list(self.sessions.keys())
+
+    def get_server_tool_count(self, server_name: str) -> int:
+        """Get the number of tools on the given server"""
+        session = self.get_session(server_name)
+        return len(session.get_tools()) if session else 0
+
+    def get_all_servers_info(self) -> dict[str, dict]:
+        """Get information about all servers"""
+        info = {}
+        for server_name, session in self.sessions.items():
+            info[server_name] = {
+                'name': server_name,
+                'mode': session.server_config.get('mode'),
+                'enable': session.enable,
+                'tools_count': len(session.get_tools()),
+                'tool_names': [f.name for f in session.get_tools()],
+            }
+        return info
+
     async def shutdown(self):
-        """关闭工具"""
-        for session in self.sessions.values():
-            await session.shutdown()
+        """Shut down all tools"""
+        self.ap.logger.info('Shutting down all MCP sessions...')
+        for server_name, session in list(self.sessions.items()):
+            try:
+                await session.shutdown()
+                self.ap.logger.debug(f'Shutdown MCP session: {server_name}')
+            except Exception as e:
+                self.ap.logger.error(f'Error shutting down MCP session {server_name}: {e}\n{traceback.format_exc()}')
+        self.sessions.clear()
+        self.ap.logger.info('All MCP sessions shutdown complete')
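The reworked session follows a common asyncio pattern: a single owner task both acquires and releases the resources (here the AsyncExitStack), while start() and shutdown() only signal it through events. This is likely to avoid tearing down async context managers from a different task than the one that entered them, a known failure mode with anyio-based clients such as MCP's. A stripped-down sketch of the pattern, with no MCP specifics:

```python
import asyncio

class BackgroundSession:
    """Owner-task pattern: resources are acquired and released in one task."""

    def __init__(self):
        self._ready = asyncio.Event()
        self._stop = asyncio.Event()
        self._task: asyncio.Task | None = None

    async def _lifecycle(self):
        try:
            # ... acquire resources here (connect, handshake) ...
            self._ready.set()          # unblock start()
            await self._stop.wait()    # park until shutdown() signals
        finally:
            pass                       # ... release resources in the same task ...

    async def start(self, timeout: float = 30.0):
        self._task = asyncio.create_task(self._lifecycle())
        await asyncio.wait_for(self._ready.wait(), timeout=timeout)

    async def shutdown(self):
        self._stop.set()
        if self._task:
            await self._task

async def main():
    s = BackgroundSession()
    await s.start()
    await s.shutdown()

asyncio.run(main())
```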
@@ -7,18 +7,18 @@ from .. import loader
 import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
 
 
-@loader.loader_class('plugin-tool-loader')
+# @loader.loader_class('plugin-tool-loader')
 class PluginToolLoader(loader.ToolLoader):
     """Plugin tool loader.
 
     This loader stores no tool information itself; it only fetches tool information from the plugin system.
     """
 
-    async def get_tools(self) -> list[resource_tool.LLMTool]:
+    async def get_tools(self, bound_plugins: list[str] | None = None) -> list[resource_tool.LLMTool]:
         # Fetch tools (content functions) from the plugin system
         all_functions: list[resource_tool.LLMTool] = []
 
-        for tool in await self.ap.plugin_connector.list_tools():
+        for tool in await self.ap.plugin_connector.list_tools(bound_plugins):
             tool_obj = resource_tool.LLMTool(
                 name=tool.metadata.name,
                 human_desc=tool.metadata.description.en_US,
@@ -3,9 +3,9 @@ from __future__ import annotations
 import typing
 
 from ...core import app
-from . import loader as tools_loader
 from ...utils import importutil
 from . import loaders
+from .loaders import mcp as mcp_loader, plugin as plugin_loader
 import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
 
 importutil.import_modules_in_pkg(loaders)
@@ -16,25 +16,24 @@ class ToolManager:
 
     ap: app.Application
 
-    loaders: list[tools_loader.ToolLoader]
+    plugin_tool_loader: plugin_loader.PluginToolLoader
+    mcp_tool_loader: mcp_loader.MCPLoader
 
     def __init__(self, ap: app.Application):
        self.ap = ap
        self.all_functions = []
-        self.loaders = []
 
     async def initialize(self):
-        for loader_cls in tools_loader.preregistered_loaders:
-            loader_inst = loader_cls(self.ap)
-            await loader_inst.initialize()
-            self.loaders.append(loader_inst)
+        self.plugin_tool_loader = plugin_loader.PluginToolLoader(self.ap)
+        await self.plugin_tool_loader.initialize()
+        self.mcp_tool_loader = mcp_loader.MCPLoader(self.ap)
+        await self.mcp_tool_loader.initialize()
 
-    async def get_all_tools(self) -> list[resource_tool.LLMTool]:
+    async def get_all_tools(self, bound_plugins: list[str] | None = None, bound_mcp_servers: list[str] | None = None) -> list[resource_tool.LLMTool]:
         """Get all functions"""
         all_functions: list[resource_tool.LLMTool] = []
 
-        for loader in self.loaders:
-            all_functions.extend(await loader.get_tools())
+        all_functions.extend(await self.plugin_tool_loader.get_tools(bound_plugins))
+        all_functions.extend(await self.mcp_tool_loader.get_tools(bound_mcp_servers))
 
         return all_functions
@@ -93,13 +92,14 @@ class ToolManager:
     async def execute_func_call(self, name: str, parameters: dict) -> typing.Any:
         """Execute a function call"""
 
-        for loader in self.loaders:
-            if await loader.has_tool(name):
-                return await loader.invoke_tool(name, parameters)
+        if await self.plugin_tool_loader.has_tool(name):
+            return await self.plugin_tool_loader.invoke_tool(name, parameters)
+        elif await self.mcp_tool_loader.has_tool(name):
+            return await self.mcp_tool_loader.invoke_tool(name, parameters)
+        else:
+            raise ValueError(f'Tool not found: {name}')
 
     async def shutdown(self):
         """Shut down all tools"""
-        for loader in self.loaders:
-            await loader.shutdown()
+        await self.plugin_tool_loader.shutdown()
+        await self.mcp_tool_loader.shutdown()
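After this change, callers can scope tool lookup per pipeline instead of always receiving everything. A hedged usage sketch (the parameter names match the signatures above; the call site, plugin names, and UUID are illustrative):

```python
async def collect_tools(tool_mgr):
    # All tools from the plugin loader and every MCP server
    everything = await tool_mgr.get_all_tools()

    # Only tools from two bound plugins and one bound MCP server
    # (names and UUID are made up for the example)
    scoped = await tool_mgr.get_all_tools(
        bound_plugins=['plugin-a', 'plugin-b'],
        bound_mcp_servers=['3f1b2c1e-0000-0000-0000-000000000000'],
    )
    return everything, scoped
```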
@@ -4,6 +4,7 @@ import json
 from typing import List
 from pkg.rag.knowledge.services import base_service
 from pkg.core import app
+from langchain_text_splitters import RecursiveCharacterTextSplitter
 
 
 class Chunker(base_service.BaseService):
@@ -27,21 +28,6 @@ class Chunker(base_service.BaseService):
         """
         if not text:
             return []
-        # words = text.split()
-        # chunks = []
-        # current_chunk = []
-
-        # for word in words:
-        #     current_chunk.append(word)
-        #     if len(current_chunk) > self.chunk_size:
-        #         chunks.append(" ".join(current_chunk[:self.chunk_size]))
-        #         current_chunk = current_chunk[self.chunk_size - self.chunk_overlap:]
-
-        # if current_chunk:
-        #     chunks.append(" ".join(current_chunk))
-
-        # A more robust chunking strategy (e.g., using recursive character text splitter)
-        from langchain.text_splitter import RecursiveCharacterTextSplitter
 
         text_splitter = RecursiveCharacterTextSplitter(
             chunk_size=self.chunk_size,
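The splitter now comes from a module-level `langchain_text_splitters` import instead of a function-local `langchain.text_splitter` one. For reference, its basic behaviour (the chunk size and overlap values here are illustrative):

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
chunks = splitter.split_text('some long document text ... ' * 20)
# Each chunk is at most ~100 characters, and consecutive chunks share
# ~20 characters so context is preserved across chunk boundaries.
```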
@@ -42,3 +42,10 @@ class StorageProvider(abc.ABC):
         key: str,
     ):
         pass
+
+    @abc.abstractmethod
+    async def delete_dir_recursive(
+        self,
+        dir_path: str,
+    ):
+        pass
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import os
 import aiofiles
+import shutil
 
 from ...core import app
@@ -22,6 +23,8 @@ class LocalStorageProvider(provider.StorageProvider):
         key: str,
         value: bytes,
     ):
+        if not os.path.exists(os.path.join(LOCAL_STORAGE_PATH, os.path.dirname(key))):
+            os.makedirs(os.path.join(LOCAL_STORAGE_PATH, os.path.dirname(key)))
         async with aiofiles.open(os.path.join(LOCAL_STORAGE_PATH, f'{key}'), 'wb') as f:
             await f.write(value)
@@ -43,3 +46,11 @@ class LocalStorageProvider(provider.StorageProvider):
         key: str,
     ):
         os.remove(os.path.join(LOCAL_STORAGE_PATH, f'{key}'))
+
+    async def delete_dir_recursive(
+        self,
+        dir_path: str,
+    ):
+        # Delete the whole directory tree
+        if os.path.exists(os.path.join(LOCAL_STORAGE_PATH, dir_path)):
+            shutil.rmtree(os.path.join(LOCAL_STORAGE_PATH, dir_path))
@@ -1,6 +1,6 @@
-semantic_version = 'v4.3.3'
+semantic_version = 'v4.4.1'
 
-required_database_version = 8
+required_database_version = 9
 """Tag the version of the database schema, used to check if the database needs to be migrated"""
 
 debug_mode = False
@@ -1,6 +1,6 @@
 [project]
 name = "langbot"
-version = "4.3.3"
+version = "4.4.1"
 description = "Easy-to-use global IM bot platform designed for LLM era"
 readme = "README.md"
 requires-python = ">=3.10.1,<4.0"
@@ -60,11 +60,13 @@ dependencies = [
     "ebooklib>=0.18",
     "html2text>=2024.2.26",
     "langchain>=0.2.0",
+    "langchain-text-splitters>=0.0.1",
     "chromadb>=0.4.24",
     "qdrant-client (>=1.15.1,<2.0.0)",
-    "langbot-plugin==0.1.2",
+    "langbot-plugin==0.1.9b2",
     "asyncpg>=0.30.0",
-    "line-bot-sdk>=3.19.0"
+    "line-bot-sdk>=3.19.0",
+    "tboxsdk>=0.0.10",
 ]
 keywords = [
     "bot",
@@ -102,6 +104,7 @@ dev = [
     "pre-commit>=4.2.0",
     "pytest>=8.4.1",
     "pytest-asyncio>=1.0.0",
+    "pytest-cov>=7.0.0",
     "ruff>=0.11.9",
 ]
39  pytest.ini  Normal file
@@ -0,0 +1,39 @@
[pytest]
# Test discovery patterns
python_files = test_*.py
python_classes = Test*
python_functions = test_*

# Test paths
testpaths = tests

# Asyncio configuration
asyncio_mode = auto

# Output options
addopts =
    -v
    --strict-markers
    --tb=short
    --disable-warnings

# Markers
markers =
    asyncio: mark test as async
    unit: mark test as unit test
    integration: mark test as integration test
    slow: mark test as slow running

# Coverage options (when using pytest-cov)
[coverage:run]
source = pkg
omit =
    */tests/*
    */test_*.py
    */__pycache__/*
    */site-packages/*

[coverage:report]
precision = 2
show_missing = True
skip_covered = False
31  run_tests.sh  Executable file
@@ -0,0 +1,31 @@
#!/bin/bash

# Script to run all unit tests
# This script helps avoid circular import issues by setting up the environment properly

set -e

echo "Setting up test environment..."

# Activate virtual environment if it exists
if [ -d ".venv" ]; then
    source .venv/bin/activate
fi

# Check if pytest is installed
if ! command -v pytest &> /dev/null; then
    echo "Installing test dependencies..."
    pip install pytest pytest-asyncio pytest-cov
fi

echo "Running all unit tests..."

# Run tests with coverage
pytest tests/unit_tests/ -v --tb=short \
    --cov=pkg \
    --cov-report=xml \
    "$@"

echo ""
echo "Test run complete!"
echo "Coverage report saved to coverage.xml"
@@ -10,8 +10,6 @@ command:
 concurrency:
   pipeline: 20
   session: 1
-mcp:
-  servers: []
 proxy:
   http: ''
   https: ''
@@ -41,4 +39,4 @@ plugin:
   enable: true
   runtime_ws_url: 'ws://langbot_plugin_runtime:5400/control/ws'
   enable_marketplace: true
-cloud_service_url: 'https://space.langbot.app'
+cloud_service_url: 'https://space.langbot.app'
@@ -23,6 +23,10 @@ stages:
         label:
           en_US: Local Agent
           zh_Hans: 内置 Agent
+      - name: tbox-app-api
+        label:
+          en_US: Tbox App API
+          zh_Hans: 蚂蚁百宝箱平台 API
       - name: dify-service-api
         label:
           en_US: Dify Service API
@@ -39,6 +43,10 @@ stages:
         label:
           en_US: Langflow API
           zh_Hans: Langflow API
+      - name: coze-api
+        label:
+          en_US: Coze API
+          zh_Hans: 扣子 API
       - name: local-agent
         label:
           en_US: Local Agent
@@ -82,6 +90,26 @@ stages:
         type: knowledge-base-selector
         required: false
         default: ''
+      - name: tbox-app-api
+        label:
+          en_US: Tbox App API
+          zh_Hans: 蚂蚁百宝箱平台 API
+        description:
+          en_US: Configure the Tbox App API of the pipeline
+          zh_Hans: 配置蚂蚁百宝箱平台 API
+        config:
+          - name: api-key
+            label:
+              en_US: API Key
+              zh_Hans: API 密钥
+            type: string
+            required: true
+          - name: app-id
+            label:
+              en_US: App ID
+              zh_Hans: 应用 ID
+            type: string
+            required: true
       - name: dify-service-api
         label:
           en_US: Dify Service API
@@ -356,4 +384,57 @@ stages:
           zh_Hans: 可选的流程调整参数
         type: json
         required: false
-        default: '{}'
+        default: '{}'
+      - name: coze-api
+        label:
+          en_US: Coze API
+          zh_Hans: 扣子 API
+        description:
+          en_US: Configure the Coze API of the pipeline
+          zh_Hans: 配置 Coze API
+        config:
+          - name: api-key
+            label:
+              en_US: API Key
+              zh_Hans: API 密钥
+            description:
+              en_US: The API key for the Coze server
+              zh_Hans: Coze 服务器的 API 密钥
+            type: string
+            required: true
+          - name: bot-id
+            label:
+              en_US: Bot ID
+              zh_Hans: 机器人 ID
+            description:
+              en_US: The ID of the bot to run
+              zh_Hans: 要运行的机器人 ID
+            type: string
+            required: true
+          - name: api-base
+            label:
+              en_US: API Base URL
+              zh_Hans: API 基础 URL
+            description:
+              en_US: The base URL for the Coze API; use https://api.coze.com for the global Coze edition (coze.com).
+              zh_Hans: Coze API 的基础 URL,请使用 https://api.coze.com 用于全球 Coze 版(coze.com)
+            type: string
+            default: "https://api.coze.cn"
+          - name: auto-save-history
+            label:
+              en_US: Auto Save History
+              zh_Hans: 自动保存历史
+            description:
+              en_US: Whether to automatically save conversation history
+              zh_Hans: 是否自动保存对话历史
+            type: boolean
+            default: true
+          - name: timeout
+            label:
+              en_US: Request Timeout
+              zh_Hans: 请求超时
+            description:
+              en_US: Timeout in seconds for API requests
+              zh_Hans: API 请求超时时间(秒)
+            type: number
+            default: 120
183  tests/README.md  Normal file
@@ -0,0 +1,183 @@
# LangBot Test Suite

This directory contains the test suite for LangBot, with a focus on comprehensive unit testing of pipeline stages.

## Important Note

Due to circular import dependencies in the pipeline module structure, the test files use **lazy imports** via `importlib.import_module()` instead of direct imports. This ensures tests can run without triggering circular import errors. A sketch of the pattern is shown below.
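A minimal illustration of the lazy-import workaround (the module path and class name are examples, not a specific test):

```python
import importlib

def test_bansess_stage_loads():
    # Import at call time, after all modules have been registered,
    # instead of at collection time when the import cycle would trip.
    bansess = importlib.import_module('pkg.pipeline.bansess.bansess')
    assert hasattr(bansess, 'BanSessionCheckStage')
```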
## Structure

```
tests/
├── pipeline/                      # Pipeline stage tests
│   ├── conftest.py                # Shared fixtures and test infrastructure
│   ├── test_simple.py             # Basic infrastructure tests (always pass)
│   ├── test_bansess.py            # BanSessionCheckStage tests
│   ├── test_ratelimit.py          # RateLimit stage tests
│   ├── test_preproc.py            # PreProcessor stage tests
│   ├── test_respback.py           # SendResponseBackStage tests
│   ├── test_resprule.py           # GroupRespondRuleCheckStage tests
│   ├── test_pipelinemgr.py        # PipelineManager tests
│   └── test_stages_integration.py # Integration tests
└── README.md                      # This file
```

## Test Architecture

### Fixtures (`conftest.py`)

The test suite uses a centralized fixture system that provides:

- **MockApplication**: Comprehensive mock of the Application object with all dependencies
- **Mock objects**: Pre-configured mocks for Session, Conversation, Model, Adapter
- **Sample data**: Ready-to-use Query objects, message chains, and configurations
- **Helper functions**: Utilities for creating results and common assertions

### Design Principles

1. **Isolation**: Each test is independent and doesn't rely on external systems
2. **Mocking**: All external dependencies are mocked to ensure fast, reliable tests
3. **Coverage**: Tests cover happy paths, edge cases, and error conditions
4. **Extensibility**: Easy to add new tests by reusing existing fixtures

## Running Tests

### Using the test runner script (recommended)
```bash
bash run_tests.sh
```

This script automatically:
- Activates the virtual environment
- Installs test dependencies if needed
- Runs tests with coverage
- Generates HTML coverage report

### Manual test execution

#### Run all tests
```bash
pytest tests/pipeline/
```

#### Run only simple tests (no imports, always pass)
```bash
pytest tests/pipeline/test_simple.py -v
```

#### Run specific test file
```bash
pytest tests/pipeline/test_bansess.py -v
```

#### Run with coverage
```bash
pytest tests/pipeline/ --cov=pkg/pipeline --cov-report=html
```

#### Run specific test
```bash
pytest tests/pipeline/test_bansess.py::test_bansess_whitelist_allow -v
```

### Known Issues

Some tests may encounter circular import errors. This is a known issue with the current module structure. The test infrastructure is designed to work around this using lazy imports, but if you encounter issues:

1. Make sure you're running from the project root directory
2. Ensure the virtual environment is activated
3. Try running `test_simple.py` first to verify the test infrastructure works

## CI/CD Integration

Tests are automatically run on:
- Pull request opened
- Pull request marked ready for review
- Push to PR branch
- Push to master/develop branches

The workflow runs tests on Python 3.10, 3.11, and 3.12 to ensure compatibility.

## Adding New Tests

### 1. For a new pipeline stage

Create a new test file `test_<stage_name>.py`:

```python
"""
<StageName> stage unit tests
"""

import pytest
from pkg.pipeline.<module>.<stage> import <StageClass>
from pkg.pipeline import entities as pipeline_entities


@pytest.mark.asyncio
async def test_stage_basic_flow(mock_app, sample_query):
    """Test basic flow"""
    stage = <StageClass>(mock_app)
    await stage.initialize({})

    result = await stage.process(sample_query, '<StageName>')

    assert result.result_type == pipeline_entities.ResultType.CONTINUE
```

### 2. For additional fixtures

Add new fixtures to `conftest.py`:

```python
@pytest.fixture
def my_custom_fixture():
    """Description of fixture"""
    return create_test_data()
```

### 3. For test data

Use the helper functions in `conftest.py`:

```python
from tests.pipeline.conftest import create_stage_result, assert_result_continue

result = create_stage_result(
    result_type=pipeline_entities.ResultType.CONTINUE,
    query=sample_query
)

assert_result_continue(result)
```

## Best Practices

1. **Test naming**: Use descriptive names that explain what's being tested
2. **Arrange-Act-Assert**: Structure tests clearly with setup, execution, and verification
3. **One assertion per test**: Focus each test on a single behavior
4. **Mock appropriately**: Mock external dependencies, not the code under test
5. **Use fixtures**: Reuse common test data through fixtures
6. **Document tests**: Add docstrings explaining what each test validates

## Troubleshooting

### Import errors
Make sure you've installed the package in development mode:
```bash
uv pip install -e .
```

### Async test failures
Ensure you're using the `@pytest.mark.asyncio` decorator for async tests.

### Mock not working
Check that you're mocking at the right level and using `AsyncMock` for async functions.

## Future Enhancements

- [ ] Add integration tests for full pipeline execution
- [ ] Add performance benchmarks
- [ ] Add mutation testing for better coverage quality
- [ ] Add property-based testing with Hypothesis
0  tests/__init__.py  Normal file
0  tests/unit_tests/__init__.py  Normal file
1  tests/unit_tests/config/__init__.py  Normal file
@@ -0,0 +1 @@
# Config unit tests
332  tests/unit_tests/config/test_env_override.py  Normal file
@@ -0,0 +1,332 @@
|
||||
"""
|
||||
Tests for environment variable override functionality in YAML config
|
||||
"""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _apply_env_overrides_to_config(cfg: dict) -> dict:
|
||||
"""Apply environment variable overrides to data/config.yaml
|
||||
|
||||
Environment variables should be uppercase and use __ (double underscore)
|
||||
to represent nested keys. For example:
|
||||
- CONCURRENCY__PIPELINE overrides concurrency.pipeline
|
||||
- PLUGIN__RUNTIME_WS_URL overrides plugin.runtime_ws_url
|
||||
|
||||
Arrays and dict types are ignored.
|
||||
|
||||
Args:
|
||||
cfg: Configuration dictionary
|
||||
|
||||
Returns:
|
||||
Updated configuration dictionary
|
||||
"""
|
||||
def convert_value(value: str, original_value: Any) -> Any:
|
||||
"""Convert string value to appropriate type based on original value
|
||||
|
||||
Args:
|
||||
value: String value from environment variable
|
||||
original_value: Original value to infer type from
|
||||
|
||||
Returns:
|
||||
Converted value (falls back to string if conversion fails)
|
||||
"""
|
||||
if isinstance(original_value, bool):
|
||||
return value.lower() in ('true', '1', 'yes', 'on')
|
||||
elif isinstance(original_value, int):
|
||||
try:
|
||||
return int(value)
|
||||
except ValueError:
|
||||
# If conversion fails, keep as string (user error, but non-breaking)
|
||||
return value
|
||||
elif isinstance(original_value, float):
|
||||
try:
|
||||
return float(value)
|
||||
except ValueError:
|
||||
# If conversion fails, keep as string (user error, but non-breaking)
|
||||
return value
|
||||
else:
|
||||
return value
|
||||
|
||||
# Process environment variables
|
||||
for env_key, env_value in os.environ.items():
|
||||
# Check if the environment variable is uppercase and contains __
|
||||
if not env_key.isupper():
|
||||
continue
|
||||
if '__' not in env_key:
|
||||
continue
|
||||
|
||||
# Convert environment variable name to config path
|
||||
# e.g., CONCURRENCY__PIPELINE -> ['concurrency', 'pipeline']
|
||||
keys = [key.lower() for key in env_key.split('__')]
|
||||
|
||||
# Navigate to the target value and validate the path
|
||||
current = cfg
|
||||
|
||||
for i, key in enumerate(keys):
|
||||
if not isinstance(current, dict) or key not in current:
|
||||
break
|
||||
|
||||
if i == len(keys) - 1:
|
||||
# At the final key - check if it's a scalar value
|
||||
if isinstance(current[key], (dict, list)):
|
||||
# Skip dict and list types
|
||||
pass
|
||||
else:
|
||||
# Valid scalar value - convert and set it
|
||||
converted_value = convert_value(env_value, current[key])
|
||||
current[key] = converted_value
|
||||
else:
|
||||
# Navigate deeper
|
||||
current = current[key]
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class TestEnvOverrides:
|
||||
"""Test environment variable override functionality"""
|
||||
|
||||
def test_simple_string_override(self):
|
||||
"""Test overriding a simple string value"""
|
||||
cfg = {
|
||||
'api': {
|
||||
'port': 5300
|
||||
}
|
||||
}
|
||||
|
||||
# Set environment variable
|
||||
os.environ['API__PORT'] = '8080'
|
||||
|
||||
result = _apply_env_overrides_to_config(cfg)
|
||||
|
||||
assert result['api']['port'] == 8080
|
||||
|
||||
# Cleanup
|
||||
del os.environ['API__PORT']
|
||||
|
||||
def test_nested_key_override(self):
|
||||
"""Test overriding nested keys with __ delimiter"""
|
||||
cfg = {
|
||||
'concurrency': {
|
||||
'pipeline': 20,
|
||||
'session': 1
|
||||
}
|
||||
}
|
||||
|
||||
os.environ['CONCURRENCY__PIPELINE'] = '50'
|
||||
|
||||
result = _apply_env_overrides_to_config(cfg)
|
||||
|
||||
assert result['concurrency']['pipeline'] == 50
|
||||
assert result['concurrency']['session'] == 1 # Unchanged
|
||||
|
||||
del os.environ['CONCURRENCY__PIPELINE']
|
||||
|
||||
def test_deep_nested_override(self):
|
||||
"""Test overriding deeply nested keys"""
|
||||
cfg = {
|
||||
'system': {
|
||||
'jwt': {
|
||||
'expire': 604800,
|
||||
'secret': ''
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
os.environ['SYSTEM__JWT__EXPIRE'] = '86400'
|
||||
os.environ['SYSTEM__JWT__SECRET'] = 'my_secret_key'
|
||||
|
||||
result = _apply_env_overrides_to_config(cfg)
|
||||
|
||||
assert result['system']['jwt']['expire'] == 86400
|
||||
assert result['system']['jwt']['secret'] == 'my_secret_key'
|
||||
|
||||
del os.environ['SYSTEM__JWT__EXPIRE']
|
||||
del os.environ['SYSTEM__JWT__SECRET']
|
||||
|
||||
def test_underscore_in_key(self):
|
||||
"""Test keys with underscores like runtime_ws_url"""
|
||||
cfg = {
|
||||
'plugin': {
|
||||
'enable': True,
|
||||
'runtime_ws_url': 'ws://localhost:5400/control/ws'
|
||||
}
|
||||
}
|
||||
|
||||
os.environ['PLUGIN__RUNTIME_WS_URL'] = 'ws://newhost:6000/ws'

        result = _apply_env_overrides_to_config(cfg)

        assert result['plugin']['runtime_ws_url'] == 'ws://newhost:6000/ws'

        del os.environ['PLUGIN__RUNTIME_WS_URL']

    def test_boolean_conversion(self):
        """Test boolean value conversion"""
        cfg = {
            'plugin': {
                'enable': True,
                'enable_marketplace': False
            }
        }

        os.environ['PLUGIN__ENABLE'] = 'false'
        os.environ['PLUGIN__ENABLE_MARKETPLACE'] = 'true'

        result = _apply_env_overrides_to_config(cfg)

        assert result['plugin']['enable'] is False
        assert result['plugin']['enable_marketplace'] is True

        del os.environ['PLUGIN__ENABLE']
        del os.environ['PLUGIN__ENABLE_MARKETPLACE']

    def test_ignore_dict_type(self):
        """Test that dict types are ignored"""
        cfg = {
            'database': {
                'use': 'sqlite',
                'sqlite': {
                    'path': 'data/langbot.db'
                }
            }
        }

        # Try to override a dict value - should be ignored
        os.environ['DATABASE__SQLITE'] = 'new_value'

        result = _apply_env_overrides_to_config(cfg)

        # Should remain a dict, not overridden
        assert isinstance(result['database']['sqlite'], dict)
        assert result['database']['sqlite']['path'] == 'data/langbot.db'

        del os.environ['DATABASE__SQLITE']

    def test_ignore_list_type(self):
        """Test that list/array types are ignored"""
        cfg = {
            'admins': ['admin1', 'admin2'],
            'command': {
                'enable': True,
                'prefix': ['!', '!']
            }
        }

        # Try to override list values - should be ignored
        os.environ['ADMINS'] = 'admin3'
        os.environ['COMMAND__PREFIX'] = '?'

        result = _apply_env_overrides_to_config(cfg)

        # Should remain lists, not overridden
        assert isinstance(result['admins'], list)
        assert result['admins'] == ['admin1', 'admin2']
        assert isinstance(result['command']['prefix'], list)
        assert result['command']['prefix'] == ['!', '!']

        del os.environ['ADMINS']
        del os.environ['COMMAND__PREFIX']

    def test_lowercase_env_var_ignored(self):
        """Test that lowercase environment variables are ignored"""
        cfg = {
            'api': {
                'port': 5300
            }
        }

        os.environ['api__port'] = '8080'

        result = _apply_env_overrides_to_config(cfg)

        # Should not be overridden
        assert result['api']['port'] == 5300

        del os.environ['api__port']

    def test_no_double_underscore_ignored(self):
        """Test that env vars without __ are ignored"""
        cfg = {
            'api': {
                'port': 5300
            }
        }

        os.environ['APIPORT'] = '8080'

        result = _apply_env_overrides_to_config(cfg)

        # Should not be overridden
        assert result['api']['port'] == 5300

        del os.environ['APIPORT']

    def test_nonexistent_key_ignored(self):
        """Test that env vars for non-existent keys are ignored"""
        cfg = {
            'api': {
                'port': 5300
            }
        }

        os.environ['API__NONEXISTENT'] = 'value'

        result = _apply_env_overrides_to_config(cfg)

        # Should not create new key
        assert 'nonexistent' not in result['api']

        del os.environ['API__NONEXISTENT']

    def test_integer_conversion(self):
        """Test integer value conversion"""
        cfg = {
            'concurrency': {
                'pipeline': 20
            }
        }

        os.environ['CONCURRENCY__PIPELINE'] = '100'

        result = _apply_env_overrides_to_config(cfg)

        assert result['concurrency']['pipeline'] == 100
        assert isinstance(result['concurrency']['pipeline'], int)

        del os.environ['CONCURRENCY__PIPELINE']

    def test_multiple_overrides(self):
        """Test multiple environment variable overrides at once"""
        cfg = {
            'api': {
                'port': 5300
            },
            'concurrency': {
                'pipeline': 20,
                'session': 1
            },
            'plugin': {
                'enable': False
            }
        }

        os.environ['API__PORT'] = '8080'
        os.environ['CONCURRENCY__PIPELINE'] = '50'
        os.environ['PLUGIN__ENABLE'] = 'true'

        result = _apply_env_overrides_to_config(cfg)

        assert result['api']['port'] == 8080
        assert result['concurrency']['pipeline'] == 50
        assert result['plugin']['enable'] is True

        del os.environ['API__PORT']
        del os.environ['CONCURRENCY__PIPELINE']
        del os.environ['PLUGIN__ENABLE']


if __name__ == '__main__':
    pytest.main([__file__, '-v'])
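Taken together, the suite pins down the override contract end to end: only UPPER_CASE variable names containing '__' are considered, the name maps to a lowercased path into the nested config, values are coerced to the type already present (bool, then int, else string), and dict/list values as well as unknown keys are left alone. A minimal sketch that satisfies exactly these tests — an editorial reconstruction inferred from the assertions, not the function under test:

import os


def _apply_env_overrides_to_config(cfg: dict) -> dict:
    """Sketch: apply FOO__BAR=value environment overrides onto a nested config dict."""
    for name, raw in os.environ.items():
        if '__' not in name or not name.isupper():
            continue  # only UPPER_CASE names with a '__' separator qualify
        path = [part.lower() for part in name.split('__')]
        node = cfg
        for key in path[:-1]:
            if not isinstance(node, dict) or key not in node:
                node = None
                break
            node = node[key]
        if not isinstance(node, dict) or path[-1] not in node:
            continue  # never create keys that are not already in the config
        current = node[path[-1]]
        if isinstance(current, (dict, list)):
            continue  # dict/list values are never overridden
        if isinstance(current, bool):  # check bool before int: bool is an int subclass
            node[path[-1]] = raw.lower() == 'true'
        elif isinstance(current, int):
            node[path[-1]] = int(raw)
        else:
            node[path[-1]] = raw
    return cfg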
0    tests/unit_tests/pipeline/__init__.py    Normal file
251  tests/unit_tests/pipeline/conftest.py    Normal file
@@ -0,0 +1,251 @@
"""
Shared test fixtures and configuration

This file provides infrastructure for all pipeline tests, including:
- Mock object factories
- Test fixtures
- Common test helper functions
"""

from __future__ import annotations

import pytest
from unittest.mock import AsyncMock, MagicMock, Mock
from typing import Any

import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
import langbot_plugin.api.entities.builtin.platform.message as platform_message
import langbot_plugin.api.entities.builtin.platform.events as platform_events
import langbot_plugin.api.entities.builtin.provider.session as provider_session
import langbot_plugin.api.entities.builtin.provider.message as provider_message

from pkg.pipeline import entities as pipeline_entities


class MockApplication:
    """Mock Application object providing all basic dependencies needed by stages"""

    def __init__(self):
        self.logger = self._create_mock_logger()
        self.sess_mgr = self._create_mock_session_manager()
        self.model_mgr = self._create_mock_model_manager()
        self.tool_mgr = self._create_mock_tool_manager()
        self.plugin_connector = self._create_mock_plugin_connector()
        self.persistence_mgr = self._create_mock_persistence_manager()
        self.query_pool = self._create_mock_query_pool()
        self.instance_config = self._create_mock_instance_config()
        self.task_mgr = self._create_mock_task_manager()

    def _create_mock_logger(self):
        logger = Mock()
        logger.debug = Mock()
        logger.info = Mock()
        logger.error = Mock()
        logger.warning = Mock()
        return logger

    def _create_mock_session_manager(self):
        sess_mgr = AsyncMock()
        sess_mgr.get_session = AsyncMock()
        sess_mgr.get_conversation = AsyncMock()
        return sess_mgr

    def _create_mock_model_manager(self):
        model_mgr = AsyncMock()
        model_mgr.get_model_by_uuid = AsyncMock()
        return model_mgr

    def _create_mock_tool_manager(self):
        tool_mgr = AsyncMock()
        tool_mgr.get_all_tools = AsyncMock(return_value=[])
        return tool_mgr

    def _create_mock_plugin_connector(self):
        plugin_connector = AsyncMock()
        plugin_connector.emit_event = AsyncMock()
        return plugin_connector

    def _create_mock_persistence_manager(self):
        persistence_mgr = AsyncMock()
        persistence_mgr.execute_async = AsyncMock()
        return persistence_mgr

    def _create_mock_query_pool(self):
        query_pool = Mock()
        query_pool.cached_queries = {}
        query_pool.queries = []
        query_pool.condition = AsyncMock()
        return query_pool

    def _create_mock_instance_config(self):
        instance_config = Mock()
        instance_config.data = {
            'command': {'prefix': ['/', '!'], 'enable': True},
            'concurrency': {'pipeline': 10},
        }
        return instance_config

    def _create_mock_task_manager(self):
        task_mgr = Mock()
        task_mgr.create_task = Mock()
        return task_mgr


@pytest.fixture
def mock_app():
    """Provides Mock Application instance"""
    return MockApplication()


@pytest.fixture
def mock_session():
    """Provides Mock Session object"""
    session = Mock()
    session.launcher_type = provider_session.LauncherTypes.PERSON
    session.launcher_id = 12345
    session._semaphore = AsyncMock()
    session._semaphore.locked = Mock(return_value=False)
    session._semaphore.acquire = AsyncMock()
    session._semaphore.release = AsyncMock()
    return session


@pytest.fixture
def mock_conversation():
    """Provides Mock Conversation object"""
    conversation = Mock()
    conversation.uuid = 'test-conversation-uuid'

    # Create mock prompt with copy method
    mock_prompt = Mock()
    mock_prompt.messages = []
    mock_prompt.copy = Mock(return_value=Mock(messages=[]))
    conversation.prompt = mock_prompt

    # Create mock messages list with copy method
    mock_messages = Mock()
    mock_messages.copy = Mock(return_value=[])
    conversation.messages = mock_messages

    return conversation


@pytest.fixture
def mock_model():
    """Provides Mock Model object"""
    model = Mock()
    model.model_entity = Mock()
    model.model_entity.uuid = 'test-model-uuid'
    model.model_entity.abilities = ['func_call', 'vision']
    return model


@pytest.fixture
def mock_adapter():
    """Provides Mock Adapter object"""
    adapter = AsyncMock()
    adapter.is_stream_output_supported = AsyncMock(return_value=False)
    adapter.reply_message = AsyncMock()
    adapter.reply_message_chunk = AsyncMock()
    return adapter


@pytest.fixture
def sample_message_chain():
    """Provides sample message chain"""
    return platform_message.MessageChain(
        [
            platform_message.Plain(text='Hello, this is a test message'),
        ]
    )


@pytest.fixture
def sample_message_event(sample_message_chain):
    """Provides sample message event"""
    event = Mock()
    event.sender = Mock()
    event.sender.id = 12345
    event.time = 1609459200  # 2021-01-01 00:00:00
    return event


@pytest.fixture
def sample_query(sample_message_chain, sample_message_event, mock_adapter):
    """Provides sample Query object - using model_construct to bypass validation"""
    import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query

    # Use model_construct to bypass Pydantic validation for test purposes
    query = pipeline_query.Query.model_construct(
        query_id='test-query-id',
        launcher_type=provider_session.LauncherTypes.PERSON,
        launcher_id=12345,
        sender_id=12345,
        message_chain=sample_message_chain,
        message_event=sample_message_event,
        adapter=mock_adapter,
        pipeline_uuid='test-pipeline-uuid',
        bot_uuid='test-bot-uuid',
        pipeline_config={
            'ai': {
                'runner': {'runner': 'local-agent'},
                'local-agent': {'model': 'test-model-uuid', 'prompt': 'test-prompt'},
            },
            'output': {'misc': {'at-sender': False, 'quote-origin': False}},
            'trigger': {'misc': {'combine-quote-message': False}},
        },
        session=None,
        prompt=None,
        messages=[],
        user_message=None,
        use_funcs=[],
        use_llm_model_uuid=None,
        variables={},
        resp_messages=[],
        resp_message_chain=None,
        current_stage_name=None
    )
    return query


@pytest.fixture
def sample_pipeline_config():
    """Provides sample pipeline configuration"""
    return {
        'ai': {
            'runner': {'runner': 'local-agent'},
            'local-agent': {'model': 'test-model-uuid', 'prompt': 'test-prompt'},
        },
        'output': {'misc': {'at-sender': False, 'quote-origin': False}},
        'trigger': {'misc': {'combine-quote-message': False}},
        'ratelimit': {'enable': True, 'algo': 'fixwin', 'window': 60, 'limit': 10},
    }


def create_stage_result(
    result_type: pipeline_entities.ResultType,
    query: pipeline_query.Query,
    user_notice: str = '',
    console_notice: str = '',
    debug_notice: str = '',
    error_notice: str = '',
) -> pipeline_entities.StageProcessResult:
    """Helper function to create stage process result"""
    return pipeline_entities.StageProcessResult(
        result_type=result_type,
        new_query=query,
        user_notice=user_notice,
        console_notice=console_notice,
        debug_notice=debug_notice,
        error_notice=error_notice,
    )


def assert_result_continue(result: pipeline_entities.StageProcessResult):
    """Assert result is CONTINUE type"""
    assert result.result_type == pipeline_entities.ResultType.CONTINUE


def assert_result_interrupt(result: pipeline_entities.StageProcessResult):
    """Assert result is INTERRUPT type"""
    assert result.result_type == pipeline_entities.ResultType.INTERRUPT
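For orientation, here is a hypothetical test showing how these pieces compose. The fixtures (mock_app, sample_query, ...) are injected by pytest automatically; create_stage_result and assert_result_continue are plain module-level helpers, so the import path below is an assumption:

import pytest

from pkg.pipeline import entities as pipeline_entities
from tests.unit_tests.pipeline.conftest import (  # assumed import path
    assert_result_continue,
    create_stage_result,
)


@pytest.mark.asyncio
async def test_helper_roundtrip(sample_query):
    # Build a CONTINUE result around the fixture query and check it round-trips.
    result = create_stage_result(pipeline_entities.ResultType.CONTINUE, sample_query)
    assert_result_continue(result)
    assert result.new_query is sample_query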
189  tests/unit_tests/pipeline/test_bansess.py  Normal file
@@ -0,0 +1,189 @@
"""
BanSessionCheckStage unit tests

Tests the actual BanSessionCheckStage implementation from pkg.pipeline.bansess
"""

import pytest
from unittest.mock import Mock
from importlib import import_module
import langbot_plugin.api.entities.builtin.provider.session as provider_session


def get_modules():
    """Lazy import to ensure proper initialization order"""
    # Import pipelinemgr first to trigger proper stage registration
    pipelinemgr = import_module('pkg.pipeline.pipelinemgr')
    bansess = import_module('pkg.pipeline.bansess.bansess')
    entities = import_module('pkg.pipeline.entities')
    return bansess, entities


@pytest.mark.asyncio
async def test_whitelist_allow(mock_app, sample_query):
    """Test whitelist allows matching session"""
    bansess, entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {
        'trigger': {
            'access-control': {
                'mode': 'whitelist',
                'whitelist': ['person_12345']
            }
        }
    }

    stage = bansess.BanSessionCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'BanSessionCheckStage')

    assert result.result_type == entities.ResultType.CONTINUE
    assert result.new_query == sample_query


@pytest.mark.asyncio
async def test_whitelist_deny(mock_app, sample_query):
    """Test whitelist denies non-matching session"""
    bansess, entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '99999'
    sample_query.pipeline_config = {
        'trigger': {
            'access-control': {
                'mode': 'whitelist',
                'whitelist': ['person_12345']
            }
        }
    }

    stage = bansess.BanSessionCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'BanSessionCheckStage')

    assert result.result_type == entities.ResultType.INTERRUPT


@pytest.mark.asyncio
async def test_blacklist_allow(mock_app, sample_query):
    """Test blacklist allows non-matching session"""
    bansess, entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {
        'trigger': {
            'access-control': {
                'mode': 'blacklist',
                'blacklist': ['person_99999']
            }
        }
    }

    stage = bansess.BanSessionCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'BanSessionCheckStage')

    assert result.result_type == entities.ResultType.CONTINUE


@pytest.mark.asyncio
async def test_blacklist_deny(mock_app, sample_query):
    """Test blacklist denies matching session"""
    bansess, entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {
        'trigger': {
            'access-control': {
                'mode': 'blacklist',
                'blacklist': ['person_12345']
            }
        }
    }

    stage = bansess.BanSessionCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'BanSessionCheckStage')

    assert result.result_type == entities.ResultType.INTERRUPT


@pytest.mark.asyncio
async def test_wildcard_group(mock_app, sample_query):
    """Test group wildcard matching"""
    bansess, entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.GROUP
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {
        'trigger': {
            'access-control': {
                'mode': 'whitelist',
                'whitelist': ['group_*']
            }
        }
    }

    stage = bansess.BanSessionCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'BanSessionCheckStage')

    assert result.result_type == entities.ResultType.CONTINUE


@pytest.mark.asyncio
async def test_wildcard_person(mock_app, sample_query):
    """Test person wildcard matching"""
    bansess, entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {
        'trigger': {
            'access-control': {
                'mode': 'whitelist',
                'whitelist': ['person_*']
            }
        }
    }

    stage = bansess.BanSessionCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'BanSessionCheckStage')

    assert result.result_type == entities.ResultType.CONTINUE


@pytest.mark.asyncio
async def test_user_id_wildcard(mock_app, sample_query):
    """Test user ID wildcard matching (*_id format)"""
    bansess, entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.sender_id = '67890'
    sample_query.pipeline_config = {
        'trigger': {
            'access-control': {
                'mode': 'whitelist',
                'whitelist': ['*_67890']
            }
        }
    }

    stage = bansess.BanSessionCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'BanSessionCheckStage')

    assert result.result_type == entities.ResultType.CONTINUE
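Taken together, the six cases pin down a small matching grammar for access-control entries: an entry is '<launcher_type>_<id>', 'person_*' or 'group_*' wildcards a whole launcher type, and '*_<id>' matches an ID regardless of type (covering the sender as well as the launcher). A predicate with exactly that behavior might look like this — a sketch inferred from the tests, not the shipped bansess implementation:

def _matches(entry: str, launcher_type: str, launcher_id: str, sender_id: str) -> bool:
    # Entry format: '<type>_<id>'; either side may be the wildcard '*'.
    type_part, _, id_part = entry.partition('_')
    if type_part == '*':
        return id_part in (launcher_id, sender_id)
    if type_part != launcher_type:
        return False
    return id_part == '*' or id_part == launcher_id


def is_session_allowed(pipeline_config: dict, launcher_type: str,
                       launcher_id: str, sender_id: str) -> bool:
    ac = pipeline_config['trigger']['access-control']
    hit = any(
        _matches(entry, launcher_type, launcher_id, sender_id)
        for entry in ac.get(ac['mode'], [])
    )
    # Whitelist: pass only on a hit; blacklist: pass only without one.
    return hit if ac['mode'] == 'whitelist' else not hit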
170  tests/unit_tests/pipeline/test_pipelinemgr.py  Normal file
@@ -0,0 +1,170 @@
"""
PipelineManager unit tests
"""

import pytest
from unittest.mock import AsyncMock, Mock
from importlib import import_module


def get_pipelinemgr_module():
    return import_module('pkg.pipeline.pipelinemgr')


def get_stage_module():
    return import_module('pkg.pipeline.stage')


def get_entities_module():
    return import_module('pkg.pipeline.entities')


def get_persistence_pipeline_module():
    return import_module('pkg.entity.persistence.pipeline')


@pytest.mark.asyncio
async def test_pipeline_manager_initialize(mock_app):
    """Test pipeline manager initialization"""
    pipelinemgr = get_pipelinemgr_module()

    mock_app.persistence_mgr.execute_async = AsyncMock(return_value=Mock(all=Mock(return_value=[])))

    manager = pipelinemgr.PipelineManager(mock_app)
    await manager.initialize()

    assert manager.stage_dict is not None
    assert len(manager.pipelines) == 0


@pytest.mark.asyncio
async def test_load_pipeline(mock_app):
    """Test loading a single pipeline"""
    pipelinemgr = get_pipelinemgr_module()
    persistence_pipeline = get_persistence_pipeline_module()

    mock_app.persistence_mgr.execute_async = AsyncMock(return_value=Mock(all=Mock(return_value=[])))

    manager = pipelinemgr.PipelineManager(mock_app)
    await manager.initialize()

    # Create test pipeline entity
    pipeline_entity = Mock(spec=persistence_pipeline.LegacyPipeline)
    pipeline_entity.uuid = 'test-uuid'
    pipeline_entity.stages = []
    pipeline_entity.config = {'test': 'config'}
    pipeline_entity.extensions_preferences = {'plugins': []}

    await manager.load_pipeline(pipeline_entity)

    assert len(manager.pipelines) == 1
    assert manager.pipelines[0].pipeline_entity.uuid == 'test-uuid'


@pytest.mark.asyncio
async def test_get_pipeline_by_uuid(mock_app):
    """Test getting pipeline by UUID"""
    pipelinemgr = get_pipelinemgr_module()
    persistence_pipeline = get_persistence_pipeline_module()

    mock_app.persistence_mgr.execute_async = AsyncMock(return_value=Mock(all=Mock(return_value=[])))

    manager = pipelinemgr.PipelineManager(mock_app)
    await manager.initialize()

    # Create and add test pipeline
    pipeline_entity = Mock(spec=persistence_pipeline.LegacyPipeline)
    pipeline_entity.uuid = 'test-uuid'
    pipeline_entity.stages = []
    pipeline_entity.config = {}
    pipeline_entity.extensions_preferences = {'plugins': []}

    await manager.load_pipeline(pipeline_entity)

    # Test retrieval
    result = await manager.get_pipeline_by_uuid('test-uuid')
    assert result is not None
    assert result.pipeline_entity.uuid == 'test-uuid'

    # Test non-existent UUID
    result = await manager.get_pipeline_by_uuid('non-existent')
    assert result is None


@pytest.mark.asyncio
async def test_remove_pipeline(mock_app):
    """Test removing a pipeline"""
    pipelinemgr = get_pipelinemgr_module()
    persistence_pipeline = get_persistence_pipeline_module()

    mock_app.persistence_mgr.execute_async = AsyncMock(return_value=Mock(all=Mock(return_value=[])))

    manager = pipelinemgr.PipelineManager(mock_app)
    await manager.initialize()

    # Create and add test pipeline
    pipeline_entity = Mock(spec=persistence_pipeline.LegacyPipeline)
    pipeline_entity.uuid = 'test-uuid'
    pipeline_entity.stages = []
    pipeline_entity.config = {}
    pipeline_entity.extensions_preferences = {'plugins': []}

    await manager.load_pipeline(pipeline_entity)
    assert len(manager.pipelines) == 1

    # Remove pipeline
    await manager.remove_pipeline('test-uuid')
    assert len(manager.pipelines) == 0


@pytest.mark.asyncio
async def test_runtime_pipeline_execute(mock_app, sample_query):
    """Test runtime pipeline execution"""
    pipelinemgr = get_pipelinemgr_module()
    stage = get_stage_module()
    persistence_pipeline = get_persistence_pipeline_module()

    # Create mock stage that returns a simple result dict (avoiding Pydantic validation)
    mock_result = Mock()
    mock_result.result_type = Mock()
    mock_result.result_type.value = 'CONTINUE'  # Simulate enum value
    mock_result.new_query = sample_query
    mock_result.user_notice = ''
    mock_result.console_notice = ''
    mock_result.debug_notice = ''
    mock_result.error_notice = ''

    # Make it look like ResultType.CONTINUE
    from unittest.mock import MagicMock

    CONTINUE = MagicMock()
    CONTINUE.__eq__ = lambda self, other: True  # Always equal for comparison
    mock_result.result_type = CONTINUE

    mock_stage = Mock(spec=stage.PipelineStage)
    mock_stage.process = AsyncMock(return_value=mock_result)

    # Create stage container
    stage_container = pipelinemgr.StageInstContainer(inst_name='TestStage', inst=mock_stage)

    # Create pipeline entity
    pipeline_entity = Mock(spec=persistence_pipeline.LegacyPipeline)
    pipeline_entity.config = sample_query.pipeline_config
    pipeline_entity.extensions_preferences = {'plugins': []}

    # Create runtime pipeline
    runtime_pipeline = pipelinemgr.RuntimePipeline(mock_app, pipeline_entity, [stage_container])

    # Mock plugin connector
    event_ctx = Mock()
    event_ctx.is_prevented_default = Mock(return_value=False)
    mock_app.plugin_connector.emit_event = AsyncMock(return_value=event_ctx)

    # Add query to cached_queries to prevent KeyError in finally block
    mock_app.query_pool.cached_queries[sample_query.query_id] = sample_query

    # Execute pipeline
    await runtime_pipeline.run(sample_query)

    # Verify stage was called
    mock_stage.process.assert_called_once()
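The execution test leans on one property of RuntimePipeline.run: stage containers are invoked in order, each result's result_type steers the loop, and new_query is threaded into the next stage (the MagicMock __eq__ trick above simply forces the CONTINUE branch). The control flow, roughly — a sketch, assuming the shipped loop also does the event emission and query-pool cleanup that this test mocks away:

async def run_stages(stage_containers, query, entities):
    """Sketch of the stage loop: stop at INTERRUPT, thread the query on CONTINUE."""
    for container in stage_containers:
        result = await container.inst.process(query, container.inst_name)
        if result.result_type == entities.ResultType.INTERRUPT:
            break
        query = result.new_query  # a stage may hand back a rewritten query
    return query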
109  tests/unit_tests/pipeline/test_ratelimit.py  Normal file
@@ -0,0 +1,109 @@
"""
RateLimit stage unit tests

Tests the actual RateLimit implementation from pkg.pipeline.ratelimit
"""

import pytest
from unittest.mock import AsyncMock, Mock, patch
from importlib import import_module
import langbot_plugin.api.entities.builtin.provider.session as provider_session


def get_modules():
    """Lazy import to ensure proper initialization order"""
    # Import pipelinemgr first to trigger proper stage registration
    pipelinemgr = import_module('pkg.pipeline.pipelinemgr')
    ratelimit = import_module('pkg.pipeline.ratelimit.ratelimit')
    entities = import_module('pkg.pipeline.entities')
    algo_module = import_module('pkg.pipeline.ratelimit.algo')
    return ratelimit, entities, algo_module


@pytest.mark.asyncio
async def test_require_access_allowed(mock_app, sample_query):
    """Test RequireRateLimitOccupancy allows access when rate limit is not exceeded"""
    ratelimit, entities, algo_module = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {}

    # Create mock algorithm that allows access
    mock_algo = Mock(spec=algo_module.ReteLimitAlgo)
    mock_algo.require_access = AsyncMock(return_value=True)
    mock_algo.initialize = AsyncMock()

    stage = ratelimit.RateLimit(mock_app)

    # Patch the algorithm selection to use our mock
    with patch.object(algo_module, 'preregistered_algos', []):
        stage.algo = mock_algo

        result = await stage.process(sample_query, 'RequireRateLimitOccupancy')

        assert result.result_type == entities.ResultType.CONTINUE
        assert result.new_query == sample_query
        mock_algo.require_access.assert_called_once_with(
            sample_query,
            'person',
            '12345'
        )


@pytest.mark.asyncio
async def test_require_access_denied(mock_app, sample_query):
    """Test RequireRateLimitOccupancy denies access when rate limit is exceeded"""
    ratelimit, entities, algo_module = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {}

    # Create mock algorithm that denies access
    mock_algo = Mock(spec=algo_module.ReteLimitAlgo)
    mock_algo.require_access = AsyncMock(return_value=False)
    mock_algo.initialize = AsyncMock()

    stage = ratelimit.RateLimit(mock_app)

    # Patch the algorithm selection to use our mock
    with patch.object(algo_module, 'preregistered_algos', []):
        stage.algo = mock_algo

        result = await stage.process(sample_query, 'RequireRateLimitOccupancy')

        assert result.result_type == entities.ResultType.INTERRUPT
        assert result.user_notice != ''
        mock_algo.require_access.assert_called_once()


@pytest.mark.asyncio
async def test_release_access(mock_app, sample_query):
    """Test ReleaseRateLimitOccupancy releases rate limit occupancy"""
    ratelimit, entities, algo_module = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {}

    # Create mock algorithm
    mock_algo = Mock(spec=algo_module.ReteLimitAlgo)
    mock_algo.release_access = AsyncMock()
    mock_algo.initialize = AsyncMock()

    stage = ratelimit.RateLimit(mock_app)

    # Patch the algorithm selection to use our mock
    with patch.object(algo_module, 'preregistered_algos', []):
        stage.algo = mock_algo

        result = await stage.process(sample_query, 'ReleaseRateLimitOccupancy')

        assert result.result_type == entities.ResultType.CONTINUE
        assert result.new_query == sample_query
        mock_algo.release_access.assert_called_once_with(
            sample_query,
            'person',
            '12345'
        )
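Both stages delegate to a ReteLimitAlgo implementation (the class name carries that spelling in the source tree) keyed by launcher type and ID. The 'fixwin' algorithm named in sample_pipeline_config is a fixed-window counter; the idea in miniature — an illustrative sketch, not the project's implementation:

import time


class FixedWindowLimiter:
    """Sketch of a 'fixwin'-style limiter: at most `limit` hits per window."""

    def __init__(self, window_seconds: int = 60, limit: int = 10):
        self.window = window_seconds
        self.limit = limit
        self.counters: dict[str, tuple[int, int]] = {}  # key -> (window_start, count)

    def require_access(self, key: str) -> bool:
        now = int(time.time())
        start, count = self.counters.get(key, (now, 0))
        if now - start >= self.window:
            start, count = now, 0  # the window expired; start a fresh one
        if count >= self.limit:
            return False  # over quota: the caller should INTERRUPT
        self.counters[key] = (start, count + 1)
        return True

A caller would key this on something like 'person_12345', mirroring the ('person', '12345') pair the stages pass to require_access and release_access above.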
171  tests/unit_tests/pipeline/test_resprule.py  Normal file
@@ -0,0 +1,171 @@
"""
GroupRespondRuleCheckStage unit tests

Tests the actual GroupRespondRuleCheckStage implementation from pkg.pipeline.resprule
"""

import pytest
from unittest.mock import AsyncMock, Mock
from importlib import import_module
import langbot_plugin.api.entities.builtin.provider.session as provider_session
import langbot_plugin.api.entities.builtin.platform.message as platform_message


def get_modules():
    """Lazy import to ensure proper initialization order"""
    # Import pipelinemgr first to trigger proper stage registration
    pipelinemgr = import_module('pkg.pipeline.pipelinemgr')
    resprule = import_module('pkg.pipeline.resprule.resprule')
    entities = import_module('pkg.pipeline.entities')
    rule = import_module('pkg.pipeline.resprule.rule')
    rule_entities = import_module('pkg.pipeline.resprule.entities')
    return resprule, entities, rule, rule_entities


@pytest.mark.asyncio
async def test_person_message_skip(mock_app, sample_query):
    """Test person message skips rule check"""
    resprule, entities, rule, rule_entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.PERSON
    sample_query.pipeline_config = {
        'trigger': {
            'group-respond-rules': {}
        }
    }

    stage = resprule.GroupRespondRuleCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)

    result = await stage.process(sample_query, 'GroupRespondRuleCheckStage')

    assert result.result_type == entities.ResultType.CONTINUE
    assert result.new_query == sample_query


@pytest.mark.asyncio
async def test_group_message_no_match(mock_app, sample_query):
    """Test group message with no matching rules"""
    resprule, entities, rule, rule_entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.GROUP
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {
        'trigger': {
            'group-respond-rules': {}
        }
    }

    # Create mock rule matcher that doesn't match
    mock_rule = Mock(spec=rule.GroupRespondRule)
    mock_rule.match = AsyncMock(return_value=rule_entities.RuleJudgeResult(
        matching=False,
        replacement=sample_query.message_chain
    ))

    stage = resprule.GroupRespondRuleCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)
    stage.rule_matchers = [mock_rule]

    result = await stage.process(sample_query, 'GroupRespondRuleCheckStage')

    assert result.result_type == entities.ResultType.INTERRUPT
    assert result.new_query == sample_query
    mock_rule.match.assert_called_once()


@pytest.mark.asyncio
async def test_group_message_match(mock_app, sample_query):
    """Test group message with matching rule"""
    resprule, entities, rule, rule_entities = get_modules()

    sample_query.launcher_type = provider_session.LauncherTypes.GROUP
    sample_query.launcher_id = '12345'
    sample_query.pipeline_config = {
        'trigger': {
            'group-respond-rules': {}
        }
    }

    # Create new message chain after rule processing
    new_chain = platform_message.MessageChain([
        platform_message.Plain(text='Processed message')
    ])

    # Create mock rule matcher that matches
    mock_rule = Mock(spec=rule.GroupRespondRule)
    mock_rule.match = AsyncMock(return_value=rule_entities.RuleJudgeResult(
        matching=True,
        replacement=new_chain
    ))

    stage = resprule.GroupRespondRuleCheckStage(mock_app)
    await stage.initialize(sample_query.pipeline_config)
    stage.rule_matchers = [mock_rule]

    result = await stage.process(sample_query, 'GroupRespondRuleCheckStage')

    assert result.result_type == entities.ResultType.CONTINUE
    assert result.new_query == sample_query
    assert sample_query.message_chain == new_chain
    mock_rule.match.assert_called_once()


@pytest.mark.asyncio
async def test_atbot_rule_match(mock_app, sample_query):
    """Test AtBotRule removes At component"""
    resprule, entities, rule, rule_entities = get_modules()
    atbot_module = import_module('pkg.pipeline.resprule.rules.atbot')

    sample_query.launcher_type = provider_session.LauncherTypes.GROUP
    sample_query.adapter.bot_account_id = '999'

    # Create message chain with At component
    message_chain = platform_message.MessageChain([
        platform_message.At(target='999'),
        platform_message.Plain(text='Hello bot')
    ])
    sample_query.message_chain = message_chain

    atbot_rule = atbot_module.AtBotRule(mock_app)
    await atbot_rule.initialize()

    result = await atbot_rule.match(
        str(message_chain),
        message_chain,
        {},
        sample_query
    )

    assert result.matching is True
    # At component should be removed
    assert len(result.replacement.root) == 1
    assert isinstance(result.replacement.root[0], platform_message.Plain)


@pytest.mark.asyncio
async def test_atbot_rule_no_match(mock_app, sample_query):
    """Test AtBotRule when no At component present"""
    resprule, entities, rule, rule_entities = get_modules()
    atbot_module = import_module('pkg.pipeline.resprule.rules.atbot')

    sample_query.launcher_type = provider_session.LauncherTypes.GROUP
    sample_query.adapter.bot_account_id = '999'

    # Create message chain without At component
    message_chain = platform_message.MessageChain([
        platform_message.Plain(text='Hello')
    ])
    sample_query.message_chain = message_chain

    atbot_rule = atbot_module.AtBotRule(mock_app)
    await atbot_rule.initialize()

    result = await atbot_rule.match(
        str(message_chain),
        message_chain,
        {},
        sample_query
    )

    assert result.matching is False
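The two AtBotRule cases imply the rule's whole contract: if the chain contains an At component targeting the bot's own account, report a match and return the chain with that component stripped; otherwise report no match. In outline — a sketch against the entities the tests use, not the shipped rule:

import langbot_plugin.api.entities.builtin.platform.message as platform_message


async def match_at_bot(message_chain, bot_account_id: str):
    """Sketch: returns (matching, replacement) in the spirit of RuleJudgeResult."""
    kept = [
        comp for comp in message_chain.root
        if not (isinstance(comp, platform_message.At)
                and str(comp.target) == str(bot_account_id))
    ]
    matching = len(kept) != len(message_chain.root)
    return matching, platform_message.MessageChain(kept)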
40  tests/unit_tests/pipeline/test_simple.py  Normal file
@@ -0,0 +1,40 @@
"""
Simple standalone tests to verify test infrastructure
These tests don't import the actual pipeline code to avoid circular import issues
"""

import pytest
from unittest.mock import Mock, AsyncMock


def test_pytest_works():
    """Verify pytest is working"""
    assert True


@pytest.mark.asyncio
async def test_async_works():
    """Verify async tests work"""
    mock = AsyncMock(return_value=42)
    result = await mock()
    assert result == 42


def test_mocks_work():
    """Verify mocking works"""
    mock = Mock()
    mock.return_value = 'test'
    assert mock() == 'test'


def test_fixtures_work(mock_app):
    """Verify fixtures are loaded"""
    assert mock_app is not None
    assert mock_app.logger is not None
    assert mock_app.sess_mgr is not None


def test_sample_query(sample_query):
    """Verify sample query fixture works"""
    assert sample_query.query_id == 'test-query-id'
    assert sample_query.launcher_id == 12345
@@ -23,6 +23,7 @@
    "@dnd-kit/sortable": "^10.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@hookform/resolvers": "^5.0.1",
    "@radix-ui/react-alert-dialog": "^1.1.15",
    "@radix-ui/react-checkbox": "^1.3.1",
    "@radix-ui/react-context-menu": "^2.2.15",
    "@radix-ui/react-dialog": "^1.1.14",

@@ -115,7 +115,6 @@ export default function BotForm({
  useEffect(() => {
    setBotFormValues();
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  function setBotFormValues() {

@@ -0,0 +1,353 @@
'use client';

import * as React from 'react';
import { useState, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { toast } from 'sonner';
import { Copy, Trash2, Plus } from 'lucide-react';
import {
  Dialog,
  DialogContent,
  DialogHeader,
  DialogTitle,
  DialogFooter,
  DialogDescription,
} from '@/components/ui/dialog';
import { Button } from '@/components/ui/button';
import { Input } from '@/components/ui/input';
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from '@/components/ui/table';
import {
  AlertDialog,
  AlertDialogAction,
  AlertDialogCancel,
  AlertDialogDescription,
  AlertDialogFooter,
  AlertDialogHeader,
  AlertDialogTitle,
  AlertDialogPortal,
  AlertDialogOverlay,
} from '@/components/ui/alert-dialog';
import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog';
import { backendClient } from '@/app/infra/http';

interface ApiKey {
  id: number;
  name: string;
  key: string;
  description: string;
  created_at: string;
}

interface ApiKeyManagementDialogProps {
  open: boolean;
  onOpenChange: (open: boolean) => void;
}

export default function ApiKeyManagementDialog({
  open,
  onOpenChange,
}: ApiKeyManagementDialogProps) {
  const { t } = useTranslation();
  const [apiKeys, setApiKeys] = useState<ApiKey[]>([]);
  const [loading, setLoading] = useState(false);
  const [showCreateDialog, setShowCreateDialog] = useState(false);
  const [newKeyName, setNewKeyName] = useState('');
  const [newKeyDescription, setNewKeyDescription] = useState('');
  const [createdKey, setCreatedKey] = useState<ApiKey | null>(null);
  const [deleteKeyId, setDeleteKeyId] = useState<number | null>(null);

  // Clean up body styles so the page stays interactive after the dialog closes
  useEffect(() => {
    if (!deleteKeyId) {
      const cleanup = () => {
        document.body.style.removeProperty('pointer-events');
      };

      cleanup();
      const timer = setTimeout(cleanup, 100);
      return () => clearTimeout(timer);
    }
  }, [deleteKeyId]);

  useEffect(() => {
    if (open) {
      loadApiKeys();
    }
  }, [open]);

  const loadApiKeys = async () => {
    setLoading(true);
    try {
      const response = (await backendClient.get('/api/v1/apikeys')) as {
        keys: ApiKey[];
      };
      setApiKeys(response.keys || []);
    } catch (error) {
      toast.error(`Failed to load API keys: ${error}`);
    } finally {
      setLoading(false);
    }
  };

  const handleCreateApiKey = async () => {
    if (!newKeyName.trim()) {
      toast.error(t('common.apiKeyNameRequired'));
      return;
    }

    try {
      const response = (await backendClient.post('/api/v1/apikeys', {
        name: newKeyName,
        description: newKeyDescription,
      })) as { key: ApiKey };

      setCreatedKey(response.key);
      toast.success(t('common.apiKeyCreated'));
      setNewKeyName('');
      setNewKeyDescription('');
      setShowCreateDialog(false);
      loadApiKeys();
    } catch (error) {
      toast.error(`Failed to create API key: ${error}`);
    }
  };

  const handleDeleteApiKey = async (keyId: number) => {
    try {
      await backendClient.delete(`/api/v1/apikeys/${keyId}`);
      toast.success(t('common.apiKeyDeleted'));
      loadApiKeys();
      setDeleteKeyId(null);
    } catch (error) {
      toast.error(`Failed to delete API key: ${error}`);
    }
  };

  const handleCopyKey = (key: string) => {
    navigator.clipboard.writeText(key);
    toast.success(t('common.apiKeyCopied'));
  };

  const maskApiKey = (key: string) => {
    if (key.length <= 8) return key;
    return `${key.substring(0, 8)}...${key.substring(key.length - 4)}`;
  };

  return (
    <>
      <Dialog
        open={open}
        onOpenChange={(newOpen) => {
          // Do not allow closing the main dialog while the delete confirmation is open
          if (!newOpen && deleteKeyId) {
            return;
          }
          onOpenChange(newOpen);
        }}
      >
        <DialogContent className="sm:max-w-[700px]">
          <DialogHeader>
            <DialogTitle>{t('common.manageApiKeys')}</DialogTitle>
            <DialogDescription>{t('common.apiKeyHint')}</DialogDescription>
          </DialogHeader>

          <div className="space-y-4">
            <div className="flex justify-end">
              <Button
                onClick={() => setShowCreateDialog(true)}
                size="sm"
                className="gap-2"
              >
                <Plus className="h-4 w-4" />
                {t('common.createApiKey')}
              </Button>
            </div>

            {loading ? (
              <div className="text-center py-8 text-muted-foreground">
                {t('common.loading')}
              </div>
            ) : apiKeys.length === 0 ? (
              <div className="text-center py-8 text-muted-foreground">
                {t('common.noApiKeys')}
              </div>
            ) : (
              <div className="border rounded-md">
                <Table>
                  <TableHeader>
                    <TableRow>
                      <TableHead>{t('common.name')}</TableHead>
                      <TableHead>{t('common.apiKeyValue')}</TableHead>
                      <TableHead className="w-[100px]">
                        {t('common.actions')}
                      </TableHead>
                    </TableRow>
                  </TableHeader>
                  <TableBody>
                    {apiKeys.map((key) => (
                      <TableRow key={key.id}>
                        <TableCell>
                          <div>
                            <div className="font-medium">{key.name}</div>
                            {key.description && (
                              <div className="text-sm text-muted-foreground">
                                {key.description}
                              </div>
                            )}
                          </div>
                        </TableCell>
                        <TableCell>
                          <code className="text-sm bg-muted px-2 py-1 rounded">
                            {maskApiKey(key.key)}
                          </code>
                        </TableCell>
                        <TableCell>
                          <div className="flex gap-2">
                            <Button
                              variant="ghost"
                              size="sm"
                              onClick={() => handleCopyKey(key.key)}
                              title={t('common.copyApiKey')}
                            >
                              <Copy className="h-4 w-4" />
                            </Button>
                            <Button
                              variant="ghost"
                              size="sm"
                              onClick={() => setDeleteKeyId(key.id)}
                              title={t('common.delete')}
                            >
                              <Trash2 className="h-4 w-4" />
                            </Button>
                          </div>
                        </TableCell>
                      </TableRow>
                    ))}
                  </TableBody>
                </Table>
              </div>
            )}
          </div>

          <DialogFooter>
            <Button variant="outline" onClick={() => onOpenChange(false)}>
              {t('common.close')}
            </Button>
          </DialogFooter>
        </DialogContent>
      </Dialog>

      {/* Create API Key Dialog */}
      <Dialog open={showCreateDialog} onOpenChange={setShowCreateDialog}>
        <DialogContent className="sm:max-w-md">
          <DialogHeader>
            <DialogTitle>{t('common.createApiKey')}</DialogTitle>
          </DialogHeader>
          <div className="space-y-4">
            <div>
              <label className="text-sm font-medium">{t('common.name')}</label>
              <Input
                value={newKeyName}
                onChange={(e) => setNewKeyName(e.target.value)}
                placeholder={t('common.name')}
                className="mt-1"
              />
            </div>
            <div>
              <label className="text-sm font-medium">
                {t('common.description')}
              </label>
              <Input
                value={newKeyDescription}
                onChange={(e) => setNewKeyDescription(e.target.value)}
                placeholder={t('common.description')}
                className="mt-1"
              />
            </div>
          </div>
          <DialogFooter>
            <Button
              variant="outline"
              onClick={() => setShowCreateDialog(false)}
            >
              {t('common.cancel')}
            </Button>
            <Button onClick={handleCreateApiKey}>{t('common.create')}</Button>
          </DialogFooter>
        </DialogContent>
      </Dialog>

      {/* Show Created Key Dialog */}
      <Dialog open={!!createdKey} onOpenChange={() => setCreatedKey(null)}>
        <DialogContent className="sm:max-w-md">
          <DialogHeader>
            <DialogTitle>{t('common.apiKeyCreated')}</DialogTitle>
            <DialogDescription>
              {t('common.apiKeyCreatedMessage')}
            </DialogDescription>
          </DialogHeader>
          <div className="space-y-4">
            <div>
              <label className="text-sm font-medium">
                {t('common.apiKeyValue')}
              </label>
              <div className="flex gap-2 mt-1">
                <Input value={createdKey?.key || ''} readOnly />
                <Button
                  onClick={() => createdKey && handleCopyKey(createdKey.key)}
                  variant="outline"
                  size="icon"
                >
                  <Copy className="h-4 w-4" />
                </Button>
              </div>
            </div>
          </div>
          <DialogFooter>
            <Button onClick={() => setCreatedKey(null)}>
              {t('common.close')}
            </Button>
          </DialogFooter>
        </DialogContent>
      </Dialog>

      {/* Delete Confirmation Dialog */}
      <AlertDialog open={!!deleteKeyId}>
        <AlertDialogPortal>
          <AlertDialogOverlay
            className="z-[60]"
            onClick={() => setDeleteKeyId(null)}
          />
          <AlertDialogPrimitive.Content
            className="fixed left-[50%] top-[50%] z-[60] grid w-full max-w-lg translate-x-[-50%] translate-y-[-50%] gap-4 border bg-background p-6 shadow-lg duration-200 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 sm:rounded-lg"
            onEscapeKeyDown={() => setDeleteKeyId(null)}
          >
            <AlertDialogHeader>
              <AlertDialogTitle>{t('common.confirmDelete')}</AlertDialogTitle>
              <AlertDialogDescription>
                {t('common.apiKeyDeleteConfirm')}
              </AlertDialogDescription>
            </AlertDialogHeader>
            <AlertDialogFooter>
              <AlertDialogCancel onClick={() => setDeleteKeyId(null)}>
                {t('common.cancel')}
              </AlertDialogCancel>
              <AlertDialogAction
                onClick={() => deleteKeyId && handleDeleteApiKey(deleteKeyId)}
              >
                {t('common.delete')}
              </AlertDialogAction>
            </AlertDialogFooter>
          </AlertDialogPrimitive.Content>
        </AlertDialogPortal>
      </AlertDialog>
    </>
  );
}
@@ -11,18 +11,23 @@ import {
  FormMessage,
} from '@/components/ui/form';
import DynamicFormItemComponent from '@/app/home/components/dynamic-form/DynamicFormItemComponent';
import { useEffect } from 'react';
import { useEffect, useRef } from 'react';
import { extractI18nObject } from '@/i18n/I18nProvider';

export default function DynamicFormComponent({
  itemConfigList,
  onSubmit,
  initialValues,
  onFileUploaded,
}: {
  itemConfigList: IDynamicFormItemSchema[];
  onSubmit?: (val: object) => unknown;
  initialValues?: Record<string, object>;
  onFileUploaded?: (fileKey: string) => void;
}) {
  const isInitialMount = useRef(true);
  const previousInitialValues = useRef(initialValues);

  // Dynamically generate the zod schema from itemConfigList
  const formSchema = z.object(
    itemConfigList.reduce(
@@ -53,6 +58,9 @@ export default function DynamicFormComponent({
      case 'knowledge-base-selector':
        fieldSchema = z.string();
        break;
      case 'bot-selector':
        fieldSchema = z.string();
        break;
      case 'prompt-editor':
        fieldSchema = z.array(
          z.object({
@@ -97,9 +105,24 @@ export default function DynamicFormComponent({
  });

  // Update the form values when initialValues changes, but avoid re-setting
  // the form when initialValues only changed because an internal form update
  // triggered onSubmit
  useEffect(() => {
    console.log('initialValues', initialValues);
    if (initialValues) {

      // On first mount, initialize the form from initialValues
      if (isInitialMount.current) {
        isInitialMount.current = false;
        previousInitialValues.current = initialValues;
        return;
      }

      // Check whether initialValues actually changed in substance,
      // using JSON.stringify for a deep comparison
      const hasRealChange =
        JSON.stringify(previousInitialValues.current) !==
        JSON.stringify(initialValues);

      if (initialValues && hasRealChange) {
        // Merge default values and initial values
        const mergedValues = itemConfigList.reduce(
          (acc, item) => {
@@ -112,6 +135,8 @@ export default function DynamicFormComponent({
        Object.entries(mergedValues).forEach(([key, value]) => {
          form.setValue(key as keyof FormValues, value);
        });

        previousInitialValues.current = initialValues;
      }
  }, [initialValues, form, itemConfigList]);

@@ -149,7 +174,11 @@ export default function DynamicFormComponent({
            {config.required && <span className="text-red-500">*</span>}
          </FormLabel>
          <FormControl>
            <DynamicFormItemComponent config={config} field={field} />
            <DynamicFormItemComponent
              config={config}
              field={field}
              onFileUploaded={onFileUploaded}
            />
          </FormControl>
          {config.description && (
            <p className="text-sm text-muted-foreground">

@@ -1,6 +1,7 @@
import {
  DynamicFormItemType,
  IDynamicFormItemSchema,
  IFileConfig,
} from '@/app/infra/entities/form/dynamic';
import { Input } from '@/components/ui/input';
import {
@@ -16,7 +17,7 @@ import { ControllerRenderProps } from 'react-hook-form';
import { Button } from '@/components/ui/button';
import { useEffect, useState } from 'react';
import { httpClient } from '@/app/infra/http/HttpClient';
import { LLMModel } from '@/app/infra/entities/api';
import { LLMModel, Bot } from '@/app/infra/entities/api';
import { KnowledgeBase } from '@/app/infra/entities/api';
import { toast } from 'sonner';
import {
@@ -27,19 +28,54 @@ import {
import { useTranslation } from 'react-i18next';
import { extractI18nObject } from '@/i18n/I18nProvider';
import { Textarea } from '@/components/ui/textarea';
import { Card, CardContent } from '@/components/ui/card';

export default function DynamicFormItemComponent({
  config,
  field,
  onFileUploaded,
}: {
  config: IDynamicFormItemSchema;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  field: ControllerRenderProps<any, any>;
  onFileUploaded?: (fileKey: string) => void;
}) {
  const [llmModels, setLlmModels] = useState<LLMModel[]>([]);
  const [knowledgeBases, setKnowledgeBases] = useState<KnowledgeBase[]>([]);
  const [bots, setBots] = useState<Bot[]>([]);
  const [uploading, setUploading] = useState<boolean>(false);
  const { t } = useTranslation();

  const handleFileUpload = async (file: File): Promise<IFileConfig | null> => {
    const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB

    if (file.size > MAX_FILE_SIZE) {
      toast.error(t('plugins.fileUpload.tooLarge'));
      return null;
    }

    try {
      setUploading(true);
      const response = await httpClient.uploadPluginConfigFile(file);
      toast.success(t('plugins.fileUpload.success'));

      // Notify the parent component that the file has been uploaded
      onFileUploaded?.(response.file_key);

      return {
        file_key: response.file_key,
        mimetype: file.type,
      };
    } catch (error) {
      toast.error(
        t('plugins.fileUpload.failed') + ': ' + (error as Error).message,
      );
      return null;
    } finally {
      setUploading(false);
    }
  };

  useEffect(() => {
    if (config.type === DynamicFormItemType.LLM_MODEL_SELECTOR) {
      httpClient
@@ -48,7 +84,7 @@ export default function DynamicFormItemComponent({
          setLlmModels(resp.models);
        })
        .catch((err) => {
          toast.error('获取 LLM 模型列表失败:' + err.message);
          toast.error('Failed to get LLM model list: ' + err.message);
        });
    }
  }, [config.type]);
@@ -61,7 +97,20 @@ export default function DynamicFormItemComponent({
          setKnowledgeBases(resp.bases);
        })
        .catch((err) => {
          toast.error('获取知识库列表失败:' + err.message);
          toast.error('Failed to get knowledge base list: ' + err.message);
        });
    }
  }, [config.type]);

  useEffect(() => {
    if (config.type === DynamicFormItemType.BOT_SELECTOR) {
      httpClient
        .getBots()
        .then((resp) => {
          setBots(resp.bots);
        })
        .catch((err) => {
          toast.error('Failed to get bot list: ' + err.message);
        });
    }
  }, [config.type]);
@@ -80,6 +129,9 @@ export default function DynamicFormItemComponent({
    case DynamicFormItemType.STRING:
      return <Input {...field} />;

    case DynamicFormItemType.TEXT:
      return <Textarea {...field} className="min-h-[120px]" />;

    case DynamicFormItemType.BOOLEAN:
      return <Switch checked={field.value} onCheckedChange={field.onChange} />;

@@ -284,6 +336,24 @@ export default function DynamicFormItemComponent({
        </Select>
      );

    case DynamicFormItemType.BOT_SELECTOR:
      return (
        <Select value={field.value} onValueChange={field.onChange}>
          <SelectTrigger className="bg-[#ffffff] dark:bg-[#2a2a2e]">
            <SelectValue placeholder={t('bots.selectBot')} />
          </SelectTrigger>
          <SelectContent>
            <SelectGroup>
              {bots.map((bot) => (
                <SelectItem key={bot.uuid} value={bot.uuid ?? ''}>
                  {bot.name}
                </SelectItem>
              ))}
            </SelectGroup>
          </SelectContent>
        </Select>
      );

    case DynamicFormItemType.PROMPT_EDITOR:
      return (
        <div className="space-y-2">
@@ -366,6 +436,185 @@ export default function DynamicFormItemComponent({
|
||||
</div>
|
||||
);
|
||||
|
||||
case DynamicFormItemType.FILE:
|
||||
return (
|
||||
<div className="space-y-2">
|
||||
{field.value && (field.value as IFileConfig).file_key ? (
|
||||
<Card className="py-3 max-w-full overflow-hidden bg-gray-900">
|
||||
<CardContent className="flex items-center gap-3 p-0 px-4 min-w-0">
|
||||
<div className="flex-1 min-w-0 overflow-hidden">
|
||||
<div
|
||||
className="text-sm font-medium truncate"
|
||||
title={(field.value as IFileConfig).file_key}
|
||||
>
|
||||
{(field.value as IFileConfig).file_key}
|
||||
</div>
|
||||
<div className="text-xs text-muted-foreground truncate">
|
||||
{(field.value as IFileConfig).mimetype}
|
||||
</div>
|
||||
</div>
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
className="flex-shrink-0 h-8 w-8 p-0"
|
||||
onClick={(e) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
field.onChange(null);
|
||||
}}
|
||||
title={t('common.delete')}
|
||||
>
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
viewBox="0 0 24 24"
|
||||
fill="currentColor"
|
||||
className="w-4 h-4 text-destructive"
|
||||
>
|
||||
<path d="M7 4V2H17V4H22V6H20V21C20 21.5523 19.5523 22 19 22H5C4.44772 22 4 21.5523 4 21V6H2V4H7ZM6 6V20H18V6H6ZM9 9H11V17H9V9ZM13 9H15V17H13V9Z"></path>
|
||||
</svg>
|
||||
</Button>
|
||||
</CardContent>
|
||||
</Card>
|
||||
) : (
|
||||
<div className="relative">
|
||||
<input
|
||||
type="file"
|
||||
accept={config.accept}
|
||||
disabled={uploading}
|
||||
onChange={async (e) => {
|
||||
const file = e.target.files?.[0];
|
||||
if (file) {
|
||||
const fileConfig = await handleFileUpload(file);
|
||||
if (fileConfig) {
|
||||
field.onChange(fileConfig);
|
||||
}
|
||||
}
|
||||
e.target.value = '';
|
||||
}}
|
||||
className="hidden"
|
||||
id={`file-input-${config.name}`}
|
||||
/>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="sm"
|
||||
disabled={uploading}
|
||||
onClick={() =>
|
||||
document.getElementById(`file-input-${config.name}`)?.click()
|
||||
}
|
||||
>
|
||||
<svg
|
||||
className="w-4 h-4 mr-2"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
viewBox="0 0 24 24"
|
||||
fill="currentColor"
|
||||
>
|
||||
<path d="M11 11V5H13V11H19V13H13V19H11V13H5V11H11Z"></path>
|
||||
</svg>
|
||||
{uploading
|
||||
? t('plugins.fileUpload.uploading')
|
||||
: t('plugins.fileUpload.chooseFile')}
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
|
||||
case DynamicFormItemType.FILE_ARRAY:
|
||||
return (
|
||||
<div className="space-y-2">
|
||||
{(field.value as IFileConfig[])?.map(
|
||||
(fileConfig: IFileConfig, index: number) => (
|
||||
<Card
|
||||
key={index}
|
||||
className="py-3 max-w-full overflow-hidden bg-gray-900"
|
||||
>
|
||||
<CardContent className="flex items-center gap-3 p-0 px-4 min-w-0">
|
||||
<div className="flex-1 min-w-0 overflow-hidden">
|
||||
<div
|
||||
className="text-sm font-medium truncate"
|
||||
title={fileConfig.file_key}
|
||||
>
|
||||
{fileConfig.file_key}
|
||||
</div>
|
||||
<div className="text-xs text-muted-foreground truncate">
|
||||
{fileConfig.mimetype}
|
||||
</div>
|
||||
</div>
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
className="flex-shrink-0 h-8 w-8 p-0"
|
||||
onClick={(e) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
const newValue = (field.value as IFileConfig[]).filter(
|
||||
(_: IFileConfig, i: number) => i !== index,
|
||||
);
|
||||
field.onChange(newValue);
|
||||
}}
|
||||
title={t('common.delete')}
|
||||
>
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
viewBox="0 0 24 24"
|
||||
fill="currentColor"
|
||||
className="w-4 h-4 text-destructive"
|
||||
>
|
||||
<path d="M7 4V2H17V4H22V6H20V21C20 21.5523 19.5523 22 19 22H5C4.44772 22 4 21.5523 4 21V6H2V4H7ZM6 6V20H18V6H6ZM9 9H11V17H9V9ZM13 9H15V17H13V9Z"></path>
|
||||
</svg>
|
||||
</Button>
|
||||
</CardContent>
|
||||
</Card>
|
||||
),
|
||||
)}
|
||||
<div className="relative">
|
||||
<input
|
||||
type="file"
|
||||
accept={config.accept}
|
||||
disabled={uploading}
|
||||
onChange={async (e) => {
|
||||
const file = e.target.files?.[0];
|
||||
if (file) {
|
||||
const fileConfig = await handleFileUpload(file);
|
||||
if (fileConfig) {
|
||||
field.onChange([...(field.value || []), fileConfig]);
|
||||
}
|
||||
}
|
||||
e.target.value = '';
|
||||
}}
|
||||
className="hidden"
|
||||
id={`file-array-input-${config.name}`}
|
||||
/>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="sm"
|
||||
disabled={uploading}
|
||||
onClick={() =>
|
||||
document
|
||||
.getElementById(`file-array-input-${config.name}`)
|
||||
?.click()
|
||||
}
|
||||
>
|
||||
<svg
|
||||
className="w-4 h-4 mr-2"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
viewBox="0 0 24 24"
|
||||
fill="currentColor"
|
||||
>
|
||||
<path d="M11 11V5H13V11H19V13H13V19H11V13H5V11H11Z"></path>
|
||||
</svg>
|
||||
{uploading
|
||||
? t('plugins.fileUpload.uploading')
|
||||
: t('plugins.fileUpload.addFile')}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
default:
|
||||
return <Input {...field} />;
|
||||
}
|
||||
|
||||
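Both FILE branches above delegate to handleFileUpload, whose definition is outside this diff. A minimal sketch of what such a helper could look like, assuming a multipart upload endpoint that answers with a { file_key, mimetype } body — the endpoint path, form field name, and response shape are all assumptions, not taken from the source:

// Hypothetical helper; mirrors only the IFileConfig fields this component reads.
interface IFileConfig {
  file_key: string;
  mimetype: string;
}

async function handleFileUpload(file: File): Promise<IFileConfig | null> {
  const body = new FormData();
  body.append('file', file); // assumed field name

  try {
    const res = await fetch('/api/v1/files', { method: 'POST', body }); // assumed endpoint
    if (!res.ok) return null;
    return (await res.json()) as IFileConfig; // assumed response shape
  } catch {
    return null; // callers treat null as "upload failed" and leave the field unchanged
  }
}
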
@@ -25,6 +25,7 @@ import { ToggleGroup, ToggleGroupItem } from '@/components/ui/toggle-group';
import { LanguageSelector } from '@/components/ui/language-selector';
import { Badge } from '@/components/ui/badge';
import PasswordChangeDialog from '@/app/home/components/password-change-dialog/PasswordChangeDialog';
import ApiKeyManagementDialog from '@/app/home/components/api-key-management-dialog/ApiKeyManagementDialog';

// TODO: add animation to the sidebar navigation
export default function HomeSidebar({
@@ -45,6 +46,7 @@ export default function HomeSidebar({
  const { t } = useTranslation();
  const [popoverOpen, setPopoverOpen] = useState(false);
  const [passwordChangeOpen, setPasswordChangeOpen] = useState(false);
  const [apiKeyDialogOpen, setApiKeyDialogOpen] = useState(false);
  const [languageSelectorOpen, setLanguageSelectorOpen] = useState(false);
  const [starCount, setStarCount] = useState<number | null>(null);

@@ -65,7 +67,6 @@ export default function HomeSidebar({
        console.error('Failed to fetch GitHub star count:', error);
      });
-     return () => console.log('sidebar.unmounted');
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  function handleChildClick(child: SidebarChildVO) {
@@ -221,6 +222,23 @@ export default function HomeSidebar({
          name={t('common.helpDocs')}
        />

        <SidebarChild
          onClick={() => {
            setApiKeyDialogOpen(true);
          }}
          isSelected={false}
          icon={
            <svg
              xmlns="http://www.w3.org/2000/svg"
              viewBox="0 0 24 24"
              fill="currentColor"
            >
              <path d="M10.7577 11.8281L18.6066 3.97919L20.0208 5.3934L18.6066 6.80761L21.0815 9.28249L19.6673 10.6967L17.1924 8.22183L15.7782 9.63604L17.8995 11.7574L16.4853 13.1716L14.364 11.0503L12.1719 13.2423C13.4581 15.1837 13.246 17.8251 11.5355 19.5355C9.58291 21.4882 6.41709 21.4882 4.46447 19.5355C2.51184 17.5829 2.51184 14.4171 4.46447 12.4645C6.17493 10.754 8.81633 10.5419 10.7577 11.8281ZM10.1213 18.1213C11.2929 16.9497 11.2929 15.0503 10.1213 13.8787C8.94975 12.7071 7.05025 12.7071 5.87868 13.8787C4.70711 15.0503 4.70711 16.9497 5.87868 18.1213C7.05025 19.2929 8.94975 19.2929 10.1213 18.1213Z"></path>
            </svg>
          }
          name={t('common.apiKeys')}
        />

        <Popover
          open={popoverOpen}
          onOpenChange={(open) => {
@@ -327,6 +345,10 @@ export default function HomeSidebar({
        open={passwordChangeOpen}
        onOpenChange={setPasswordChangeOpen}
      />
      <ApiKeyManagementDialog
        open={apiKeyDialogOpen}
        onOpenChange={setApiKeyDialogOpen}
      />
    </div>
  );
}

@@ -23,6 +23,13 @@ export default function FileUploadZone({
    async (file: File) => {
      if (isUploading) return;

      // Check file size (10MB limit)
      const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
      if (file.size > MAX_FILE_SIZE) {
        toast.error(t('knowledge.documentsTab.fileSizeExceeded'));
        return;
      }

      setIsUploading(true);
      const toastId = toast.loading(t('knowledge.documentsTab.uploadingFile'));

@@ -46,7 +53,7 @@ export default function FileUploadZone({
        setIsUploading(false);
      }
    },
-   [kbId, isUploading, onUploadSuccess, onUploadError],
+   [kbId, isUploading, onUploadSuccess, onUploadError, t],
  );

  const handleDragOver = useCallback((e: React.DragEvent) => {

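The first hunk rejects oversized files before any network work starts; the second adds t to the useCallback dependency list, since the callback now calls t(...) for the size-limit toast and would otherwise capture a stale translator after a language switch. The guard could be factored into a standalone validator; a small sketch (the helper name is illustrative, not from the source):

const MAX_UPLOAD_BYTES = 10 * 1024 * 1024; // 10MB, matching the diff

function isWithinUploadLimit(file: File, maxBytes: number = MAX_UPLOAD_BYTES): boolean {
  return file.size <= maxBytes;
}

// Usage inside the upload callback, mirroring the hunk above:
// if (!isWithinUploadLimit(file)) {
//   toast.error(t('knowledge.documentsTab.fileSizeExceeded'));
//   return;
// }
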
@@ -3,7 +3,7 @@
import styles from './layout.module.css';
import HomeSidebar from '@/app/home/components/home-sidebar/HomeSidebar';
import HomeTitleBar from '@/app/home/components/home-titlebar/HomeTitleBar';
-import React, { useState } from 'react';
+import React, { useState, useCallback, useMemo } from 'react';
import { SidebarChildVO } from '@/app/home/components/home-sidebar/HomeSidebarChild';
import { I18nObject } from '@/app/infra/entities/common';

@@ -18,11 +18,15 @@ export default function HomeLayout({
    en_US: '',
    zh_Hans: '',
  });
- const onSelectedChangeAction = (child: SidebarChildVO) => {
+ const onSelectedChangeAction = useCallback((child: SidebarChildVO) => {
    setTitle(child.name);
    setSubtitle(child.description);
    setHelpLink(child.helpLink);
- };
+ }, []);

  // Memoize the main content area to prevent re-renders when sidebar state changes
  const mainContent = useMemo(() => children, [children]);

  return (
    <div className={styles.homeLayoutContainer}>
@@ -33,7 +37,7 @@ export default function HomeLayout({
      <div className={styles.main}>
        <HomeTitleBar title={title} subtitle={subtitle} helpLink={helpLink} />

-       <main className={styles.mainContent}>{children}</main>
+       <main className={styles.mainContent}>{mainContent}</main>
      </div>
    </div>
  );

@@ -75,7 +75,7 @@ const getFormSchema = (t: (key: string) => string) =>
      .string()
      .min(1, { message: t('models.modelProviderRequired') }),
    url: z.string().min(1, { message: t('models.requestURLRequired') }),
-   api_key: z.string().min(1, { message: t('models.apiKeyRequired') }),
+   api_key: z.string().optional(),
    extra_args: z.array(getExtraArgSchema(t)).optional(),
  });

@@ -101,7 +101,7 @@ export default function EmbeddingForm({
      name: '',
      model_provider: '',
      url: '',
-     api_key: 'sk-xxxxx',
+     api_key: '',
      extra_args: [],
    },
  });
@@ -245,7 +245,7 @@ export default function EmbeddingForm({
        timeout: 120,
      },
      extra_args: extraArgsObj,
-     api_keys: [value.api_key],
+     api_keys: value.api_key ? [value.api_key] : [],
    };

    if (editMode) {
@@ -310,6 +310,7 @@ export default function EmbeddingForm({
        extraArgsObj[arg.key] = arg.value;
      }
    });
    const apiKey = form.getValues('api_key');
    httpClient
      .testEmbeddingModel('_', {
        uuid: '',
@@ -320,7 +321,7 @@ export default function EmbeddingForm({
          base_url: form.getValues('url'),
          timeout: 120,
        },
-       api_keys: [form.getValues('api_key')],
+       api_keys: apiKey ? [apiKey] : [],
        extra_args: extraArgsObj,
      })
      .then((res) => {
@@ -461,10 +462,7 @@ export default function EmbeddingForm({
      name="api_key"
      render={({ field }) => (
        <FormItem>
-         <FormLabel>
-           {t('models.apiKey')}
-           <span className="text-red-500">*</span>
-         </FormLabel>
+         <FormLabel>{t('models.apiKey')}</FormLabel>
          <FormControl>
            <Input {...field} />
          </FormControl>

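EmbeddingForm now treats the API key as optional end to end: the zod rule drops min(1), the 'sk-xxxxx' placeholder default becomes an empty string, the required-field asterisk disappears from the label, and every payload builds api_keys as an empty array when no key is set instead of sending ['']. The same change recurs in LLMForm below. The repeated conditional could live in one helper; a sketch (the helper is illustrative, not in the source):

function toApiKeysArray(apiKey?: string): string[] {
  // '' and undefined both yield [], so the backend never receives [''].
  return apiKey ? [apiKey] : [];
}

console.log(toApiKeysArray('sk-abc123')); // ['sk-abc123']
console.log(toApiKeysArray(''));          // []
console.log(toApiKeysArray(undefined));   // []
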
@@ -76,7 +76,7 @@ const getFormSchema = (t: (key: string) => string) =>
      .string()
      .min(1, { message: t('models.modelProviderRequired') }),
    url: z.string().min(1, { message: t('models.requestURLRequired') }),
-   api_key: z.string().min(1, { message: t('models.apiKeyRequired') }),
+   api_key: z.string().optional(),
    abilities: z.array(z.string()),
    extra_args: z.array(getExtraArgSchema(t)).optional(),
  });
@@ -103,7 +103,7 @@ export default function LLMForm({
      name: '',
      model_provider: '',
      url: '',
-     api_key: 'sk-xxxxx',
+     api_key: '',
      abilities: [],
      extra_args: [],
    },
@@ -261,7 +261,7 @@ export default function LLMForm({
        timeout: 120,
      },
      extra_args: extraArgsObj,
-     api_keys: [value.api_key],
+     api_keys: value.api_key ? [value.api_key] : [],
      abilities: value.abilities,
    };

@@ -324,6 +324,7 @@ export default function LLMForm({
        extraArgsObj[arg.key] = arg.value;
      }
    });
    const apiKey = form.getValues('api_key');
    httpClient
      .testLLMModel('_', {
        uuid: '',
@@ -334,7 +335,7 @@ export default function LLMForm({
          base_url: form.getValues('url'),
          timeout: 120,
        },
-       api_keys: [form.getValues('api_key')],
+       api_keys: apiKey ? [apiKey] : [],
        abilities: form.getValues('abilities'),
        extra_args: extraArgsObj,
      })
@@ -478,10 +479,7 @@ export default function LLMForm({
      name="api_key"
      render={({ field }) => (
        <FormItem>
-         <FormLabel>
-           {t('models.apiKey')}
-           <span className="text-red-500">*</span>
-         </FormLabel>
+         <FormLabel>{t('models.apiKey')}</FormLabel>
          <FormControl>
            <Input {...field} />
          </FormControl>

@@ -18,6 +18,7 @@ import {
} from '@/components/ui/sidebar';
import PipelineFormComponent from './components/pipeline-form/PipelineFormComponent';
import DebugDialog from './components/debug-dialog/DebugDialog';
import PipelineExtension from './components/pipeline-extensions/PipelineExtension';

interface PipelineDialogProps {
  open: boolean;
@@ -31,7 +32,7 @@ interface PipelineDialogProps {
  onCancel: () => void;
}

-type DialogMode = 'config' | 'debug';
+type DialogMode = 'config' | 'debug' | 'extensions';

export default function PipelineDialog({
  open,
@@ -81,6 +82,19 @@ export default function PipelineDialog({
      </svg>
    ),
  },
  {
    key: 'extensions',
    label: t('pipelines.extensions.title'),
    icon: (
      <svg
        xmlns="http://www.w3.org/2000/svg"
        viewBox="0 0 24 24"
        fill="currentColor"
      >
        <path d="M7 5C7 2.79086 8.79086 1 11 1C13.2091 1 15 2.79086 15 5H18C18.5523 5 19 5.44772 19 6V9C21.2091 9 23 10.7909 23 13C23 15.2091 21.2091 17 19 17V20C19 20.5523 18.5523 21 18 21H4C3.44772 21 3 20.5523 3 20V6C3 5.44772 3.44772 5 4 5H7ZM11 3C9.89543 3 9 3.89543 9 5C9 5.23554 9.0403 5.45952 9.11355 5.66675C9.22172 5.97282 9.17461 6.31235 8.98718 6.57739C8.79974 6.84243 8.49532 7 8.17071 7H5V19H17V15.8293C17 15.5047 17.1576 15.2003 17.4226 15.0128C17.6877 14.8254 18.0272 14.7783 18.3332 14.8865C18.5405 14.9597 18.7645 15 19 15C20.1046 15 21 14.1046 21 13C21 11.8954 20.1046 11 19 11C18.7645 11 18.5405 11.0403 18.3332 11.1135C18.0272 11.2217 17.6877 11.1746 17.4226 10.9872C17.1576 10.7997 17 10.4953 17 10.1707V7H13.8293C13.5047 7 13.2003 6.84243 13.0128 6.57739C12.8254 6.31235 12.7783 5.97282 12.8865 5.66675C12.9597 5.45952 13 5.23555 13 5C13 3.89543 12.1046 3 11 3Z"></path>
      </svg>
    ),
  },
  {
    key: 'debug',
    label: t('pipelines.debugChat'),
@@ -102,6 +116,9 @@ export default function PipelineDialog({
      ? t('pipelines.editPipeline')
      : t('pipelines.createPipeline');
  }
  if (currentMode === 'extensions') {
    return t('pipelines.extensions.title');
  }
  return t('pipelines.debugDialog.title');
};

@@ -193,6 +210,11 @@ export default function PipelineDialog({
    }}
  />
)}

{currentMode === 'extensions' && pipelineId && (
  <PipelineExtension pipelineId={pipelineId} />
)}

{currentMode === 'debug' && pipelineId && (
  <DebugDialog
    open={true}

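The dialog gains an 'extensions' mode: the DialogMode union, the tab list, the title logic, and the content switch each get a new branch. The source keeps the title logic as an if-chain; an equivalent switch with a compile-time exhaustiveness guard (illustrative, not from the source) would refuse to build if the union grows again without a matching branch:

type DialogMode = 'config' | 'debug' | 'extensions';

function dialogTitleKey(mode: DialogMode, isEditMode: boolean): string {
  switch (mode) {
    case 'config':
      return isEditMode ? 'pipelines.editPipeline' : 'pipelines.createPipeline';
    case 'extensions':
      return 'pipelines.extensions.title';
    case 'debug':
      return 'pipelines.debugDialog.title';
    default: {
      const unreachable: never = mode; // compile-time exhaustiveness check
      return unreachable;
    }
  }
}
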
@@ -0,0 +1,450 @@
'use client';

import { useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { backendClient } from '@/app/infra/http';
import { Button } from '@/components/ui/button';
import { toast } from 'sonner';
import { Skeleton } from '@/components/ui/skeleton';
import {
  Dialog,
  DialogContent,
  DialogHeader,
  DialogTitle,
  DialogFooter,
} from '@/components/ui/dialog';
import { Checkbox } from '@/components/ui/checkbox';
import { Plus, X, Server, Wrench } from 'lucide-react';
import { Badge } from '@/components/ui/badge';
import { Plugin } from '@/app/infra/entities/plugin';
import { MCPServer } from '@/app/infra/entities/api';
import PluginComponentList from '@/app/home/plugins/components/plugin-installed/PluginComponentList';

export default function PipelineExtension({
  pipelineId,
}: {
  pipelineId: string;
}) {
  const { t } = useTranslation();
  const [loading, setLoading] = useState(true);
  const [selectedPlugins, setSelectedPlugins] = useState<Plugin[]>([]);
  const [allPlugins, setAllPlugins] = useState<Plugin[]>([]);
  const [selectedMCPServers, setSelectedMCPServers] = useState<MCPServer[]>([]);
  const [allMCPServers, setAllMCPServers] = useState<MCPServer[]>([]);
  const [pluginDialogOpen, setPluginDialogOpen] = useState(false);
  const [mcpDialogOpen, setMcpDialogOpen] = useState(false);
  const [tempSelectedPluginIds, setTempSelectedPluginIds] = useState<string[]>(
    [],
  );
  const [tempSelectedMCPIds, setTempSelectedMCPIds] = useState<string[]>([]);

  useEffect(() => {
    loadExtensions();
  }, [pipelineId]);

  const getPluginId = (plugin: Plugin): string => {
    const author = plugin.manifest.manifest.metadata.author;
    const name = plugin.manifest.manifest.metadata.name;
    return `${author}/${name}`;
  };

  const loadExtensions = async () => {
    try {
      setLoading(true);
      const data = await backendClient.getPipelineExtensions(pipelineId);

      const boundPluginIds = new Set(
        data.bound_plugins.map((p) => `${p.author}/${p.name}`),
      );

      const selected = data.available_plugins.filter((plugin) =>
        boundPluginIds.has(getPluginId(plugin)),
      );

      setSelectedPlugins(selected);
      setAllPlugins(data.available_plugins);

      // Load MCP servers
      const boundMCPServerIds = new Set(data.bound_mcp_servers || []);
      const selectedMCP = data.available_mcp_servers.filter((server) =>
        boundMCPServerIds.has(server.uuid || ''),
      );

      setSelectedMCPServers(selectedMCP);
      setAllMCPServers(data.available_mcp_servers);
    } catch (error) {
      console.error('Failed to load extensions:', error);
      toast.error(t('pipelines.extensions.loadError'));
    } finally {
      setLoading(false);
    }
  };

  const saveToBackend = async (plugins: Plugin[], mcpServers: MCPServer[]) => {
    try {
      const boundPluginsArray = plugins.map((plugin) => {
        const metadata = plugin.manifest.manifest.metadata;
        return {
          author: metadata.author || '',
          name: metadata.name,
        };
      });

      const boundMCPServerIds = mcpServers.map((server) => server.uuid || '');

      await backendClient.updatePipelineExtensions(
        pipelineId,
        boundPluginsArray,
        boundMCPServerIds,
      );
      toast.success(t('pipelines.extensions.saveSuccess'));
    } catch (error) {
      console.error('Failed to save extensions:', error);
      toast.error(t('pipelines.extensions.saveError'));
      // Reload on error to restore correct state
      loadExtensions();
    }
  };

  const handleRemovePlugin = async (pluginId: string) => {
    const newPlugins = selectedPlugins.filter(
      (p) => getPluginId(p) !== pluginId,
    );
    setSelectedPlugins(newPlugins);
    await saveToBackend(newPlugins, selectedMCPServers);
  };

  const handleRemoveMCPServer = async (serverUuid: string) => {
    const newServers = selectedMCPServers.filter((s) => s.uuid !== serverUuid);
    setSelectedMCPServers(newServers);
    await saveToBackend(selectedPlugins, newServers);
  };

  const handleOpenPluginDialog = () => {
    setTempSelectedPluginIds(selectedPlugins.map((p) => getPluginId(p)));
    setPluginDialogOpen(true);
  };

  const handleOpenMCPDialog = () => {
    setTempSelectedMCPIds(selectedMCPServers.map((s) => s.uuid || ''));
    setMcpDialogOpen(true);
  };

  const handleTogglePlugin = (pluginId: string) => {
    setTempSelectedPluginIds((prev) =>
      prev.includes(pluginId)
        ? prev.filter((id) => id !== pluginId)
        : [...prev, pluginId],
    );
  };

  const handleToggleMCPServer = (serverUuid: string) => {
    setTempSelectedMCPIds((prev) =>
      prev.includes(serverUuid)
        ? prev.filter((id) => id !== serverUuid)
        : [...prev, serverUuid],
    );
  };

  const handleConfirmPluginSelection = async () => {
    const newSelected = allPlugins.filter((p) =>
      tempSelectedPluginIds.includes(getPluginId(p)),
    );
    setSelectedPlugins(newSelected);
    setPluginDialogOpen(false);
    await saveToBackend(newSelected, selectedMCPServers);
  };

  const handleConfirmMCPSelection = async () => {
    const newSelected = allMCPServers.filter((s) =>
      tempSelectedMCPIds.includes(s.uuid || ''),
    );
    setSelectedMCPServers(newSelected);
    setMcpDialogOpen(false);
    await saveToBackend(selectedPlugins, newSelected);
  };

  if (loading) {
    return (
      <div className="space-y-4">
        <Skeleton className="h-20 w-full" />
        <Skeleton className="h-20 w-full" />
        <Skeleton className="h-20 w-full" />
      </div>
    );
  }

  return (
    <div className="space-y-6">
      {/* Plugins Section */}
      <div className="space-y-3">
        <h3 className="text-sm font-semibold text-foreground">
          {t('pipelines.extensions.pluginsTitle')}
        </h3>
        <div className="space-y-2">
          {selectedPlugins.length === 0 ? (
            <div className="flex h-32 items-center justify-center rounded-lg border-2 border-dashed border-border">
              <p className="text-sm text-muted-foreground">
                {t('pipelines.extensions.noPluginsSelected')}
              </p>
            </div>
          ) : (
            <div className="space-y-2">
              {selectedPlugins.map((plugin) => {
                const pluginId = getPluginId(plugin);
                const metadata = plugin.manifest.manifest.metadata;
                return (
                  <div
                    key={pluginId}
                    className="flex items-center justify-between rounded-lg border p-3 hover:bg-accent"
                  >
                    <div className="flex-1 flex items-center gap-3">
                      <img
                        src={backendClient.getPluginIconURL(
                          metadata.author || '',
                          metadata.name,
                        )}
                        alt={metadata.name}
                        className="w-10 h-10 rounded-lg border bg-muted object-cover flex-shrink-0"
                      />
                      <div className="flex-1">
                        <div className="font-medium">{metadata.name}</div>
                        <div className="text-sm text-muted-foreground">
                          {metadata.author} • v{metadata.version}
                        </div>
                        <div className="flex gap-1 mt-1">
                          <PluginComponentList
                            components={plugin.components}
                            showComponentName={true}
                            showTitle={false}
                            useBadge={true}
                            t={t}
                          />
                        </div>
                      </div>
                      {!plugin.enabled && (
                        <Badge variant="secondary">
                          {t('pipelines.extensions.disabled')}
                        </Badge>
                      )}
                    </div>
                    <Button
                      variant="ghost"
                      size="icon"
                      onClick={() => handleRemovePlugin(pluginId)}
                    >
                      <X className="h-4 w-4" />
                    </Button>
                  </div>
                );
              })}
            </div>
          )}
        </div>

        <Button
          onClick={handleOpenPluginDialog}
          variant="outline"
          className="w-full"
        >
          <Plus className="mr-2 h-4 w-4" />
          {t('pipelines.extensions.addPlugin')}
        </Button>
      </div>

      {/* MCP Servers Section */}
      <div className="space-y-3">
        <h3 className="text-sm font-semibold text-foreground">
          {t('pipelines.extensions.mcpServersTitle')}
        </h3>
        <div className="space-y-2">
          {selectedMCPServers.length === 0 ? (
            <div className="flex h-32 items-center justify-center rounded-lg border-2 border-dashed border-border">
              <p className="text-sm text-muted-foreground">
                {t('pipelines.extensions.noMCPServersSelected')}
              </p>
            </div>
          ) : (
            <div className="space-y-2">
              {selectedMCPServers.map((server) => (
                <div
                  key={server.uuid}
                  className="flex items-center justify-between rounded-lg border p-3 hover:bg-accent"
                >
                  <div className="flex-1 flex items-center gap-3">
                    <div className="w-10 h-10 rounded-lg border bg-muted flex items-center justify-center flex-shrink-0">
                      <Server className="h-5 w-5 text-muted-foreground" />
                    </div>
                    <div className="flex-1">
                      <div className="font-medium">{server.name}</div>
                      <div className="text-sm text-muted-foreground">
                        {server.mode}
                      </div>
                      {server.runtime_info &&
                        server.runtime_info.status === 'connected' && (
                          <Badge
                            variant="outline"
                            className="flex items-center gap-1 mt-1"
                          >
                            <Wrench className="h-3 w-3 text-white" />
                            <span className="text-xs text-white">
                              {t('pipelines.extensions.toolCount', {
                                count: server.runtime_info.tool_count || 0,
                              })}
                            </span>
                          </Badge>
                        )}
                    </div>
                    {!server.enable && (
                      <Badge variant="secondary">
                        {t('pipelines.extensions.disabled')}
                      </Badge>
                    )}
                  </div>
                  <Button
                    variant="ghost"
                    size="icon"
                    onClick={() => handleRemoveMCPServer(server.uuid || '')}
                  >
                    <X className="h-4 w-4" />
                  </Button>
                </div>
              ))}
            </div>
          )}
        </div>

        <Button
          onClick={handleOpenMCPDialog}
          variant="outline"
          className="w-full"
        >
          <Plus className="mr-2 h-4 w-4" />
          {t('pipelines.extensions.addMCPServer')}
        </Button>
      </div>

      {/* Plugin Selection Dialog */}
      <Dialog open={pluginDialogOpen} onOpenChange={setPluginDialogOpen}>
        <DialogContent className="max-w-2xl max-h-[80vh] overflow-hidden flex flex-col">
          <DialogHeader>
            <DialogTitle>{t('pipelines.extensions.selectPlugins')}</DialogTitle>
          </DialogHeader>
          <div className="flex-1 overflow-y-auto space-y-2 pr-2">
            {allPlugins.map((plugin) => {
              const pluginId = getPluginId(plugin);
              const metadata = plugin.manifest.manifest.metadata;
              const isSelected = tempSelectedPluginIds.includes(pluginId);
              return (
                <div
                  key={pluginId}
                  className="flex items-center gap-3 rounded-lg border p-3 hover:bg-accent cursor-pointer"
                  onClick={() => handleTogglePlugin(pluginId)}
                >
                  <Checkbox checked={isSelected} />
                  <img
                    src={backendClient.getPluginIconURL(
                      metadata.author || '',
                      metadata.name,
                    )}
                    alt={metadata.name}
                    className="w-10 h-10 rounded-lg border bg-muted object-cover flex-shrink-0"
                  />
                  <div className="flex-1">
                    <div className="font-medium">{metadata.name}</div>
                    <div className="text-sm text-muted-foreground">
                      {metadata.author} • v{metadata.version}
                    </div>
                    <div className="flex gap-1 mt-1">
                      <PluginComponentList
                        components={plugin.components}
                        showComponentName={true}
                        showTitle={false}
                        useBadge={true}
                        t={t}
                      />
                    </div>
                  </div>
                  {!plugin.enabled && (
                    <Badge variant="secondary">
                      {t('pipelines.extensions.disabled')}
                    </Badge>
                  )}
                </div>
              );
            })}
          </div>
          <DialogFooter>
            <Button
              variant="outline"
              onClick={() => setPluginDialogOpen(false)}
            >
              {t('common.cancel')}
            </Button>
            <Button onClick={handleConfirmPluginSelection}>
              {t('common.confirm')}
            </Button>
          </DialogFooter>
        </DialogContent>
      </Dialog>

      {/* MCP Server Selection Dialog */}
      <Dialog open={mcpDialogOpen} onOpenChange={setMcpDialogOpen}>
        <DialogContent className="max-w-2xl max-h-[80vh] overflow-hidden flex flex-col">
          <DialogHeader>
            <DialogTitle>
              {t('pipelines.extensions.selectMCPServers')}
            </DialogTitle>
          </DialogHeader>
          <div className="flex-1 overflow-y-auto space-y-2 pr-2">
            {allMCPServers.map((server) => {
              const isSelected = tempSelectedMCPIds.includes(server.uuid || '');
              return (
                <div
                  key={server.uuid}
                  className="flex items-center gap-3 rounded-lg border p-3 hover:bg-accent cursor-pointer"
                  onClick={() => handleToggleMCPServer(server.uuid || '')}
                >
                  <Checkbox checked={isSelected} />
                  <div className="w-10 h-10 rounded-lg border bg-muted flex items-center justify-center flex-shrink-0">
                    <Server className="h-5 w-5 text-muted-foreground" />
                  </div>
                  <div className="flex-1">
                    <div className="font-medium">{server.name}</div>
                    <div className="text-sm text-muted-foreground">
                      {server.mode}
                    </div>
                    {server.runtime_info &&
                      server.runtime_info.status === 'connected' && (
                        <div className="flex items-center gap-1 mt-1">
                          <Wrench className="h-3 w-3 text-muted-foreground" />
                          <span className="text-xs text-muted-foreground">
                            {t('pipelines.extensions.toolCount', {
                              count: server.runtime_info.tool_count || 0,
                            })}
                          </span>
                        </div>
                      )}
                  </div>
                  {!server.enable && (
                    <Badge variant="secondary">
                      {t('pipelines.extensions.disabled')}
                    </Badge>
                  )}
                </div>
              );
            })}
          </div>
          <DialogFooter>
            <Button variant="outline" onClick={() => setMcpDialogOpen(false)}>
              {t('common.cancel')}
            </Button>
            <Button onClick={handleConfirmMCPSelection}>
              {t('common.confirm')}
            </Button>
          </DialogFooter>
        </DialogContent>
      </Dialog>
    </div>
  );
}

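PipelineExtension identifies a plugin by its author/name pair (getPluginId) and an MCP server by its uuid; saveToBackend serializes exactly those keys, so the loader's matching logic round-trips. The two payload shapes as pure functions (a sketch with types trimmed to the fields the component actually reads; the full Plugin and MCPServer entities carry more):

interface PluginMetadata {
  author?: string;
  name: string;
}
interface PluginLike {
  manifest: { manifest: { metadata: PluginMetadata } };
}
interface MCPServerLike {
  uuid?: string;
}

// Mirrors the bound_plugins payload built in saveToBackend.
function toBoundPlugins(plugins: PluginLike[]): { author: string; name: string }[] {
  return plugins.map((p) => ({
    author: p.manifest.manifest.metadata.author ?? '',
    name: p.manifest.manifest.metadata.name,
  }));
}

// Mirrors the bound MCP server id list; missing uuids degrade to ''.
function toBoundMCPServerIds(servers: MCPServerLike[]): string[] {
  return servers.map((s) => s.uuid ?? '');
}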