Mirror of https://github.com/langbot-app/LangBot.git (synced 2025-11-26 03:44:58 +08:00)

Compare commits: 168 commits
Commit SHA1 hashes (the author and date columns were not preserved in the mirror):

```
8cd50fbdb4  42421d171e  32215e9a3f  dd1c7ffc39  b59bf62da5  f4c32f7b30
8844a5304d  922ddd47f4  8c8702c6c9  70147fcf5e  b3ee16e876  8d7976190d
3edae3e678  dd2254203c  f8658e2d77  021c3bbb94  0a64a96f65  48576dc46d
12de0343b4  fcd34a9ff3  0dcf904d81  4fe92d8ece  c893ffc177  a076ce5756
af82227dff  8f2b177145  9a997fbcb0  17070471f7  cb48221ed3  68eb0290e0
61bc6a1dc2  4a84bf2355  2c2a89d9db  c91e2f0efe  411d082d2a  d4e08a1765
b529d07479  d44df75e5c  b74e07b608  4a868afecd  1cb9560663  8f878673ae
74a5e37892  76a69ecc7e  f06e3d3efa  973e7bae42  94aa175c1a  777b766fff
1adaa93034  9853eccd89  7699ba3cae  9ac8b1a6fd  f476c4724d  3d12632c9f
350e59fa6b  b3d5b3fc8f  4a02c531b2  2dd2abedde  0d59c04151  08e0ede655
bcf89ca434  5e2f677d0b  4df372052d  2c5a0a00ba  f3295b0fdd  431d515c26
d9e6198992  3951cbf266  c47c4994ae  a6072c2abb  360422f25e  f135c946bd
750cc24900  46062bf4b9  869b2176a7  7138c101e3  04e26225cd  f9f2de570f
1dd598c7be  c0f04e4f20  d3279b9823  2ad1f97e12  1046f3c2aa  1afecf01e4
3ee7736361  0666778fea  8df90558ab  c1c03f11b4  da9afcd0ad  bc1fbfa190
f3199dda20  4d0a28a1a7  76831579ad  c2d752f9e9  4c0917556f  e17b0cf5c5
f2647316a5  78cc157657  f576f990de  254feb6a3a  4c5139e9ff  a055e37d3a
bef5d6627b  69767ebdb4  53ecd0933e  d32f783392  4d3610cdf7  166eebabff
9f2f1cd577  d86b884cab  8345edd9f7  e3821b3f09  72ca62eae4  075091ed06
d0a3dee083  6ba9b6973d  345eccf04c  127a38b15c  760db38c11  e4729337c8
7be226d3fa  68372a4b7a  d65f862c36  5fa75330cf  547e3d098e  0f39a31648
f1ddddfe00  4e61302156  9e3cf418ba  3e29ec7892  f452742cd2  b560432b0b
99e5478ced  09dba91a37  18ec4adac9  8bedaa468a  0ab366fcac  d664039e54
6535ba4f72  3b181cff93  d1274366a0  35a4b0f55f  399ebd36d7  a3552893aa
b6cdf18c1a  bd4c7f634d  160ca540ab  74c3a77ed1  0b527868bc  0f35458cf7
70ad92ca16  c0d56aa905  ed869f7e81  ea42579374  72d701df3e  1191b34fd4
ca3d3b2a66  2891708060  3f59bfac5c  ee24582dd3  0ffb4d5792  5a6206f148
b1014313d6  fcc2f6a195  c8ffc79077  1a13a41168  bf279049c0  05cc58f2d7
```
6  .github/workflows/build-docker-image.yml  (vendored)

```diff
@@ -41,5 +41,9 @@ jobs:
       run: docker login --username=${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_PASSWORD }}
     - name: Create Buildx
       run: docker buildx create --name mybuilder --use
-    - name: Build # image name: rockchin/langbot:<VERSION>
+    - name: Build for Release # release only, exclude pre-release
+      if: ${{ github.event.release.prerelease == false }}
       run: docker buildx build --platform linux/arm64,linux/amd64 -t rockchin/langbot:${{ steps.check_version.outputs.version }} -t rockchin/langbot:latest . --push
+    - name: Build for Pre-release # no update for the latest tag
+      if: ${{ github.event.release.prerelease == true }}
+      run: docker buildx build --platform linux/arm64,linux/amd64 -t rockchin/langbot:${{ steps.check_version.outputs.version }} . --push
```
71  .github/workflows/run-tests.yml  (vendored, new file)

```yaml
name: Unit Tests

on:
  pull_request:
    types: [opened, ready_for_review, synchronize]
    paths:
      - 'pkg/**'
      - 'tests/**'
      - '.github/workflows/run-tests.yml'
      - 'pyproject.toml'
      - 'run_tests.sh'
  push:
    branches:
      - master
      - develop
    paths:
      - 'pkg/**'
      - 'tests/**'
      - '.github/workflows/run-tests.yml'
      - 'pyproject.toml'
      - 'run_tests.sh'

jobs:
  test:
    name: Run Unit Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12']
      fail-fast: false

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install uv
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          echo "$HOME/.cargo/bin" >> $GITHUB_PATH

      - name: Install dependencies
        run: |
          uv sync --dev

      - name: Run unit tests
        run: |
          bash run_tests.sh

      - name: Upload coverage to Codecov
        if: matrix.python-version == '3.12'
        uses: codecov/codecov-action@v5
        with:
          files: ./coverage.xml
          flags: unit-tests
          name: unit-tests-coverage
          fail_ci_if_error: false
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

      - name: Test Summary
        if: always()
        run: |
          echo "## Unit Tests Results" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "Python Version: ${{ matrix.python-version }}" >> $GITHUB_STEP_SUMMARY
          echo "Test Status: ${{ job.status }}" >> $GITHUB_STEP_SUMMARY
```
108  .github/workflows/test-dev-image.yaml  (vendored, new file)

```yaml
name: Test Dev Image

on:
  workflow_run:
    workflows: ["Build Dev Image"]
    types:
      - completed
    branches:
      - master

jobs:
  test-dev-image:
    runs-on: ubuntu-latest
    # Only run if the build workflow succeeded
    if: ${{ github.event.workflow_run.conclusion == 'success' }}

    permissions:
      contents: read

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Update Docker Compose to use master tag
        working-directory: ./docker
        run: |
          # Replace 'latest' with 'master' tag for testing the dev image
          sed -i 's/rockchin\/langbot:latest/rockchin\/langbot:master/g' docker-compose.yaml
          echo "Updated docker-compose.yaml to use master tag:"
          cat docker-compose.yaml

      - name: Start Docker Compose
        working-directory: ./docker
        run: docker compose up -d

      - name: Wait and Test API
        run: |
          # Function to test API endpoint
          test_api() {
            echo "Testing API endpoint..."
            response=$(curl -s --connect-timeout 10 --max-time 30 -w "\n%{http_code}" http://localhost:5300/api/v1/system/info 2>&1)
            curl_exit_code=$?

            if [ $curl_exit_code -ne 0 ]; then
              echo "Curl failed with exit code: $curl_exit_code"
              echo "Error: $response"
              return 1
            fi

            http_code=$(echo "$response" | tail -n 1)
            response_body=$(echo "$response" | head -n -1)

            if [ "$http_code" = "200" ]; then
              echo "API is healthy! Response code: $http_code"
              echo "Response: $response_body"
              return 0
            else
              echo "API returned non-200 response: $http_code"
              echo "Response body: $response_body"
              return 1
            fi
          }

          # Wait 30 seconds before first attempt
          echo "Waiting 30 seconds for services to start..."
          sleep 30

          # Try up to 3 times with 30-second intervals
          max_attempts=3
          attempt=1

          while [ $attempt -le $max_attempts ]; do
            echo "Attempt $attempt of $max_attempts"

            if test_api; then
              echo "Success! API is responding correctly."
              exit 0
            fi

            if [ $attempt -lt $max_attempts ]; then
              echo "Retrying in 30 seconds..."
              sleep 30
            fi

            attempt=$((attempt + 1))
          done

          # All attempts failed
          echo "Failed to get healthy response after $max_attempts attempts"
          exit 1

      - name: Show Container Logs on Failure
        if: failure()
        working-directory: ./docker
        run: |
          echo "=== Docker Compose Status ==="
          docker compose ps
          echo ""
          echo "=== LangBot Logs ==="
          docker compose logs langbot
          echo ""
          echo "=== Plugin Runtime Logs ==="
          docker compose logs langbot_plugin_runtime

      - name: Cleanup
        if: always()
        working-directory: ./docker
        run: docker compose down
```
7  .gitignore  (vendored)

```diff
@@ -22,7 +22,7 @@ tips.py
 venv*
 bin/
 .vscode
-test_*
+/test_*
 venv/
 hugchat.json
 qcapi
@@ -43,4 +43,7 @@ test.py
 /web_ui
 .venv/
 uv.lock
 /test
+plugins.bak
+coverage.xml
+.coverage
```
81  AGENTS.md  (new file)

````markdown
# AGENTS.md

This file guides code agents (such as Claude Code, GitHub Copilot, OpenAI Codex, etc.) working in the LangBot project.

## Project Overview

LangBot is an open-source LLM-native instant messaging bot development platform. It aims to provide an out-of-the-box IM bot development experience, with Agent, RAG, MCP and other LLM application features, support for instant messaging platforms worldwide, and rich API interfaces for custom development.

LangBot has a comprehensive frontend; all operations can be performed through it. The project is split into these major parts:

- `./pkg`: The core Python package of the project backend.
  - `./pkg/platform`: The platform module, containing the logic of message platform adapters, bot managers, message session managers, etc.
  - `./pkg/provider`: The provider module, containing the logic of LLM providers, tool providers, etc.
  - `./pkg/pipeline`: The pipeline module, containing the logic of pipelines, stages, the query pool, etc.
  - `./pkg/api`: The API module, containing the HTTP API controllers and services.
  - `./pkg/plugin`: The LangBot bridge for connecting with the plugin system.
- `./libs`: SDKs we previously developed for the project, such as `qq_official_api`, `wecom_api`, etc.
- `./templates`: Templates of config files, components, etc.
- `./web`: Frontend codebase, built with Next.js + **shadcn** + **Tailwind CSS**.
- `./docker`: docker-compose deployment files.

## Backend Development

We use `uv` to manage dependencies.

```bash
pip install uv
uv sync --dev
```

Start the backend and run the project in development mode:

```bash
uv run main.py
```

Then you can access the project at `http://127.0.0.1:5300`.

## Frontend Development

We use `pnpm` to manage dependencies.

```bash
cd web
cp .env.example .env
pnpm install
pnpm dev
```

Then you can access the project at `http://127.0.0.1:3000`.

## Plugin System Architecture

LangBot is composed of various internal components such as Large Language Model tools, commands, messaging platform adapters, LLM requesters, and more. To meet extensibility and flexibility requirements, we have implemented a production-grade plugin system.

Each plugin runs in an independent process, managed uniformly by the Plugin Runtime. It has two operating modes: `stdio` and `websocket`. When LangBot is started directly by users (not running in a container), it uses `stdio` mode, which is common for personal users or lightweight environments. When LangBot runs in a container, it uses `websocket` mode, designed specifically for production environments.

The Plugin Runtime automatically starts each installed plugin and interacts with it through stdio. In plugin development scenarios, developers can use the `lbp` command-line tool to start plugins and connect to the running Runtime via WebSocket for debugging.

> The Plugin SDK, CLI, Runtime, and entity definitions shared between LangBot and plugins are contained in the [`langbot-plugin-sdk`](https://github.com/langbot-app/langbot-plugin-sdk) repository.

## Some Development Tips and Standards

- LangBot is a global project: any comments in code should be in English, and user experience should be considered in all aspects.
  - Thus you should consider i18n support in all aspects.
- LangBot is widely adopted in both toC and toB scenarios, so you should consider compatibility and security in all aspects.

## Some Principles

- Keep it simple, stupid.
- Entities should not be multiplied unnecessarily.
- 八荣八耻 (eight honors and eight shames):

  Honor careful research; shame blind guessing at interfaces.
  Honor seeking confirmation; shame vague execution.
  Honor human confirmation; shame imagined business logic.
  Honor reusing existing interfaces; shame inventing new ones.
  Honor proactive testing; shame skipped validation.
  Honor following conventions; shame breaking the architecture.
  Honor honest ignorance; shame pretended understanding.
  Honor careful refactoring; shame blind modification.
````
10  README.md

````diff
@@ -35,7 +35,7 @@ LangBot 是一个开源的大语言模型原生即时通信机器人开发平台
 
 ```bash
 git clone https://github.com/langbot-app/LangBot
-cd LangBot
+cd LangBot/docker
 docker compose up -d
 ```
 
@@ -119,10 +119,12 @@
 | [LMStudio](https://lmstudio.ai/) | ✅ | Local LLM runtime platform |
 | [GiteeAI](https://ai.gitee.com/) | ✅ | LLM API aggregation platform |
 | [SiliconFlow](https://siliconflow.cn/) | ✅ | LLM aggregation platform |
+| [小马算力](https://www.tokenpony.cn/453z1) | ✅ | LLM aggregation platform |
 | [阿里云百炼](https://bailian.console.aliyun.com/) | ✅ | LLM aggregation platform, LLMOps platform |
 | [火山方舟](https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW) | ✅ | LLM aggregation platform, LLMOps platform |
 | [ModelScope](https://modelscope.cn/docs/model-service/API-Inference/intro) | ✅ | LLM aggregation platform |
 | [MCP](https://modelcontextprotocol.io/) | ✅ | Supports tool acquisition via the MCP protocol |
+| [百宝箱Tbox](https://www.tbox.cn/open) | ✅ | Ant Group Tbox agent platform, 1 billion free LLM tokens per month |
 
 ### TTS
 
@@ -145,3 +147,9 @@
 <a href="https://github.com/langbot-app/LangBot/graphs/contributors">
   <img src="https://contrib.rocks/image?repo=langbot-app/LangBot" />
 </a>
+
+<!--
+## For Code Agents
+
+If you're a code agent based on LLM, you should read [AGENTS.md](AGENTS.md) to understand the project structure and development process before you start any development.
+-->
````
The same `cd LangBot` to `cd LangBot/docker` change is applied in the other language READMEs:

````diff
@@ -29,7 +29,7 @@ LangBot is an open-source LLM native instant messaging robot development platfor
 
 ```bash
 git clone https://github.com/langbot-app/LangBot
-cd LangBot
+cd LangBot/docker
 docker compose up -d
 ```
 
````

````diff
@@ -29,7 +29,7 @@ LangBot は、エージェント、RAG、MCP などの LLM アプリケーショ
 
 ```bash
 git clone https://github.com/langbot-app/LangBot
-cd LangBot
+cd LangBot/docker
 docker compose up -d
 ```
 
````

````diff
@@ -31,7 +31,7 @@ LangBot 是一個開源的大語言模型原生即時通訊機器人開發平台
 
 ```bash
 git clone https://github.com/langbot-app/LangBot
-cd LangBot
+cd LangBot/docker
 docker compose up -d
 ```
 
````
4  codecov.yml  (new file)

```yaml
coverage:
  status:
    project: off
    patch: off
```
```diff
@@ -16,7 +16,3 @@ spec:
   LLMAPIRequester:
     fromDirs:
       - path: pkg/provider/modelmgr/requesters/
-  Plugin:
-    fromDirs:
-      - path: plugins/
-        maxDepth: 2
```
291  docs/API_KEY_AUTH.md  (new file)

````markdown
# API Key Authentication

LangBot now supports API key authentication for external systems to access its HTTP service API.

## Managing API Keys

API keys can be managed through the web interface:

1. Log in to the LangBot web interface
2. Click the "API Keys" button at the bottom of the sidebar
3. Create, view, copy, or delete API keys as needed

## Using API Keys

### Authentication Headers

Include your API key in the request header using one of these methods:

**Method 1: X-API-Key header (Recommended)**
```
X-API-Key: lbk_your_api_key_here
```

**Method 2: Authorization Bearer token**
```
Authorization: Bearer lbk_your_api_key_here
```

## Available APIs

All existing LangBot APIs now support **both user token and API key authentication**. This means you can use API keys to access:

- **Model Management** - `/api/v1/provider/models/llm` and `/api/v1/provider/models/embedding`
- **Bot Management** - `/api/v1/platform/bots`
- **Pipeline Management** - `/api/v1/pipelines`
- **Knowledge Base** - `/api/v1/knowledge/*`
- **MCP Servers** - `/api/v1/mcp/servers`
- And more...

### Authentication Methods

Each endpoint accepts **either**:
1. **User Token** (via `Authorization: Bearer <user_jwt_token>`) - for the web UI and authenticated users
2. **API Key** (via `X-API-Key` or `Authorization: Bearer <api_key>`) - for external services

## Example: Model Management

### List All LLM Models

```http
GET /api/v1/provider/models/llm
X-API-Key: lbk_your_api_key_here
```

Response:
```json
{
  "code": 0,
  "msg": "ok",
  "data": {
    "models": [
      {
        "uuid": "model-uuid",
        "name": "GPT-4",
        "description": "OpenAI GPT-4 model",
        "requester": "openai-chat-completions",
        "requester_config": {...},
        "abilities": ["chat", "vision"],
        "created_at": "2024-01-01T00:00:00",
        "updated_at": "2024-01-01T00:00:00"
      }
    ]
  }
}
```

### Create a New LLM Model

```http
POST /api/v1/provider/models/llm
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "My Custom Model",
  "description": "Description of the model",
  "requester": "openai-chat-completions",
  "requester_config": {
    "model": "gpt-4",
    "args": {}
  },
  "api_keys": [
    {
      "name": "default",
      "keys": ["sk-..."]
    }
  ],
  "abilities": ["chat"],
  "extra_args": {}
}
```

### Update an LLM Model

```http
PUT /api/v1/provider/models/llm/{model_uuid}
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "Updated Model Name",
  "description": "Updated description",
  ...
}
```

### Delete an LLM Model

```http
DELETE /api/v1/provider/models/llm/{model_uuid}
X-API-Key: lbk_your_api_key_here
```

## Example: Bot Management

### List All Bots

```http
GET /api/v1/platform/bots
X-API-Key: lbk_your_api_key_here
```

### Create a New Bot

```http
POST /api/v1/platform/bots
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "My Bot",
  "adapter": "telegram",
  "config": {...}
}
```

## Example: Pipeline Management

### List All Pipelines

```http
GET /api/v1/pipelines
X-API-Key: lbk_your_api_key_here
```

### Create a New Pipeline

```http
POST /api/v1/pipelines
X-API-Key: lbk_your_api_key_here
Content-Type: application/json

{
  "name": "My Pipeline",
  "config": {...}
}
```

## Error Responses

### 401 Unauthorized

```json
{
  "code": -1,
  "msg": "No valid authentication provided (user token or API key required)"
}
```

or

```json
{
  "code": -1,
  "msg": "Invalid API key"
}
```

### 404 Not Found

```json
{
  "code": -1,
  "msg": "Resource not found"
}
```

### 500 Internal Server Error

```json
{
  "code": -2,
  "msg": "Error message details"
}
```

## Security Best Practices

1. **Keep API keys secure**: Store them securely and never commit them to version control
2. **Use HTTPS**: Always use HTTPS in production to encrypt API key transmission
3. **Rotate keys regularly**: Create new API keys periodically and delete old ones
4. **Use descriptive names**: Give your API keys meaningful names to track their usage
5. **Delete unused keys**: Remove API keys that are no longer needed
6. **Use the X-API-Key header**: Prefer the `X-API-Key` header for clarity

## Example: Python Client

```python
import requests

API_KEY = "lbk_your_api_key_here"
BASE_URL = "http://your-langbot-server:5300"

headers = {
    "X-API-Key": API_KEY,
    "Content-Type": "application/json"
}

# List all models
response = requests.get(f"{BASE_URL}/api/v1/provider/models/llm", headers=headers)
models = response.json()["data"]["models"]

print(f"Found {len(models)} models")
for model in models:
    print(f"- {model['name']}: {model['description']}")

# Create a new bot
bot_data = {
    "name": "My Telegram Bot",
    "adapter": "telegram",
    "config": {
        "token": "your-telegram-token"
    }
}

response = requests.post(
    f"{BASE_URL}/api/v1/platform/bots",
    headers=headers,
    json=bot_data
)

if response.status_code == 200:
    bot_uuid = response.json()["data"]["uuid"]
    print(f"Bot created with UUID: {bot_uuid}")
```

## Example: cURL

```bash
# List all models
curl -X GET \
  -H "X-API-Key: lbk_your_api_key_here" \
  http://your-langbot-server:5300/api/v1/provider/models/llm

# Create a new pipeline
curl -X POST \
  -H "X-API-Key: lbk_your_api_key_here" \
  -H "Content-Type: application/json" \
  -d '{
    "name": "My Pipeline",
    "config": {...}
  }' \
  http://your-langbot-server:5300/api/v1/pipelines

# Get bot logs
curl -X POST \
  -H "X-API-Key: lbk_your_api_key_here" \
  -H "Content-Type: application/json" \
  -d '{
    "from_index": -1,
    "max_count": 10
  }' \
  http://your-langbot-server:5300/api/v1/platform/bots/{bot_uuid}/logs
```

## Notes

- The same endpoints work for both the web UI (with user tokens) and external services (with API keys)
- No need to learn different API paths - use the existing API documentation with API key authentication
- All endpoints that previously required user authentication now also accept API keys
````
180  docs/TESTING_SUMMARY.md  (new file)

````markdown
# Pipeline Unit Tests - Implementation Summary

## Overview

A comprehensive unit test suite for LangBot's pipeline stages, providing extensible test infrastructure and automated CI/CD integration.

## What Was Implemented

### 1. Test Infrastructure (`tests/pipeline/conftest.py`)
- **MockApplication factory**: Provides a complete mock of the Application object with all dependencies
- **Reusable fixtures**: Mock objects for Session, Conversation, Model, Adapter, Query
- **Helper functions**: Utilities for creating results and assertions
- **Lazy import support**: Handles circular import issues via `importlib.import_module()`

### 2. Test Coverage

#### Pipeline Stages Tested:
- ✅ **test_bansess.py** (6 tests) - Access control whitelist/blacklist logic
- ✅ **test_ratelimit.py** (3 tests) - Rate limiting acquire/release logic
- ✅ **test_preproc.py** (3 tests) - Message preprocessing and variable setup
- ✅ **test_respback.py** (2 tests) - Response sending with/without quotes
- ✅ **test_resprule.py** (3 tests) - Group message rule matching
- ✅ **test_pipelinemgr.py** (5 tests) - Pipeline manager CRUD operations

#### Additional Tests:
- ✅ **test_simple.py** (5 tests) - Test infrastructure validation
- ✅ **test_stages_integration.py** - Integration tests with full imports

**Total: 27 test cases**

### 3. CI/CD Integration

**GitHub Actions Workflow** (`.github/workflows/pipeline-tests.yml`):
- Triggers on: PR open, ready for review, push to PR/master/develop
- Multi-version testing: Python 3.10, 3.11, 3.12
- Coverage reporting: integrated with Codecov
- Auto-runs via the `run_tests.sh` script

### 4. Configuration Files

- **pytest.ini** - Pytest configuration with asyncio support
- **run_tests.sh** - Automated test runner with coverage
- **tests/README.md** - Comprehensive testing documentation

## Technical Challenges & Solutions

### Challenge 1: Circular Import Dependencies

**Problem**: Direct imports of pipeline modules caused circular dependency errors:
```
pkg.pipeline.stage → pkg.core.app → pkg.pipeline.pipelinemgr → pkg.pipeline.resprule
```

**Solution**: Implemented lazy imports using `importlib.import_module()`:
```python
def get_bansess_module():
    return import_module('pkg.pipeline.bansess.bansess')

# Use in tests
bansess = get_bansess_module()
stage = bansess.BanSessionCheckStage(mock_app)
```

### Challenge 2: Pydantic Validation Errors

**Problem**: Some stages use Pydantic models that validate the `new_query` parameter.

**Solution**: Tests use lazy imports to load the actual modules, which handle validation correctly. Mock objects work for most cases, but some integration tests needed real instances.

### Challenge 3: Mock Configuration

**Problem**: Lists don't allow `.copy` attribute assignment in Python.

**Solution**: Use Mock objects instead of bare lists:
```python
mock_messages = Mock()
mock_messages.copy = Mock(return_value=[])
conversation.messages = mock_messages
```

## Test Execution

### Current Status

Running `bash run_tests.sh` shows:
- ✅ 9 tests passing (infrastructure and integration)
- ⚠️ 18 tests with issues (due to circular imports and Pydantic validation)

### Working Tests
- All `test_simple.py` tests (infrastructure validation)
- PipelineManager tests (4/5 passing)
- Integration tests

### Known Issues

Some tests encounter:
1. **Circular import errors** - when importing certain stage modules
2. **Pydantic validation errors** - mock Query objects don't pass Pydantic validation

### Recommended Usage

For CI/CD purposes:
1. Run `test_simple.py` to validate the test infrastructure
2. Run `test_pipelinemgr.py` for manager logic
3. Use integration tests sparingly due to import issues

For local development:
1. Use the test infrastructure as a template
2. Add new tests following the lazy import pattern
3. Prefer integration-style tests that test behavior, not imports

## Future Improvements

### Short Term
1. **Refactor the pipeline module structure** to eliminate circular dependencies
2. **Add Pydantic model factories** for creating valid test instances
3. **Expand integration tests** once import issues are resolved

### Long Term
1. **Integration tests** - full pipeline execution tests
2. **Performance benchmarks** - measure stage execution time
3. **Mutation testing** - verify test quality with mutation testing
4. **Property-based testing** - use Hypothesis for edge case discovery

## File Structure

```
.
├── .github/workflows/
│   └── pipeline-tests.yml        # CI/CD workflow
├── tests/
│   ├── README.md                 # Testing documentation
│   ├── __init__.py
│   └── pipeline/
│       ├── __init__.py
│       ├── conftest.py           # Shared fixtures
│       ├── test_simple.py        # Infrastructure tests ✅
│       ├── test_bansess.py       # BanSession tests
│       ├── test_ratelimit.py     # RateLimit tests
│       ├── test_preproc.py       # PreProcessor tests
│       ├── test_respback.py      # ResponseBack tests
│       ├── test_resprule.py      # ResponseRule tests
│       ├── test_pipelinemgr.py   # Manager tests ✅
│       └── test_stages_integration.py  # Integration tests
├── pytest.ini                    # Pytest config
├── run_tests.sh                  # Test runner
└── TESTING_SUMMARY.md            # This file
```

## How to Use

### Run Tests Locally
```bash
bash run_tests.sh
```

### Run a Specific Test File
```bash
pytest tests/pipeline/test_simple.py -v
```

### Run with Coverage
```bash
pytest tests/pipeline/ --cov=pkg/pipeline --cov-report=html
```

### View the Coverage Report
```bash
open htmlcov/index.html
```

## Conclusion

This test suite provides:
- ✅ A solid foundation for pipeline testing
- ✅ An extensible architecture for adding new tests
- ✅ CI/CD integration
- ✅ Comprehensive documentation

Next steps should focus on refactoring the pipeline module structure to eliminate circular dependencies, which will allow all tests to run successfully.
````
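As a concrete template, a minimal new test following the lazy-import pattern could look like the sketch below; the `mock_app` fixture name is an assumption taken from the conftest description above, not a verified API:

```python
# tests/pipeline/test_example.py - hypothetical template following the
# lazy-import pattern; the mock_app fixture is assumed to come from conftest.py.
from importlib import import_module


def get_bansess_module():
    # Lazy import avoids the circular dependency chain shown above.
    return import_module('pkg.pipeline.bansess.bansess')


def test_stage_instantiates(mock_app):
    bansess = get_bansess_module()
    stage = bansess.BanSessionCheckStage(mock_app)
    assert stage is not None
```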
1944  docs/service-api-openapi.json  (new file): diff suppressed because it is too large.
0  libs/coze_server_api/__init__.py  (new file, empty)
192  libs/coze_server_api/client.py  (new file)

```python
import asyncio
import io
import json
import os
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, List

import aiohttp


class AsyncCozeAPIClient:
    def __init__(self, api_key: str, api_base: str = 'https://api.coze.cn'):
        self.api_key = api_key
        self.api_base = api_base
        self.session = None

    async def __aenter__(self):
        """Support use as an async context manager."""
        await self.coze_session()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Automatically close the session on exit."""
        await self.close()

    async def coze_session(self):
        """Ensure an HTTP session exists."""
        if self.session is None:
            connector = aiohttp.TCPConnector(
                ssl=False if self.api_base.startswith('http://') else True,
                limit=100,
                limit_per_host=30,
                keepalive_timeout=30,
                enable_cleanup_closed=True,
            )
            timeout = aiohttp.ClientTimeout(
                total=120,  # default timeout
                connect=30,
                sock_read=120,
            )
            headers = {
                'Authorization': f'Bearer {self.api_key}',
                'Accept': 'text/event-stream',
            }
            self.session = aiohttp.ClientSession(headers=headers, timeout=timeout, connector=connector)
        return self.session

    async def close(self):
        """Explicitly close the session."""
        if self.session and not self.session.closed:
            await self.session.close()
            self.session = None

    async def upload(
        self,
        file,
    ) -> str:
        # Handle Path objects
        if isinstance(file, Path):
            if not file.exists():
                raise ValueError(f'File not found: {file}')
            with open(file, 'rb') as f:
                file = f.read()

        # Handle file path strings
        elif isinstance(file, str):
            if not os.path.isfile(file):
                raise ValueError(f'File not found: {file}')
            with open(file, 'rb') as f:
                file = f.read()

        # Handle file-like objects
        elif hasattr(file, 'read'):
            file = file.read()

        session = await self.coze_session()
        url = f'{self.api_base}/v1/files/upload'

        try:
            file_io = io.BytesIO(file)
            async with session.post(
                url,
                data={
                    'file': file_io,
                },
                timeout=aiohttp.ClientTimeout(total=60),
            ) as response:
                if response.status == 401:
                    raise Exception('Coze API authentication failed, please check the API key')

                response_text = await response.text()

                if response.status != 200:
                    raise Exception(f'File upload failed with status {response.status}, response: {response_text}')
                try:
                    result = await response.json()
                except json.JSONDecodeError:
                    raise Exception(f'Failed to parse the file upload response: {response_text}')

                if result.get('code') != 0:
                    raise Exception(f'File upload failed: {result.get("msg", "unknown error")}')

                file_id = result['data']['id']
                return file_id

        except asyncio.TimeoutError:
            raise Exception('File upload timed out')
        except Exception as e:
            raise Exception(f'File upload failed: {str(e)}')

    async def chat_messages(
        self,
        bot_id: str,
        user_id: str,
        additional_messages: List[Dict] | None = None,
        conversation_id: str | None = None,
        auto_save_history: bool = True,
        stream: bool = True,
        timeout: float = 120,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Send chat messages and yield the streaming response.

        Args:
            bot_id: Bot ID.
            user_id: User ID.
            additional_messages: List of additional messages.
            conversation_id: Conversation ID.
            auto_save_history: Whether to save history automatically.
            stream: Whether to stream the response.
            timeout: Timeout in seconds.
        """
        session = await self.coze_session()
        url = f'{self.api_base}/v3/chat'

        payload = {
            'bot_id': bot_id,
            'user_id': user_id,
            'stream': stream,
            'auto_save_history': auto_save_history,
        }

        if additional_messages:
            payload['additional_messages'] = additional_messages

        params = {}
        if conversation_id:
            params['conversation_id'] = conversation_id

        try:
            async with session.post(
                url,
                json=payload,
                params=params,
                timeout=aiohttp.ClientTimeout(total=timeout),
            ) as response:
                if response.status == 401:
                    raise Exception('Coze API authentication failed, please check the API key')

                if response.status != 200:
                    raise Exception(f'Coze API streaming request failed with status {response.status}')

                chunk_type = ''
                chunk_data = ''
                async for chunk in response.content:
                    chunk = chunk.decode('utf-8')
                    if chunk != '\n':
                        if chunk.startswith('event:'):
                            chunk_type = chunk.replace('event:', '', 1).strip()
                        elif chunk.startswith('data:'):
                            chunk_data = chunk.replace('data:', '', 1).strip()
                    else:
                        # A blank line terminates one SSE event; on locally
                        # deployed instances the data field may be empty.
                        yield {'event': chunk_type, 'data': json.loads(chunk_data) if chunk_data else {}}

        except asyncio.TimeoutError:
            raise Exception(f'Coze API streaming request timed out ({timeout}s)')
        except Exception as e:
            raise Exception(f'Coze API streaming request failed: {str(e)}')
```
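A minimal usage sketch for the client above. The API key, bot ID, and user ID are placeholders, and the `additional_messages` item shape (role/content/content_type) is an assumption about the Coze v3 chat payload rather than something defined in this file:

```python
import asyncio

from libs.coze_server_api.client import AsyncCozeAPIClient


async def main():
    # Placeholder credentials and IDs; replace with real values.
    async with AsyncCozeAPIClient(api_key='your-coze-api-key') as client:
        async for event in client.chat_messages(
            bot_id='your-bot-id',
            user_id='user-123',
            additional_messages=[{'role': 'user', 'content': 'Hello', 'content_type': 'text'}],
        ):
            # Each item is {'event': <sse event name>, 'data': <parsed JSON>}.
            print(event['event'], event['data'])


asyncio.run(main())
```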
```diff
@@ -5,6 +5,8 @@ import typing
 import json
 
 from .errors import DifyAPIError
+from pathlib import Path
+import os
 
 
 class AsyncDifyServiceClient:
@@ -109,7 +111,23 @@ class AsyncDifyServiceClient:
         user: str,
         timeout: float = 30.0,
     ) -> str:
-        """Upload a file."""
+        # Handle Path objects
+        if isinstance(file, Path):
+            if not file.exists():
+                raise ValueError(f'File not found: {file}')
+            with open(file, 'rb') as f:
+                file = f.read()
+
+        # Handle file path strings
+        elif isinstance(file, str):
+            if not os.path.isfile(file):
+                raise ValueError(f'File not found: {file}')
+            with open(file, 'rb') as f:
+                file = f.read()
+
+        # Handle file-like objects
+        elif hasattr(file, 'read'):
+            file = file.read()
         async with httpx.AsyncClient(
             base_url=self.base_url,
             trust_env=True,
@@ -121,6 +139,8 @@ class AsyncDifyServiceClient:
                 headers={'Authorization': f'Bearer {self.api_key}'},
                 files={
                     'file': file,
+                },
+                data={
                     'user': (None, user),
                 },
             )
```
```diff
@@ -110,6 +110,24 @@ class DingTalkClient:
         else:
             raise Exception(f'Error: {response.status_code}, {response.text}')
 
+    async def get_file_url(self, download_code: str):
+        if not await self.check_access_token():
+            await self.get_access_token()
+        url = 'https://api.dingtalk.com/v1.0/robot/messageFiles/download'
+        params = {'downloadCode': download_code, 'robotCode': self.robot_code}
+        headers = {'x-acs-dingtalk-access-token': self.access_token}
+        async with httpx.AsyncClient() as client:
+            response = await client.post(url, headers=headers, json=params)
+            if response.status_code == 200:
+                result = response.json()
+                download_url = result.get('downloadUrl')
+                if download_url:
+                    return download_url
+                else:
+                    await self.logger.error(f'failed to get file: {response.json()}')
+            else:
+                raise Exception(f'Error: {response.status_code}, {response.text}')
+
     async def update_incoming_message(self, message):
         """Asynchronously update the incoming_message held by DingTalkClient."""
         message_data = await self.get_message(message)
@@ -170,12 +188,80 @@ class DingTalkClient:
 
         if incoming_message.message_type == 'richText':
             data = incoming_message.rich_text_content.to_dict()
+
+            # Use a unified structured format that preserves element order
+            rich_content = {
+                'Type': 'richText',
+                'Elements': [],  # all elements, stored in order
+                'SimpleContent': '',  # compatibility field: plain text content
+                'SimplePicture': ''  # compatibility field: first image
+            }
+
+            # First collect all text elements and image placeholders
+            text_elements = []
+            image_placeholders = []
+
+            # Parse the rich text content, preserving the original order
             for item in data['richText']:
-                if 'text' in item:
-                    message_data['Content'] = item['text']
-            if incoming_message.get_image_list()[0]:
-                message_data['Picture'] = await self.download_image(incoming_message.get_image_list()[0])
-            message_data['Type'] = 'text'
+                # Handle text content
+                if 'text' in item and item['text'] != "\n":
+                    element = {
+                        'Type': 'text',
+                        'Content': item['text']
+                    }
+                    rich_content['Elements'].append(element)
+                    text_elements.append(item['text'])
+
+                # Check whether the element is an image; images in DingTalk rich
+                # text usually carry a specific marker, so adjust this to the
+                # structure the DingTalk API actually returns
+                elif item.get("type") == "picture":
+                    # Create an image placeholder
+                    element = {
+                        'Type': 'image_placeholder',
+                    }
+                    rich_content['Elements'].append(element)
+
+            # Fetch and download all images
+            image_list = incoming_message.get_image_list()
+            if image_list:
+                new_elements = []
+                image_index = 0
+
+                for element in rich_content['Elements']:
+                    if element['Type'] == 'image_placeholder':
+                        if image_index < len(image_list) and image_list[image_index]:
+                            image_url = await self.download_image(image_list[image_index])
+                            new_elements.append({
+                                'Type': 'image',
+                                'Picture': image_url
+                            })
+                            image_index += 1
+                        else:
+                            # No matching image: skip the placeholder
+                            continue
+                    else:
+                        new_elements.append(element)
+
+                rich_content['Elements'] = new_elements
+
+            # Fill the compatibility fields
+            all_texts = [elem['Content'] for elem in rich_content['Elements'] if elem.get('Type') == 'text']
+            rich_content['SimpleContent'] = '\n'.join(all_texts) if all_texts else ''
+
+            all_images = [elem['Picture'] for elem in rich_content['Elements'] if elem.get('Type') == 'image']
+            if all_images:
+                rich_content['SimplePicture'] = all_images[0]
+                rich_content['AllImages'] = all_images  # list of all images
+
+            # Keep the original content and picture fields for compatibility
+            message_data['Content'] = rich_content['SimpleContent']
+            message_data['Rich_Content'] = rich_content
+            if all_images:
+                message_data['Picture'] = all_images[0]
+
+
         elif incoming_message.message_type == 'text':
             message_data['Content'] = incoming_message.get_text_list()[0]
@@ -189,6 +275,17 @@ class DingTalkClient:
             message_data['Audio'] = await self.get_audio_url(incoming_message.to_dict()['content']['downloadCode'])
 
             message_data['Type'] = 'audio'
+        elif incoming_message.message_type == 'file':
+            down_list = incoming_message.get_down_list()
+            if len(down_list) >= 2:
+                message_data['File'] = await self.get_file_url(down_list[0])
+                message_data['Name'] = down_list[1]
+            else:
+                if self.logger:
+                    await self.logger.error(f'get_down_list() returned fewer than 2 elements: {down_list}')
+                message_data['File'] = None
+                message_data['Name'] = None
+            message_data['Type'] = 'file'
 
         copy_message_data = message_data.copy()
         del copy_message_data['IncomingMessage']
```
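For orientation, the rich-text branch above produces a `message_data['Rich_Content']` structure of roughly this shape (values are illustrative, not taken from a real message):

```python
# Illustrative only: shape of the Rich_Content structure built above.
rich_content = {
    'Type': 'richText',
    'Elements': [
        {'Type': 'text', 'Content': 'look at this:'},
        {'Type': 'image', 'Picture': 'https://example.com/downloaded-image-1'},
    ],
    'SimpleContent': 'look at this:',  # compatibility field: all text joined with '\n'
    'SimplePicture': 'https://example.com/downloaded-image-1',  # compatibility field: first image
    'AllImages': ['https://example.com/downloaded-image-1'],  # present only when images exist
}
```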
```diff
@@ -15,6 +15,10 @@ class DingTalkEvent(dict):
     def content(self):
         return self.get('Content', '')
 
+    @property
+    def rich_content(self):
+        return self.get('Rich_Content', '')
+
     @property
     def incoming_message(self) -> Optional['dingtalk_stream.chatbot.ChatbotMessage']:
         return self.get('IncomingMessage')
@@ -31,6 +35,15 @@ class DingTalkEvent(dict):
     def audio(self):
         return self.get('Audio', '')
 
+    @property
+    def file(self):
+        return self.get('File', '')
+
+    @property
+    def name(self):
+        return self.get('Name', '')
+
+
     @property
     def conversation(self):
         return self.get('conversation_type', '')
```
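A small consumption sketch for the new properties; the handler is hypothetical, but the keys match what `update_incoming_message` fills in above:

```python
def handle_event(event):
    # `event` is a DingTalkEvent populated by update_incoming_message() above.
    if event.file:
        print('file url:', event.file)   # download URL resolved via get_file_url()
        print('file name:', event.name)
    if event.rich_content:
        for element in event.rich_content.get('Elements', []):
            print(element['Type'], element.get('Content') or element.get('Picture'))
```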
@@ -1,189 +1,452 @@
|
|||||||
|
import asyncio
|
||||||
|
import base64
|
||||||
import json
|
import json
|
||||||
import time
|
import time
|
||||||
|
import traceback
|
||||||
import uuid
|
import uuid
|
||||||
import xml.etree.ElementTree as ET
|
import xml.etree.ElementTree as ET
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import Any, Callable, Optional
|
||||||
from urllib.parse import unquote
|
from urllib.parse import unquote
|
||||||
import hashlib
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
import httpx
|
import httpx
|
||||||
from libs.wecom_ai_bot_api.WXBizMsgCrypt3 import WXBizMsgCrypt
|
|
||||||
from quart import Quart, request, Response, jsonify
|
|
||||||
import langbot_plugin.api.entities.builtin.platform.message as platform_message
|
|
||||||
import asyncio
|
|
||||||
from libs.wecom_ai_bot_api import wecombotevent
|
|
||||||
from typing import Callable
|
|
||||||
import base64
|
|
||||||
from Crypto.Cipher import AES
|
from Crypto.Cipher import AES
|
||||||
|
from quart import Quart, request, Response, jsonify
|
||||||
|
|
||||||
|
from libs.wecom_ai_bot_api import wecombotevent
|
||||||
|
from libs.wecom_ai_bot_api.WXBizMsgCrypt3 import WXBizMsgCrypt
|
||||||
from pkg.platform.logger import EventLogger
|
from pkg.platform.logger import EventLogger
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class StreamChunk:
|
||||||
|
"""描述单次推送给企业微信的流式片段。"""
|
||||||
|
|
||||||
|
# 需要返回给企业微信的文本内容
|
||||||
|
content: str
|
||||||
|
|
||||||
|
# 标记是否为最终片段,对应企业微信协议里的 finish 字段
|
||||||
|
is_final: bool = False
|
||||||
|
|
||||||
|
# 预留额外元信息,未来支持多模态扩展时可使用
|
||||||
|
meta: dict[str, Any] = field(default_factory=dict)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
class StreamSession:
    """Holds the context of a single WeCom streaming session."""

    # stream_id required by WeCom, identifying subsequent refresh requests
    stream_id: str

    # msgid of the original message, used to correlate with pipeline messages
    msg_id: str

    # group chat identifier (empty for direct chats)
    chat_id: Optional[str]

    # sender of the triggering message
    user_id: Optional[str]

    # session creation time
    created_at: float = field(default_factory=time.time)

    # time of the most recent access; cleanup() uses it to judge expiry
    last_access: float = field(default_factory=time.time)

    # incremental pipeline results are buffered in this queue and consumed one per refresh request
    queue: asyncio.Queue = field(default_factory=asyncio.Queue)

    # whether the session has completed (final chunk received)
    finished: bool = False

    # cache of the most recent chunk, used for retries and timeout fallback
    last_chunk: Optional[StreamChunk] = None


class StreamSessionManager:
    """Manages the lifecycle of stream sessions and the produce/consume flow of their queues."""

    def __init__(self, logger: EventLogger, ttl: int = 60) -> None:
        self.logger = logger
        self.ttl = ttl  # timeout in seconds; sessions not accessed within it are removed by cleanup()
        self._sessions: dict[str, StreamSession] = {}  # stream_id -> StreamSession
        self._msg_index: dict[str, str] = {}  # msgid -> stream_id, so the pipeline can find a session by message ID

    def get_stream_id_by_msg(self, msg_id: str) -> Optional[str]:
        if not msg_id:
            return None
        return self._msg_index.get(msg_id)

    def get_session(self, stream_id: str) -> Optional[StreamSession]:
        return self._sessions.get(stream_id)

    def create_or_get(self, msg_json: dict[str, Any]) -> tuple[StreamSession, bool]:
        """Create or fetch a session from a WeCom callback.

        Args:
            msg_json: the decrypted WeCom callback JSON.

        Returns:
            Tuple[StreamSession, bool]: the session instance, and whether it was newly created.

        Example:
            Call this on the first callback; trigger the pipeline only when `is_new=True`.
        """
        msg_id = msg_json.get('msgid', '')
        if msg_id and msg_id in self._msg_index:
            stream_id = self._msg_index[msg_id]
            session = self._sessions.get(stream_id)
            if session:
                session.last_access = time.time()
                return session, False

        stream_id = str(uuid.uuid4())
        session = StreamSession(
            stream_id=stream_id,
            msg_id=msg_id,
            chat_id=msg_json.get('chatid'),
            user_id=msg_json.get('from', {}).get('userid'),
        )

        if msg_id:
            self._msg_index[msg_id] = stream_id
        self._sessions[stream_id] = session
        return session, True

    async def publish(self, stream_id: str, chunk: StreamChunk) -> bool:
        """Append a new incremental chunk to the stream queue.

        Args:
            stream_id: the streaming session ID assigned for WeCom.
            chunk: the incremental chunk to deliver.

        Returns:
            bool: True when the session exists and the chunk was enqueued.

        Example:
            Call `await manager.publish('sid', StreamChunk('hello'))` after receiving a model increment.
        """
        session = self._sessions.get(stream_id)
        if not session:
            return False

        session.last_access = time.time()
        session.last_chunk = chunk

        try:
            session.queue.put_nowait(chunk)
        except asyncio.QueueFull:
            # the queue is unbounded by default; this is a defensive fallback
            await session.queue.put(chunk)

        if chunk.is_final:
            session.finished = True

        return True

    async def consume(self, stream_id: str, timeout: float = 0.5) -> Optional[StreamChunk]:
        """Pop one chunk from the queue, returning None on timeout.

        Args:
            stream_id: the WeCom streaming session ID.
            timeout: maximum time in seconds to wait for a chunk.

        Returns:
            Optional[StreamChunk]: the chunk on success; None when the session is missing or the wait times out.

        Example:
            Call this when a WeCom refresh request arrives; a queued chunk is returned immediately.
        """
        session = self._sessions.get(stream_id)
        if not session:
            return None

        session.last_access = time.time()

        try:
            chunk = await asyncio.wait_for(session.queue.get(), timeout)
            session.last_access = time.time()
            if chunk.is_final:
                session.finished = True
            return chunk
        except asyncio.TimeoutError:
            if session.finished and session.last_chunk:
                return session.last_chunk
            return None

    def mark_finished(self, stream_id: str) -> None:
        session = self._sessions.get(stream_id)
        if session:
            session.finished = True
            session.last_access = time.time()

    def cleanup(self) -> None:
        """Periodically remove expired sessions so the queues and index maps cannot grow without bound."""
        now = time.time()
        expired: list[str] = []
        for stream_id, session in self._sessions.items():
            if now - session.last_access > self.ttl:
                expired.append(stream_id)

        for stream_id in expired:
            session = self._sessions.pop(stream_id, None)
            if not session:
                continue
            msg_id = session.msg_id
            if msg_id and self._msg_index.get(msg_id) == stream_id:
                self._msg_index.pop(msg_id, None)
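A minimal sketch of the intended produce/consume lifecycle (the demo driver and its literals are illustrative; `StreamChunk(content=..., is_final=...)` matches how chunks are constructed later in this diff):

async def demo(manager: StreamSessionManager) -> None:
    callback = {'msgid': 'msg-1', 'from': {'userid': 'u1'}}  # trimmed-down decrypted callback
    session, is_new = manager.create_or_get(callback)        # first callback creates the session
    assert is_new and manager.get_stream_id_by_msg('msg-1') == session.stream_id

    # Producer side: the pipeline publishes increments as the model streams.
    await manager.publish(session.stream_id, StreamChunk(content='Hel', is_final=False))
    await manager.publish(session.stream_id, StreamChunk(content='Hello.', is_final=True))

    # Consumer side: each WeCom refresh request pops at most one chunk.
    while chunk := await manager.consume(session.stream_id, timeout=0.5):
        print(chunk.content, chunk.is_final)
        if chunk.is_final:
            break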
class WecomBotClient:
    def __init__(self, Token: str, EnCodingAESKey: str, Corpid: str, logger: EventLogger):
        """WeCom intelligent bot client.

        Args:
            Token: token used to verify WeCom callbacks.
            EnCodingAESKey: key for encrypting and decrypting WeCom messages.
            Corpid: the enterprise ID.
            logger: event logger.

        Example:
            >>> client = WecomBotClient(Token='token', EnCodingAESKey='aeskey', Corpid='corp', logger=logger)
        """
        self.Token = Token
        self.EnCodingAESKey = EnCodingAESKey
        self.Corpid = Corpid
        self.ReceiveId = ''
        self.app = Quart(__name__)
        self.app.add_url_rule(
            '/callback/command',
            'handle_callback',
            self.handle_callback_request,
            methods=['POST', 'GET'],
        )
        self._message_handlers = {
            'example': [],
        }
        self.logger = logger
        self.generated_content: dict[str, str] = {}
        self.msg_id_map: dict[str, int] = {}
        self.stream_sessions = StreamSessionManager(logger=logger)
        self.stream_poll_timeout = 0.5
    @staticmethod
    def _build_stream_payload(stream_id: str, content: str, finish: bool) -> dict[str, Any]:
        """Assemble the reply payload according to the WeCom protocol.

        Args:
            stream_id: the WeCom session ID.
            content: the text content to push.
            finish: whether this is the final chunk.

        Returns:
            dict[str, Any]: a payload ready to be encrypted and returned.

        Example:
            Builds a structure of the form `{'msgtype': 'stream', 'stream': {'id': 'sid', ...}}`.
        """
        return {
            'msgtype': 'stream',
            'stream': {
                'id': stream_id,
                'finish': finish,
                'content': content,
            },
        }

    async def _encrypt_and_reply(self, payload: dict[str, Any], nonce: str) -> tuple[Response, int]:
        """Encrypt the response payload and wrap it for WeCom.

        Args:
            payload: the response content to encrypt.
            nonce: the nonce from the WeCom callback parameters.

        Returns:
            Tuple[Response, int]: the Quart Response object and status code.

        Example:
            Called in both the first-packet and refresh paths to produce the encrypted response.
        """
        reply_plain_str = json.dumps(payload, ensure_ascii=False)
        reply_timestamp = str(int(time.time()))
        ret, encrypt_text = self.wxcpt.EncryptMsg(reply_plain_str, nonce, reply_timestamp)
        if ret != 0:
            await self.logger.error(f'加密失败: {ret}')
            return jsonify({'error': 'encrypt_failed'}), 500

        root = ET.fromstring(encrypt_text)
        encrypt = root.find('Encrypt').text
        resp = {
            'encrypt': encrypt,
        }
        return jsonify(resp), 200

    async def _dispatch_event(self, event: wecombotevent.WecomBotEvent) -> None:
        """Trigger pipeline processing asynchronously so the first-packet response is not blocked.

        Args:
            event: the internal event object converted from the WeCom message.
        """
        try:
            await self._handle_message(event)
        except Exception:
            await self.logger.error(traceback.format_exc())
            print(traceback.format_exc())
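One detail worth a sketch: WXBizMsgCrypt.EncryptMsg returns an XML envelope, while the smart-bot callback expects a JSON body, so `_encrypt_and_reply` pulls the cipher text out of `<Encrypt>` and re-wraps it (placeholder cipher text below):

import xml.etree.ElementTree as ET

xml_envelope = '<xml><Encrypt><![CDATA[CIPHERTEXT]]></Encrypt></xml>'  # as produced by EncryptMsg
encrypt = ET.fromstring(xml_envelope).find('Encrypt').text
json_body = {'encrypt': encrypt}  # what the HTTP response actually carries
assert json_body == {'encrypt': 'CIPHERTEXT'}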
    async def _handle_post_initial_response(self, msg_json: dict[str, Any], nonce: str) -> tuple[Response, int]:
        """Handle the first message pushed by WeCom: return a stream_id and start the pipeline.

        Args:
            msg_json: the decrypted WeCom message JSON.
            nonce: the nonce from the WeCom callback parameters.

        Returns:
            Tuple[Response, int]: the Quart Response and status code.

        Example:
            Called on the first callback; immediately returns a response carrying the `stream_id`.
        """
        session, is_new = self.stream_sessions.create_or_get(msg_json)

        message_data = await self.get_message(msg_json)
        if message_data:
            message_data['stream_id'] = session.stream_id
            try:
                event = wecombotevent.WecomBotEvent(message_data)
            except Exception:
                await self.logger.error(traceback.format_exc())
            else:
                if is_new:
                    asyncio.create_task(self._dispatch_event(event))

        payload = self._build_stream_payload(session.stream_id, '', False)
        return await self._encrypt_and_reply(payload, nonce)

    async def _handle_post_followup_response(self, msg_json: dict[str, Any], nonce: str) -> tuple[Response, int]:
        """Handle WeCom's streaming refresh requests, returning incremental chunks on demand.

        Args:
            msg_json: the decrypted WeCom refresh request.
            nonce: the nonce from the WeCom callback parameters.

        Returns:
            Tuple[Response, int]: the Quart Response and status code.

        Example:
            Called on each refresh request; returns the next incremental chunk when available.
        """
        stream_info = msg_json.get('stream', {})
        stream_id = stream_info.get('id', '')
        if not stream_id:
            await self.logger.error('刷新请求缺少 stream.id')
            return await self._encrypt_and_reply(self._build_stream_payload('', '', True), nonce)

        session = self.stream_sessions.get_session(stream_id)
        chunk = await self.stream_sessions.consume(stream_id, timeout=self.stream_poll_timeout)

        if not chunk:
            cached_content = None
            if session and session.msg_id:
                cached_content = self.generated_content.pop(session.msg_id, None)
            if cached_content is not None:
                chunk = StreamChunk(content=cached_content, is_final=True)
            else:
                payload = self._build_stream_payload(stream_id, '', False)
                return await self._encrypt_and_reply(payload, nonce)

        payload = self._build_stream_payload(stream_id, chunk.content, chunk.is_final)
        if chunk.is_final:
            self.stream_sessions.mark_finished(stream_id)
        return await self._encrypt_and_reply(payload, nonce)

    async def handle_callback_request(self):
        """Entry point for WeCom callbacks.

        Returns:
            Quart Response: the verification, first-packet, or refresh result depending on the request type.

        Example:
            Registered directly as the Quart route handler.
        """
        try:
            self.wxcpt = WXBizMsgCrypt(self.Token, self.EnCodingAESKey, '')
            await self.logger.info(f'{request.method} {request.url} {str(request.args)}')

            if request.method == 'GET':
                return await self._handle_get_callback()

            if request.method == 'POST':
                return await self._handle_post_callback()

            return Response('', status=405)

        except Exception:
            await self.logger.error(traceback.format_exc())
            return Response('Internal Server Error', status=500)

    async def _handle_get_callback(self) -> tuple[Response, int] | Response:
        """Handle WeCom's GET verification request."""
        msg_signature = unquote(request.args.get('msg_signature', ''))
        timestamp = unquote(request.args.get('timestamp', ''))
        nonce = unquote(request.args.get('nonce', ''))
        echostr = unquote(request.args.get('echostr', ''))

        if not all([msg_signature, timestamp, nonce, echostr]):
            await self.logger.error('请求参数缺失')
            return Response('缺少参数', status=400)

        ret, decrypted_str = self.wxcpt.VerifyURL(msg_signature, timestamp, nonce, echostr)
        if ret != 0:
            await self.logger.error('验证URL失败')
            return Response('验证失败', status=403)

        return Response(decrypted_str, mimetype='text/plain')

    async def _handle_post_callback(self) -> tuple[Response, int] | Response:
        """Handle WeCom's POST callback requests."""
        self.stream_sessions.cleanup()

        msg_signature = unquote(request.args.get('msg_signature', ''))
        timestamp = unquote(request.args.get('timestamp', ''))
        nonce = unquote(request.args.get('nonce', ''))

        encrypted_json = await request.get_json()
        encrypted_msg = (encrypted_json or {}).get('encrypt', '')
        if not encrypted_msg:
            await self.logger.error("请求体中缺少 'encrypt' 字段")
            return Response('Bad Request', status=400)

        xml_post_data = f"<xml><Encrypt><![CDATA[{encrypted_msg}]]></Encrypt></xml>"
        ret, decrypted_xml = self.wxcpt.DecryptMsg(xml_post_data, msg_signature, timestamp, nonce)
        if ret != 0:
            await self.logger.error('解密失败')
            return Response('解密失败', status=400)

        msg_json = json.loads(decrypted_xml)

        if msg_json.get('msgtype') == 'stream':
            return await self._handle_post_followup_response(msg_json, nonce)

        return await self._handle_post_initial_response(msg_json, nonce)
    async def get_message(self, msg_json):
        message_data = {}

        if msg_json.get('chattype', '') == 'single':
            message_data['type'] = 'single'
        elif msg_json.get('chattype', '') == 'group':
            message_data['type'] = 'group'

        if msg_json.get('msgtype') == 'text':
            message_data['content'] = msg_json.get('text', {}).get('content')
        elif msg_json.get('msgtype') == 'image':
            picurl = msg_json.get('image', {}).get('url', '')
            base64 = await self.download_url_to_base64(picurl, self.EnCodingAESKey)
            message_data['picurl'] = base64
        elif msg_json.get('msgtype') == 'mixed':
            items = msg_json.get('mixed', {}).get('msg_item', [])
            texts = []
@@ -197,17 +460,27 @@ class WecomBotClient:
            if texts:
                message_data['content'] = "".join(texts)  # concatenate all text items
            if picurl:
                base64 = await self.download_url_to_base64(picurl, self.EnCodingAESKey)
                message_data['picurl'] = base64  # keep only the first image

        # Extract user information
        from_info = msg_json.get('from', {})
        message_data['userid'] = from_info.get('userid', '')
        message_data['username'] = from_info.get('alias', '') or from_info.get('name', '') or from_info.get('userid', '')

        # Extract chat/group information
        if msg_json.get('chattype', '') == 'group':
            message_data['chatid'] = msg_json.get('chatid', '')
            # Try to get group name if available
            message_data['chatname'] = msg_json.get('chatname', '') or msg_json.get('chatid', '')

        message_data['msgid'] = msg_json.get('msgid', '')

        if msg_json.get('aibotid'):
            message_data['aibotid'] = msg_json.get('aibotid', '')

        return message_data
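For orientation, a sketch of what `get_message` produces for a plain group text callback (field values are illustrative):

msg_json = {
    'chattype': 'group',
    'msgtype': 'text',
    'text': {'content': 'hello bot'},
    'from': {'userid': 'zhangsan', 'alias': 'San'},
    'chatid': 'wr1234',
    'msgid': 'MSGID-1',
    'aibotid': 'bot-1',
}
# await client.get_message(msg_json) then yields:
# {
#     'type': 'group',
#     'content': 'hello bot',
#     'userid': 'zhangsan',
#     'username': 'San',
#     'chatid': 'wr1234',
#     'chatname': 'wr1234',   # falls back to chatid when no chatname is present
#     'msgid': 'MSGID-1',
#     'aibotid': 'bot-1',
# }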
    async def _handle_message(self, event: wecombotevent.WecomBotEvent):
        """
        Handle a message event.
@@ -223,10 +496,46 @@ class WecomBotClient:
            for handler in self._message_handlers[msg_type]:
                await handler(event)
        except Exception:
            print(traceback.format_exc())
    async def push_stream_chunk(self, msg_id: str, content: str, is_final: bool = False) -> bool:
        """Push a pipeline chunk into the stream session.

        Args:
            msg_id: the original WeCom message ID.
            content: the chunk content produced by the model.
            is_final: whether this is the final chunk.

        Returns:
            bool: True when the chunk was written to the stream queue.

        Example:
            Called from the pipeline's `reply_message_chunk` to push increments to WeCom.
        """
        # Look up the stream session by msg_id; if none exists, this message is not streaming
        stream_id = self.stream_sessions.get_stream_id_by_msg(msg_id)
        if not stream_id:
            return False

        chunk = StreamChunk(content=content, is_final=is_final)
        await self.stream_sessions.publish(stream_id, chunk)
        if is_final:
            self.stream_sessions.mark_finished(stream_id)
        return True
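A sketch of the producer side, assuming a pipeline hook that receives the original msgid and an async iterator of text deltas (both names hypothetical):

async def stream_reply(client: WecomBotClient, msg_id: str, deltas) -> None:
    accumulated = ''
    async for delta in deltas:
        accumulated += delta
        # each push lands in the session queue and is drained by the next refresh
        await client.push_stream_chunk(msg_id, accumulated, is_final=False)
    # the final push flushes the full text and marks the session finished
    await client.push_stream_chunk(msg_id, accumulated, is_final=True)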
    async def set_message(self, msg_id: str, content: str):
        """Compatibility path for the old logic: cache the final result when streaming is not possible.

        Args:
            msg_id: the WeCom message ID.
            content: the final reply text.

        Example:
            In non-streaming scenarios, caches the final result so a later refresh can return it.
        """
        handled = await self.push_stream_chunk(msg_id, content, is_final=True)
        if not handled:
            self.generated_content[msg_id] = content
    def on_message(self, msg_type: str):
        def decorator(func: Callable[[wecombotevent.WecomBotEvent], None]):
@@ -237,7 +546,6 @@ class WecomBotClient:

        return decorator
    async def download_url_to_base64(self, download_url, encoding_aes_key):
        async with httpx.AsyncClient() as client:
            response = await client.get(download_url)
@@ -247,26 +555,22 @@ class WecomBotClient:
            encrypted_bytes = response.content

            aes_key = base64.b64decode(encoding_aes_key + "=")  # pad base64 to a decodable length
            iv = aes_key[:16]

            cipher = AES.new(aes_key, AES.MODE_CBC, iv)
            decrypted = cipher.decrypt(encrypted_bytes)

            pad_len = decrypted[-1]
            decrypted = decrypted[:-pad_len]

            if decrypted.startswith(b"\xff\xd8"):  # JPEG
                mime_type = "image/jpeg"
            elif decrypted.startswith(b"\x89PNG"):  # PNG
                mime_type = "image/png"
            elif decrypted.startswith((b"GIF87a", b"GIF89a")):  # GIF
                mime_type = "image/gif"
            elif decrypted.startswith(b"BM"):  # BMP
                mime_type = "image/bmp"
            elif decrypted.startswith(b"II*\x00") or decrypted.startswith(b"MM\x00*"):  # TIFF
                mime_type = "image/tiff"
@@ -276,15 +580,9 @@ class WecomBotClient:
            # convert to base64
            base64_str = base64.b64encode(decrypted).decode("utf-8")
            return f"data:{mime_type};base64,{base64_str}"
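A sketch of the key handling above: a WeCom EncodingAESKey is 43 base64 characters, so appending "=" pads it to a decodable 44, yielding the 32-byte AES key whose first 16 bytes double as the CBC IV ('A' * 43 is a placeholder, not a real key):

import base64

encoding_aes_key = 'A' * 43
aes_key = base64.b64decode(encoding_aes_key + '=')
assert len(aes_key) == 32
iv = aes_key[:16]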
    async def run_task(self, host: str, port: int, *args, **kwargs):
        """
        Start the Quart application.
        """
        await self.app.run_task(host=host, port=port, *args, **kwargs)
@@ -22,7 +22,21 @@ class WecomBotEvent(dict):
        """
        User ID.
        """
        return self.get('from', {}).get('userid', '') or self.get('userid', '')

    @property
    def username(self) -> str:
        """
        User display name.
        """
        return self.get('username', '') or self.get('from', {}).get('alias', '') or self.get('from', {}).get('name', '') or self.userid

    @property
    def chatname(self) -> str:
        """
        Group chat name.
        """
        return self.get('chatname', '') or str(self.chatid)

    @property
    def content(self) -> str:

main.py (13 lines changed)
@@ -18,7 +18,13 @@ asciiart = r"""

async def main_entry(loop: asyncio.AbstractEventLoop):
    parser = argparse.ArgumentParser(description='LangBot')
    parser.add_argument(
        '--standalone-runtime',
        action='store_true',
        help='Use standalone plugin runtime / 使用独立插件运行时',
        default=False,
    )
    parser.add_argument('--debug', action='store_true', help='Debug mode / 调试模式', default=False)
    args = parser.parse_args()

    if args.standalone_runtime:
@@ -26,6 +32,11 @@ async def main_entry(loop: asyncio.AbstractEventLoop):

        platform.standalone_runtime = True

    if args.debug:
        from pkg.utils import constants

        constants.debug_mode = True

    print(asciiart)

    import sys
@@ -9,6 +9,9 @@ from quart.typing import RouteCallable

from ....core import app

# Maximum file upload size limit (10MB)
MAX_FILE_SIZE = 10 * 1024 * 1024  # 10MB


preregistered_groups: list[type[RouterGroup]] = []
"""Pre-registered list of RouterGroup"""
@@ -31,6 +34,8 @@ class AuthType(enum.Enum):

    NONE = 'none'
    USER_TOKEN = 'user-token'
    API_KEY = 'api-key'
    USER_TOKEN_OR_API_KEY = 'user-token-or-api-key'


class RouterGroup(abc.ABC):
@@ -84,6 +89,63 @@ class RouterGroup(abc.ABC):
                    except Exception as e:
                        return self.http_status(401, -1, str(e))

                elif auth_type == AuthType.API_KEY:
                    # get API key from Authorization header or X-API-Key header
                    api_key = quart.request.headers.get('X-API-Key', '')
                    if not api_key:
                        auth_header = quart.request.headers.get('Authorization', '')
                        if auth_header.startswith('Bearer '):
                            api_key = auth_header.replace('Bearer ', '')

                    if not api_key:
                        return self.http_status(401, -1, 'No valid API key provided')

                    try:
                        is_valid = await self.ap.apikey_service.verify_api_key(api_key)
                        if not is_valid:
                            return self.http_status(401, -1, 'Invalid API key')
                    except Exception as e:
                        return self.http_status(401, -1, str(e))

                elif auth_type == AuthType.USER_TOKEN_OR_API_KEY:
                    # Try API key first (check X-API-Key header)
                    api_key = quart.request.headers.get('X-API-Key', '')

                    if api_key:
                        # API key authentication
                        try:
                            is_valid = await self.ap.apikey_service.verify_api_key(api_key)
                            if not is_valid:
                                return self.http_status(401, -1, 'Invalid API key')
                        except Exception as e:
                            return self.http_status(401, -1, str(e))
                    else:
                        # Try user token authentication (Authorization header)
                        token = quart.request.headers.get('Authorization', '').replace('Bearer ', '')

                        if not token:
                            return self.http_status(401, -1, 'No valid authentication provided (user token or API key required)')

                        try:
                            user_email = await self.ap.user_service.verify_jwt_token(token)

                            # check if this account exists
                            user = await self.ap.user_service.get_user_by_email(user_email)
                            if not user:
                                return self.http_status(401, -1, 'User not found')

                            # check if f accepts user_email parameter
                            if 'user_email' in f.__code__.co_varnames:
                                kwargs['user_email'] = user_email
                        except Exception:
                            # If user token fails, maybe it's an API key in Authorization header
                            try:
                                is_valid = await self.ap.apikey_service.verify_api_key(token)
                                if not is_valid:
                                    return self.http_status(401, -1, 'Invalid authentication credentials')
                            except Exception as e:
                                return self.http_status(401, -1, str(e))

                try:
                    return await f(*args, **kwargs)
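A sketch of the two credential styles the USER_TOKEN_OR_API_KEY branch accepts (host, port, and key values are placeholders):

import httpx

base = 'http://localhost:5300'

# 1) API key via the dedicated header, checked first:
r1 = httpx.get(f'{base}/api/v1/pipelines', headers={'X-API-Key': '<api-key>'})

# 2) Bearer credential in the Authorization header; it is tried as a user JWT
#    first, and retried as an API key if JWT verification fails:
r2 = httpx.get(f'{base}/api/v1/pipelines', headers={'Authorization': 'Bearer <jwt-or-api-key>'})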
pkg/api/http/controller/groups/apikeys.py (new file, 43 lines)
@@ -0,0 +1,43 @@
import quart

from .. import group


@group.group_class('apikeys', '/api/v1/apikeys')
class ApiKeysRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('', methods=['GET', 'POST'])
        async def _() -> str:
            if quart.request.method == 'GET':
                keys = await self.ap.apikey_service.get_api_keys()
                return self.success(data={'keys': keys})
            elif quart.request.method == 'POST':
                json_data = await quart.request.json
                name = json_data.get('name', '')
                description = json_data.get('description', '')

                if not name:
                    return self.http_status(400, -1, 'Name is required')

                key = await self.ap.apikey_service.create_api_key(name, description)
                return self.success(data={'key': key})

        @self.route('/<int:key_id>', methods=['GET', 'PUT', 'DELETE'])
        async def _(key_id: int) -> str:
            if quart.request.method == 'GET':
                key = await self.ap.apikey_service.get_api_key(key_id)
                if key is None:
                    return self.http_status(404, -1, 'API key not found')
                return self.success(data={'key': key})

            elif quart.request.method == 'PUT':
                json_data = await quart.request.json
                name = json_data.get('name')
                description = json_data.get('description')

                await self.ap.apikey_service.update_api_key(key_id, name, description)
                return self.success()

            elif quart.request.method == 'DELETE':
                await self.ap.apikey_service.delete_api_key(key_id)
                return self.success()
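A sketch of exercising the new key-management routes (values and the response envelope are illustrative):

import httpx

headers = {'Authorization': 'Bearer <user-jwt>'}
base = 'http://localhost:5300/api/v1/apikeys'

created = httpx.post(base, json={'name': 'ci-bot', 'description': 'CI access'}, headers=headers)
print(created.json())                   # assumed to carry data={'key': ...}

httpx.get(base, headers=headers)        # list keys
httpx.put(f'{base}/1', json={'name': 'ci-bot-renamed'}, headers=headers)  # update key 1
httpx.delete(f'{base}/1', headers=headers)                                # delete key 1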
@@ -15,6 +15,9 @@ class FilesRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('/image/<image_key>', methods=['GET'], auth_type=group.AuthType.NONE)
        async def _(image_key: str) -> quart.Response:
            if '/' in image_key or '\\' in image_key:
                return quart.Response(status=404)

            if not await self.ap.storage_mgr.storage_provider.exists(image_key):
                return quart.Response(status=404)

@@ -28,15 +31,41 @@ class FilesRouterGroup(group.RouterGroup):
        @self.route('/documents', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
        async def _() -> quart.Response:
            request = quart.request

            # Check file size limit before reading the file
            content_length = request.content_length
            if content_length and content_length > group.MAX_FILE_SIZE:
                return self.fail(400, 'File size exceeds 10MB limit. Please split large files into smaller parts.')

            # get file bytes from 'file'
            files = await request.files
            if 'file' not in files:
                return self.fail(400, 'No file provided in request')

            file = files['file']
            assert isinstance(file, quart.datastructures.FileStorage)

            file_bytes = await asyncio.to_thread(file.stream.read)

            # Double-check actual file size after reading
            if len(file_bytes) > group.MAX_FILE_SIZE:
                return self.fail(400, 'File size exceeds 10MB limit. Please split large files into smaller parts.')

            # Split filename and extension properly
            if '.' in file.filename:
                file_name, extension = file.filename.rsplit('.', 1)
            else:
                file_name = file.filename
                extension = ''

            # check if file name contains '/' or '\'
            if '/' in file_name or '\\' in file_name:
                return self.fail(400, 'File name contains invalid characters')

            file_key = file_name + '_' + str(uuid.uuid4())[:8]
            if extension:
                file_key += '.' + extension

            # save file to storage
            await self.ap.storage_mgr.storage_provider.save(file_key, file_bytes)
            return self.success(
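The filename handling is easiest to see in isolation; a sketch mirroring the logic above, with illustrative inputs:

import uuid

def make_file_key(filename: str) -> str:
    # rsplit keeps dots inside the stem and tolerates extension-less names
    if '.' in filename:
        file_name, extension = filename.rsplit('.', 1)
    else:
        file_name, extension = filename, ''
    file_key = file_name + '_' + str(uuid.uuid4())[:8]
    if extension:
        file_key += '.' + extension
    return file_key

print(make_file_key('report.final.pdf'))  # e.g. report.final_1a2b3c4d.pdf
print(make_file_key('README'))            # e.g. README_5e6f7a8b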
@@ -8,7 +8,7 @@ from ... import group
@group.group_class('pipelines', '/api/v1/pipelines')
class PipelinesRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                sort_by = quart.request.args.get('sort_by', 'created_at')
@@ -23,11 +23,11 @@ class PipelinesRouterGroup(group.RouterGroup):

            return self.success(data={'uuid': pipeline_uuid})

        @self.route('/_/metadata', methods=['GET'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            return self.success(data={'configs': await self.ap.pipeline_service.get_pipeline_metadata()})

        @self.route('/<pipeline_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(pipeline_uuid: str) -> str:
            if quart.request.method == 'GET':
                pipeline = await self.ap.pipeline_service.get_pipeline(pipeline_uuid)
@@ -46,3 +46,34 @@ class PipelinesRouterGroup(group.RouterGroup):
                await self.ap.pipeline_service.delete_pipeline(pipeline_uuid)

                return self.success()

        @self.route('/<pipeline_uuid>/extensions', methods=['GET', 'PUT'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(pipeline_uuid: str) -> str:
            if quart.request.method == 'GET':
                # Get current extensions and available plugins
                pipeline = await self.ap.pipeline_service.get_pipeline(pipeline_uuid)
                if pipeline is None:
                    return self.http_status(404, -1, 'pipeline not found')

                plugins = await self.ap.plugin_connector.list_plugins()
                mcp_servers = await self.ap.mcp_service.get_mcp_servers(contain_runtime_info=True)

                return self.success(
                    data={
                        'bound_plugins': pipeline.get('extensions_preferences', {}).get('plugins', []),
                        'available_plugins': plugins,
                        'bound_mcp_servers': pipeline.get('extensions_preferences', {}).get('mcp_servers', []),
                        'available_mcp_servers': mcp_servers,
                    }
                )
            elif quart.request.method == 'PUT':
                # Update bound plugins and MCP servers for this pipeline
                json_data = await quart.request.json
                bound_plugins = json_data.get('bound_plugins', [])
                bound_mcp_servers = json_data.get('bound_mcp_servers', [])

                await self.ap.pipeline_service.update_pipeline_extensions(
                    pipeline_uuid, bound_plugins, bound_mcp_servers
                )

                return self.success()
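A sketch of binding extensions through the new route (uuid, identifiers, and port are placeholders):

import httpx

httpx.put(
    'http://localhost:5300/api/v1/pipelines/<pipeline-uuid>/extensions',
    json={
        'bound_plugins': ['<author>/<plugin-name>'],
        'bound_mcp_servers': ['<server-name>'],
    },
    headers={'X-API-Key': '<api-key>'},
)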
@@ -6,7 +6,7 @@ from ... import group
@group.group_class('bots', '/api/v1/platform/bots')
class BotsRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                return self.success(data={'bots': await self.ap.bot_service.get_bots()})
@@ -15,7 +15,7 @@ class BotsRouterGroup(group.RouterGroup):
            bot_uuid = await self.ap.bot_service.create_bot(json_data)
            return self.success(data={'uuid': bot_uuid})

        @self.route('/<bot_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(bot_uuid: str) -> str:
            if quart.request.method == 'GET':
                bot = await self.ap.bot_service.get_bot(bot_uuid)
@@ -30,7 +30,7 @@ class BotsRouterGroup(group.RouterGroup):
                await self.ap.bot_service.delete_bot(bot_uuid)
                return self.success()

        @self.route('/<bot_uuid>/logs', methods=['POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(bot_uuid: str) -> str:
            json_data = await quart.request.json
            from_index = json_data.get('from_index', -1)
@@ -2,6 +2,10 @@ from __future__ import annotations

import base64
import quart
import re
import httpx
import uuid
import os

from .....core import taskmgr
from .. import group
@@ -45,9 +49,12 @@ class PluginsRouterGroup(group.RouterGroup):
                    return self.http_status(404, -1, 'plugin not found')
                return self.success(data={'plugin': plugin})
            elif quart.request.method == 'DELETE':
                delete_data = quart.request.args.get('delete_data', 'false').lower() == 'true'
                ctx = taskmgr.TaskContext.new()
                wrapper = self.ap.task_mgr.create_user_task(
                    self.ap.plugin_connector.delete_plugin(
                        author, plugin_name, delete_data=delete_data, task_context=ctx
                    ),
                    kind='plugin-operation',
                    name=f'plugin-remove-{plugin_name}',
                    label=f'Removing plugin {plugin_name}',
@@ -89,23 +96,145 @@ class PluginsRouterGroup(group.RouterGroup):

            return quart.Response(icon_data, mimetype=mime_type)

        @self.route('/github/releases', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
        async def _() -> str:
            """Get releases from a GitHub repository URL"""
            data = await quart.request.json
            repo_url = data.get('repo_url', '')

            # Parse GitHub repository URL to extract owner and repo
            # Supports: https://github.com/owner/repo or github.com/owner/repo
            pattern = r'github\.com/([^/]+)/([^/]+?)(?:\.git)?(?:/.*)?$'
            match = re.search(pattern, repo_url)

            if not match:
                return self.http_status(400, -1, 'Invalid GitHub repository URL')

            owner, repo = match.groups()

            try:
                # Fetch releases from GitHub API
                url = f'https://api.github.com/repos/{owner}/{repo}/releases'
                async with httpx.AsyncClient(
                    trust_env=True,
                    follow_redirects=True,
                    timeout=10,
                ) as client:
                    response = await client.get(url)
                    response.raise_for_status()
                    releases = response.json()

                # Format releases data for frontend
                formatted_releases = []
                for release in releases:
                    formatted_releases.append(
                        {
                            'id': release['id'],
                            'tag_name': release['tag_name'],
                            'name': release['name'],
                            'published_at': release['published_at'],
                            'prerelease': release['prerelease'],
                            'draft': release['draft'],
                        }
                    )

                return self.success(data={'releases': formatted_releases, 'owner': owner, 'repo': repo})
            except httpx.RequestError as e:
                return self.http_status(500, -1, f'Failed to fetch releases: {str(e)}')
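What the URL pattern accepts, as a standalone sketch: `.git` suffixes and trailing paths are tolerated, and owner/repo are captured either way:

import re

pattern = r'github\.com/([^/]+)/([^/]+?)(?:\.git)?(?:/.*)?$'
for url in (
    'https://github.com/langbot-app/LangBot',
    'github.com/langbot-app/LangBot.git',
    'https://github.com/langbot-app/LangBot/releases',
):
    print(re.search(pattern, url).groups())  # ('langbot-app', 'LangBot') each time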
        @self.route(
            '/github/release-assets',
            methods=['POST'],
            auth_type=group.AuthType.USER_TOKEN,
        )
        async def _() -> str:
            """Get assets from a specific GitHub release"""
            data = await quart.request.json
            owner = data.get('owner', '')
            repo = data.get('repo', '')
            release_id = data.get('release_id', '')

            if not all([owner, repo, release_id]):
                return self.http_status(400, -1, 'Missing required parameters')

            try:
                # Fetch release assets from GitHub API
                url = f'https://api.github.com/repos/{owner}/{repo}/releases/{release_id}'
                async with httpx.AsyncClient(
                    trust_env=True,
                    follow_redirects=True,
                    timeout=10,
                ) as client:
                    response = await client.get(
                        url,
                    )
                    response.raise_for_status()
                    release = response.json()

                # Format assets data for frontend
                formatted_assets = []
                for asset in release.get('assets', []):
                    formatted_assets.append(
                        {
                            'id': asset['id'],
                            'name': asset['name'],
                            'size': asset['size'],
                            'download_url': asset['browser_download_url'],
                            'content_type': asset['content_type'],
                        }
                    )

                # add zipball as a downloadable asset
                # formatted_assets.append(
                #     {
                #         "id": 0,
                #         "name": "Source code (zip)",
                #         "size": -1,
                #         "download_url": release["zipball_url"],
                #         "content_type": "application/zip",
                #     }
                # )

                return self.success(data={'assets': formatted_assets})
            except httpx.RequestError as e:
                return self.http_status(500, -1, f'Failed to fetch release assets: {str(e)}')

        @self.route('/install/github', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
        async def _() -> str:
            """Install plugin from GitHub release asset"""
            data = await quart.request.json
            asset_url = data.get('asset_url', '')
            owner = data.get('owner', '')
            repo = data.get('repo', '')
            release_tag = data.get('release_tag', '')

            if not asset_url:
                return self.http_status(400, -1, 'Missing asset_url parameter')

            ctx = taskmgr.TaskContext.new()
            install_info = {
                'asset_url': asset_url,
                'owner': owner,
                'repo': repo,
                'release_tag': release_tag,
                'github_url': f'https://github.com/{owner}/{repo}',
            }

            wrapper = self.ap.task_mgr.create_user_task(
                self.ap.plugin_connector.install_plugin(PluginInstallSource.GITHUB, install_info, task_context=ctx),
                kind='plugin-operation',
                name='plugin-install-github',
                label=f'Installing plugin from GitHub {owner}/{repo}@{release_tag}',
                context=ctx,
            )

            return self.success(data={'task_id': wrapper.id})

        @self.route(
            '/install/marketplace',
            methods=['POST'],
            auth_type=group.AuthType.USER_TOKEN,
        )
        async def _() -> str:
            data = await quart.request.json

@@ -128,10 +257,8 @@ class PluginsRouterGroup(group.RouterGroup):

            file_bytes = file.read()

            data = {
                'plugin_file': file_bytes,
            }

            ctx = taskmgr.TaskContext.new()
@@ -144,3 +271,39 @@ class PluginsRouterGroup(group.RouterGroup):
            )

            return self.success(data={'task_id': wrapper.id})

        @self.route('/config-files', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
        async def _() -> str:
            """Upload a file for plugin configuration"""
            file = (await quart.request.files).get('file')
            if file is None:
                return self.http_status(400, -1, 'file is required')

            # Check file size (10MB limit)
            MAX_FILE_SIZE = 10 * 1024 * 1024  # 10MB
            file_bytes = file.read()
            if len(file_bytes) > MAX_FILE_SIZE:
                return self.http_status(400, -1, 'file size exceeds 10MB limit')

            # Generate unique file key with original extension
            original_filename = file.filename
            _, ext = os.path.splitext(original_filename)
            file_key = f'plugin_config_{uuid.uuid4().hex}{ext}'

            # Save file using storage manager
            await self.ap.storage_mgr.storage_provider.save(file_key, file_bytes)

            return self.success(data={'file_key': file_key})

        @self.route('/config-files/<file_key>', methods=['DELETE'], auth_type=group.AuthType.USER_TOKEN)
        async def _(file_key: str) -> str:
            """Delete a plugin configuration file"""
            # Only allow deletion of files with plugin_config_ prefix for security
            if not file_key.startswith('plugin_config_'):
                return self.http_status(400, -1, 'invalid file key')

            try:
                await self.ap.storage_mgr.storage_provider.delete(file_key)
                return self.success(data={'deleted': True})
            except Exception as e:
                return self.http_status(500, -1, f'failed to delete file: {str(e)}')
@@ -6,7 +6,7 @@ from ... import group
@group.group_class('models/llm', '/api/v1/provider/models/llm')
class LLMModelsRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                return self.success(data={'models': await self.ap.llm_model_service.get_llm_models()})
@@ -17,7 +17,7 @@ class LLMModelsRouterGroup(group.RouterGroup):

            return self.success(data={'uuid': model_uuid})

        @self.route('/<model_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            if quart.request.method == 'GET':
                model = await self.ap.llm_model_service.get_llm_model(model_uuid)
@@ -37,7 +37,7 @@ class LLMModelsRouterGroup(group.RouterGroup):

            return self.success()

        @self.route('/<model_uuid>/test', methods=['POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            json_data = await quart.request.json

@@ -49,7 +49,7 @@ class LLMModelsRouterGroup(group.RouterGroup):
@group.group_class('models/embedding', '/api/v1/provider/models/embedding')
class EmbeddingModelsRouterGroup(group.RouterGroup):
    async def initialize(self) -> None:
        @self.route('', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _() -> str:
            if quart.request.method == 'GET':
                return self.success(data={'models': await self.ap.embedding_models_service.get_embedding_models()})
@@ -60,7 +60,7 @@ class EmbeddingModelsRouterGroup(group.RouterGroup):

            return self.success(data={'uuid': model_uuid})

        @self.route('/<model_uuid>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            if quart.request.method == 'GET':
                model = await self.ap.embedding_models_service.get_embedding_model(model_uuid)
@@ -80,7 +80,7 @@ class EmbeddingModelsRouterGroup(group.RouterGroup):

            return self.success()

        @self.route('/<model_uuid>/test', methods=['POST'], auth_type=group.AuthType.USER_TOKEN_OR_API_KEY)
        async def _(model_uuid: str) -> str:
            json_data = await quart.request.json

pkg/api/http/controller/groups/resources/mcp.py (new file, 62 lines)
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import quart
+import traceback
+
+
+from ... import group
+
+
+@group.group_class('mcp', '/api/v1/mcp')
+class MCPRouterGroup(group.RouterGroup):
+    async def initialize(self) -> None:
+        @self.route('/servers', methods=['GET', 'POST'], auth_type=group.AuthType.USER_TOKEN)
+        async def _() -> str:
+            """Get the MCP server list"""
+            if quart.request.method == 'GET':
+                servers = await self.ap.mcp_service.get_mcp_servers(contain_runtime_info=True)
+
+                return self.success(data={'servers': servers})
+
+            elif quart.request.method == 'POST':
+                data = await quart.request.json
+
+                try:
+                    uuid = await self.ap.mcp_service.create_mcp_server(data)
+                    return self.success(data={'uuid': uuid})
+                except Exception as e:
+                    traceback.print_exc()
+                    return self.http_status(500, -1, f'Failed to create MCP server: {str(e)}')
+
+        @self.route('/servers/<server_name>', methods=['GET', 'PUT', 'DELETE'], auth_type=group.AuthType.USER_TOKEN)
+        async def _(server_name: str) -> str:
+            """Get, update, or delete an MCP server configuration"""
+
+            server_data = await self.ap.mcp_service.get_mcp_server_by_name(server_name)
+            if server_data is None:
+                return self.http_status(404, -1, 'Server not found')
+
+            if quart.request.method == 'GET':
+                return self.success(data={'server': server_data})
+
+            elif quart.request.method == 'PUT':
+                data = await quart.request.json
+                try:
+                    await self.ap.mcp_service.update_mcp_server(server_data['uuid'], data)
+                    return self.success()
+                except Exception as e:
+                    return self.http_status(500, -1, f'Failed to update MCP server: {str(e)}')
+
+            elif quart.request.method == 'DELETE':
+                try:
+                    await self.ap.mcp_service.delete_mcp_server(server_data['uuid'])
+                    return self.success()
+                except Exception as e:
+                    return self.http_status(500, -1, f'Failed to delete MCP server: {str(e)}')
+
+        @self.route('/servers/<server_name>/test', methods=['POST'], auth_type=group.AuthType.USER_TOKEN)
+        async def _(server_name: str) -> str:
+            """Test the MCP server connection"""
+            server_data = await quart.request.json
+            task_id = await self.ap.mcp_service.test_mcp_server(server_name=server_name, server_data=server_data)
+            return self.success(data={'task_id': task_id})
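
For orientation, a minimal client-side sketch of the new MCP endpoints. This is not part of the diff; the base URL, port, and Authorization header format are assumptions that must be adapted to the actual deployment.

    import requests  # hypothetical client code, not LangBot source

    BASE = 'http://localhost:5300/api/v1/mcp'            # assumed host/port
    HEADERS = {'Authorization': 'Bearer <user-token>'}   # assumed USER_TOKEN header format

    # List servers; runtime_info is included because the handler passes contain_runtime_info=True
    servers = requests.get(f'{BASE}/servers', headers=HEADERS).json()

    # Create a server; the fields mirror the MCPServer entity introduced later in this diff
    payload = {'name': 'demo', 'enable': True, 'mode': 'sse', 'extra_args': {}}
    created = requests.post(f'{BASE}/servers', headers=HEADERS, json=payload).json()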

@@ -13,7 +13,6 @@ class SystemRouterGroup(group.RouterGroup):
                 data={
                     'version': constants.semantic_version,
                     'debug': constants.debug_mode,
-                    'enabled_platform_count': len(self.ap.platform_mgr.get_running_adapters()),
                     'enable_marketplace': self.ap.instance_config.data.get('plugin', {}).get(
                         'enable_marketplace', True
                     ),
@@ -91,3 +90,26 @@ class SystemRouterGroup(group.RouterGroup):
             )

             return self.success(data=resp)
+
+        @self.route(
+            '/status/plugin-system',
+            methods=['GET'],
+            auth_type=group.AuthType.USER_TOKEN,
+        )
+        async def _() -> str:
+            plugin_connector_error = 'ok'
+            is_connected = True
+
+            try:
+                await self.ap.plugin_connector.ping_plugin_runtime()
+            except Exception as e:
+                plugin_connector_error = str(e)
+                is_connected = False
+
+            return self.success(
+                data={
+                    'is_enable': self.ap.plugin_connector.is_enable_plugin,
+                    'is_connected': is_connected,
+                    'plugin_connector_error': plugin_connector_error,
+                }
+            )

pkg/api/http/controller/groups/webhooks.py (new file, 49 lines)
@@ -0,0 +1,49 @@
+import quart
+
+from .. import group
+
+
+@group.group_class('webhooks', '/api/v1/webhooks')
+class WebhooksRouterGroup(group.RouterGroup):
+    async def initialize(self) -> None:
+        @self.route('', methods=['GET', 'POST'])
+        async def _() -> str:
+            if quart.request.method == 'GET':
+                webhooks = await self.ap.webhook_service.get_webhooks()
+                return self.success(data={'webhooks': webhooks})
+            elif quart.request.method == 'POST':
+                json_data = await quart.request.json
+                name = json_data.get('name', '')
+                url = json_data.get('url', '')
+                description = json_data.get('description', '')
+                enabled = json_data.get('enabled', True)
+
+                if not name:
+                    return self.http_status(400, -1, 'Name is required')
+                if not url:
+                    return self.http_status(400, -1, 'URL is required')
+
+                webhook = await self.ap.webhook_service.create_webhook(name, url, description, enabled)
+                return self.success(data={'webhook': webhook})
+
+        @self.route('/<int:webhook_id>', methods=['GET', 'PUT', 'DELETE'])
+        async def _(webhook_id: int) -> str:
+            if quart.request.method == 'GET':
+                webhook = await self.ap.webhook_service.get_webhook(webhook_id)
+                if webhook is None:
+                    return self.http_status(404, -1, 'Webhook not found')
+                return self.success(data={'webhook': webhook})
+
+            elif quart.request.method == 'PUT':
+                json_data = await quart.request.json
+                name = json_data.get('name')
+                url = json_data.get('url')
+                description = json_data.get('description')
+                enabled = json_data.get('enabled')
+
+                await self.ap.webhook_service.update_webhook(webhook_id, name, url, description, enabled)
+                return self.success()
+
+            elif quart.request.method == 'DELETE':
+                await self.ap.webhook_service.delete_webhook(webhook_id)
+                return self.success()

@@ -5,6 +5,7 @@ import os

 import quart
 import quart_cors
+from werkzeug.exceptions import RequestEntityTooLarge

 from ....core import app, entities as core_entities
 from ....utils import importutil
@@ -15,12 +16,14 @@ from .groups import provider as groups_provider
 from .groups import platform as groups_platform
 from .groups import pipelines as groups_pipelines
 from .groups import knowledge as groups_knowledge
+from .groups import resources as groups_resources

 importutil.import_modules_in_pkg(groups)
 importutil.import_modules_in_pkg(groups_provider)
 importutil.import_modules_in_pkg(groups_platform)
 importutil.import_modules_in_pkg(groups_pipelines)
 importutil.import_modules_in_pkg(groups_knowledge)
+importutil.import_modules_in_pkg(groups_resources)


 class HTTPController:
@@ -33,7 +36,20 @@ class HTTPController:
         self.quart_app = quart.Quart(__name__)
         quart_cors.cors(self.quart_app, allow_origin='*')

+        # Set maximum content length to prevent large file uploads
+        self.quart_app.config['MAX_CONTENT_LENGTH'] = group.MAX_FILE_SIZE
+
     async def initialize(self) -> None:
+        # Register custom error handler for file size limit
+        @self.quart_app.errorhandler(RequestEntityTooLarge)
+        async def handle_request_entity_too_large(e):
+            return quart.jsonify(
+                {
+                    'code': 400,
+                    'msg': 'File size exceeds 10MB limit. Please split large files into smaller parts.',
+                }
+            ), 400
+
         await self.register_routes()

     async def run(self) -> None:
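
The size guard above relies on standard Quart behavior: when MAX_CONTENT_LENGTH is exceeded, Quart raises werkzeug's RequestEntityTooLarge, which the handler converts into a JSON 400. A minimal standalone sketch, assuming group.MAX_FILE_SIZE is 10 MB as the error message suggests:

    import quart
    from werkzeug.exceptions import RequestEntityTooLarge

    app = quart.Quart(__name__)
    app.config['MAX_CONTENT_LENGTH'] = 10 * 1024 * 1024  # assumed value of group.MAX_FILE_SIZE

    @app.errorhandler(RequestEntityTooLarge)
    async def too_large(e):
        # oversized request bodies are rejected before the route handler runs
        return quart.jsonify({'code': 400, 'msg': 'File size exceeds 10MB limit.'}), 400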

pkg/api/http/service/apikey.py (new file, 79 lines)
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import secrets
+import sqlalchemy
+
+from ....core import app
+from ....entity.persistence import apikey
+
+
+class ApiKeyService:
+    ap: app.Application
+
+    def __init__(self, ap: app.Application) -> None:
+        self.ap = ap
+
+    async def get_api_keys(self) -> list[dict]:
+        """Get all API keys"""
+        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(apikey.ApiKey))
+
+        keys = result.all()
+        return [self.ap.persistence_mgr.serialize_model(apikey.ApiKey, key) for key in keys]
+
+    async def create_api_key(self, name: str, description: str = '') -> dict:
+        """Create a new API key"""
+        # Generate a secure random API key
+        key = f'lbk_{secrets.token_urlsafe(32)}'
+
+        key_data = {'name': name, 'key': key, 'description': description}
+
+        await self.ap.persistence_mgr.execute_async(sqlalchemy.insert(apikey.ApiKey).values(**key_data))
+
+        # Retrieve the created key
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(apikey.ApiKey).where(apikey.ApiKey.key == key)
+        )
+        created_key = result.first()
+
+        return self.ap.persistence_mgr.serialize_model(apikey.ApiKey, created_key)
+
+    async def get_api_key(self, key_id: int) -> dict | None:
+        """Get a specific API key by ID"""
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(apikey.ApiKey).where(apikey.ApiKey.id == key_id)
+        )
+
+        key = result.first()
+
+        if key is None:
+            return None
+
+        return self.ap.persistence_mgr.serialize_model(apikey.ApiKey, key)
+
+    async def verify_api_key(self, key: str) -> bool:
+        """Verify if an API key is valid"""
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(apikey.ApiKey).where(apikey.ApiKey.key == key)
+        )
+
+        key_obj = result.first()
+        return key_obj is not None
+
+    async def delete_api_key(self, key_id: int) -> None:
+        """Delete an API key"""
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.delete(apikey.ApiKey).where(apikey.ApiKey.id == key_id)
+        )
+
+    async def update_api_key(self, key_id: int, name: str = None, description: str = None) -> None:
+        """Update an API key's metadata (name, description)"""
+        update_data = {}
+        if name is not None:
+            update_data['name'] = name
+        if description is not None:
+            update_data['description'] = description
+
+        if update_data:
+            await self.ap.persistence_mgr.execute_async(
+                sqlalchemy.update(apikey.ApiKey).where(apikey.ApiKey.id == key_id).values(**update_data)
+            )
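
The key format is easy to reproduce in isolation; this sketch only mirrors the generation scheme in create_api_key above and is not additional LangBot code:

    import secrets

    key = f'lbk_{secrets.token_urlsafe(32)}'
    # 'lbk_' prefix + 43 URL-safe characters (32 random bytes, ~256 bits of entropy)
    assert len(key) == 47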

pkg/api/http/service/mcp.py (new file, 158 lines)
@@ -0,0 +1,158 @@
+from __future__ import annotations
+
+import sqlalchemy
+import uuid
+import asyncio
+
+from ....core import app
+from ....entity.persistence import mcp as persistence_mcp
+from ....core import taskmgr
+from ....provider.tools.loaders.mcp import RuntimeMCPSession, MCPSessionStatus
+
+
+class MCPService:
+    ap: app.Application
+
+    def __init__(self, ap: app.Application) -> None:
+        self.ap = ap
+
+    async def get_runtime_info(self, server_name: str) -> dict | None:
+        session = self.ap.tool_mgr.mcp_tool_loader.get_session(server_name)
+        if session:
+            return session.get_runtime_info_dict()
+        return None
+
+    async def get_mcp_servers(self, contain_runtime_info: bool = False) -> list[dict]:
+        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_mcp.MCPServer))
+
+        servers = result.all()
+        serialized_servers = [
+            self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server) for server in servers
+        ]
+        if contain_runtime_info:
+            for server in serialized_servers:
+                runtime_info = await self.get_runtime_info(server['name'])
+
+                server['runtime_info'] = runtime_info if runtime_info else None
+
+        return serialized_servers
+
+    async def create_mcp_server(self, server_data: dict) -> str:
+        server_data['uuid'] = str(uuid.uuid4())
+        await self.ap.persistence_mgr.execute_async(sqlalchemy.insert(persistence_mcp.MCPServer).values(server_data))
+
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_data['uuid'])
+        )
+        server_entity = result.first()
+        if server_entity:
+            server_config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server_entity)
+            if self.ap.tool_mgr.mcp_tool_loader:
+                task = asyncio.create_task(self.ap.tool_mgr.mcp_tool_loader.host_mcp_server(server_config))
+                self.ap.tool_mgr.mcp_tool_loader._hosted_mcp_tasks.append(task)
+
+        return server_data['uuid']
+
+    async def get_mcp_server_by_name(self, server_name: str) -> dict | None:
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.name == server_name)
+        )
+        server = result.first()
+        if server is None:
+            return None
+
+        runtime_info = await self.get_runtime_info(server.name)
+        server_data = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server)
+        server_data['runtime_info'] = runtime_info if runtime_info else None
+        return server_data
+
+    async def update_mcp_server(self, server_uuid: str, server_data: dict) -> None:
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
+        )
+        old_server = result.first()
+        old_server_name = old_server.name if old_server else None
+        old_enable = old_server.enable if old_server else False
+
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.update(persistence_mcp.MCPServer)
+            .where(persistence_mcp.MCPServer.uuid == server_uuid)
+            .values(server_data)
+        )
+
+        if self.ap.tool_mgr.mcp_tool_loader:
+            new_enable = server_data.get('enable', False)
+
+            need_remove = old_server_name and old_server_name in self.ap.tool_mgr.mcp_tool_loader.sessions
+            need_start = new_enable
+
+            if old_enable and not new_enable:
+                if need_remove:
+                    await self.ap.tool_mgr.mcp_tool_loader.remove_mcp_server(old_server_name)
+
+            elif not old_enable and new_enable:
+                result = await self.ap.persistence_mgr.execute_async(
+                    sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
+                )
+                updated_server = result.first()
+                if updated_server:
+                    server_config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, updated_server)
+                    task = asyncio.create_task(self.ap.tool_mgr.mcp_tool_loader.host_mcp_server(server_config))
+                    self.ap.tool_mgr.mcp_tool_loader._hosted_mcp_tasks.append(task)
+
+            elif old_enable and new_enable:
+                if need_remove:
+                    await self.ap.tool_mgr.mcp_tool_loader.remove_mcp_server(old_server_name)
+                result = await self.ap.persistence_mgr.execute_async(
+                    sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
+                )
+                updated_server = result.first()
+                if updated_server:
+                    server_config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, updated_server)
+                    task = asyncio.create_task(self.ap.tool_mgr.mcp_tool_loader.host_mcp_server(server_config))
+                    self.ap.tool_mgr.mcp_tool_loader._hosted_mcp_tasks.append(task)
+
+    async def delete_mcp_server(self, server_uuid: str) -> None:
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
+        )
+        server = result.first()
+        server_name = server.name if server else None
+
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.delete(persistence_mcp.MCPServer).where(persistence_mcp.MCPServer.uuid == server_uuid)
+        )
+
+        if server_name and self.ap.tool_mgr.mcp_tool_loader:
+            if server_name in self.ap.tool_mgr.mcp_tool_loader.sessions:
+                await self.ap.tool_mgr.mcp_tool_loader.remove_mcp_server(server_name)
+
+    async def test_mcp_server(self, server_name: str, server_data: dict) -> int:
+        """Test the MCP server connection and return the task ID"""
+
+        runtime_mcp_session: RuntimeMCPSession | None = None
+
+        if server_name != '_':
+            runtime_mcp_session = self.ap.tool_mgr.mcp_tool_loader.get_session(server_name)
+            if runtime_mcp_session is None:
+                raise ValueError(f'Server not found: {server_name}')
+
+            if runtime_mcp_session.status == MCPSessionStatus.ERROR:
+                coroutine = runtime_mcp_session.start()
+            else:
+                coroutine = runtime_mcp_session.refresh()
+        else:
+            runtime_mcp_session = await self.ap.tool_mgr.mcp_tool_loader.load_mcp_server(server_config=server_data)
+            coroutine = runtime_mcp_session.start()
+
+        ctx = taskmgr.TaskContext.new()
+        wrapper = self.ap.task_mgr.create_user_task(
+            coroutine,
+            kind='mcp-operation',
+            name=f'mcp-test-{server_name}',
+            label=f'Testing MCP server {server_name}',
+            context=ctx,
+        )
+        return wrapper.id

@@ -1,13 +1,14 @@
 from __future__ import annotations

 import uuid

 import sqlalchemy
+from langbot_plugin.api.entities.builtin.provider import message as provider_message

 from ....core import app
 from ....entity.persistence import model as persistence_model
 from ....entity.persistence import pipeline as persistence_pipeline
 from ....provider.modelmgr import requester as model_requester
-from langbot_plugin.api.entities.builtin.provider import message as provider_message


 class LLMModelsService:
@@ -104,12 +105,18 @@ class LLMModelsService:
         else:
             runtime_llm_model = await self.ap.model_mgr.init_runtime_llm_model(model_data)

+        # Mon Nov 10 2025: Commented out because some providers may not support the thinking parameter
+        # # Some model vendors enable thinking by default, which tends to delay this test
+        # extra_args = model_data.get('extra_args', {})
+        # if not extra_args or 'thinking' not in extra_args:
+        #     extra_args['thinking'] = {'type': 'disabled'}
+
         await runtime_llm_model.requester.invoke_llm(
             query=None,
             model=runtime_llm_model,
-            messages=[provider_message.Message(role='user', content='Hello, world!')],
+            messages=[provider_message.Message(role='user', content='Hello, world! Please just reply a "Hello".')],
             funcs=[],
-            extra_args=model_data.get('extra_args', {}),
+            # extra_args=extra_args,
         )

@@ -136,3 +136,33 @@ class PipelineService:
             )
         )
         await self.ap.pipeline_mgr.remove_pipeline(pipeline_uuid)
+
+    async def update_pipeline_extensions(self, pipeline_uuid: str, bound_plugins: list[dict], bound_mcp_servers: list[str] = None) -> None:
+        """Update the bound plugins and MCP servers for a pipeline"""
+        # Get current pipeline
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(persistence_pipeline.LegacyPipeline).where(
+                persistence_pipeline.LegacyPipeline.uuid == pipeline_uuid
+            )
+        )
+
+        pipeline = result.first()
+        if pipeline is None:
+            raise ValueError(f'Pipeline {pipeline_uuid} not found')
+
+        # Update extensions_preferences
+        extensions_preferences = pipeline.extensions_preferences or {}
+        extensions_preferences['plugins'] = bound_plugins
+        if bound_mcp_servers is not None:
+            extensions_preferences['mcp_servers'] = bound_mcp_servers
+
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.update(persistence_pipeline.LegacyPipeline)
+            .where(persistence_pipeline.LegacyPipeline.uuid == pipeline_uuid)
+            .values(extensions_preferences=extensions_preferences)
+        )
+
+        # Reload pipeline to apply changes
+        await self.ap.pipeline_mgr.remove_pipeline(pipeline_uuid)
+        pipeline = await self.get_pipeline(pipeline_uuid)
+        await self.ap.pipeline_mgr.load_pipeline(pipeline)

pkg/api/http/service/webhook.py (new file, 81 lines)
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import sqlalchemy
+
+from ....core import app
+from ....entity.persistence import webhook
+
+
+class WebhookService:
+    ap: app.Application
+
+    def __init__(self, ap: app.Application) -> None:
+        self.ap = ap
+
+    async def get_webhooks(self) -> list[dict]:
+        """Get all webhooks"""
+        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(webhook.Webhook))
+
+        webhooks = result.all()
+        return [self.ap.persistence_mgr.serialize_model(webhook.Webhook, wh) for wh in webhooks]
+
+    async def create_webhook(self, name: str, url: str, description: str = '', enabled: bool = True) -> dict:
+        """Create a new webhook"""
+        webhook_data = {'name': name, 'url': url, 'description': description, 'enabled': enabled}
+
+        await self.ap.persistence_mgr.execute_async(sqlalchemy.insert(webhook.Webhook).values(**webhook_data))
+
+        # Retrieve the created webhook
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(webhook.Webhook).where(webhook.Webhook.url == url).order_by(webhook.Webhook.id.desc())
+        )
+        created_webhook = result.first()
+
+        return self.ap.persistence_mgr.serialize_model(webhook.Webhook, created_webhook)
+
+    async def get_webhook(self, webhook_id: int) -> dict | None:
+        """Get a specific webhook by ID"""
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(webhook.Webhook).where(webhook.Webhook.id == webhook_id)
+        )
+
+        wh = result.first()
+
+        if wh is None:
+            return None
+
+        return self.ap.persistence_mgr.serialize_model(webhook.Webhook, wh)
+
+    async def update_webhook(
+        self, webhook_id: int, name: str = None, url: str = None, description: str = None, enabled: bool = None
+    ) -> None:
+        """Update a webhook's metadata"""
+        update_data = {}
+        if name is not None:
+            update_data['name'] = name
+        if url is not None:
+            update_data['url'] = url
+        if description is not None:
+            update_data['description'] = description
+        if enabled is not None:
+            update_data['enabled'] = enabled
+
+        if update_data:
+            await self.ap.persistence_mgr.execute_async(
+                sqlalchemy.update(webhook.Webhook).where(webhook.Webhook.id == webhook_id).values(**update_data)
+            )
+
+    async def delete_webhook(self, webhook_id: int) -> None:
+        """Delete a webhook"""
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.delete(webhook.Webhook).where(webhook.Webhook.id == webhook_id)
+        )
+
+    async def get_enabled_webhooks(self) -> list[dict]:
+        """Get all enabled webhooks"""
+        result = await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.select(webhook.Webhook).where(webhook.Webhook.enabled == True)
+        )
+
+        webhooks = result.all()
+        return [self.ap.persistence_mgr.serialize_model(webhook.Webhook, wh) for wh in webhooks]
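
As a hypothetical usage sketch (not part of the diff), the service composes with the router group added earlier; here `ap` stands for the running Application instance and the URL is a placeholder:

    wh = await ap.webhook_service.create_webhook(
        name='ops-alerts',
        url='https://example.com/hooks/langbot',  # placeholder endpoint
        description='push bot events to ops',
    )
    active = await ap.webhook_service.get_enabled_webhooks()  # only rows with enabled == True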

@@ -59,14 +59,15 @@ class CommandManager:
         context: command_context.ExecuteContext,
         operator_list: list[operator.CommandOperator],
         operator: operator.CommandOperator = None,
+        bound_plugins: list[str] | None = None,
     ) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
         """Execute a command"""

-        command_list = await self.ap.plugin_connector.list_commands()
+        command_list = await self.ap.plugin_connector.list_commands(bound_plugins)

         for command in command_list:
             if command.metadata.name == context.command:
-                async for ret in self.ap.plugin_connector.execute_command(context):
+                async for ret in self.ap.plugin_connector.execute_command(context, bound_plugins):
                     yield ret
                 break
         else:
@@ -102,5 +103,8 @@ class CommandManager:

         ctx.shift()

-        async for ret in self._execute(ctx, self.cmd_list):
+        # Get bound plugins from query
+        bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+
+        async for ret in self._execute(ctx, self.cmd_list, bound_plugins=bound_plugins):
             yield ret

@@ -6,6 +6,7 @@ import traceback
 import os

 from ..platform import botmgr as im_mgr
+from ..platform.webhook_pusher import WebhookPusher
 from ..provider.session import sessionmgr as llm_session_mgr
 from ..provider.modelmgr import modelmgr as llm_model_mgr
 from ..provider.tools import toolmgr as llm_tool_mgr
@@ -22,6 +23,9 @@ from ..api.http.service import model as model_service
 from ..api.http.service import pipeline as pipeline_service
 from ..api.http.service import bot as bot_service
 from ..api.http.service import knowledge as knowledge_service
+from ..api.http.service import mcp as mcp_service
+from ..api.http.service import apikey as apikey_service
+from ..api.http.service import webhook as webhook_service
 from ..discover import engine as discover_engine
 from ..storage import mgr as storagemgr
 from ..utils import logcache
@@ -43,6 +47,8 @@ class Application:

     platform_mgr: im_mgr.PlatformManager = None

+    webhook_pusher: WebhookPusher = None
+
     cmd_mgr: cmdmgr.CommandManager = None

     sess_mgr: llm_session_mgr.SessionManager = None
@@ -119,6 +125,12 @@ class Application:

     knowledge_service: knowledge_service.KnowledgeService = None

+    mcp_service: mcp_service.MCPService = None
+
+    apikey_service: apikey_service.ApiKeyService = None
+
+    webhook_service: webhook_service.WebhookService = None
+
     def __init__(self):
         pass

@@ -5,7 +5,6 @@ import shutil


 required_files = {
-    'plugins/__init__.py': 'templates/__init__.py',
     'data/config.yaml': 'templates/config.yaml',
 }

@@ -15,7 +14,6 @@ required_paths = [
     'data/metadata',
     'data/logs',
     'data/labels',
-    'plugins',
 ]

@@ -12,6 +12,7 @@ from ...provider.modelmgr import modelmgr as llm_model_mgr
 from ...provider.tools import toolmgr as llm_tool_mgr
 from ...rag.knowledge import kbmgr as rag_mgr
 from ...platform import botmgr as im_mgr
+from ...platform.webhook_pusher import WebhookPusher
 from ...persistence import mgr as persistencemgr
 from ...api.http.controller import main as http_controller
 from ...api.http.service import user as user_service
@@ -19,6 +20,9 @@ from ...api.http.service import model as model_service
 from ...api.http.service import pipeline as pipeline_service
 from ...api.http.service import bot as bot_service
 from ...api.http.service import knowledge as knowledge_service
+from ...api.http.service import mcp as mcp_service
+from ...api.http.service import apikey as apikey_service
+from ...api.http.service import webhook as webhook_service
 from ...discover import engine as discover_engine
 from ...storage import mgr as storagemgr
 from ...utils import logcache
@@ -91,6 +95,10 @@ class BuildAppStage(stage.BootingStage):
         await im_mgr_inst.initialize()
         ap.platform_mgr = im_mgr_inst

+        # Initialize webhook pusher
+        webhook_pusher_inst = WebhookPusher(ap)
+        ap.webhook_pusher = webhook_pusher_inst
+
         pipeline_mgr = pipelinemgr.PipelineManager(ap)
         await pipeline_mgr.initialize()
         ap.pipeline_mgr = pipeline_mgr
@@ -126,5 +134,14 @@ class BuildAppStage(stage.BootingStage):
         knowledge_service_inst = knowledge_service.KnowledgeService(ap)
         ap.knowledge_service = knowledge_service_inst

+        mcp_service_inst = mcp_service.MCPService(ap)
+        ap.mcp_service = mcp_service_inst
+
+        apikey_service_inst = apikey_service.ApiKeyService(ap)
+        ap.apikey_service = apikey_service_inst
+
+        webhook_service_inst = webhook_service.WebhookService(ap)
+        ap.webhook_service = webhook_service_inst
+
         ctrl = controller.Controller(ap)
         ap.ctrl = ctrl

@@ -1,11 +1,93 @@
 from __future__ import annotations

 import os
+from typing import Any

 from .. import stage, app
 from ..bootutils import config


+def _apply_env_overrides_to_config(cfg: dict) -> dict:
+    """Apply environment variable overrides to data/config.yaml
+
+    Environment variables should be uppercase and use __ (double underscore)
+    to represent nested keys. For example:
+    - CONCURRENCY__PIPELINE overrides concurrency.pipeline
+    - PLUGIN__RUNTIME_WS_URL overrides plugin.runtime_ws_url
+
+    Arrays and dict types are ignored.
+
+    Args:
+        cfg: Configuration dictionary
+
+    Returns:
+        Updated configuration dictionary
+    """
+
+    def convert_value(value: str, original_value: Any) -> Any:
+        """Convert string value to appropriate type based on original value
+
+        Args:
+            value: String value from environment variable
+            original_value: Original value to infer type from
+
+        Returns:
+            Converted value (falls back to string if conversion fails)
+        """
+        if isinstance(original_value, bool):
+            return value.lower() in ('true', '1', 'yes', 'on')
+        elif isinstance(original_value, int):
+            try:
+                return int(value)
+            except ValueError:
+                # If conversion fails, keep as string (user error, but non-breaking)
+                return value
+        elif isinstance(original_value, float):
+            try:
+                return float(value)
+            except ValueError:
+                # If conversion fails, keep as string (user error, but non-breaking)
+                return value
+        else:
+            return value
+
+    # Process environment variables
+    for env_key, env_value in os.environ.items():
+        # Check if the environment variable is uppercase and contains __
+        if not env_key.isupper():
+            continue
+        if '__' not in env_key:
+            continue
+
+        print(f'apply env overrides to config: env_key: {env_key}, env_value: {env_value}')
+
+        # Convert environment variable name to config path
+        # e.g., CONCURRENCY__PIPELINE -> ['concurrency', 'pipeline']
+        keys = [key.lower() for key in env_key.split('__')]
+
+        # Navigate to the target value and validate the path
+        current = cfg
+
+        for i, key in enumerate(keys):
+            if not isinstance(current, dict) or key not in current:
+                break
+
+            if i == len(keys) - 1:
+                # At the final key - check if it's a scalar value
+                if isinstance(current[key], (dict, list)):
+                    # Skip dict and list types
+                    pass
+                else:
+                    # Valid scalar value - convert and set it
+                    converted_value = convert_value(env_value, current[key])
+                    current[key] = converted_value
+            else:
+                # Navigate deeper
+                current = current[key]
+
+    return cfg
+
+
 @stage.stage_class('LoadConfigStage')
 class LoadConfigStage(stage.BootingStage):
     """Load config file stage"""
@@ -54,6 +136,10 @@ class LoadConfigStage(stage.BootingStage):
         ap.instance_config = await config.load_yaml_config(
             'data/config.yaml', 'templates/config.yaml', completion=False
         )
+
+        # Apply environment variable overrides to data/config.yaml
+        ap.instance_config.data = _apply_env_overrides_to_config(ap.instance_config.data)
+
         await ap.instance_config.dump_config()

         ap.sensitive_meta = await config.load_json_config(
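
To make the override rule concrete, a small illustrative example (the config keys and values here are made up, not taken from LangBot's templates):

    import os

    cfg = {'concurrency': {'pipeline': 20}, 'plugin': {'runtime_ws_url': 'ws://localhost:5400/control/ws'}}

    os.environ['CONCURRENCY__PIPELINE'] = '64'  # scalar int, converted via int()
    os.environ['PLUGIN__RUNTIME_WS_URL'] = 'ws://runtime:5400/control/ws'

    cfg = _apply_env_overrides_to_config(cfg)
    # cfg['concurrency']['pipeline'] == 64
    # cfg['plugin']['runtime_ws_url'] == 'ws://runtime:5400/control/ws'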

@@ -156,7 +156,7 @@ class TaskWrapper:
             'state': self.task._state,
             'exception': self.assume_exception().__str__() if self.assume_exception() is not None else None,
             'exception_traceback': exception_traceback,
-            'result': self.assume_result().__str__() if self.assume_result() is not None else None,
+            'result': self.assume_result() if self.assume_result() is not None else None,
         },
     }

pkg/entity/persistence/apikey.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+import sqlalchemy
+
+from .base import Base
+
+
+class ApiKey(Base):
+    """API Key for external service authentication"""
+
+    __tablename__ = 'api_keys'
+
+    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
+    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
+    key = sqlalchemy.Column(sqlalchemy.String(255), nullable=False, unique=True)
+    description = sqlalchemy.Column(sqlalchemy.String(512), nullable=True, default='')
+    created_at = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, server_default=sqlalchemy.func.now())
+    updated_at = sqlalchemy.Column(
+        sqlalchemy.DateTime,
+        nullable=False,
+        server_default=sqlalchemy.func.now(),
+        onupdate=sqlalchemy.func.now(),
+    )

pkg/entity/persistence/mcp.py (new file, 20 lines)
@@ -0,0 +1,20 @@
+import sqlalchemy
+
+from .base import Base
+
+
+class MCPServer(Base):
+    __tablename__ = 'mcp_servers'
+
+    uuid = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True, unique=True)
+    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
+    enable = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
+    mode = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)  # stdio, sse
+    extra_args = sqlalchemy.Column(sqlalchemy.JSON, nullable=False, default={})
+    created_at = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, server_default=sqlalchemy.func.now())
+    updated_at = sqlalchemy.Column(
+        sqlalchemy.DateTime,
+        nullable=False,
+        server_default=sqlalchemy.func.now(),
+        onupdate=sqlalchemy.func.now(),
+    )

@@ -1,12 +1,13 @@
 import sqlalchemy

 from .base import Base
+from ...utils import constants


 initial_metadata = [
     {
         'key': 'database_version',
-        'value': '0',
+        'value': str(constants.required_database_version),
     },
 ]
@@ -22,6 +22,7 @@ class LegacyPipeline(Base):
     is_default = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=False)
     stages = sqlalchemy.Column(sqlalchemy.JSON, nullable=False)
     config = sqlalchemy.Column(sqlalchemy.JSON, nullable=False)
+    extensions_preferences = sqlalchemy.Column(sqlalchemy.JSON, nullable=False, default={})


 class PipelineRunRecord(Base):

pkg/entity/persistence/webhook.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+import sqlalchemy
+
+from .base import Base
+
+
+class Webhook(Base):
+    """Webhook for pushing bot events to external systems"""
+
+    __tablename__ = 'webhooks'
+
+    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
+    name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
+    url = sqlalchemy.Column(sqlalchemy.String(1024), nullable=False)
+    description = sqlalchemy.Column(sqlalchemy.String(512), nullable=True, default='')
+    enabled = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False, default=True)
+    created_at = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, server_default=sqlalchemy.func.now())
+    updated_at = sqlalchemy.Column(
+        sqlalchemy.DateTime,
+        nullable=False,
+        server_default=sqlalchemy.func.now(),
+        onupdate=sqlalchemy.func.now(),
+    )

@@ -78,6 +78,8 @@ class PersistenceManager:

         self.ap.logger.info(f'Successfully upgraded database to version {last_migration_number}.')

+        await self.write_default_pipeline()
+
     async def create_tables(self):
         # create tables
         async with self.get_db_engine().connect() as conn:
@@ -98,6 +100,7 @@ class PersistenceManager:
         if row is None:
             await self.execute_async(sqlalchemy.insert(metadata.Metadata).values(item))

+    async def write_default_pipeline(self):
         # write default pipeline
         result = await self.execute_async(sqlalchemy.select(pipeline.LegacyPipeline))
         default_pipeline_uuid = None
@@ -115,6 +118,7 @@ class PersistenceManager:
                 'name': 'ChatPipeline',
                 'description': 'Default pipeline, new bots will be bound to this pipeline | 默认提供的流水线,您配置的机器人将自动绑定到此流水线',
                 'config': pipeline_config,
+                'extensions_preferences': {},
             }

             await self.execute_async(sqlalchemy.insert(pipeline.LegacyPipeline).values(pipeline_data))

@@ -10,7 +10,9 @@ class DBMigratePluginConfig(migration.DBMigration):

         if 'plugin' not in self.ap.instance_config.data:
             self.ap.instance_config.data['plugin'] = {
-                'runtime_ws_url': 'ws://localhost:5400/control/ws',
+                'runtime_ws_url': 'ws://langbot_plugin_runtime:5400/control/ws',
+                'enable_marketplace': True,
+                'cloud_service_url': 'https://space.langbot.app',
             }

             await self.ap.instance_config.dump_config()

@@ -0,0 +1,20 @@
+import sqlalchemy
+from .. import migration
+
+
+@migration.migration_class(9)
+class DBMigratePipelineExtensionPreferences(migration.DBMigration):
+    """Pipeline extension preferences"""
+
+    async def upgrade(self):
+        """Upgrade"""
+
+        sql_text = sqlalchemy.text(
+            "ALTER TABLE legacy_pipelines ADD COLUMN extensions_preferences JSON NOT NULL DEFAULT '{}'"
+        )
+        await self.ap.persistence_mgr.execute_async(sql_text)
+
+    async def downgrade(self):
+        """Downgrade"""
+        sql_text = sqlalchemy.text('ALTER TABLE legacy_pipelines DROP COLUMN extensions_preferences')
+        await self.ap.persistence_mgr.execute_async(sql_text)

@@ -0,0 +1,88 @@
+from .. import migration
+
+import sqlalchemy
+
+from ...entity.persistence import pipeline as persistence_pipeline
+
+
+@migration.migration_class(10)
+class DBMigratePipelineMultiKnowledgeBase(migration.DBMigration):
+    """Pipeline support multiple knowledge base binding"""
+
+    async def upgrade(self):
+        """Upgrade"""
+        # read all pipelines
+        pipelines = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_pipeline.LegacyPipeline))
+
+        for pipeline in pipelines:
+            serialized_pipeline = self.ap.persistence_mgr.serialize_model(persistence_pipeline.LegacyPipeline, pipeline)
+
+            config = serialized_pipeline['config']
+
+            # Convert knowledge-base from string to array
+            if 'local-agent' in config['ai']:
+                current_kb = config['ai']['local-agent'].get('knowledge-base', '')
+
+                # If it's already a list, skip
+                if isinstance(current_kb, list):
+                    continue
+
+                # Convert string to list
+                if current_kb and current_kb != '__none__':
+                    config['ai']['local-agent']['knowledge-bases'] = [current_kb]
+                else:
+                    config['ai']['local-agent']['knowledge-bases'] = []
+
+                # Remove old field
+                if 'knowledge-base' in config['ai']['local-agent']:
+                    del config['ai']['local-agent']['knowledge-base']
+
+            await self.ap.persistence_mgr.execute_async(
+                sqlalchemy.update(persistence_pipeline.LegacyPipeline)
+                .where(persistence_pipeline.LegacyPipeline.uuid == serialized_pipeline['uuid'])
+                .values(
+                    {
+                        'config': config,
+                        'for_version': self.ap.ver_mgr.get_current_version(),
+                    }
+                )
+            )
+
+    async def downgrade(self):
+        """Downgrade"""
+        # read all pipelines
+        pipelines = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_pipeline.LegacyPipeline))
+
+        for pipeline in pipelines:
+            serialized_pipeline = self.ap.persistence_mgr.serialize_model(persistence_pipeline.LegacyPipeline, pipeline)
+
+            config = serialized_pipeline['config']
+
+            # Convert knowledge-bases from array back to string
+            if 'local-agent' in config['ai']:
+                current_kbs = config['ai']['local-agent'].get('knowledge-bases', [])
+
+                # If it's already a string, skip
+                if isinstance(current_kbs, str):
+                    continue
+
+                # Convert list to string (take first one or empty)
+                if current_kbs and len(current_kbs) > 0:
+                    config['ai']['local-agent']['knowledge-base'] = current_kbs[0]
+                else:
+                    config['ai']['local-agent']['knowledge-base'] = ''
+
+                # Remove new field
+                if 'knowledge-bases' in config['ai']['local-agent']:
+                    del config['ai']['local-agent']['knowledge-bases']
+
+            await self.ap.persistence_mgr.execute_async(
+                sqlalchemy.update(persistence_pipeline.LegacyPipeline)
+                .where(persistence_pipeline.LegacyPipeline.uuid == serialized_pipeline['uuid'])
+                .values(
+                    {
+                        'config': config,
+                        'for_version': self.ap.ver_mgr.get_current_version(),
+                    }
+                )
+            )
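
In effect, the upgrade rewrites each pipeline's local-agent section as follows (illustrative values only):

    before = {'ai': {'local-agent': {'knowledge-base': 'kb-uuid-1'}}}
    after  = {'ai': {'local-agent': {'knowledge-bases': ['kb-uuid-1']}}}
    # an empty string or '__none__' becomes [] instead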

pkg/persistence/migrations/dbm011_dify_base_prompt_config.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+from .. import migration
+
+import sqlalchemy
+
+from ...entity.persistence import pipeline as persistence_pipeline
+
+
+@migration.migration_class(11)
+class DBMigrateDifyApiConfig(migration.DBMigration):
+    """Langflow API config"""
+
+    async def upgrade(self):
+        """Upgrade"""
+        # read all pipelines
+        pipelines = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_pipeline.LegacyPipeline))
+
+        for pipeline in pipelines:
+            serialized_pipeline = self.ap.persistence_mgr.serialize_model(persistence_pipeline.LegacyPipeline, pipeline)
+
+            config = serialized_pipeline['config']
+
+            if 'base-prompt' not in config['ai']['dify-service-api']:
+                config['ai']['dify-service-api']['base-prompt'] = (
+                    'When the file content is readable, please read the content of this file. When the file is an image, describe the content of this image.'
+                )
+
+            await self.ap.persistence_mgr.execute_async(
+                sqlalchemy.update(persistence_pipeline.LegacyPipeline)
+                .where(persistence_pipeline.LegacyPipeline.uuid == serialized_pipeline['uuid'])
+                .values(
+                    {
+                        'config': config,
+                        'for_version': self.ap.ver_mgr.get_current_version(),
+                    }
+                )
+            )
+
+    async def downgrade(self):
+        """Downgrade"""
+        pass

@@ -21,10 +21,15 @@ class LongTextProcessStage(stage.PipelineStage):
         - resp_message_chain
     """

-    strategy_impl: strategy.LongTextStrategy
+    strategy_impl: strategy.LongTextStrategy | None

     async def initialize(self, pipeline_config: dict):
         config = pipeline_config['output']['long-text-processing']
+
+        if config['strategy'] == 'none':
+            self.strategy_impl = None
+            return
+
         if config['strategy'] == 'image':
             use_font = config['font-path']
             try:
@@ -67,6 +72,10 @@ class LongTextProcessStage(stage.PipelineStage):
         await self.strategy_impl.initialize()

     async def process(self, query: pipeline_query.Query, stage_inst_name: str) -> entities.StageProcessResult:
+        if self.strategy_impl is None:
+            self.ap.logger.debug('Long message processing strategy is not set, skip long message processing.')
+            return entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
+
         # Check whether the chain contains non-Plain components
         contains_non_plain = False

@@ -26,7 +26,7 @@ class ForwardComponentStrategy(strategy_model.LongTextStrategy):
             platform_message.ForwardMessageNode(
                 sender_id=query.adapter.bot_account_id,
                 sender_name='User',
-                message_chain=platform_message.MessageChain([message]),
+                message_chain=platform_message.MessageChain([platform_message.Plain(text=message)]),
             )
         ]

@@ -68,6 +68,12 @@ class RuntimePipeline:

     stage_containers: list[StageInstContainer]
     """Stage instance containers"""

+    bound_plugins: list[str]
+    """Plugins bound to this pipeline (format: author/plugin_name)"""
+
+    bound_mcp_servers: list[str]
+    """MCP servers bound to this pipeline (format: uuid)"""
+
     def __init__(
         self,
@@ -78,9 +84,20 @@ class RuntimePipeline:
         self.ap = ap
         self.pipeline_entity = pipeline_entity
         self.stage_containers = stage_containers
+
+        # Extract bound plugins and MCP servers from extensions_preferences
+        extensions_prefs = pipeline_entity.extensions_preferences or {}
+        plugin_list = extensions_prefs.get('plugins', [])
+        self.bound_plugins = [f"{p['author']}/{p['name']}" for p in plugin_list] if plugin_list else []
+
+        mcp_server_list = extensions_prefs.get('mcp_servers', [])
+        self.bound_mcp_servers = mcp_server_list if mcp_server_list else []

     async def run(self, query: pipeline_query.Query):
         query.pipeline_config = self.pipeline_entity.config
+        # Store bound plugins and MCP servers in query for filtering
+        query.variables['_pipeline_bound_plugins'] = self.bound_plugins
+        query.variables['_pipeline_bound_mcp_servers'] = self.bound_mcp_servers
         await self.process_query(query)

     async def _check_output(self, query: pipeline_query.Query, result: pipeline_entities.StageProcessResult):
@@ -96,7 +113,7 @@ class RuntimePipeline:
             if query.pipeline_config['output']['misc']['at-sender'] and isinstance(
                 query.message_event, platform_events.GroupMessage
             ):
-                result.user_notice.insert(0, platform_message.At(query.message_event.sender.id))
+                result.user_notice.insert(0, platform_message.At(target=query.message_event.sender.id))
             if await query.adapter.is_stream_output_supported():
                 await query.adapter.reply_message_chunk(
                     message_source=query.message_event,
@@ -188,6 +205,9 @@ class RuntimePipeline:
|
|||||||
async def process_query(self, query: pipeline_query.Query):
|
async def process_query(self, query: pipeline_query.Query):
|
||||||
"""处理请求"""
|
"""处理请求"""
|
||||||
try:
|
try:
|
||||||
|
# Get bound plugins for this pipeline
|
||||||
|
bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
|
||||||
|
|
||||||
# ======== 触发 MessageReceived 事件 ========
|
# ======== 触发 MessageReceived 事件 ========
|
||||||
event_type = (
|
event_type = (
|
||||||
events.PersonMessageReceived
|
events.PersonMessageReceived
|
||||||
@@ -203,7 +223,7 @@ class RuntimePipeline:
|
|||||||
message_chain=query.message_chain,
|
message_chain=query.message_chain,
|
||||||
)
|
)
|
||||||
|
|
||||||
event_ctx = await self.ap.plugin_connector.emit_event(event_obj)
|
event_ctx = await self.ap.plugin_connector.emit_event(event_obj, bound_plugins)
|
||||||
|
|
||||||
if event_ctx.is_prevented_default():
|
if event_ctx.is_prevented_default():
|
||||||
return
|
return
|
||||||
@@ -213,7 +233,7 @@ class RuntimePipeline:
|
|||||||
await self._execute_from_stage(0, query)
|
await self._execute_from_stage(0, query)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
inst_name = query.current_stage_name if query.current_stage_name else 'unknown'
|
inst_name = query.current_stage_name if query.current_stage_name else 'unknown'
|
||||||
self.ap.logger.error(f'处理请求时出错 query_id={query.query_id} stage={inst_name} : {e}')
|
self.ap.logger.error(f'Error processing query {query.query_id} stage={inst_name} : {e}')
|
||||||
self.ap.logger.error(f'Traceback: {traceback.format_exc()}')
|
self.ap.logger.error(f'Traceback: {traceback.format_exc()}')
|
||||||
finally:
|
finally:
|
||||||
self.ap.logger.debug(f'Query {query.query_id} processed')
|
self.ap.logger.debug(f'Query {query.query_id} processed')
|
||||||
@@ -35,11 +35,17 @@ class PreProcessor(stage.PipelineStage):
        session = await self.ap.sess_mgr.get_session(query)

        # When not local-agent, llm_model is None
-       llm_model = (
-           await self.ap.model_mgr.get_model_by_uuid(query.pipeline_config['ai']['local-agent']['model'])
-           if selected_runner == 'local-agent'
-           else None
-       )
+       try:
+           llm_model = (
+               await self.ap.model_mgr.get_model_by_uuid(query.pipeline_config['ai']['local-agent']['model'])
+               if selected_runner == 'local-agent'
+               else None
+           )
+       except ValueError:
+           self.ap.logger.warning(
+               f'LLM model {query.pipeline_config["ai"]["local-agent"]["model"] + " "}not found or not configured'
+           )
+           llm_model = None

        conversation = await self.ap.sess_mgr.get_conversation(
            query,
@@ -54,12 +60,19 @@ class PreProcessor(stage.PipelineStage):
        query.prompt = conversation.prompt.copy()
        query.messages = conversation.messages.copy()

-       if selected_runner == 'local-agent':
+       if selected_runner == 'local-agent' and llm_model:
            query.use_funcs = []
            query.use_llm_model_uuid = llm_model.model_entity.uuid

            if llm_model.model_entity.abilities.__contains__('func_call'):
-               query.use_funcs = await self.ap.tool_mgr.get_all_tools()
+               # Get bound plugins and MCP servers for filtering tools
+               bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+               bound_mcp_servers = query.variables.get('_pipeline_bound_mcp_servers', None)
+               query.use_funcs = await self.ap.tool_mgr.get_all_tools(bound_plugins, bound_mcp_servers)
+
+               self.ap.logger.debug(f'Bound plugins: {bound_plugins}')
+               self.ap.logger.debug(f'Bound MCP servers: {bound_mcp_servers}')
+               self.ap.logger.debug(f'Use funcs: {query.use_funcs}')

        variables = {
            'session_id': f'{query.session.launcher_type.value}_{query.session.launcher_id}',
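Note: to illustrate the filtering implied by get_all_tools(bound_plugins, bound_mcp_servers), here is a minimal sketch; the tool attributes (plugin_author, plugin_name, server_uuid) and the None-means-unrestricted convention are assumptions, not the actual tool manager implementation:

    def filter_tools(tools, bound_plugins, bound_mcp_servers):
        # None means no binding configured: keep everything (assumed convention)
        if bound_plugins is None and bound_mcp_servers is None:
            return tools
        kept = []
        for tool in tools:
            author = getattr(tool, 'plugin_author', None)  # hypothetical origin attribute
            if author is not None:
                if f'{author}/{tool.plugin_name}' in (bound_plugins or []):
                    kept.append(tool)
            elif getattr(tool, 'server_uuid', None) in (bound_mcp_servers or []):
                kept.append(tool)
        return kept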
@@ -72,7 +85,11 @@ class PreProcessor(stage.PipelineStage):

        # Check if this model supports vision, if not, remove all images
        # TODO this checking should be performed in runner, and in this stage, the image should be reserved
-       if selected_runner == 'local-agent' and not llm_model.model_entity.abilities.__contains__('vision'):
+       if (
+           selected_runner == 'local-agent'
+           and llm_model
+           and not llm_model.model_entity.abilities.__contains__('vision')
+       ):
            for msg in query.messages:
                if isinstance(msg.content, list):
                    for me in msg.content:
@@ -89,15 +106,22 @@ class PreProcessor(stage.PipelineStage):
                        content_list.append(provider_message.ContentElement.from_text(me.text))
                        plain_text += me.text
                    elif isinstance(me, platform_message.Image):
-                       if selected_runner != 'local-agent' or llm_model.model_entity.abilities.__contains__('vision'):
+                       if selected_runner != 'local-agent' or (
+                           llm_model and llm_model.model_entity.abilities.__contains__('vision')
+                       ):
                            if me.base64 is not None:
                                content_list.append(provider_message.ContentElement.from_image_base64(me.base64))
+                   elif isinstance(me, platform_message.File):
+                       # if me.url is not None:
+                       content_list.append(provider_message.ContentElement.from_file_url(me.url, me.name))
                    elif isinstance(me, platform_message.Quote) and qoute_msg:
                        for msg in me.origin:
                            if isinstance(msg, platform_message.Plain):
                                content_list.append(provider_message.ContentElement.from_text(msg.text))
                            elif isinstance(msg, platform_message.Image):
-                               if selected_runner != 'local-agent' or llm_model.model_entity.abilities.__contains__('vision'):
+                               if selected_runner != 'local-agent' or (
+                                   llm_model and llm_model.model_entity.abilities.__contains__('vision')
+                               ):
                                    if msg.base64 is not None:
                                        content_list.append(provider_message.ContentElement.from_image_base64(msg.base64))
@@ -113,7 +137,9 @@ class PreProcessor(stage.PipelineStage):
            query=query,
        )

-       event_ctx = await self.ap.plugin_connector.emit_event(event)
+       # Get bound plugins for filtering
+       bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+       event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        query.prompt.messages = event_ctx.event.default_prompt
        query.messages = event_ctx.event.prompt
@@ -9,7 +9,6 @@ from .. import handler
 from ... import entities
 from ....provider import runner as runner_module
-import langbot_plugin.api.entities.builtin.platform.message as platform_message
 import langbot_plugin.api.entities.events as events
 from ....utils import importutil
 from ....provider import runners
@@ -44,21 +43,24 @@ class ChatMessageHandler(handler.MessageHandler):
            query=query,
        )

-       event_ctx = await self.ap.plugin_connector.emit_event(event)
+       # Get bound plugins for filtering
+       bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+       event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        is_create_card = False  # check whether a streaming card needs to be created

        if event_ctx.is_prevented_default():
-           if event_ctx.event.reply is not None:
-               mc = platform_message.MessageChain(event_ctx.event.reply)
+           if event_ctx.event.reply_message_chain is not None:
+               mc = event_ctx.event.reply_message_chain
                query.resp_messages.append(mc)

                yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
            else:
                yield entities.StageProcessResult(result_type=entities.ResultType.INTERRUPT, new_query=query)
        else:
-           if event_ctx.event.alter is not None:
+           if event_ctx.event.user_message_alter is not None:
                # if isinstance(event_ctx.event, str):  # multimodal alter is not considered for now
-               query.user_message.content = event_ctx.event.alter
+               query.user_message.content = event_ctx.event.user_message_alter

            text_length = 0
            try:
@@ -5,7 +5,6 @@ import typing
 from .. import handler
 from ... import entities
 import langbot_plugin.api.entities.builtin.provider.message as provider_message
-import langbot_plugin.api.entities.builtin.platform.message as platform_message
 import langbot_plugin.api.entities.builtin.provider.session as provider_session
 import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
 import langbot_plugin.api.entities.events as events
@@ -46,11 +45,13 @@ class CommandHandler(handler.MessageHandler):
            query=query,
        )

-       event_ctx = await self.ap.plugin_connector.emit_event(event)
+       # Get bound plugins for filtering
+       bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+       event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        if event_ctx.is_prevented_default():
-           if event_ctx.event.reply is not None:
-               mc = platform_message.MessageChain(event_ctx.event.reply)
+           if event_ctx.event.reply_message_chain is not None:
+               mc = event_ctx.event.reply_message_chain

                query.resp_messages.append(mc)

@@ -59,9 +60,6 @@ class CommandHandler(handler.MessageHandler):
                yield entities.StageProcessResult(result_type=entities.ResultType.INTERRUPT, new_query=query)

        else:
-           if event_ctx.event.alter is not None:
-               query.message_chain = platform_message.MessageChain([platform_message.Plain(event_ctx.event.alter)])
-
            session = await self.ap.sess_mgr.get_session(query)

            async for ret in self.ap.cmd_mgr.execute(
@@ -78,7 +76,12 @@ class CommandHandler(handler.MessageHandler):
                    self.ap.logger.info(f'Command({query.query_id}) error: {self.cut_str(str(ret.error))}')

                    yield entities.StageProcessResult(result_type=entities.ResultType.CONTINUE, new_query=query)
-               elif ret.text is not None or ret.image_url is not None or ret.image_base64 is not None:
+               elif (
+                   ret.text is not None
+                   or ret.image_url is not None
+                   or ret.image_base64 is not None
+                   or ret.file_url is not None
+               ):
                    content: list[provider_message.ContentElement] = []

                    if ret.text is not None:
@@ -90,6 +93,9 @@ class CommandHandler(handler.MessageHandler):
                    if ret.image_base64 is not None:
                        content.append(provider_message.ContentElement.from_image_base64(ret.image_base64))

+                   if ret.file_url is not None:
+                       # this is a file-type return
+                       content.append(provider_message.ContentElement.from_file_url(ret.file_url, ret.file_name))
                    query.resp_messages.append(
                        provider_message.Message(
                            role='command',
@@ -33,7 +33,7 @@ class SendResponseBackStage(stage.PipelineStage):
        if query.pipeline_config['output']['misc']['at-sender'] and isinstance(
            query.message_event, platform_events.GroupMessage
        ):
-           query.resp_message_chain[-1].insert(0, platform_message.At(query.message_event.sender.id))
+           query.resp_message_chain[-1].insert(0, platform_message.At(target=query.message_event.sender.id))

        quote_origin = query.pipeline_config['output']['misc']['quote-origin']
@@ -16,26 +16,17 @@ class AtBotRule(rule_model.GroupRespondRule):
        rule_dict: dict,
        query: pipeline_query.Query,
    ) -> entities.RuleJudgeResult:
+       found = False
+
        def remove_at(message_chain: platform_message.MessageChain):
+           nonlocal found
            for component in message_chain.root:
-               if isinstance(component, platform_message.At) and component.target == query.adapter.bot_account_id:
+               if isinstance(component, platform_message.At) and str(component.target) == str(query.adapter.bot_account_id):
                    message_chain.remove(component)
+                   found = True
                    break

        remove_at(message_chain)
        remove_at(message_chain)  # the bot is at-ed twice when replying; check and remove the duplicate

-       # if message_chain.has(platform_message.At(query.adapter.bot_account_id)) and rule_dict['at']:
-       #     message_chain.remove(platform_message.At(query.adapter.bot_account_id))
-
-       # if message_chain.has(
-       #     platform_message.At(query.adapter.bot_account_id)
-       # ):  # the bot is at-ed twice when replying; check and remove the duplicate
-       #     message_chain.remove(platform_message.At(query.adapter.bot_account_id))
-
-       # return entities.RuleJudgeResult(
-       #     matching=True,
-       #     replacement=message_chain,
-       # )
-
-       return entities.RuleJudgeResult(matching=False, replacement=message_chain)
+       return entities.RuleJudgeResult(matching=found, replacement=message_chain)
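Note: the switch to str(...) == str(...) above guards against type drift between adapters: some platforms deliver the at-target as an int while bot_account_id is a str, so a plain == never matches. A minimal reproduction with hypothetical values:

    component_target = 123456      # int, as one platform's payload delivers it
    bot_account_id = '123456'      # str, as stored on the adapter

    assert component_target != bot_account_id               # silently never matches
    assert str(component_target) == str(bot_account_id)     # normalized comparison matches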
@@ -72,7 +72,9 @@ class ResponseWrapper(stage.PipelineStage):
            query=query,
        )

-       event_ctx = await self.ap.plugin_connector.emit_event(event)
+       # Get bound plugins for filtering
+       bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+       event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        if event_ctx.is_prevented_default():
            yield entities.StageProcessResult(
@@ -80,8 +82,8 @@ class ResponseWrapper(stage.PipelineStage):
                new_query=query,
            )
        else:
-           if event_ctx.event.reply is not None:
-               query.resp_message_chain.append(platform_message.MessageChain(event_ctx.event.reply))
+           if event_ctx.event.reply_message_chain is not None:
+               query.resp_message_chain.append(event_ctx.event.reply_message_chain)

            else:
                query.resp_message_chain.append(result.get_content_platform_message_chain())
@@ -115,7 +117,9 @@ class ResponseWrapper(stage.PipelineStage):
            query=query,
        )

-       event_ctx = await self.ap.plugin_connector.emit_event(event)
+       # Get bound plugins for filtering
+       bound_plugins = query.variables.get('_pipeline_bound_plugins', None)
+       event_ctx = await self.ap.plugin_connector.emit_event(event, bound_plugins)

        if event_ctx.is_prevented_default():
            yield entities.StageProcessResult(
@@ -123,10 +127,8 @@ class ResponseWrapper(stage.PipelineStage):
                new_query=query,
            )
        else:
-           if event_ctx.event.reply is not None:
-               query.resp_message_chain.append(
-                   platform_message.MessageChain(text=event_ctx.event.reply)
-               )
+           if event_ctx.event.reply_message_chain is not None:
+               query.resp_message_chain.append(event_ctx.event.reply_message_chain)

            else:
                query.resp_message_chain.append(
@@ -13,6 +13,7 @@ from ..entity.persistence import bot as persistence_bot
 from ..entity.errors import platform as platform_errors

 from .logger import EventLogger
+from .webhook_pusher import WebhookPusher

 import langbot_plugin.api.entities.builtin.provider.session as provider_session
 import langbot_plugin.api.entities.builtin.platform.events as platform_events
@@ -66,6 +67,14 @@ class RuntimeBot:
                    message_session_id=f'person_{event.sender.id}',
                )

+               # Push to webhooks
+               if hasattr(self.ap, 'webhook_pusher') and self.ap.webhook_pusher:
+                   asyncio.create_task(
+                       self.ap.webhook_pusher.push_person_message(
+                           event, self.bot_entity.uuid, adapter.__class__.__name__
+                       )
+                   )
+
                await self.ap.query_pool.add_query(
                    bot_uuid=self.bot_entity.uuid,
                    launcher_type=provider_session.LauncherTypes.PERSON,
@@ -91,6 +100,14 @@ class RuntimeBot:
                    message_session_id=f'group_{event.group.id}',
                )

+               # Push to webhooks
+               if hasattr(self.ap, 'webhook_pusher') and self.ap.webhook_pusher:
+                   asyncio.create_task(
+                       self.ap.webhook_pusher.push_group_message(
+                           event, self.bot_entity.uuid, adapter.__class__.__name__
+                       )
+                   )
+
                await self.ap.query_pool.add_query(
                    bot_uuid=self.bot_entity.uuid,
                    launcher_type=provider_session.LauncherTypes.GROUP,
@@ -157,6 +174,9 @@ class PlatformManager:
        self.adapter_dict = {}

    async def initialize(self):
+       # delete all bot log images
+       await self.ap.storage_mgr.storage_provider.delete_dir_recursive('bot_log_images')
+
        self.adapter_components = self.ap.discover.get_components_by_kind('MessagePlatformAdapter')
        adapter_dict: dict[str, type[abstract_platform_adapter.AbstractMessagePlatformAdapter]] = {}
        for component in self.adapter_components:
@@ -149,7 +149,7 @@ class EventLogger(abstract_platform_event_logger.AbstractEventLogger):
        extension = mimetypes.guess_extension(mime_type)
        if extension is None:
            extension = '.jpg'
-       image_key = f'{message_session_id}-{uuid.uuid4()}{extension}'
+       image_key = f'bot_log_images/{message_session_id}-{uuid.uuid4()}{extension}'
        await self.ap.storage_mgr.storage_provider.save(image_key, img_bytes)
        image_keys.append(image_key)

@@ -1,3 +1,4 @@
+
 import traceback
 import typing
 from libs.dingtalk_api.dingtalkevent import DingTalkEvent
@@ -36,14 +37,31 @@ class DingTalkMessageConverter(abstract_platform_adapter.AbstractMessageConverte
            if atUser.dingtalk_id == event.incoming_message.chatbot_user_id:
                yiri_msg_list.append(platform_message.At(target=bot_name))

-       if event.content:
-           text_content = event.content.replace('@' + bot_name, '')
-           yiri_msg_list.append(platform_message.Plain(text=text_content))
-       if event.picture:
-           yiri_msg_list.append(platform_message.Image(base64=event.picture))
+       if event.rich_content:
+           elements = event.rich_content.get("Elements")
+           for element in elements:
+               if element.get('Type') == 'text':
+                   text = element.get('Content', '').replace('@' + bot_name, '')
+                   if text.strip():
+                       yiri_msg_list.append(platform_message.Plain(text=text))
+               elif element.get('Type') == 'image' and element.get('Picture'):
+                   yiri_msg_list.append(platform_message.Image(base64=element['Picture']))
+       else:
+           # fall back to the original simple logic
+           if event.content:
+               text_content = event.content.replace('@' + bot_name, '')
+               yiri_msg_list.append(platform_message.Plain(text=text_content))
+           if event.picture:
+               yiri_msg_list.append(platform_message.Image(base64=event.picture))
+
+       # handle other message types (file, audio, etc.)
+       if event.file:
+           yiri_msg_list.append(platform_message.File(url=event.file, name=event.name))
        if event.audio:
            yiri_msg_list.append(platform_message.Voice(base64=event.audio))

        chain = platform_message.MessageChain(yiri_msg_list)

        return chain
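Note: a sketch of the rich_content payload this converter now walks; the Elements/Type/Content/Picture keys are taken from the code above, the sample values are hypothetical:

    # Hypothetical event.rich_content as delivered by the DingTalk API wrapper
    rich_content = {
        'Elements': [
            {'Type': 'text', 'Content': '@bot hello there'},        # mention stripped, kept if non-empty
            {'Type': 'image', 'Picture': '<base64-encoded bytes>'},  # forwarded as a base64 Image component
        ]
    }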
@@ -22,6 +22,7 @@ import langbot_plugin.api.definition.abstract.platform.event_logger as abstract_
 from ..logger import EventLogger

+
 # Voice feature exception definitions
 class VoiceConnectionError(Exception):
     """Base exception for voice connections"""
@@ -139,19 +139,15 @@ class QQOfficialAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter
    event_converter: QQOfficialEventConverter = QQOfficialEventConverter()

    def __init__(self, config: dict, logger: EventLogger):
-       self.config = config
-       self.logger = logger
-
-       required_keys = [
-           'appid',
-           'secret',
-       ]
-       missing_keys = [key for key in required_keys if key not in config]
-       if missing_keys:
-           raise command_errors.ParamNotEnoughError('QQ官方机器人缺少相关配置项,请查看文档或联系管理员')
-
-       self.bot = QQOfficialClient(
-           app_id=config['appid'], secret=config['secret'], token=config['token'], logger=self.logger
-       )
+       bot = QQOfficialClient(
+           app_id=config['appid'], secret=config['secret'], token=config['token'], logger=logger
+       )
+
+       super().__init__(
+           config=config,
+           logger=logger,
+           bot=bot,
+           bot_account_id=config['appid'],
+       )

    async def reply_message(
@@ -102,7 +102,7 @@ class TelegramEventConverter(abstract_platform_adapter.AbstractEventConverter):
            sender=platform_entities.Friend(
                id=event.effective_chat.id,
                nickname=event.effective_chat.first_name,
-               remark=event.effective_chat.id,
+               remark=str(event.effective_chat.id),
            ),
            message_chain=lb_message,
            time=event.message.date.timestamp(),
@@ -139,7 +139,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
        pattern = r'@\S{1,20}'
        content_no_preifx = re.sub(pattern, '', content_no_preifx)

-       return platform_message.MessageChain([platform_message.Plain(content_no_preifx)])
+       return platform_message.MessageChain([platform_message.Plain(text=content_no_preifx)])

    async def _handler_image(self, message: Optional[dict], content_no_preifx: str) -> platform_message.MessageChain:
        """Handle image messages (msg_type=3)"""
@@ -265,7 +265,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
        # text message
        try:
            if '<msg>' not in quote_data:
-               quote_data_message_list.append(platform_message.Plain(quote_data))
+               quote_data_message_list.append(platform_message.Plain(text=quote_data))
            else:
                # expand the quoted message
                quote_data_xml = ET.fromstring(quote_data)
@@ -280,7 +280,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
                quote_data_message_list.extend(await self._handler_compound(None, quote_data))
        except Exception as e:
            self.logger.error(f'处理引用消息异常 expcetion:{e}')
-           quote_data_message_list.append(platform_message.Plain(quote_data))
+           quote_data_message_list.append(platform_message.Plain(text=quote_data))
        message_list.append(
            platform_message.Quote(
                sender_id=sender_id,
@@ -290,7 +290,7 @@ class WeChatPadMessageConverter(abstract_platform_adapter.AbstractMessageConvert
        if len(user_data) > 0:
            pattern = r'@\S{1,20}'
            user_data = re.sub(pattern, '', user_data)
-           message_list.append(platform_message.Plain(user_data))
+           message_list.append(platform_message.Plain(text=user_data))

        return platform_message.MessageChain(message_list)

@@ -543,7 +543,6 @@ class WeChatPadAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter)
    ] = {}

    def __init__(self, config: dict, logger: EventLogger):
-
        quart_app = quart.Quart(__name__)

        message_converter = WeChatPadMessageConverter(config, logger)
@@ -551,15 +550,14 @@ class WeChatPadAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter)
        bot = WeChatPadClient(config['wechatpad_url'], config['token'])
        super().__init__(
            config=config,
-           logger = logger,
-           quart_app = quart_app,
-           message_converter =message_converter,
-           event_converter = event_converter,
+           logger=logger,
+           quart_app=quart_app,
+           message_converter=message_converter,
+           event_converter=event_converter,
            listeners={},
-           bot_account_id ='',
-           name="WeChatPad",
+           bot_account_id='',
+           name='WeChatPad',
            bot=bot,
-
        )

    async def ws_message(self, data):
@@ -49,7 +49,7 @@ class WecomBotEventConverter(abstract_platform_adapter.AbstractEventConverter):
        return platform_events.FriendMessage(
            sender=platform_entities.Friend(
                id=event.userid,
-               nickname='',
+               nickname=event.username,
                remark='',
            ),
            message_chain=message_chain,
@@ -61,10 +61,10 @@ class WecomBotEventConverter(abstract_platform_adapter.AbstractEventConverter):
        sender = platform_entities.GroupMember(
            id=event.userid,
            permission='MEMBER',
-           member_name=event.userid,
+           member_name=event.username,
            group=platform_entities.Group(
                id=str(event.chatid),
-               name='',
+               name=event.chatname,
                permission=platform_entities.Permission.Member,
            ),
            special_title='',
@@ -117,6 +117,50 @@ class WecomBotAdapter(abstract_platform_adapter.AbstractMessagePlatformAdapter):
        content = await self.message_converter.yiri2target(message)
        await self.bot.set_message(message_source.source_platform_object.message_id, content)

+   async def reply_message_chunk(
+       self,
+       message_source: platform_events.MessageEvent,
+       bot_message,
+       message: platform_message.MessageChain,
+       quote_origin: bool = False,
+       is_final: bool = False,
+   ):
+       """Write incremental pipeline output into the WeCom stream session.
+
+       Args:
+           message_source: the original message event provided by the pipeline.
+           bot_message: model metadata for the current chunk (unused).
+           message: the message chain to reply with.
+           quote_origin: whether to quote the original message (not yet supported by WeCom).
+           is_final: marks whether this chunk is the final reply.
+
+       Returns:
+           dict: contains a `stream` key indicating whether the write succeeded.
+
+       Example:
+           Triggered automatically by the pipeline's `reply_message_chunk` call; no manual invocation needed.
+       """
+       # Convert to plain text (the bot protocol currently supports text streams only)
+       content = await self.message_converter.yiri2target(message)
+       msg_id = message_source.source_platform_object.message_id
+
+       # Push the chunk into the queue inside WecomBotClient; the return value decides whether to fall back
+       success = await self.bot.push_stream_chunk(msg_id, content, is_final=is_final)
+       if not success and is_final:
+           # Fall back to the legacy set_message when the stream queue was not hit
+           await self.bot.set_message(msg_id, content)
+       return {'stream': success}
+
+   async def is_stream_output_supported(self) -> bool:
+       """Streaming is enabled by default on the bot side.
+
+       Returns:
+           bool: always returns True.
+
+       Example:
+           The pipeline execution stage calls this to confirm whether streaming is enabled."""
+       return True
+
    async def send_message(self, target_type, target_id, message):
        pass
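Note: to make the fallback in reply_message_chunk concrete, here is a minimal sketch of a push_stream_chunk-style queue; this illustrates assumed semantics, not the actual WecomBotClient internals:

    import asyncio

    class StreamSessionQueue:
        """Hypothetical per-message stream buffer keyed by msg_id."""

        def __init__(self):
            self._queues: dict[str, asyncio.Queue] = {}

        def open(self, msg_id: str) -> None:
            # called when a stream session is established for a message
            self._queues[msg_id] = asyncio.Queue()

        async def push(self, msg_id: str, content: str, is_final: bool) -> bool:
            queue = self._queues.get(msg_id)
            if queue is None:
                return False  # no open session: the adapter falls back to set_message
            await queue.put((content, is_final))
            if is_final:
                self._queues.pop(msg_id, None)  # close the session on the last chunk
            return True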
pkg/platform/webhook_pusher.py (new file, 106 lines)
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import asyncio
+import logging
+import aiohttp
+import uuid
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ..core import app
+
+import langbot_plugin.api.entities.builtin.platform.events as platform_events
+
+
+class WebhookPusher:
+    """Push bot events to configured webhooks"""
+
+    ap: app.Application
+    logger: logging.Logger
+
+    def __init__(self, ap: app.Application):
+        self.ap = ap
+        self.logger = self.ap.logger
+
+    async def push_person_message(self, event: platform_events.FriendMessage, bot_uuid: str, adapter_name: str) -> None:
+        """Push person message event to webhooks"""
+        try:
+            webhooks = await self.ap.webhook_service.get_enabled_webhooks()
+            if not webhooks:
+                return
+
+            # Build payload
+            payload = {
+                'uuid': str(uuid.uuid4()),  # unique id for the event
+                'event_type': 'bot.person_message',
+                'data': {
+                    'bot_uuid': bot_uuid,
+                    'adapter_name': adapter_name,
+                    'sender': {
+                        'id': str(event.sender.id),
+                        'name': getattr(event.sender, 'name', ''),
+                    },
+                    'message': event.message_chain.model_dump(),
+                    'timestamp': event.time if hasattr(event, 'time') else None,
+                },
+            }
+
+            # Push to all webhooks asynchronously
+            tasks = [self._push_to_webhook(webhook['url'], payload) for webhook in webhooks]
+            await asyncio.gather(*tasks, return_exceptions=True)
+
+        except Exception as e:
+            self.logger.error(f'Failed to push person message to webhooks: {e}')
+
+    async def push_group_message(self, event: platform_events.GroupMessage, bot_uuid: str, adapter_name: str) -> None:
+        """Push group message event to webhooks"""
+        try:
+            webhooks = await self.ap.webhook_service.get_enabled_webhooks()
+            if not webhooks:
+                return
+
+            # Build payload
+            payload = {
+                'uuid': str(uuid.uuid4()),  # unique id for the event
+                'event_type': 'bot.group_message',
+                'data': {
+                    'bot_uuid': bot_uuid,
+                    'adapter_name': adapter_name,
+                    'group': {
+                        'id': str(event.group.id),
+                        'name': getattr(event.group, 'name', ''),
+                    },
+                    'sender': {
+                        'id': str(event.sender.id),
+                        'name': getattr(event.sender, 'name', ''),
+                    },
+                    'message': event.message_chain.model_dump(),
+                    'timestamp': event.time if hasattr(event, 'time') else None,
+                },
+            }
+
+            # Push to all webhooks asynchronously
+            tasks = [self._push_to_webhook(webhook['url'], payload) for webhook in webhooks]
+            await asyncio.gather(*tasks, return_exceptions=True)
+
+        except Exception as e:
+            self.logger.error(f'Failed to push group message to webhooks: {e}')
+
+    async def _push_to_webhook(self, url: str, payload: dict) -> None:
+        """Push payload to a single webhook URL"""
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.post(
+                    url,
+                    json=payload,
+                    headers={'Content-Type': 'application/json'},
+                    timeout=aiohttp.ClientTimeout(total=15),
+                ) as response:
+                    if response.status >= 400:
+                        self.logger.warning(f'Webhook {url} returned status {response.status}')
+                    else:
+                        self.logger.debug(f'Successfully pushed to webhook {url}')
+        except asyncio.TimeoutError:
+            self.logger.warning(f'Timeout pushing to webhook {url}')
+        except Exception as e:
+            self.logger.warning(f'Error pushing to webhook {url}: {e}')
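Note: as a usage illustration, a minimal receiver for the payloads this pusher emits could look like the following; the endpoint path and port are hypothetical, only the payload keys (uuid, event_type, data) come from the code above:

    from aiohttp import web

    async def handle_webhook(request: web.Request) -> web.Response:
        payload = await request.json()
        # event_type is 'bot.person_message' or 'bot.group_message'
        print(payload['uuid'], payload['event_type'], payload['data']['bot_uuid'])
        return web.Response(status=204)  # any status below 400 avoids the pusher's warning log

    app = web.Application()
    app.add_routes([web.post('/langbot-webhook', handle_webhook)])  # hypothetical path

    if __name__ == '__main__':
        web.run_app(app, port=8080)  # hypothetical port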
@@ -6,19 +6,24 @@ from typing import Any
 import typing
 import os
 import sys
+import httpx
 from async_lru import alru_cache

 from ..core import app
 from . import handler
 from ..utils import platform
-from langbot_plugin.runtime.io.controllers.stdio import client as stdio_client_controller
+from langbot_plugin.runtime.io.controllers.stdio import (
+    client as stdio_client_controller,
+)
 from langbot_plugin.runtime.io.controllers.ws import client as ws_client_controller
 from langbot_plugin.api.entities import events
 from langbot_plugin.api.entities import context
 import langbot_plugin.runtime.io.connection as base_connection
 from langbot_plugin.api.definition.components.manifest import ComponentManifest
-from langbot_plugin.api.entities.builtin.command import context as command_context
+from langbot_plugin.api.entities.builtin.command import (
+    context as command_context,
+    errors as command_errors,
+)
 from langbot_plugin.runtime.plugin.mgr import PluginInstallSource
 from ..core import taskmgr
@@ -32,14 +37,23 @@ class PluginRuntimeConnector:

    handler_task: asyncio.Task

+   heartbeat_task: asyncio.Task | None = None
+
    stdio_client_controller: stdio_client_controller.StdioClientController

    ctrl: stdio_client_controller.StdioClientController | ws_client_controller.WebSocketClientController

+   runtime_subprocess_on_windows: asyncio.subprocess.Process | None = None
+
+   runtime_subprocess_on_windows_task: asyncio.Task | None = None
+
    runtime_disconnect_callback: typing.Callable[
        [PluginRuntimeConnector], typing.Coroutine[typing.Any, typing.Any, None]
    ]

+   is_enable_plugin: bool = True
+   """Mark if the plugin system is enabled"""
+
    def __init__(
        self,
        ap: app.Application,
@@ -49,10 +63,26 @@ class PluginRuntimeConnector:
    ):
        self.ap = ap
        self.runtime_disconnect_callback = runtime_disconnect_callback
+       self.is_enable_plugin = self.ap.instance_config.data.get('plugin', {}).get('enable', True)
+
+   async def heartbeat_loop(self):
+       while True:
+           await asyncio.sleep(20)
+           try:
+               await self.ping_plugin_runtime()
+               self.ap.logger.debug('Heartbeat to plugin runtime success.')
+           except Exception as e:
+               self.ap.logger.debug(f'Failed to heartbeat to plugin runtime: {e}')

    async def initialize(self):
+       if not self.is_enable_plugin:
+           self.ap.logger.info('Plugin system is disabled.')
+           return
+
        async def new_connection_callback(connection: base_connection.Connection):
-           async def disconnect_callback(rchandler: handler.RuntimeConnectionHandler) -> bool:
+           async def disconnect_callback(
+               rchandler: handler.RuntimeConnectionHandler,
+           ) -> bool:
                if platform.get_platform() == 'docker' or platform.use_websocket_to_connect_plugin_runtime():
                    self.ap.logger.error('Disconnected from plugin runtime, trying to reconnect...')
                    await self.runtime_disconnect_callback(self)
@@ -64,6 +94,7 @@ class PluginRuntimeConnector:
                return False

            self.handler = handler.RuntimeConnectionHandler(connection, disconnect_callback, self.ap)
+
            self.handler_task = asyncio.create_task(self.handler.run())
            _ = await self.handler.ping()
            self.ap.logger.info('Connected to plugin runtime.')
@@ -77,8 +108,14 @@ class PluginRuntimeConnector:
                'runtime_ws_url', 'ws://langbot_plugin_runtime:5400/control/ws'
            )

-           async def make_connection_failed_callback(ctrl: ws_client_controller.WebSocketClientController) -> None:
-               self.ap.logger.error('Failed to connect to plugin runtime, trying to reconnect...')
+           async def make_connection_failed_callback(
+               ctrl: ws_client_controller.WebSocketClientController,
+               exc: Exception = None,
+           ) -> None:
+               if exc is not None:
+                   self.ap.logger.error(f'Failed to connect to plugin runtime({ws_url}): {exc}')
+               else:
+                   self.ap.logger.error(f'Failed to connect to plugin runtime({ws_url}), trying to reconnect...')
                await self.runtime_disconnect_callback(self)

            self.ctrl = ws_client_controller.WebSocketClientController(
@@ -86,6 +123,42 @@ class PluginRuntimeConnector:
                make_connection_failed_callback=make_connection_failed_callback,
            )
            task = self.ctrl.run(new_connection_callback)
+       elif platform.get_platform() == 'win32':
+           # Due to Windows's lack of supports for both stdio and subprocess:
+           # See also: https://docs.python.org/zh-cn/3.13/library/asyncio-platforms.html
+           # We have to launch runtime via cmd but communicate via ws.
+           self.ap.logger.info('(windows) use cmd to launch plugin runtime and communicate via ws')
+
+           if self.runtime_subprocess_on_windows is None:  # only launch once
+               python_path = sys.executable
+               env = os.environ.copy()
+               self.runtime_subprocess_on_windows = await asyncio.create_subprocess_exec(
+                   python_path,
+                   '-m', 'langbot_plugin.cli.__init__', 'rt',
+                   env=env,
+               )
+
+               # hold the process
+               self.runtime_subprocess_on_windows_task = asyncio.create_task(self.runtime_subprocess_on_windows.wait())
+
+           ws_url = 'ws://localhost:5400/control/ws'
+
+           async def make_connection_failed_callback(
+               ctrl: ws_client_controller.WebSocketClientController,
+               exc: Exception = None,
+           ) -> None:
+               if exc is not None:
+                   self.ap.logger.error(f'(windows) Failed to connect to plugin runtime({ws_url}): {exc}')
+               else:
+                   self.ap.logger.error(f'(windows) Failed to connect to plugin runtime({ws_url}), trying to reconnect...')
+               await self.runtime_disconnect_callback(self)
+
+           self.ctrl = ws_client_controller.WebSocketClientController(
+               ws_url=ws_url,
+               make_connection_failed_callback=make_connection_failed_callback,
+           )
+           task = self.ctrl.run(new_connection_callback)
+
        else:  # stdio
            self.ap.logger.info('use stdio to connect to plugin runtime')
            # cmd: lbp rt -s
@@ -98,17 +171,53 @@ class PluginRuntimeConnector:
            )
            task = self.ctrl.run(new_connection_callback)

+       if self.heartbeat_task is None:
+           self.heartbeat_task = asyncio.create_task(self.heartbeat_loop())
+
        asyncio.create_task(task)

    async def initialize_plugins(self):
        pass

+   async def ping_plugin_runtime(self):
+       if not hasattr(self, 'handler'):
+           raise Exception('Plugin runtime is not connected')
+
+       return await self.handler.ping()
+
    async def install_plugin(
        self,
        install_source: PluginInstallSource,
        install_info: dict[str, Any],
        task_context: taskmgr.TaskContext | None = None,
    ):
+       if install_source == PluginInstallSource.LOCAL:
+           # transfer file before install
+           file_bytes = install_info['plugin_file']
+           file_key = await self.handler.send_file(file_bytes, 'lbpkg')
+           install_info['plugin_file_key'] = file_key
+           del install_info['plugin_file']
+           self.ap.logger.info(f'Transfered file {file_key} to plugin runtime')
+       elif install_source == PluginInstallSource.GITHUB:
+           # download and transfer file
+           try:
+               async with httpx.AsyncClient(
+                   trust_env=True,
+                   follow_redirects=True,
+                   timeout=20,
+               ) as client:
+                   response = await client.get(
+                       install_info['asset_url'],
+                   )
+                   response.raise_for_status()
+                   file_bytes = response.content
+                   file_key = await self.handler.send_file(file_bytes, 'lbpkg')
+                   install_info['plugin_file_key'] = file_key
+                   self.ap.logger.info(f'Transfered file {file_key} to plugin runtime')
+           except Exception as e:
+               self.ap.logger.error(f'Failed to download file from GitHub: {e}')
+               raise Exception(f'Failed to download file from GitHub: {e}')
+
        async for ret in self.handler.install_plugin(install_source.value, install_info):
            current_action = ret.get('current_action', None)
            if current_action is not None:
@@ -121,7 +230,10 @@ class PluginRuntimeConnector:
                task_context.trace(trace)

    async def upgrade_plugin(
-       self, plugin_author: str, plugin_name: str, task_context: taskmgr.TaskContext | None = None
+       self,
+       plugin_author: str,
+       plugin_name: str,
+       task_context: taskmgr.TaskContext | None = None,
    ) -> dict[str, Any]:
        async for ret in self.handler.upgrade_plugin(plugin_author, plugin_name):
            current_action = ret.get('current_action', None)
@@ -135,7 +247,11 @@ class PluginRuntimeConnector:
            task_context.trace(trace)

    async def delete_plugin(
-       self, plugin_author: str, plugin_name: str, task_context: taskmgr.TaskContext | None = None
+       self,
+       plugin_author: str,
+       plugin_name: str,
+       delete_data: bool = False,
+       task_context: taskmgr.TaskContext | None = None,
    ) -> dict[str, Any]:
        async for ret in self.handler.delete_plugin(plugin_author, plugin_name):
            current_action = ret.get('current_action', None)
@@ -148,7 +264,16 @@ class PluginRuntimeConnector:
        if task_context is not None:
            task_context.trace(trace)

+       # Clean up plugin settings and binary storage if requested
+       if delete_data:
+           if task_context is not None:
+               task_context.trace('Cleaning up plugin configuration and storage...')
+           await self.handler.cleanup_plugin_data(plugin_author, plugin_name)
+
    async def list_plugins(self) -> list[dict[str, Any]]:
+       if not self.is_enable_plugin:
+           return []
+
        return await self.handler.list_plugins()

    async def get_plugin_info(self, author: str, plugin_name: str) -> dict[str, Any]:
@@ -164,32 +289,62 @@ class PluginRuntimeConnector:
    async def emit_event(
        self,
        event: events.BaseEventModel,
+       bound_plugins: list[str] | None = None,
    ) -> context.EventContext:
        event_ctx = context.EventContext.from_event(event)

-       event_ctx_result = await self.handler.emit_event(event_ctx.model_dump(serialize_as_any=True))
+       if not self.is_enable_plugin:
+           return event_ctx
+
+       # Pass include_plugins to runtime for filtering
+       event_ctx_result = await self.handler.emit_event(
+           event_ctx.model_dump(serialize_as_any=False), include_plugins=bound_plugins
+       )
+
        event_ctx = context.EventContext.model_validate(event_ctx_result['event_context'])
+
        return event_ctx

-   async def list_tools(self) -> list[ComponentManifest]:
-       list_tools_data = await self.handler.list_tools()
-       return [ComponentManifest.model_validate(tool) for tool in list_tools_data]
+   async def list_tools(self, bound_plugins: list[str] | None = None) -> list[ComponentManifest]:
+       if not self.is_enable_plugin:
+           return []
+
+       # Pass include_plugins to runtime for filtering
+       list_tools_data = await self.handler.list_tools(include_plugins=bound_plugins)
+
+       tools = [ComponentManifest.model_validate(tool) for tool in list_tools_data]
+
+       return tools

-   async def call_tool(self, tool_name: str, parameters: dict[str, Any]) -> dict[str, Any]:
-       return await self.handler.call_tool(tool_name, parameters)
+   async def call_tool(
+       self, tool_name: str, parameters: dict[str, Any], bound_plugins: list[str] | None = None
+   ) -> dict[str, Any]:
+       if not self.is_enable_plugin:
+           return {'error': 'Tool not found: plugin system is disabled'}
+
+       # Pass include_plugins to runtime for validation
+       return await self.handler.call_tool(tool_name, parameters, include_plugins=bound_plugins)

-   async def list_commands(self) -> list[ComponentManifest]:
-       list_commands_data = await self.handler.list_commands()
-       return [ComponentManifest.model_validate(command) for command in list_commands_data]
+   async def list_commands(self, bound_plugins: list[str] | None = None) -> list[ComponentManifest]:
+       if not self.is_enable_plugin:
+           return []
+
+       # Pass include_plugins to runtime for filtering
+       list_commands_data = await self.handler.list_commands(include_plugins=bound_plugins)
+
+       commands = [ComponentManifest.model_validate(command) for command in list_commands_data]
+
+       return commands

    async def execute_command(
-       self, command_ctx: command_context.ExecuteContext
+       self, command_ctx: command_context.ExecuteContext, bound_plugins: list[str] | None = None
    ) -> typing.AsyncGenerator[command_context.CommandReturn, None]:
-       gen = self.handler.execute_command(command_ctx.model_dump(serialize_as_any=True))
+       if not self.is_enable_plugin:
+           yield command_context.CommandReturn(error=command_errors.CommandNotFoundError(command_ctx.command))
+           return
+
+       # Pass include_plugins to runtime for validation
+       gen = self.handler.execute_command(command_ctx.model_dump(serialize_as_any=True), include_plugins=bound_plugins)

        async for ret in gen:
            cmd_ret = command_context.CommandReturn.model_validate(ret)
@@ -197,6 +352,13 @@ class PluginRuntimeConnector:
|
|||||||
yield cmd_ret
|
yield cmd_ret
|
||||||
|
|
||||||
def dispose(self):
|
def dispose(self):
|
||||||
if isinstance(self.ctrl, stdio_client_controller.StdioClientController):
|
# No need to consider the shutdown on Windows
|
||||||
|
# for Windows can kill processes and subprocesses chainly
|
||||||
|
|
||||||
|
if self.is_enable_plugin and isinstance(self.ctrl, stdio_client_controller.StdioClientController):
|
||||||
self.ap.logger.info('Terminating plugin runtime process...')
|
self.ap.logger.info('Terminating plugin runtime process...')
|
||||||
self.ctrl.process.terminate()
|
self.ctrl.process.terminate()
|
||||||
|
|
||||||
|
if self.heartbeat_task is not None:
|
||||||
|
self.heartbeat_task.cancel()
|
||||||
|
self.heartbeat_task = None
|
||||||
|
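Note: the connector methods above now short-circuit when the plugin system is disabled and otherwise forward a bound_plugins allow-list to the runtime. A minimal usage sketch, assuming a hypothetical `connector` instance and made-up plugin names:

    async def collect_components(connector):
        # Hypothetical allow-list: the plugins bound to one pipeline.
        bound_plugins = ['author/weather-plugin', 'author/search-plugin']

        # Both calls return [] when the plugin system is disabled;
        # otherwise the runtime filters results to the listed plugins.
        tools = await connector.list_tools(bound_plugins=bound_plugins)
        commands = await connector.list_commands(bound_plugins=bound_plugins)
        return tools, commands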
@@ -56,7 +56,9 @@ class RuntimeConnectionHandler(handler.Handler):
             .where(persistence_plugin.PluginSetting.plugin_name == plugin_name)
         )
 
-        if result.first() is not None:
+        setting = result.first()
+
+        if setting is not None:
             # delete plugin setting
             await self.ap.persistence_mgr.execute_async(
                 sqlalchemy.delete(persistence_plugin.PluginSetting)
@@ -71,6 +73,10 @@ class RuntimeConnectionHandler(handler.Handler):
                     plugin_name=plugin_name,
                     install_source=install_source,
                     install_info=install_info,
+                    # inherit from existing setting
+                    enabled=setting.enabled if setting is not None else True,
+                    priority=setting.priority if setting is not None else 0,
+                    config=setting.config if setting is not None else {},  # noqa: F821
                 )
             )
 
@@ -292,7 +298,7 @@ class RuntimeConnectionHandler(handler.Handler):
         @self.action(PluginToRuntimeAction.GET_LLM_MODELS)
         async def get_llm_models(data: dict[str, Any]) -> handler.ActionResponse:
             """Get llm models"""
-            llm_models = await self.ap.model_service.get_llm_models(include_secret=False)
+            llm_models = await self.ap.llm_model_service.get_llm_models(include_secret=False)
             return handler.ActionResponse.success(
                 data={
                     'llm_models': llm_models,
@@ -430,6 +436,25 @@ class RuntimeConnectionHandler(handler.Handler):
                 },
             )
 
+        @self.action(RuntimeToLangBotAction.GET_CONFIG_FILE)
+        async def get_config_file(data: dict[str, Any]) -> handler.ActionResponse:
+            """Get a config file by file key"""
+            file_key = data['file_key']
+
+            try:
+                # Load file from storage
+                file_bytes = await self.ap.storage_mgr.storage_provider.load(file_key)
+
+                return handler.ActionResponse.success(
+                    data={
+                        'file_base64': base64.b64encode(file_bytes).decode('utf-8'),
+                    },
+                )
+            except Exception as e:
+                return handler.ActionResponse.error(
+                    message=f'Failed to load config file {file_key}: {e}',
+                )
+
     async def ping(self) -> dict[str, Any]:
         """Ping the runtime"""
         return await self.call_action(
@@ -529,24 +554,28 @@ class RuntimeConnectionHandler(handler.Handler):
     async def emit_event(
         self,
         event_context: dict[str, Any],
+        include_plugins: list[str] | None = None,
     ) -> dict[str, Any]:
         """Emit event"""
         result = await self.call_action(
             LangBotToRuntimeAction.EMIT_EVENT,
             {
                 'event_context': event_context,
+                'include_plugins': include_plugins,
             },
-            timeout=30,
+            timeout=60,
         )
 
         return result
 
-    async def list_tools(self) -> list[dict[str, Any]]:
+    async def list_tools(self, include_plugins: list[str] | None = None) -> list[dict[str, Any]]:
         """List tools"""
         result = await self.call_action(
             LangBotToRuntimeAction.LIST_TOOLS,
-            {},
-            timeout=10,
+            {
+                'include_plugins': include_plugins,
+            },
+            timeout=20,
         )
 
         return result['tools']
@@ -560,38 +589,74 @@ class RuntimeConnectionHandler(handler.Handler):
                 'plugin_name': plugin_name,
             },
         )
-        return result
+        plugin_icon_file_key = result['plugin_icon_file_key']
+        mime_type = result['mime_type']
+
+        plugin_icon_bytes = await self.read_local_file(plugin_icon_file_key)
+
+        await self.delete_local_file(plugin_icon_file_key)
+
+        return {
+            'plugin_icon_base64': base64.b64encode(plugin_icon_bytes).decode('utf-8'),
+            'mime_type': mime_type,
+        }
+
+    async def cleanup_plugin_data(self, plugin_author: str, plugin_name: str) -> None:
+        """Cleanup plugin settings and binary storage"""
+        # Delete plugin settings
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.delete(persistence_plugin.PluginSetting)
+            .where(persistence_plugin.PluginSetting.plugin_author == plugin_author)
+            .where(persistence_plugin.PluginSetting.plugin_name == plugin_name)
+        )
+
+        # Delete all binary storage for this plugin
+        owner = f'{plugin_author}/{plugin_name}'
+        await self.ap.persistence_mgr.execute_async(
+            sqlalchemy.delete(persistence_bstorage.BinaryStorage)
+            .where(persistence_bstorage.BinaryStorage.owner_type == 'plugin')
+            .where(persistence_bstorage.BinaryStorage.owner == owner)
+        )
 
-    async def call_tool(self, tool_name: str, parameters: dict[str, Any]) -> dict[str, Any]:
+    async def call_tool(
+        self, tool_name: str, parameters: dict[str, Any], include_plugins: list[str] | None = None
+    ) -> dict[str, Any]:
         """Call tool"""
         result = await self.call_action(
             LangBotToRuntimeAction.CALL_TOOL,
             {
                 'tool_name': tool_name,
                 'tool_parameters': parameters,
+                'include_plugins': include_plugins,
             },
-            timeout=30,
+            timeout=60,
         )
 
         return result['tool_response']
 
-    async def list_commands(self) -> list[dict[str, Any]]:
+    async def list_commands(self, include_plugins: list[str] | None = None) -> list[dict[str, Any]]:
         """List commands"""
         result = await self.call_action(
             LangBotToRuntimeAction.LIST_COMMANDS,
-            {},
+            {
+                'include_plugins': include_plugins,
+            },
             timeout=10,
         )
         return result['commands']
 
-    async def execute_command(self, command_context: dict[str, Any]) -> typing.AsyncGenerator[dict[str, Any], None]:
+    async def execute_command(
+        self, command_context: dict[str, Any], include_plugins: list[str] | None = None
+    ) -> typing.AsyncGenerator[dict[str, Any], None]:
        """Execute command"""
        gen = self.call_action_generator(
             LangBotToRuntimeAction.EXECUTE_COMMAND,
             {
                 'command_context': command_context,
+                'include_plugins': include_plugins,
             },
-            timeout=30,
+            timeout=60,
         )
 
         async for ret in gen:
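The GET_CONFIG_FILE action above ships file bytes as base64 text over the runtime connection. A minimal sketch of that round trip (the sample payload is made up):

    import base64

    # Handler side, as above: bytes -> base64 text
    file_bytes = b'{"model": "qwen-max"}'  # hypothetical config file content
    payload = {'file_base64': base64.b64encode(file_bytes).decode('utf-8')}

    # Receiving side: base64 text -> bytes
    assert base64.b64decode(payload['file_base64']) == file_bytes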
@@ -59,7 +59,7 @@ class ModelManager:
             try:
                 await self.load_llm_model(llm_model)
             except provider_errors.RequesterNotFoundError as e:
-                self.ap.logger.warning(f'Requester {e.requester_name} not found, skipping model {llm_model.uuid}')
+                self.ap.logger.warning(f'Requester {e.requester_name} not found, skipping llm model {llm_model.uuid}')
             except Exception as e:
                 self.ap.logger.error(f'Failed to load model {llm_model.uuid}: {e}\n{traceback.format_exc()}')
 
@@ -67,7 +67,14 @@ class ModelManager:
         result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_model.EmbeddingModel))
         embedding_models = result.all()
         for embedding_model in embedding_models:
-            await self.load_embedding_model(embedding_model)
+            try:
+                await self.load_embedding_model(embedding_model)
+            except provider_errors.RequesterNotFoundError as e:
+                self.ap.logger.warning(
+                    f'Requester {e.requester_name} not found, skipping embedding model {embedding_model.uuid}'
+                )
+            except Exception as e:
+                self.ap.logger.error(f'Failed to load model {embedding_model.uuid}: {e}\n{traceback.format_exc()}')
 
     async def init_runtime_llm_model(
         self,
@@ -107,6 +114,9 @@ class ModelManager:
         elif isinstance(model_info, dict):
             model_info = persistence_model.EmbeddingModel(**model_info)
 
+        if model_info.requester not in self.requester_dict:
+            raise provider_errors.RequesterNotFoundError(model_info.requester)
+
         requester_inst = self.requester_dict[model_info.requester](ap=self.ap, config=model_info.requester_config)
 
         await requester_inst.initialize()
@@ -1,9 +1,14 @@
 from __future__ import annotations
 
 import typing
 
+import dashscope
 import openai
 
 from . import modelscopechatcmpl
+from .. import requester
+import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
+import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
+import langbot_plugin.api.entities.builtin.provider.message as provider_message
 
 
 class BailianChatCompletions(modelscopechatcmpl.ModelScopeChatCompletions):
@@ -15,3 +20,211 @@ class BailianChatCompletions(modelscopechatcmpl.ModelScopeChatCompletions):
         'base_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
         'timeout': 120,
     }
+
+    async def _closure_stream(
+        self,
+        query: pipeline_query.Query,
+        req_messages: list[dict],
+        use_model: requester.RuntimeLLMModel,
+        use_funcs: list[resource_tool.LLMTool] = None,
+        extra_args: dict[str, typing.Any] = {},
+        remove_think: bool = False,
+    ) -> provider_message.Message | typing.AsyncGenerator[provider_message.MessageChunk, None]:
+        self.client.api_key = use_model.token_mgr.get_token()
+
+        args = {}
+        args['model'] = use_model.model_entity.name
+
+        if use_funcs:
+            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
+
+            if tools:
+                args['tools'] = tools
+
+        # Set the messages for this request
+        messages = req_messages.copy()
+
+        is_use_dashscope_call = False  # whether to call via the native DashScope SDK
+        is_enable_multi_model = True  # whether multi-turn conversation is supported
+        use_time_num = 0  # number of model calls so far, to avoid duplicate calls when there are multiple files
+        use_time_ids = []  # indices of messages already counted
+        message_id = 0  # message sequence number
+
+        for msg in messages:
+            # print(msg)
+            if 'content' in msg and isinstance(msg['content'], list):
+                for me in msg['content']:
+                    if me['type'] == 'image_base64':
+                        me['image_url'] = {'url': me['image_base64']}
+                        me['type'] = 'image_url'
+                        del me['image_base64']
+                    elif me['type'] == 'file_url' and '.' in me.get('file_name', ''):
+                        # 1. Video file inference
+                        # https://bailian.console.aliyun.com/?tab=doc#/doc/?type=model&url=2845871
+                        file_type = me.get('file_name').lower().split('.')[-1]
+                        if file_type in ['mp4', 'avi', 'mkv', 'mov', 'flv', 'wmv']:
+                            me['type'] = 'video_url'
+                            me['video_url'] = {'url': me['file_url']}
+                            del me['file_url']
+                            del me['file_name']
+                            use_time_num += 1
+                            use_time_ids.append(message_id)
+                            is_enable_multi_model = False
+                        # 2. Audio file recognition; cannot be passed via OpenAI's audio field, so unsupported there for now
+                        # https://bailian.console.aliyun.com/?tab=doc#/doc/?type=model&url=2979031
+                        elif file_type in ['aac', 'amr', 'aiff', 'flac', 'm4a',
+                                           'mp3', 'mpeg', 'ogg', 'opus', 'wav', 'webm', 'wma']:
+                            me['audio'] = me['file_url']
+                            me['type'] = 'audio'
+                            del me['file_url']
+                            del me['type']
+                            del me['file_name']
+                            is_use_dashscope_call = True
+                            use_time_num += 1
+                            use_time_ids.append(message_id)
+                            is_enable_multi_model = False
+            message_id += 1
+
+        # Use a list comprehension to keep only elements whose index is not in use_time_ids[:-1],
+        # i.e. keep only the last multimedia message
+        if not is_enable_multi_model and use_time_num > 1:
+            messages = [msg for idx, msg in enumerate(messages) if idx not in use_time_ids[:-1]]
+
+        if not is_enable_multi_model:
+            messages = [msg for msg in messages if 'resp_message_id' not in msg]
+
+        args['messages'] = messages
+        args['stream'] = True
+
+        # Streaming state
+        # tool_calls_map: dict[str, provider_message.ToolCall] = {}
+        chunk_idx = 0
+        thinking_started = False
+        thinking_ended = False
+        role = 'assistant'  # default role
+
+        if is_use_dashscope_call:
+            response = dashscope.MultiModalConversation.call(
+                # If no environment variable is configured, replace the next line with your Bailian API key: api_key = "sk-xxx"
+                api_key=use_model.token_mgr.get_token(),
+                model=use_model.model_entity.name,
+                messages=messages,
+                result_format="message",
+                asr_options={
+                    # "language": "zh",  # optional: if the audio language is known, set it here to improve recognition accuracy
+                    "enable_lid": True,
+                    "enable_itn": False
+                },
+                stream=True
+            )
+            content_length_list = []
+            previous_length = 0  # length of the content seen so far
+            for res in response:
+                chunk = res["output"]
+                # Parse the chunk data
+                if hasattr(chunk, 'choices') and chunk.choices:
+                    choice = chunk.choices[0]
+                    delta_content = choice["message"].content[0]["text"]
+                    finish_reason = choice["finish_reason"]
+                    content_length_list.append(len(delta_content))
+                else:
+                    delta_content = ""
+                    finish_reason = None
+
+                # Skip the empty first chunk (role only, no content)
+                if chunk_idx == 0 and not delta_content:
+                    chunk_idx += 1
+                    continue
+
+                # Check whether content_length_list has enough data
+                if len(content_length_list) >= 2:
+                    now_content = delta_content[previous_length: content_length_list[-1]]
+                    previous_length = content_length_list[-1]  # update the previous length
+                else:
+                    now_content = delta_content  # first iteration: use delta_content directly
+                    previous_length = len(delta_content)  # update the previous length
+
+                # Build the MessageChunk - incremental content only
+                chunk_data = {
+                    'role': role,
+                    'content': now_content if now_content else None,
+                    'is_final': bool(finish_reason) and finish_reason != "null",
+                }
+
+                # Drop None values
+                chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
+                yield provider_message.MessageChunk(**chunk_data)
+                chunk_idx += 1
+        else:
+            async for chunk in self._req_stream(args, extra_body=extra_args):
+                # Parse the chunk data
+                if hasattr(chunk, 'choices') and chunk.choices:
+                    choice = chunk.choices[0]
+                    delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
+                    finish_reason = getattr(choice, 'finish_reason', None)
+                else:
+                    delta = {}
+                    finish_reason = None
+
+                # Take the role from the first chunk and reuse it afterwards
+                if 'role' in delta and delta['role']:
+                    role = delta['role']
+
+                # Get the incremental content
+                delta_content = delta.get('content', '')
+                reasoning_content = delta.get('reasoning_content', '')
+
+                # Handle reasoning_content
+                if reasoning_content:
+                    # accumulated_reasoning += reasoning_content
+                    # If remove_think is set, skip reasoning_content
+                    if remove_think:
+                        chunk_idx += 1
+                        continue
+
+                    # First occurrence of reasoning_content: add the opening <think> tag
+                    if not thinking_started:
+                        thinking_started = True
+                        delta_content = '<think>\n' + reasoning_content
+                    else:
+                        # Keep emitting reasoning_content
+                        delta_content = reasoning_content
+                elif thinking_started and not thinking_ended and delta_content:
+                    # reasoning_content ended and normal content begins: add the closing </think> tag
+                    thinking_ended = True
+                    delta_content = '\n</think>\n' + delta_content
+
+                # Handle incremental tool calls
+                if delta.get('tool_calls'):
+                    for tool_call in delta['tool_calls']:
+                        if tool_call['id'] != '':
+                            tool_id = tool_call['id']
+                        if tool_call['function']['name'] is not None:
+                            tool_name = tool_call['function']['name']
+
+                        if tool_call['type'] is None:
+                            tool_call['type'] = 'function'
+                        tool_call['id'] = tool_id
+                        tool_call['function']['name'] = tool_name
+                        tool_call['function']['arguments'] = (
+                            '' if tool_call['function']['arguments'] is None else tool_call['function']['arguments']
+                        )
+
+                # Skip the empty first chunk (role only, no content)
+                if chunk_idx == 0 and not delta_content and not reasoning_content and not delta.get('tool_calls'):
+                    chunk_idx += 1
+                    continue
+
+                # Build the MessageChunk - incremental content only
+                chunk_data = {
+                    'role': role,
+                    'content': delta_content if delta_content else None,
+                    'tool_calls': delta.get('tool_calls'),
+                    'is_final': bool(finish_reason),
+                }
+
+                # Drop None values
+                chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
+
+                yield provider_message.MessageChunk(**chunk_data)
+                chunk_idx += 1
+            # return
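The DashScope branch above appears to receive chunks carrying the full text generated so far and slices out only the new suffix before yielding. The same cumulative-to-delta idea as a standalone sketch (the sample stream is made up):

    def iter_deltas(cumulative_chunks):
        """Yield only the newly appended suffix of each cumulative text chunk."""
        previous_length = 0
        for text in cumulative_chunks:
            delta = text[previous_length:]
            previous_length = len(text)
            if delta:
                yield delta

    assert list(iter_deltas(['你好', '你好,世', '你好,世界'])) == ['你好', ',世', '界']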
BIN pkg/provider/modelmgr/requesters/jiekouai.png (new file, 1.5 KiB; binary file not shown)
208 pkg/provider/modelmgr/requesters/jiekouaichatcmpl.py (new file)
@@ -0,0 +1,208 @@
+from __future__ import annotations
+
+import openai
+import typing
+
+from . import chatcmpl
+from .. import requester
+import openai.types.chat.chat_completion as chat_completion
+import re
+import langbot_plugin.api.entities.builtin.provider.message as provider_message
+import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
+import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
+
+
+class JieKouAIChatCompletions(chatcmpl.OpenAIChatCompletions):
+    """JieKou AI (接口 AI) ChatCompletion API requester"""
+
+    client: openai.AsyncClient
+
+    default_config: dict[str, typing.Any] = {
+        'base_url': 'https://api.jiekou.ai/openai',
+        'timeout': 120,
+    }
+
+    is_think: bool = False
+
+    async def _make_msg(
+        self,
+        chat_completion: chat_completion.ChatCompletion,
+        remove_think: bool,
+    ) -> provider_message.Message:
+        chatcmpl_message = chat_completion.choices[0].message.model_dump()
+        # print(chatcmpl_message.keys(), chatcmpl_message.values())
+
+        # Make sure the role field exists and is not None
+        if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
+            chatcmpl_message['role'] = 'assistant'
+
+        reasoning_content = chatcmpl_message['reasoning_content'] if 'reasoning_content' in chatcmpl_message else None
+
+        # DeepSeek reasoner models
+        chatcmpl_message['content'] = await self._process_thinking_content(
+            chatcmpl_message['content'], reasoning_content, remove_think
+        )
+
+        # Remove the reasoning_content field so it is not passed to Message
+        if 'reasoning_content' in chatcmpl_message:
+            del chatcmpl_message['reasoning_content']
+
+        message = provider_message.Message(**chatcmpl_message)
+
+        return message
+
+    async def _process_thinking_content(
+        self,
+        content: str,
+        reasoning_content: str = None,
+        remove_think: bool = False,
+    ) -> str:
+        """Process chain-of-thought content
+
+        Args:
+            content: original content
+            reasoning_content: the reasoning_content field
+            remove_think: whether to strip the chain of thought
+
+        Returns:
+            the processed content
+        """
+        if remove_think:
+            content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL)
+        else:
+            if reasoning_content is not None:
+                content = '<think>\n' + reasoning_content + '\n</think>\n' + content
+        return content
+
+    async def _make_msg_chunk(
+        self,
+        delta: dict[str, typing.Any],
+        idx: int,
+    ) -> provider_message.MessageChunk:
+        # Handle the difference between streaming chunks and full responses
+        # print(chat_completion.choices[0])
+
+        # Make sure the role field exists and is not None
+        if 'role' not in delta or delta['role'] is None:
+            delta['role'] = 'assistant'
+
+        reasoning_content = delta['reasoning_content'] if 'reasoning_content' in delta else None
+
+        delta['content'] = '' if delta['content'] is None else delta['content']
+        # print(reasoning_content)
+
+        # DeepSeek reasoner models
+
+        if reasoning_content is not None:
+            delta['content'] += reasoning_content
+
+        message = provider_message.MessageChunk(**delta)
+
+        return message
+
+    async def _closure_stream(
+        self,
+        query: pipeline_query.Query,
+        req_messages: list[dict],
+        use_model: requester.RuntimeLLMModel,
+        use_funcs: list[resource_tool.LLMTool] = None,
+        extra_args: dict[str, typing.Any] = {},
+        remove_think: bool = False,
+    ) -> provider_message.Message | typing.AsyncGenerator[provider_message.MessageChunk, None]:
+        self.client.api_key = use_model.token_mgr.get_token()
+
+        args = {}
+        args['model'] = use_model.model_entity.name
+
+        if use_funcs:
+            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)
+
+            if tools:
+                args['tools'] = tools
+
+        # Set the messages for this request
+        messages = req_messages.copy()
+
+        # Check vision
+        for msg in messages:
+            if 'content' in msg and isinstance(msg['content'], list):
+                for me in msg['content']:
+                    if me['type'] == 'image_base64':
+                        me['image_url'] = {'url': me['image_base64']}
+                        me['type'] = 'image_url'
+                        del me['image_base64']
+
+        args['messages'] = messages
+        args['stream'] = True
+
+        # tool_calls_map: dict[str, provider_message.ToolCall] = {}
+        chunk_idx = 0
+        thinking_started = False
+        thinking_ended = False
+        role = 'assistant'  # default role
+        async for chunk in self._req_stream(args, extra_body=extra_args):
+            # Parse the chunk data
+            if hasattr(chunk, 'choices') and chunk.choices:
+                choice = chunk.choices[0]
+                delta = choice.delta.model_dump() if hasattr(choice, 'delta') else {}
+                finish_reason = getattr(choice, 'finish_reason', None)
+            else:
+                delta = {}
+                finish_reason = None
+
+            # Take the role from the first chunk and reuse it afterwards
+            if 'role' in delta and delta['role']:
+                role = delta['role']
+
+            # Get the incremental content
+            delta_content = delta.get('content', '')
+            # reasoning_content = delta.get('reasoning_content', '')
+
+            if remove_think:
+                if delta['content'] is not None:
+                    if '<think>' in delta['content'] and not thinking_started and not thinking_ended:
+                        thinking_started = True
+                        continue
+                    elif delta['content'] == r'</think>' and not thinking_ended:
+                        thinking_ended = True
+                        continue
+                    elif thinking_ended and delta['content'] == '\n\n' and thinking_started:
+                        thinking_started = False
+                        continue
+                    elif thinking_started and not thinking_ended:
+                        continue
+
+            # delta_tool_calls = None
+            if delta.get('tool_calls'):
+                for tool_call in delta['tool_calls']:
+                    if tool_call['id'] and tool_call['function']['name']:
+                        tool_id = tool_call['id']
+                        tool_name = tool_call['function']['name']
+
+                    if tool_call['id'] is None:
+                        tool_call['id'] = tool_id
+                    if tool_call['function']['name'] is None:
+                        tool_call['function']['name'] = tool_name
+                    if tool_call['function']['arguments'] is None:
+                        tool_call['function']['arguments'] = ''
+                    if tool_call['type'] is None:
+                        tool_call['type'] = 'function'
+
+            # Skip the empty first chunk (role only, no content)
+            if chunk_idx == 0 and not delta_content and not delta.get('tool_calls'):
+                chunk_idx += 1
+                continue
+
+            # Build the MessageChunk - incremental content only
+            chunk_data = {
+                'role': role,
+                'content': delta_content if delta_content else None,
+                'tool_calls': delta.get('tool_calls'),
+                'is_final': bool(finish_reason),
+            }
+
+            # Drop None values
+            chunk_data = {k: v for k, v in chunk_data.items() if v is not None}
+
+            yield provider_message.MessageChunk(**chunk_data)
+            chunk_idx += 1
38 pkg/provider/modelmgr/requesters/jiekouaichatcmpl.yaml (new file)
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: LLMAPIRequester
+metadata:
+  name: jiekouai-chat-completions
+  label:
+    en_US: JieKou AI
+    zh_Hans: 接口 AI
+  icon: jiekouai.png
+spec:
+  config:
+    - name: base_url
+      label:
+        en_US: Base URL
+        zh_Hans: 基础 URL
+      type: string
+      required: true
+      default: "https://api.jiekou.ai/openai"
+    - name: args
+      label:
+        en_US: Args
+        zh_Hans: 附加参数
+      type: object
+      required: true
+      default: {}
+    - name: timeout
+      label:
+        en_US: Timeout
+        zh_Hans: 超时时间
+      type: int
+      required: true
+      default: 120
+  support_type:
+    - llm
+    - text-embedding
+execution:
+  python:
+    path: ./jiekouaichatcmpl.py
+    attr: JieKouAIChatCompletions
1 pkg/provider/modelmgr/requesters/tokenpony.svg (new file, 3.6 KiB)
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" width="450" height="280" viewBox="0 0 450 280" ...></svg> (single-line TokenPony logo; long vector path data omitted)
31 pkg/provider/modelmgr/requesters/tokenpony.yaml (new file)
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: LLMAPIRequester
+metadata:
+  name: tokenpony-chat-completions
+  label:
+    en_US: TokenPony
+    zh_Hans: 小马算力
+  icon: tokenpony.svg
+spec:
+  config:
+    - name: base_url
+      label:
+        en_US: Base URL
+        zh_Hans: 基础 URL
+      type: string
+      required: true
+      default: "https://api.tokenpony.cn/v1"
+    - name: timeout
+      label:
+        en_US: Timeout
+        zh_Hans: 超时时间
+      type: integer
+      required: true
+      default: 120
+  support_type:
+    - llm
+    - text-embedding
+execution:
+  python:
+    path: ./tokenponychatcmpl.py
+    attr: TokenPonyChatCompletions
17 pkg/provider/modelmgr/requesters/tokenponychatcmpl.py (new file)
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+import typing
+import openai
+
+from . import chatcmpl
+
+
+class TokenPonyChatCompletions(chatcmpl.OpenAIChatCompletions):
+    """TokenPony ChatCompletion API requester"""
+
+    client: openai.AsyncClient
+
+    default_config: dict[str, typing.Any] = {
+        'base_url': 'https://api.tokenpony.cn/v1',
+        'timeout': 120,
+    }
313 pkg/provider/runners/cozeapi.py (new file)
@@ -0,0 +1,313 @@
+from __future__ import annotations
+
+import typing
+import json
+import uuid
+import base64
+
+from .. import runner
+from ...core import app
+import langbot_plugin.api.entities.builtin.provider.message as provider_message
+from ...utils import image
+import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
+from libs.coze_server_api.client import AsyncCozeAPIClient
+
+
+@runner.runner_class('coze-api')
+class CozeAPIRunner(runner.RequestRunner):
+    """Coze API conversation runner"""
+
+    def __init__(self, ap: app.Application, pipeline_config: dict):
+        self.pipeline_config = pipeline_config
+        self.ap = ap
+        self.agent_token = pipeline_config["ai"]['coze-api']['api-key']
+        self.bot_id = pipeline_config["ai"]['coze-api'].get('bot-id')
+        self.chat_timeout = pipeline_config["ai"]['coze-api'].get('timeout')
+        self.auto_save_history = pipeline_config["ai"]['coze-api'].get('auto_save_history')
+        self.api_base = pipeline_config["ai"]['coze-api'].get('api-base')
+
+        self.coze = AsyncCozeAPIClient(
+            self.agent_token,
+            self.api_base
+        )
+
+    def _process_thinking_content(
+        self,
+        content: str,
+    ) -> tuple[str, str]:
+        """Process chain-of-thought content
+
+        Args:
+            content: original content
+        Returns:
+            (processed content, extracted chain-of-thought content)
+        """
+        remove_think = self.pipeline_config.get('output', {}).get('misc', {}).get('remove-think', False)
+        thinking_content = ''
+        # Extract <think> tag content from the content
+        if content and '<think>' in content and '</think>' in content:
+            import re
+
+            think_pattern = r'<think>(.*?)</think>'
+            think_matches = re.findall(think_pattern, content, re.DOTALL)
+            if think_matches:
+                thinking_content = '\n'.join(think_matches)
+                # Remove the <think> tags from the content
+                content = re.sub(think_pattern, '', content, flags=re.DOTALL).strip()
+
+        # Decide whether to keep the chain of thought based on remove_think
+        if remove_think:
+            return content, ''
+        else:
+            # If there is chain-of-thought content, prepend it to the content in <think> format
+            if thinking_content:
+                content = f'<think>\n{thinking_content}\n</think>\n{content}'.strip()
+            return content, thinking_content
+
+    async def _preprocess_user_message(self, query: pipeline_query.Query) -> list[dict]:
+        """Preprocess the user message and convert it into the Coze message format
+
+        Returns:
+            list[dict]: list of Coze messages
+        """
+        messages = []
+
+        if isinstance(query.user_message.content, list):
+            # Multimodal message handling
+            content_parts = []
+
+            for ce in query.user_message.content:
+                if ce.type == 'text':
+                    content_parts.append({"type": "text", "text": ce.text})
+                elif ce.type == 'image_base64':
+                    image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
+                    file_bytes = base64.b64decode(image_b64)
+                    file_id = await self._get_file_id(file_bytes)
+                    content_parts.append({"type": "image", "file_id": file_id})
+                elif ce.type == 'file':
+                    # Handle files: upload them to Coze
+                    file_id = await self._get_file_id(ce.file)
+                    content_parts.append({"type": "file", "file_id": file_id})
+
+            # Create the multimodal message
+            if content_parts:
+                messages.append({
+                    "role": "user",
+                    "content": json.dumps(content_parts),
+                    "content_type": "object_string",
+                    "meta_data": None
+                })
+
+        elif isinstance(query.user_message.content, str):
+            # Plain text message
+            messages.append({
+                "role": "user",
+                "content": query.user_message.content,
+                "content_type": "text",
+                "meta_data": None
+            })
+
+        return messages
+
+    async def _get_file_id(self, file) -> str:
+        """Upload a file to the Coze service
+        Args:
+            file: the file
+        Returns:
+            str: file ID
+        """
+        file_id = await self.coze.upload(file=file)
+        return file_id
+
+    async def _chat_messages(
+        self, query: pipeline_query.Query
+    ) -> typing.AsyncGenerator[provider_message.Message, None]:
+        """Call the chat assistant (non-streaming)
+
+        Note: since cozepy does not provide a non-streaming API, this uses the streaming API and returns the full content at once when it finishes
+        """
+        user_id = f'{query.launcher_id}_{query.sender_id}'
+
+        # Preprocess the user message
+        additional_messages = await self._preprocess_user_message(query)
+
+        # Get the conversation ID
+        conversation_id = None
+
+        # Collect the full content
+        full_content = ''
+        full_reasoning = ''
+
+        try:
+            # Call the Coze API streaming endpoint
+            async for chunk in self.coze.chat_messages(
+                bot_id=self.bot_id,
+                user_id=user_id,
+                additional_messages=additional_messages,
+                conversation_id=conversation_id,
+                timeout=self.chat_timeout,
+                auto_save_history=self.auto_save_history,
+                stream=True
+            ):
+                self.ap.logger.debug(f'coze-chat-stream: {chunk}')
+
+                event_type = chunk.get('event')
+                data = chunk.get('data', {})
+                # Removed debug print statement to avoid cluttering logs in production
+
+                if event_type == 'conversation.message.delta':
+                    # Collect content
+                    if 'content' in data:
+                        full_content += data.get('content', '')
+
+                    # Collect reasoning content (if any)
+                    if 'reasoning_content' in data:
+                        full_reasoning += data.get('reasoning_content', '')
+
+                elif event_type.split(".")[-1] == 'done':  # for locally deployed Coze, the final event is not 'done'
+                    # Save the conversation ID
+                    if 'conversation_id' in data:
+                        conversation_id = data.get('conversation_id')
+
+                elif event_type == 'error':
+                    # Handle errors
+                    error_msg = f"Coze API error: {data.get('message', 'unknown error')}"
+                    yield provider_message.Message(
+                        role='assistant',
+                        content=error_msg,
+                    )
+                    return
+
+            # Process chain-of-thought content
+            content, thinking_content = self._process_thinking_content(full_content)
+            if full_reasoning:
+                remove_think = self.pipeline_config.get('output', {}).get('misc', {}).get('remove-think', False)
+                if not remove_think:
+                    content = f'<think>\n{full_reasoning}\n</think>\n{content}'.strip()
+
+            # Return the full content at once
+            yield provider_message.Message(
+                role='assistant',
+                content=content,
+            )
+
+            # Save the conversation ID
+            if conversation_id and query.session.using_conversation:
+                query.session.using_conversation.uuid = conversation_id
+
+        except Exception as e:
+            self.ap.logger.error(f'Coze API error: {str(e)}')
+            yield provider_message.Message(
+                role='assistant',
+                content=f'Coze API call failed: {str(e)}',
+            )
+
+    async def _chat_messages_chunk(
+        self, query: pipeline_query.Query
+    ) -> typing.AsyncGenerator[provider_message.MessageChunk, None]:
+        """Call the chat assistant (streaming)"""
+        user_id = f'{query.launcher_id}_{query.sender_id}'
+
+        # Preprocess the user message
+        additional_messages = await self._preprocess_user_message(query)
+
+        # Get the conversation ID
+        conversation_id = None
+
+        start_reasoning = False
+        stop_reasoning = False
+        message_idx = 1
+        is_final = False
+        full_content = ''
+        remove_think = self.pipeline_config.get('output', {}).get('misc', {}).get('remove-think', False)
+
+        try:
+            # Call the Coze API streaming endpoint
+            async for chunk in self.coze.chat_messages(
+                bot_id=self.bot_id,
+                user_id=user_id,
+                additional_messages=additional_messages,
+                conversation_id=conversation_id,
+                timeout=self.chat_timeout,
+                auto_save_history=self.auto_save_history,
+                stream=True
+            ):
+                self.ap.logger.debug(f'coze-chat-stream-chunk: {chunk}')
+
+                event_type = chunk.get('event')
+                data = chunk.get('data', {})
+                content = ""
+
+                if event_type == 'conversation.message.delta':
+                    message_idx += 1
+                    # Handle content deltas
+                    if "reasoning_content" in data and not remove_think:
+
+                        reasoning_content = data.get('reasoning_content', '')
+                        if reasoning_content and not start_reasoning:
+                            content = "<think>\n"
+                            start_reasoning = True
+                        content += reasoning_content
+
+                    if 'content' in data:
+                        if data.get('content', ''):
+                            content += data.get('content', '')
+                            if not stop_reasoning and start_reasoning:
+                                content = f"</think>\n{content}"
+                                stop_reasoning = True
+
+                elif event_type.split(".")[-1] == 'done':  # for locally deployed Coze, the final event is not 'done'
+                    # Save the conversation ID
+                    if 'conversation_id' in data:
+                        conversation_id = data.get('conversation_id')
+                        if query.session.using_conversation:
+                            query.session.using_conversation.uuid = conversation_id
+                    is_final = True
+
+                elif event_type == 'error':
+                    # Handle errors
+                    error_msg = f"Coze API error: {data.get('message', 'unknown error')}"
+                    yield provider_message.MessageChunk(
+                        role='assistant',
+                        content=error_msg,
+                        finish_reason='error'
+                    )
+                    return
+                full_content += content
+                if message_idx % 8 == 0 or is_final:
+                    if full_content:
+                        yield provider_message.MessageChunk(
+                            role='assistant',
+                            content=full_content,
+                            is_final=is_final
+                        )
+
+        except Exception as e:
+            self.ap.logger.error(f'Coze API streaming call error: {str(e)}')
+            yield provider_message.MessageChunk(
+                role='assistant',
+                content=f'Coze API streaming call failed: {str(e)}',
+                finish_reason='error'
+            )
+
+
+    async def run(self, query: pipeline_query.Query) -> typing.AsyncGenerator[provider_message.Message, None]:
+        """Run"""
+        msg_seq = 0
+        if await query.adapter.is_stream_output_supported():
+            async for msg in self._chat_messages_chunk(query):
+                if isinstance(msg, provider_message.MessageChunk):
+                    msg_seq += 1
+                    msg.msg_sequence = msg_seq
+                    yield msg
+        else:
+            async for msg in self._chat_messages(query):
+                yield msg
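The `_process_thinking_content` helper above extracts and strips `<think>` blocks with a regex. A quick standalone check of that pattern (the sample strings are made up):

    import re

    think_pattern = r'<think>(.*?)</think>'
    content = '<think>look up the weather API</think>The weather is sunny.'

    thinking = '\n'.join(re.findall(think_pattern, content, re.DOTALL))
    stripped = re.sub(think_pattern, '', content, flags=re.DOTALL).strip()

    assert thinking == 'look up the weather API'
    assert stripped == 'The weather is sunny.'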
@@ -14,6 +14,7 @@ import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
 from libs.dify_service_api.v1 import client, errors
 
 
+
 @runner.runner_class('dify-service-api')
 class DifyServiceAPIRunner(runner.RequestRunner):
     """Dify Service API conversation runner"""
@@ -77,7 +78,7 @@ class DifyServiceAPIRunner(runner.RequestRunner):
             tuple[str, list[str]]: the plain text and the Dify service file IDs of the images
         """
         plain_text = ''
-        image_ids = []
+        file_ids = []
 
         if isinstance(query.user_message.content, list):
             for ce in query.user_message.content:
@@ -92,11 +93,24 @@ class DifyServiceAPIRunner(runner.RequestRunner):
                     f'{query.session.launcher_type.value}_{query.session.launcher_id}',
                 )
                 image_id = file_upload_resp['id']
-                image_ids.append(image_id)
+                file_ids.append(image_id)
+                # elif ce.type == "file_url":
+                #     file_bytes = base64.b64decode(ce.file_url)
+                #     file_upload_resp = await self.dify_client.upload_file(
+                #         file_bytes,
+                #         f'{query.session.launcher_type.value}_{query.session.launcher_id}',
+                #     )
+                #     file_id = file_upload_resp['id']
+                #     file_ids.append(file_id)
         elif isinstance(query.user_message.content, str):
             plain_text = query.user_message.content
+        # plain_text = "When the file content is readable, please read the content of this file. When the file is an image, describe the content of this image." if file_ids and not plain_text else plain_text
+        # plain_text = "The user message type cannot be parsed." if not file_ids and not plain_text else plain_text
+        # plain_text = plain_text if plain_text else "When the file content is readable, please read the content of this file. When the file is an image, describe the content of this image."
+        # print(self.pipeline_config['ai'])
+        plain_text = plain_text if plain_text else self.pipeline_config['ai']['dify-service-api']['base-prompt']
 
-        return plain_text, image_ids
+        return plain_text, file_ids
 
     async def _chat_messages(
         self, query: pipeline_query.Query
@@ -110,7 +124,6 @@ class DifyServiceAPIRunner(runner.RequestRunner):
         files = [
             {
                 'type': 'image',
-                'transfer_method': 'local_file',
                 'upload_file_id': image_id,
             }
             for image_id in image_ids
@@ -40,10 +40,14 @@ class LocalAgentRunner(runner.RequestRunner):
         """Run the request"""
         pending_tool_calls = []
 
-        kb_uuid = query.pipeline_config['ai']['local-agent']['knowledge-base']
-
-        if kb_uuid == '__none__':
-            kb_uuid = None
+        # Get knowledge bases list (new field)
+        kb_uuids = query.pipeline_config['ai']['local-agent'].get('knowledge-bases', [])
+
+        # Fallback to old field for backward compatibility
+        if not kb_uuids:
+            old_kb_uuid = query.pipeline_config['ai']['local-agent'].get('knowledge-base', '')
+            if old_kb_uuid and old_kb_uuid != '__none__':
+                kb_uuids = [old_kb_uuid]
 
         user_message = copy.deepcopy(query.user_message)
 
@@ -57,21 +61,28 @@ class LocalAgentRunner(runner.RequestRunner):
                 user_message_text += ce.text
                 break
 
-        if kb_uuid and user_message_text:
+        if kb_uuids and user_message_text:
             # only support text for now
-            kb = await self.ap.rag_mgr.get_knowledge_base_by_uuid(kb_uuid)
-
-            if not kb:
-                self.ap.logger.warning(f'Knowledge base {kb_uuid} not found')
-                raise ValueError(f'Knowledge base {kb_uuid} not found')
-
-            result = await kb.retrieve(user_message_text, kb.knowledge_base_entity.top_k)
+            all_results = []
+
+            # Retrieve from each knowledge base
+            for kb_uuid in kb_uuids:
+                kb = await self.ap.rag_mgr.get_knowledge_base_by_uuid(kb_uuid)
+
+                if not kb:
+                    self.ap.logger.warning(f'Knowledge base {kb_uuid} not found, skipping')
+                    continue
+
+                result = await kb.retrieve(user_message_text, kb.knowledge_base_entity.top_k)
+
+                if result:
+                    all_results.extend(result)
 
             final_user_message_text = ''
 
-            if result:
+            if all_results:
                 rag_context = '\n\n'.join(
-                    f'[{i + 1}] {entry.metadata.get("text", "")}' for i, entry in enumerate(result)
+                    f'[{i + 1}] {entry.metadata.get("text", "")}' for i, entry in enumerate(all_results)
                 )
                 final_user_message_text = rag_combined_prompt_template.format(
                     rag_context=rag_context, user_message=user_message_text
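The runner above prefers the new `knowledge-bases` list and falls back to the legacy single-value `knowledge-base` field. That resolution logic as a standalone sketch, run against two hypothetical configs:

    def resolve_kb_uuids(local_agent_cfg: dict) -> list[str]:
        # New field: a list of knowledge base UUIDs.
        kb_uuids = local_agent_cfg.get('knowledge-bases', [])
        if not kb_uuids:
            # Legacy field: a single UUID, where '__none__' means "no knowledge base".
            old = local_agent_cfg.get('knowledge-base', '')
            if old and old != '__none__':
                kb_uuids = [old]
        return kb_uuids

    assert resolve_kb_uuids({'knowledge-bases': ['kb-1', 'kb-2']}) == ['kb-1', 'kb-2']
    assert resolve_kb_uuids({'knowledge-base': '__none__'}) == []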
205 pkg/provider/runners/tboxapi.py (new file)
@@ -0,0 +1,205 @@
+from __future__ import annotations
+
+import typing
+import json
+import base64
+import tempfile
+import os
+
+from tboxsdk.tbox import TboxClient
+from tboxsdk.model.file import File, FileType
+
+from .. import runner
+from ...core import app
+from ...utils import image
+import langbot_plugin.api.entities.builtin.pipeline.query as pipeline_query
+import langbot_plugin.api.entities.builtin.provider.message as provider_message
+
+
+class TboxAPIError(Exception):
+    """TBox API request failed"""
+
+    def __init__(self, message: str):
+        self.message = message
+        super().__init__(self.message)
+
+
+@runner.runner_class('tbox-app-api')
+class TboxAPIRunner(runner.RequestRunner):
+    'Ant Group Tbox (蚂蚁百宝箱) API conversation runner'
+
+    # Configuration used internally by the runner
+    app_id: str  # application ID on the Tbox platform
+    api_key: str  # token requested on the Tbox platform
+
+    def __init__(self, ap: app.Application, pipeline_config: dict):
+        """Initialize"""
+        self.ap = ap
+        self.pipeline_config = pipeline_config
+
+        # Initialize Tbox parameters
+        self.app_id = self.pipeline_config['ai']['tbox-app-api']['app-id']
+        self.api_key = self.pipeline_config['ai']['tbox-app-api']['api-key']
+
+        # Initialize the Tbox client
+        self.tbox_client = TboxClient(authorization=self.api_key)
+
+    async def _preprocess_user_message(self, query: pipeline_query.Query) -> tuple[str, list[str]]:
+        """Preprocess the user message: extract plain text and upload images to the Tbox service
+
+        Returns:
+            tuple[str, list[str]]: the plain text and the Tbox file IDs of the images
+        """
+        plain_text = ''
+        image_ids = []
+
+        if isinstance(query.user_message.content, list):
+            for ce in query.user_message.content:
+                if ce.type == 'text':
+                    plain_text += ce.text
+                elif ce.type == 'image_base64':
+                    image_b64, image_format = await image.extract_b64_and_format(ce.image_base64)
+                    # Create a temporary file
+                    file_bytes = base64.b64decode(image_b64)
+                    try:
+                        with tempfile.NamedTemporaryFile(suffix=f'.{image_format}', delete=False) as tmp_file:
+                            tmp_file.write(file_bytes)
+                            tmp_file_path = tmp_file.name
+                        file_upload_resp = self.tbox_client.upload_file(
+                            tmp_file_path
+                        )
+                        image_id = file_upload_resp.get("data", "")
+                        image_ids.append(image_id)
+                    finally:
+                        # Clean up the temporary file
+                        if os.path.exists(tmp_file_path):
+                            os.unlink(tmp_file_path)
+        elif isinstance(query.user_message.content, str):
+            plain_text = query.user_message.content
+
+        return plain_text, image_ids
+
+    async def _agent_messages(
+        self, query: pipeline_query.Query
+    ) -> typing.AsyncGenerator[provider_message.Message, None]:
+        """TBox agent conversation request"""
+
+        plain_text, image_ids = await self._preprocess_user_message(query)
+        remove_think = self.pipeline_config['output'].get('misc', {}).get('remove-think')
+
+        try:
+            is_stream = await query.adapter.is_stream_output_supported()
+        except AttributeError:
+            is_stream = False
+
+        # Get the Tbox conversation_id
+        conversation_id = query.session.using_conversation.uuid or None
+
+        files = None
+        if image_ids:
+            files = [
+                File(file_id=image_id, type=FileType.IMAGE)
+                for image_id in image_ids
+            ]
+
+        # Send the conversation request
+        response = self.tbox_client.chat(
+            app_id=self.app_id,  # ID of the agent application in Tbox
+            user_id=query.bot_uuid,  # user ID
+            query=plain_text,  # text input from the user
+            stream=is_stream,  # whether to stream the output
+            conversation_id=conversation_id,  # conversation ID; if None, Tbox creates a new conversation automatically
+            files=files,  # image content
+        )
+
+        if is_stream:
+            # Parse the Tbox streaming output and forward it upstream
+            for chunk in self._process_stream_message(response, query, remove_think):
+                yield chunk
+        else:
+            message = self._process_non_stream_message(response, query, remove_think)
+            yield provider_message.Message(
+                role='assistant',
+                content=message,
+            )
+
+    def _process_non_stream_message(self, response: typing.Dict, query: pipeline_query.Query, remove_think: bool):
+        if response.get('errorCode') != "0":
+            raise TboxAPIError(f'Tbox API request failed: {response.get("errorMsg", "")}')
+        payload = response.get('data', {})
+        conversation_id = payload.get('conversationId', '')
+        query.session.using_conversation.uuid = conversation_id
+        thinking_content = payload.get('reasoningContent', [])
+        result = ""
+        if thinking_content and not remove_think:
+            result += f'<think>\n{thinking_content[0].get("text", "")}\n</think>\n'
+        content = payload.get('result', [])
+        if content:
+            result += content[0].get('chunk', '')
+        return result
+
+    def _process_stream_message(self, response: typing.Generator[dict, None, None], query: pipeline_query.Query, remove_think: bool):
+        idx_msg = 0
+        pending_content = ''
+        conversation_id = None
+        think_start = False
+        think_end = False
+        for chunk in response:
+            if chunk.get('type', '') == 'chunk':
+                """
+                Structure of a content chunk returned by Tbox:
+                {'lane': 'default', 'payload': {'conversationId': '20250918tBI947065406', 'messageId': '20250918TB1f53230954', 'text': '️'}, 'type': 'chunk'}
+                """
+                # If a thinking section was opened, close it with </think>
+                if think_start and not think_end:
+                    pending_content += '\n</think>\n'
+                    think_end = True
+
+                payload = chunk.get('payload', {})
+                if not conversation_id:
+                    conversation_id = payload.get('conversationId')
+                    query.session.using_conversation.uuid = conversation_id
+                if payload.get('text'):
+                    idx_msg += 1
+                    pending_content += payload.get('text')
+            elif chunk.get('type', '') == 'thinking' and not remove_think:
+                """
+                Structure of a thinking chunk returned by Tbox:
+                {'payload': '{"ext_data":{"text":"日期"},"event":"flow.node.llm.thinking","entity":{"node_type":"text-completion","execute_id":"6","group_id":0,"parent_execute_id":"6","node_name":"模型推理","node_id":"TC_5u6gl0"}}', 'type': 'thinking'}
+                """
+                payload = json.loads(chunk.get('payload', '{}'))
+                if payload.get('ext_data', {}).get('text'):
+                    idx_msg += 1
+                    content = payload.get('ext_data', {}).get('text')
+                    if not think_start:
+                        think_start = True
+                        pending_content += f'<think>\n{content}'
+                    else:
+                        pending_content += content
+            elif chunk.get('type', '') == 'error':
+                raise TboxAPIError(
|
||||||
|
f'Tbox API 请求失败: status_code={chunk.get("status_code")} message={chunk.get("message")} request_id={chunk.get("request_id")} '
|
||||||
|
)
|
||||||
|
|
||||||
|
if idx_msg % 8 == 0:
|
||||||
|
yield provider_message.MessageChunk(
|
||||||
|
role='assistant',
|
||||||
|
content=pending_content,
|
||||||
|
is_final=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Tbox不返回END事件,默认发一个最终消息
|
||||||
|
yield provider_message.MessageChunk(
|
||||||
|
role='assistant',
|
||||||
|
content=pending_content,
|
||||||
|
is_final=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def run(self, query: pipeline_query.Query) -> typing.AsyncGenerator[provider_message.Message, None]:
|
||||||
|
"""运行"""
|
||||||
|
msg_seq = 0
|
||||||
|
async for msg in self._agent_messages(query):
|
||||||
|
if isinstance(msg, provider_message.MessageChunk):
|
||||||
|
msg_seq += 1
|
||||||
|
msg.msg_sequence = msg_seq
|
||||||
|
yield msg
|
||||||
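
The stream branch above batches partial messages: text accumulates in pending_content, a non-final MessageChunk is emitted every 8 text chunks, and one final chunk is sent unconditionally because TBox never emits an END event. A minimal, standalone sketch of that batching idea (illustrative names only; no LangBot or tboxsdk imports assumed):

def batch_stream(chunks, batch_every=8):
    """Yield (accumulated_text, is_final) pairs, flushing every `batch_every` text chunks."""
    idx_msg = 0
    pending = ''
    for chunk in chunks:
        if chunk.get('type') == 'chunk':
            text = chunk.get('payload', {}).get('text', '')
            if text:
                idx_msg += 1
                pending += text
        if idx_msg % batch_every == 0:
            yield pending, False  # partial flush
    yield pending, True  # no END event upstream, so always finish explicitly


sample = [{'type': 'chunk', 'payload': {'text': w}} for w in ['Hel', 'lo ', 'wor', 'ld']]
for content, is_final in batch_stream(sample, batch_every=2):
    print(is_final, repr(content))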
@@ -35,7 +35,7 @@ class ToolLoader(abc.ABC):
         pass

     @abc.abstractmethod
-    async def get_tools(self) -> list[resource_tool.LLMTool]:
+    async def get_tools(self, bound_plugins: list[str] | None = None) -> list[resource_tool.LLMTool]:
         """Get all tools"""
         pass
@@ -1,7 +1,11 @@
 from __future__ import annotations

+import enum
 import typing
 from contextlib import AsyncExitStack
+import traceback
+import sqlalchemy
+import asyncio

 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client
@@ -10,6 +14,13 @@ from mcp.client.sse import sse_client
 from .. import loader
 from ....core import app
 import langbot_plugin.api.entities.builtin.resource.tool as resource_tool
+from ....entity.persistence import mcp as persistence_mcp
+
+
+class MCPSessionStatus(enum.Enum):
+    CONNECTING = 'connecting'
+    CONNECTED = 'connected'
+    ERROR = 'error'


 class RuntimeMCPSession:
@@ -19,6 +30,8 @@ class RuntimeMCPSession:

     server_name: str

+    server_uuid: str
+
     server_config: dict

     session: ClientSession
@@ -27,16 +40,34 @@ class RuntimeMCPSession:

     functions: list[resource_tool.LLMTool] = []

-    def __init__(self, server_name: str, server_config: dict, ap: app.Application):
+    enable: bool
+
+    # connected: bool
+    status: MCPSessionStatus
+
+    _lifecycle_task: asyncio.Task | None
+
+    _shutdown_event: asyncio.Event
+
+    _ready_event: asyncio.Event
+
+    def __init__(self, server_name: str, server_config: dict, enable: bool, ap: app.Application):
         self.server_name = server_name
+        self.server_uuid = server_config.get('uuid', '')
         self.server_config = server_config
         self.ap = ap
+        self.enable = enable
         self.session = None

         self.exit_stack = AsyncExitStack()
         self.functions = []
+
+        self.status = MCPSessionStatus.CONNECTING
+
+        self._lifecycle_task = None
+        self._shutdown_event = asyncio.Event()
+        self._ready_event = asyncio.Event()

     async def _init_stdio_python_server(self):
         server_params = StdioServerParameters(
             command=self.server_config['command'],
@@ -58,6 +89,7 @@ class RuntimeMCPSession:
                     self.server_config['url'],
                     headers=self.server_config.get('headers', {}),
                     timeout=self.server_config.get('timeout', 10),
+                    sse_read_timeout=self.server_config.get('ssereadtimeout', 30),
                 )
             )
@@ -67,19 +99,65 @@ class RuntimeMCPSession:

         await self.session.initialize()

-    async def initialize(self):
-        self.ap.logger.debug(f'Initializing MCP session: {self.server_name} {self.server_config}')
-
-        if self.server_config['mode'] == 'stdio':
-            await self._init_stdio_python_server()
-        elif self.server_config['mode'] == 'sse':
-            await self._init_sse_server()
-        else:
-            raise ValueError(f'Unrecognized MCP server type: {self.server_name}: {self.server_config}')
+    async def _lifecycle_loop(self):
+        """Manage the entire MCP session lifecycle inside a background task"""
+        try:
+            if self.server_config['mode'] == 'stdio':
+                await self._init_stdio_python_server()
+            elif self.server_config['mode'] == 'sse':
+                await self._init_sse_server()
+            else:
+                raise ValueError(f'Unrecognized MCP server type: {self.server_name}: {self.server_config}')
+
+            await self.refresh()
+
+            self.status = MCPSessionStatus.CONNECTED
+
+            # Notify start() that the connection is established
+            self._ready_event.set()
+
+            # Wait for the shutdown signal
+            await self._shutdown_event.wait()
+
+        except Exception as e:
+            self.status = MCPSessionStatus.ERROR
+            self.ap.logger.error(f'Error in MCP session lifecycle {self.server_name}: {e}\n{traceback.format_exc()}')
+            # Set the ready event even on failure so start() knows initialization has finished
+            self._ready_event.set()
+        finally:
+            # Clean up all resources in the same task
+            try:
+                if self.exit_stack:
+                    await self.exit_stack.aclose()
+                self.functions.clear()
+                self.session = None
+            except Exception as e:
+                self.ap.logger.error(f'Error cleaning up MCP session {self.server_name}: {e}\n{traceback.format_exc()}')
+
+    async def start(self):
+        if not self.enable:
+            return
+
+        # Create a background task to manage the lifecycle
+        self._lifecycle_task = asyncio.create_task(self._lifecycle_loop())
+
+        # Wait for the connection to be established or fail (with timeout)
+        try:
+            await asyncio.wait_for(self._ready_event.wait(), timeout=30.0)
+        except asyncio.TimeoutError:
+            self.status = MCPSessionStatus.ERROR
+            raise Exception('Connection timeout after 30 seconds')

+        # Check whether an error occurred
+        if self.status == MCPSessionStatus.ERROR:
+            raise Exception('Connection failed, please check URL')
+
+    async def refresh(self):
+        self.functions.clear()

         tools = await self.session.list_tools()

-        self.ap.logger.debug(f'Fetched MCP tools: {tools}')
+        self.ap.logger.debug(f'Refresh MCP tools: {tools}')

         for tool in tools.tools:

@@ -101,58 +179,220 @@ class RuntimeMCPSession:
             )
         )

+    def get_tools(self) -> list[resource_tool.LLMTool]:
+        return self.functions
+
+    def get_runtime_info_dict(self) -> dict:
+        return {
+            'status': self.status.value,
+            'tool_count': len(self.get_tools()),
+            'tools': [
+                {
+                    'name': tool.name,
+                    'description': tool.description,
+                }
+                for tool in self.get_tools()
+            ],
+        }
+
     async def shutdown(self):
-        """Shut down the tool"""
-        await self.session._exit_stack.aclose()
+        """Shut down the session and clean up resources"""
+        try:
+            # Set the shutdown event to tell the lifecycle task to exit
+            self._shutdown_event.set()
+
+            # Wait for the lifecycle task to finish (with timeout)
+            if self._lifecycle_task and not self._lifecycle_task.done():
+                try:
+                    await asyncio.wait_for(self._lifecycle_task, timeout=5.0)
+                except asyncio.TimeoutError:
+                    self.ap.logger.warning(f'MCP session {self.server_name} shutdown timeout, cancelling task')
+                    self._lifecycle_task.cancel()
+                    try:
+                        await self._lifecycle_task
+                    except asyncio.CancelledError:
+                        pass
+
+            self.ap.logger.info(f'MCP session {self.server_name} shutdown complete')
+        except Exception as e:
+            self.ap.logger.error(f'Error shutting down MCP session {self.server_name}: {e}\n{traceback.format_exc()}')


-@loader.loader_class('mcp')
+# @loader.loader_class('mcp')
 class MCPLoader(loader.ToolLoader):
     """MCP tool loader.

     All connections to MCP servers are managed in this loader.
     """

-    sessions: dict[str, RuntimeMCPSession] = {}
+    sessions: dict[str, RuntimeMCPSession]

-    _last_listed_functions: list[resource_tool.LLMTool] = []
+    _last_listed_functions: list[resource_tool.LLMTool]
+
+    _hosted_mcp_tasks: list[asyncio.Task]

     def __init__(self, ap: app.Application):
         super().__init__(ap)
         self.sessions = {}
         self._last_listed_functions = []
+        self._hosted_mcp_tasks = []

     async def initialize(self):
-        for server_config in self.ap.instance_config.data.get('mcp', {}).get('servers', []):
-            if not server_config['enable']:
-                continue
-            session = RuntimeMCPSession(server_config['name'], server_config, self.ap)
-            await session.initialize()
-            # self.ap.event_loop.create_task(session.initialize())
-            self.sessions[server_config['name']] = session
+        await self.load_mcp_servers_from_db()

-    async def get_tools(self) -> list[resource_tool.LLMTool]:
+    async def load_mcp_servers_from_db(self):
+        self.ap.logger.info('Loading MCP servers from db...')
+
+        self.sessions = {}
+
+        result = await self.ap.persistence_mgr.execute_async(sqlalchemy.select(persistence_mcp.MCPServer))
+        servers = result.all()
+
+        for server in servers:
+            config = self.ap.persistence_mgr.serialize_model(persistence_mcp.MCPServer, server)
+
+            task = asyncio.create_task(self.host_mcp_server(config))
+            self._hosted_mcp_tasks.append(task)
+
+    async def host_mcp_server(self, server_config: dict):
+        self.ap.logger.debug(f'Loading MCP server {server_config}')
+        try:
+            session = await self.load_mcp_server(server_config)
+            self.sessions[server_config['name']] = session
+        except Exception as e:
+            self.ap.logger.error(
+                f'Failed to load MCP server from db: {server_config["name"]}({server_config["uuid"]}): {e}\n{traceback.format_exc()}'
+            )
+            return
+
+        self.ap.logger.debug(f'Starting MCP server {server_config["name"]}({server_config["uuid"]})')
+        try:
+            await session.start()
+        except Exception as e:
+            self.ap.logger.error(
+                f'Failed to start MCP server {server_config["name"]}({server_config["uuid"]}): {e}\n{traceback.format_exc()}'
+            )
+            return
+
+        self.ap.logger.debug(f'Started MCP server {server_config["name"]}({server_config["uuid"]})')
+
+    async def load_mcp_server(self, server_config: dict) -> RuntimeMCPSession:
+        """Load an MCP server into the runtime
+
+        Args:
+            server_config: server configuration dict, must contain:
+                - name: server name
+                - mode: connection mode (stdio/sse)
+                - enable: whether the server is enabled
+                - extra_args: extra configuration parameters (optional)
+        """
+
+        name = server_config['name']
+        uuid = server_config['uuid']
+        mode = server_config['mode']
+        enable = server_config['enable']
+        extra_args = server_config.get('extra_args', {})
+
+        mixed_config = {
+            'name': name,
+            'uuid': uuid,
+            'mode': mode,
+            'enable': enable,
+            **extra_args,
+        }
+
+        session = RuntimeMCPSession(name, mixed_config, enable, self.ap)
+
+        return session
+
+    async def get_tools(self, bound_mcp_servers: list[str] | None = None) -> list[resource_tool.LLMTool]:
         all_functions = []

         for session in self.sessions.values():
-            all_functions.extend(session.functions)
+            # If bound_mcp_servers is specified, only include tools from those servers
+            if bound_mcp_servers is not None:
+                if session.server_uuid in bound_mcp_servers:
+                    all_functions.extend(session.get_tools())
+            else:
+                # If no bound servers are specified, include all tools
+                all_functions.extend(session.get_tools())

         self._last_listed_functions = all_functions

         return all_functions

     async def has_tool(self, name: str) -> bool:
-        return name in [f.name for f in self._last_listed_functions]
+        """Check whether a tool exists"""
+        for session in self.sessions.values():
+            for function in session.get_tools():
+                if function.name == name:
+                    return True
+        return False

     async def invoke_tool(self, name: str, parameters: dict) -> typing.Any:
-        for server_name, session in self.sessions.items():
-            for function in session.functions:
+        """Execute a tool call"""
+        for session in self.sessions.values():
+            for function in session.get_tools():
                 if function.name == name:
-                    return await function.func(**parameters)
+                    self.ap.logger.debug(f'Invoking MCP tool: {name} with parameters: {parameters}')
+                    try:
+                        result = await function.func(**parameters)
+                        self.ap.logger.debug(f'MCP tool {name} executed successfully')
+                        return result
+                    except Exception as e:
+                        self.ap.logger.error(f'Error invoking MCP tool {name}: {e}\n{traceback.format_exc()}')
+                        raise

         raise ValueError(f'Tool not found: {name}')
+
+    async def remove_mcp_server(self, server_name: str):
+        """Remove an MCP server"""
+        if server_name not in self.sessions:
+            self.ap.logger.warning(f'MCP server {server_name} not found in sessions, skipping removal')
+            return
+
+        session = self.sessions.pop(server_name)
+        await session.shutdown()
+        self.ap.logger.info(f'Removed MCP server: {server_name}')
+
+    def get_session(self, server_name: str) -> RuntimeMCPSession | None:
+        """Get the MCP session with the given name"""
+        return self.sessions.get(server_name)
+
+    def has_session(self, server_name: str) -> bool:
+        """Check whether an MCP session with the given name exists"""
+        return server_name in self.sessions
+
+    def get_all_server_names(self) -> list[str]:
+        """Get the names of all loaded MCP servers"""
+        return list(self.sessions.keys())
+
+    def get_server_tool_count(self, server_name: str) -> int:
+        """Get the number of tools provided by the given server"""
+        session = self.get_session(server_name)
+        return len(session.get_tools()) if session else 0
+
+    def get_all_servers_info(self) -> dict[str, dict]:
+        """Get information about all servers"""
+        info = {}
+        for server_name, session in self.sessions.items():
+            info[server_name] = {
+                'name': server_name,
+                'mode': session.server_config.get('mode'),
+                'enable': session.enable,
+                'tools_count': len(session.get_tools()),
+                'tool_names': [f.name for f in session.get_tools()],
+            }
+        return info

     async def shutdown(self):
-        """Shut down the tools"""
-        for session in self.sessions.values():
-            await session.shutdown()
+        """Shut down all tools"""
+        self.ap.logger.info('Shutting down all MCP sessions...')
+        for server_name, session in list(self.sessions.items()):
+            try:
+                await session.shutdown()
+                self.ap.logger.debug(f'Shutdown MCP session: {server_name}')
+            except Exception as e:
+                self.ap.logger.error(f'Error shutting down MCP session {server_name}: {e}\n{traceback.format_exc()}')
+        self.sessions.clear()
+        self.ap.logger.info('All MCP sessions shutdown complete')
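
The new session code replaces a simple initialize() with a background lifecycle task coordinated through two events: start() waits on a ready event (with a 30-second timeout), and shutdown() sets a stop event and then awaits the task. One motivation for keeping setup, serving, and cleanup in a single task is that anyio-based client contexts (such as the transports entered through AsyncExitStack) generally must be exited from the task that entered them. A minimal, standalone sketch of the coordination pattern, using plain asyncio and illustrative names only:

import asyncio


class SketchSession:
    def __init__(self):
        self._ready = asyncio.Event()
        self._stop = asyncio.Event()
        self._task: asyncio.Task | None = None
        self.error: Exception | None = None

    async def _lifecycle(self):
        try:
            await asyncio.sleep(0.05)  # stand-in for connect + list_tools
            self._ready.set()          # unblock start()
            await self._stop.wait()    # park until shutdown() is requested
        except Exception as e:
            self.error = e
            self._ready.set()          # unblock start() even on failure
        finally:
            pass                       # resources would be released here, in the same task

    async def start(self, timeout: float = 30.0):
        self._task = asyncio.create_task(self._lifecycle())
        await asyncio.wait_for(self._ready.wait(), timeout=timeout)
        if self.error is not None:
            raise self.error

    async def shutdown(self):
        self._stop.set()
        if self._task is not None:
            await asyncio.wait_for(self._task, timeout=5.0)


async def main():
    session = SketchSession()
    await session.start()
    await session.shutdown()


asyncio.run(main())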
@@ -7,18 +7,18 @@ from .. import loader
 import langbot_plugin.api.entities.builtin.resource.tool as resource_tool


-@loader.loader_class('plugin-tool-loader')
+# @loader.loader_class('plugin-tool-loader')
 class PluginToolLoader(loader.ToolLoader):
     """Plugin tool loader.

     This loader does not store tool information itself; it only fetches tool information from the plugin system.
     """

-    async def get_tools(self) -> list[resource_tool.LLMTool]:
+    async def get_tools(self, bound_plugins: list[str] | None = None) -> list[resource_tool.LLMTool]:
         # Fetch tools (content functions) from the plugin system
         all_functions: list[resource_tool.LLMTool] = []

-        for tool in await self.ap.plugin_connector.list_tools():
+        for tool in await self.ap.plugin_connector.list_tools(bound_plugins):
             tool_obj = resource_tool.LLMTool(
                 name=tool.metadata.name,
                 human_desc=tool.metadata.description.en_US,
@@ -3,9 +3,9 @@ from __future__ import annotations
 import typing

 from ...core import app
-from . import loader as tools_loader
 from ...utils import importutil
 from . import loaders
+from .loaders import mcp as mcp_loader, plugin as plugin_loader
 import langbot_plugin.api.entities.builtin.resource.tool as resource_tool

 importutil.import_modules_in_pkg(loaders)
@@ -16,25 +16,24 @@ class ToolManager:

     ap: app.Application

-    loaders: list[tools_loader.ToolLoader]
+    plugin_tool_loader: plugin_loader.PluginToolLoader
+    mcp_tool_loader: mcp_loader.MCPLoader

     def __init__(self, ap: app.Application):
         self.ap = ap
-        self.all_functions = []
-        self.loaders = []

     async def initialize(self):
-        for loader_cls in tools_loader.preregistered_loaders:
-            loader_inst = loader_cls(self.ap)
-            await loader_inst.initialize()
-            self.loaders.append(loader_inst)
+        self.plugin_tool_loader = plugin_loader.PluginToolLoader(self.ap)
+        await self.plugin_tool_loader.initialize()
+        self.mcp_tool_loader = mcp_loader.MCPLoader(self.ap)
+        await self.mcp_tool_loader.initialize()

-    async def get_all_tools(self) -> list[resource_tool.LLMTool]:
+    async def get_all_tools(self, bound_plugins: list[str] | None = None, bound_mcp_servers: list[str] | None = None) -> list[resource_tool.LLMTool]:
         """Get all functions"""
         all_functions: list[resource_tool.LLMTool] = []

-        for loader in self.loaders:
-            all_functions.extend(await loader.get_tools())
+        all_functions.extend(await self.plugin_tool_loader.get_tools(bound_plugins))
+        all_functions.extend(await self.mcp_tool_loader.get_tools(bound_mcp_servers))

         return all_functions

@@ -93,13 +92,14 @@ class ToolManager:
     async def execute_func_call(self, name: str, parameters: dict) -> typing.Any:
         """Execute a function call"""

-        for loader in self.loaders:
-            if await loader.has_tool(name):
-                return await loader.invoke_tool(name, parameters)
+        if await self.plugin_tool_loader.has_tool(name):
+            return await self.plugin_tool_loader.invoke_tool(name, parameters)
+        elif await self.mcp_tool_loader.has_tool(name):
+            return await self.mcp_tool_loader.invoke_tool(name, parameters)
         else:
             raise ValueError(f'Tool not found: {name}')

     async def shutdown(self):
         """Shut down all tools"""
-        for loader in self.loaders:
-            await loader.shutdown()
+        await self.plugin_tool_loader.shutdown()
+        await self.mcp_tool_loader.shutdown()
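
Illustrative sketch (hypothetical stand-in classes, not LangBot code) of the dispatch order above: the plugin loader is consulted first, then the MCP loader, and an unknown name raises:

import asyncio


class FakeLoader:
    def __init__(self, tools: dict):
        self.tools = tools  # name -> callable

    async def has_tool(self, name: str) -> bool:
        return name in self.tools

    async def invoke_tool(self, name: str, parameters: dict):
        return self.tools[name](**parameters)


async def execute_func_call(plugin_loader, mcp_loader, name, parameters):
    if await plugin_loader.has_tool(name):
        return await plugin_loader.invoke_tool(name, parameters)
    elif await mcp_loader.has_tool(name):
        return await mcp_loader.invoke_tool(name, parameters)
    raise ValueError(f'Tool not found: {name}')


async def main():
    plugins = FakeLoader({'echo': lambda text: text})
    mcp = FakeLoader({'add': lambda a, b: a + b})
    print(await execute_func_call(plugins, mcp, 'add', {'a': 1, 'b': 2}))  # prints 3


asyncio.run(main())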
@@ -4,6 +4,7 @@ import json
 from typing import List
 from pkg.rag.knowledge.services import base_service
 from pkg.core import app
+from langchain_text_splitters import RecursiveCharacterTextSplitter


 class Chunker(base_service.BaseService):
@@ -27,21 +28,6 @@ class Chunker(base_service.BaseService):
         """
         if not text:
             return []
-        # words = text.split()
-        # chunks = []
-        # current_chunk = []
-
-        # for word in words:
-        #     current_chunk.append(word)
-        #     if len(current_chunk) > self.chunk_size:
-        #         chunks.append(" ".join(current_chunk[:self.chunk_size]))
-        #         current_chunk = current_chunk[self.chunk_size - self.chunk_overlap:]
-
-        # if current_chunk:
-        #     chunks.append(" ".join(current_chunk))
-
-        # A more robust chunking strategy (e.g., using recursive character text splitter)
-        from langchain.text_splitter import RecursiveCharacterTextSplitter
-
         text_splitter = RecursiveCharacterTextSplitter(
             chunk_size=self.chunk_size,
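
The change above hoists the splitter import to module level (now from the standalone langchain_text_splitters package, pinned in pyproject.toml below) and drops the dead word-based chunker. A hedged usage example of the splitter; the sizes here are illustrative, not LangBot's actual chunk settings:

from langchain_text_splitters import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
chunks = text_splitter.split_text('A long document body to be split into overlapping chunks ...')
print(len(chunks), [len(c) for c in chunks])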
@@ -42,3 +42,10 @@ class StorageProvider(abc.ABC):
         key: str,
     ):
         pass
+
+    @abc.abstractmethod
+    async def delete_dir_recursive(
+        self,
+        dir_path: str,
+    ):
+        pass
@@ -2,6 +2,7 @@ from __future__ import annotations

 import os
 import aiofiles
+import shutil

 from ...core import app

@@ -22,6 +23,8 @@ class LocalStorageProvider(provider.StorageProvider):
         key: str,
         value: bytes,
     ):
+        if not os.path.exists(os.path.join(LOCAL_STORAGE_PATH, os.path.dirname(key))):
+            os.makedirs(os.path.join(LOCAL_STORAGE_PATH, os.path.dirname(key)))
         async with aiofiles.open(os.path.join(LOCAL_STORAGE_PATH, f'{key}'), 'wb') as f:
             await f.write(value)

@@ -43,3 +46,11 @@ class LocalStorageProvider(provider.StorageProvider):
         key: str,
     ):
         os.remove(os.path.join(LOCAL_STORAGE_PATH, f'{key}'))
+
+    async def delete_dir_recursive(
+        self,
+        dir_path: str,
+    ):
+        # Delete the whole directory tree directly
+        if os.path.exists(os.path.join(LOCAL_STORAGE_PATH, dir_path)):
+            shutil.rmtree(os.path.join(LOCAL_STORAGE_PATH, dir_path))
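
Standalone sketch (illustrative paths and names, not the provider class itself) of the two filesystem behaviors added above: ensure parent directories exist before writing a key, and remove a whole directory tree in one call:

import os
import shutil

STORAGE_ROOT = '/tmp/storage-demo'  # stand-in for LOCAL_STORAGE_PATH


def save(key: str, value: bytes):
    path = os.path.join(STORAGE_ROOT, key)
    # create missing parent directories so nested keys like 'kb/doc1/chunk0.bin' work
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'wb') as f:
        f.write(value)


def delete_dir_recursive(dir_path: str):
    path = os.path.join(STORAGE_ROOT, dir_path)
    if os.path.exists(path):
        shutil.rmtree(path)  # removes the directory and everything below it


save('kb/doc1/chunk0.bin', b'hello')
delete_dir_recursive('kb/doc1')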
@@ -1,6 +1,6 @@
-semantic_version = 'v4.3.1'
+semantic_version = 'v4.5.0'

-required_database_version = 8
+required_database_version = 11
 """Tag the version of the database schema, used to check if the database needs to be migrated"""

 debug_mode = False
@@ -1,6 +1,6 @@
 [project]
 name = "langbot"
-version = "4.3.1"
+version = "4.5.0"
 description = "Easy-to-use global IM bot platform designed for LLM era"
 readme = "README.md"
 requires-python = ">=3.10.1,<4.0"
@@ -60,11 +60,13 @@ dependencies = [
     "ebooklib>=0.18",
     "html2text>=2024.2.26",
     "langchain>=0.2.0",
+    "langchain-text-splitters>=0.0.1",
     "chromadb>=0.4.24",
     "qdrant-client (>=1.15.1,<2.0.0)",
-    "langbot-plugin==0.1.1",
+    "langbot-plugin==0.1.10",
     "asyncpg>=0.30.0",
-    "line-bot-sdk>=3.19.0"
+    "line-bot-sdk>=3.19.0",
+    "tboxsdk>=0.0.10",
 ]
 keywords = [
     "bot",
@@ -102,6 +104,7 @@ dev = [
     "pre-commit>=4.2.0",
     "pytest>=8.4.1",
     "pytest-asyncio>=1.0.0",
+    "pytest-cov>=7.0.0",
     "ruff>=0.11.9",
 ]

pytest.ini (new file, 39 lines)
@@ -0,0 +1,39 @@
+[pytest]
+# Test discovery patterns
+python_files = test_*.py
+python_classes = Test*
+python_functions = test_*
+
+# Test paths
+testpaths = tests
+
+# Asyncio configuration
+asyncio_mode = auto
+
+# Output options
+addopts =
+    -v
+    --strict-markers
+    --tb=short
+    --disable-warnings
+
+# Markers
+markers =
+    asyncio: mark test as async
+    unit: mark test as unit test
+    integration: mark test as integration test
+    slow: mark test as slow running
+
+# Coverage options (when using pytest-cov)
+[coverage:run]
+source = pkg
+omit =
+    */tests/*
+    */test_*.py
+    */__pycache__/*
+    */site-packages/*
+
+[coverage:report]
+precision = 2
+show_missing = True
+skip_covered = False

run_tests.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Script to run all unit tests
+# This script helps avoid circular import issues by setting up the environment properly
+
+set -e
+
+echo "Setting up test environment..."
+
+# Activate virtual environment if it exists
+if [ -d ".venv" ]; then
+    source .venv/bin/activate
+fi
+
+# Check if pytest is installed
+if ! command -v pytest &> /dev/null; then
+    echo "Installing test dependencies..."
+    pip install pytest pytest-asyncio pytest-cov
+fi
+
+echo "Running all unit tests..."
+
+# Run tests with coverage
+pytest tests/unit_tests/ -v --tb=short \
+    --cov=pkg \
+    --cov-report=xml \
+    "$@"
+
+echo ""
+echo "Test run complete!"
+echo "Coverage report saved to coverage.xml"
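
The pytest.ini above drives both discovery and coverage for the new test suite, and run_tests.sh wraps the invocation. A hypothetical test file that would be picked up by these rules (a test_*.py file with test_* functions; under asyncio_mode = auto, async tests need no explicit asyncio decorator, and unit/slow are markers declared above):

import asyncio

import pytest


@pytest.mark.unit
def test_addition():
    assert 1 + 1 == 2


@pytest.mark.slow
async def test_async_sleep():
    # collected and awaited automatically under asyncio_mode = auto
    await asyncio.sleep(0.01)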
@@ -10,8 +10,6 @@ command:
 concurrency:
   pipeline: 20
   session: 1
-mcp:
-  servers: []
 proxy:
   http: ''
   https: ''
@@ -38,6 +36,7 @@ vdb:
   port: 6333
   api_key: ''
 plugin:
+  enable: true
   runtime_ws_url: 'ws://langbot_plugin_runtime:5400/control/ws'
   enable_marketplace: true
   cloud_service_url: 'https://space.langbot.app'
Some files were not shown because too many files have changed in this diff.