91 Commits
v1.1.0 ... main

Author SHA1 Message Date
sun-guannan
2eb6c8d761 Update README-zh.md 2025-11-22 22:12:31 +08:00
sun-guannan
b83a013a42 Update README-zh.md 2025-11-16 11:34:59 +08:00
sun-guannan
b74247e60c Update README-zh.md 2025-11-09 23:32:11 +08:00
sun-guannan
13a57ba0f2 Update README-zh.md 2025-11-05 17:39:50 +08:00
sun-guannan
ff61c38114 Update README.md 2025-10-30 11:21:52 +08:00
sun-guannan
10ade1b57a Update README.md 2025-10-30 11:20:15 +08:00
sun-guannan
e5e74e275a Update README.md 2025-10-30 11:19:29 +08:00
sun-guannan
59c04e8f77 Update README-zh.md 2025-10-30 00:19:52 +08:00
sun-guannan
2cff5d2e20 Update README-zh.md 2025-10-25 17:18:06 +08:00
sun-guannan
aaf196f926 Update README-zh.md 2025-10-16 10:39:19 +08:00
sun-guannan
cfde6304f9 Update README-zh.md 2025-10-10 21:13:01 +08:00
sun-guannan
80c03c00ea Update README-zh.md 2025-09-30 08:58:41 +08:00
sun-guannan
b97931392c Merge pull request #46 from sun-guannan/dev
enable render index track
2025-09-23 16:12:57 +08:00
sun-guannan
66fb7d066b enable render index track 2025-09-23 16:11:31 +08:00
sun-guannan
4377d92548 Update README-zh.md 2025-09-23 15:48:23 +08:00
sun-guannan
77d2c9e108 Update README-zh.md 2025-09-17 08:48:45 +08:00
sun-guannan
d904c78187 Update README-zh.md 2025-09-09 15:04:42 +08:00
sun-guannan
04e01449e3 Update README-zh.md 2025-09-06 15:56:27 +08:00
sun-guannan
aba58dd845 Update README-zh.md 2025-08-31 17:01:37 +08:00
sun-guannan
83c33c15c4 Update README-zh.md 2025-08-23 21:58:43 +08:00
sun-guannan
f0ca6afe5a Update README-zh.md 2025-08-19 08:57:08 +08:00
sun-guannan
76a5bc3e15 Update README-zh.md 2025-08-19 08:56:57 +08:00
sun-guannan
d68eb4ddb7 Update README.md 2025-08-17 22:00:13 +08:00
sun-guannan
2e82bb719c Merge pull request #34 from lwsinclair/add-mseep-badge
Add MSeeP.ai badge
2025-08-17 16:18:26 +08:00
sun-guannan
cc7cba9af6 Update README.md 2025-08-17 16:14:21 +08:00
sun-guannan
fa16b9d310 Update README-zh.md 2025-08-17 16:02:25 +08:00
sun-guannan
0052c44e88 Merge pull request #37 from sun-guannan/dev
v1.3.0
2025-08-17 16:00:37 +08:00
sun-guannan
f45b2fe314 fix merge 2025-08-17 16:00:22 +08:00
sun-guannan
55c28c6b4a Update README.md 2025-08-17 15:44:27 +08:00
sun-guannan
cce0500bc4 Update README.md 2025-08-17 15:42:51 +08:00
sun-guannan
9bc28ebab6 Update README-zh.md 2025-08-14 11:39:16 +08:00
Lawrence Sinclair
fc53398c27 Add MSeeP.ai badge to README.md 2025-08-13 18:49:37 -07:00
sun-guannan
29fc42bfc4 add 002 partten 2025-08-13 18:36:26 +08:00
sun-guannan
341fc022a9 Update README-zh.md 2025-08-12 18:30:42 +08:00
sun-guannan
195a927f04 add coze md 2025-08-12 18:14:06 +08:00
sun-guannan
0a05b6487b add pattern readme 2025-08-12 18:02:50 +08:00
sun-guannan
c94b11a1f5 add 001-words 2025-08-12 16:56:15 +08:00
sun-guannan
8134ed9489 add 001-words.py 2025-08-12 16:39:46 +08:00
sun-guannan
2dd2ff69bd Merge pull request #32 from KoDelioDa/main
修改 pattern/tmp.py,添加了口播的字幕代码模板
2025-08-12 15:32:43 +08:00
sun-guannan
4a77eea949 default font is system 2025-08-12 15:29:35 +08:00
KoDelioDa
fef03abb47 修改 pattern/tmp.py,添加了口播的字幕代码模板 2025-08-11 23:39:55 +08:00
sun-guannan
93c1c76e5b create pattern 2025-08-11 22:33:45 +08:00
sun-guannan
7d9105e2e0 fix bug 2025-08-11 21:49:11 +08:00
sun-guannan
f24d1d2d44 Merge pull request #29 from sun-guannan/dev
Dev merge
2025-08-10 16:57:37 +08:00
sun-guannan
af1faf7e77 modify readme 2025-08-10 16:56:41 +08:00
sun-guannan
6d6932ab2d modify 2025-08-10 16:56:02 +08:00
sun-guannan
74327da3f4 modify readme 2025-08-10 16:53:56 +08:00
sun-guannan
78c69abfd3 modify readme 2025-08-10 16:53:11 +08:00
sun-guannan
8300dab49b modify readme 2025-08-10 16:51:59 +08:00
sun-guannan
007bc7d83d modify readme 2025-08-10 16:50:36 +08:00
sun-guannan
fc0c05498e modify readme 2025-08-10 16:47:57 +08:00
sun-guannan
7881ef0e87 modify readme 2025-08-10 16:44:14 +08:00
sun-guannan
57a7bdcce2 modify readme 2025-08-10 16:42:08 +08:00
sun-guannan
5c2d17fbad modify readme 2025-08-10 16:41:07 +08:00
sun-guannan
8fd71e07e9 modifyreadme 2025-08-10 16:39:17 +08:00
sun-guannan
c814db83db modify readme 2025-08-10 16:35:53 +08:00
sun-guannan
10035b22ac modify readme 2025-08-10 16:32:40 +08:00
sun-guannan
358b896e9d modify readme 2025-08-10 16:30:42 +08:00
sun-guannan
2fd0233682 modify readme 2025-08-10 16:28:18 +08:00
sun-guannan
57855f3a92 modify readme 2025-08-10 16:24:56 +08:00
sun-guannan
0c1bc2924a modify readme 2025-08-10 16:22:49 +08:00
sun-guannan
5a9479fdee modify readme 2025-08-10 16:19:56 +08:00
sun-guannan
878b6ef6dc modify readme 2025-08-10 16:17:08 +08:00
sun-guannan
4e1c593922 modify read me 2025-08-10 16:15:20 +08:00
sun-guannan
586185551f modify readme 2025-08-10 16:14:40 +08:00
sun-guannan
a79bd85ff4 modify readme 2025-08-10 16:13:22 +08:00
sun-guannan
5b21133535 modify readme 2025-08-10 16:11:02 +08:00
sun-guannan
93f43e339a modify readme 2025-08-10 16:09:31 +08:00
sun-guannan
f0c5254a04 modify readme 2025-08-10 16:07:14 +08:00
sun-guannan
1774884241 modify readme 2025-08-10 16:06:23 +08:00
sun-guannan
8ca273dfb7 modify readme 2025-08-10 15:57:24 +08:00
sun-guannan
fec8156784 t rebase --continue
merge from dev
2025-08-10 15:50:26 +08:00
sun-guannan
196775faf8 Merge branch 'main' into dev 2025-08-10 14:57:27 +08:00
sun-guannan
ae9e72a429 Merge branch 'main' into dev 2025-08-10 14:43:54 +08:00
sun-guannan
b5ffbff3bc Update README-zh.md 2025-08-09 16:02:43 +08:00
sun-guannan
50f7b3fe09 fix bug 2025-08-06 18:13:38 +08:00
sun-guannan
9fbe4dc4fe fix srt file bug 2025-08-05 16:39:55 +08:00
sun-guannan
62ff49af74 add effect catogory 2025-08-05 12:22:12 +08:00
AshReo
369fa2d45e Update README-zh.md 2025-08-04 13:01:03 +08:00
AshReo
cd89301572 Update README.md 2025-08-04 12:58:40 +08:00
AshReo
7b22159ca1 Update README.md 2025-08-04 12:57:54 +08:00
AshReo
fa2cf1ea4d Update README-zh.md 2025-08-04 12:56:36 +08:00
AshReo
e31a9776c2 Update README.md 2025-08-04 12:55:54 +08:00
AshReo
f8bdab82a8 Add MCP Support 2025-08-01 16:58:53 +08:00
AshReo
cedc0aa414 Update README-zh.md 2025-08-01 16:34:08 +08:00
AshReo
6e71134748 Update README-zh.md 2025-08-01 16:32:34 +08:00
AshReo
cc596541cd Update README.md 2025-08-01 16:32:00 +08:00
AshReo
f27ba69d7d Update README.md 2025-08-01 16:30:17 +08:00
AshReo
3ba187d4d5 Update README.md 2025-08-01 16:26:41 +08:00
AshReo
3f587bc6e7 Update README.md 2025-08-01 16:13:21 +08:00
AshReo
21580db6f0 Update README.md 2025-08-01 16:05:15 +08:00
25 changed files with 3327 additions and 273 deletions

View File

@@ -0,0 +1,254 @@
# CapCut API MCP Server Documentation
## Overview
The CapCut API MCP Server is a video editing service based on the Model Context Protocol (MCP), providing complete CapCut video editing functionality interfaces. Through the MCP protocol, you can easily integrate professional-grade video editing capabilities into various applications.
## Features
### 🎬 Core Capabilities
- **Draft Management**: Create, save, and manage video projects
- **Multimedia Support**: Video, audio, image, and text processing
- **Advanced Effects**: Effects, animations, transitions, and filters
- **Precise Control**: Timeline, keyframes, and layer management
### 🛠️ Available Tools (11 Tools)
| Tool Name | Description | Key Parameters |
|-----------|-------------|----------------|
| `create_draft` | Create new video draft project | width, height |
| `add_text` | Add text elements | text, font_size, color, shadow, background |
| `add_video` | Add video track | video_url, start, end, transform, volume |
| `add_audio` | Add audio track | audio_url, volume, speed, effects |
| `add_image` | Add image assets | image_url, transform, animation, transition |
| `add_subtitle` | Add subtitle files | srt_path, font_style, position |
| `add_effect` | Add visual effects | effect_type, parameters, duration |
| `add_sticker` | Add sticker elements | resource_id, position, scale, rotation |
| `add_video_keyframe` | Add keyframe animations | property_types, times, values |
| `get_video_duration` | Get video duration | video_url |
| `save_draft` | Save draft project | draft_id |
## Installation & Setup
### Requirements
- Python 3.10+
- CapCut Application (macOS/Windows)
- MCP Client Support
### Dependencies Installation
```bash
# Create virtual environment
python3.10 -m venv venv-mcp
source venv-mcp/bin/activate # macOS/Linux
# or venv-mcp\Scripts\activate # Windows
# Install dependencies
pip install -r requirements-mcp.txt
```
### MCP Configuration
Create or update `mcp_config.json` file:
```json
{
"mcpServers": {
"capcut-api": {
"command": "python3.10",
"args": ["mcp_server.py"],
"cwd": "/path/to/CapCutAPI-dev",
"env": {
"PYTHONPATH": "/path/to/CapCutAPI-dev"
}
}
}
}
```
## Usage Guide
### Basic Workflow
#### 1. Create Draft
```python
# Create 1080x1920 portrait project
result = mcp_client.call_tool("create_draft", {
"width": 1080,
"height": 1920
})
draft_id = result["draft_id"]
```
#### 2. Add Content
```python
# Add title text
mcp_client.call_tool("add_text", {
"text": "My Video Title",
"start": 0,
"end": 5,
"draft_id": draft_id,
"font_size": 48,
"font_color": "#FFFFFF"
})
# Add background video
mcp_client.call_tool("add_video", {
"video_url": "https://example.com/video.mp4",
"draft_id": draft_id,
"start": 0,
"end": 10,
"volume": 0.8
})
```
#### 3. Save Project
```python
# Save draft
result = mcp_client.call_tool("save_draft", {
"draft_id": draft_id
})
```
### Advanced Features
#### Text Styling
```python
# Text with shadow and background
mcp_client.call_tool("add_text", {
"text": "Advanced Text Effects",
"draft_id": draft_id,
"font_size": 56,
"font_color": "#FFD700",
"shadow_enabled": True,
"shadow_color": "#000000",
"shadow_alpha": 0.8,
"background_color": "#1E1E1E",
"background_alpha": 0.7,
"background_round_radius": 15
})
```
#### Keyframe Animation
```python
# Scale and opacity animation
mcp_client.call_tool("add_video_keyframe", {
"draft_id": draft_id,
"track_name": "video_main",
"property_types": ["scale_x", "scale_y", "alpha"],
"times": [0, 2, 4],
"values": ["1.0", "1.5", "0.5"]
})
```
#### Multi-Style Text
```python
# Different colored text segments
mcp_client.call_tool("add_text", {
"text": "Colorful Text Effect",
"draft_id": draft_id,
"text_styles": [
{"start": 0, "end": 2, "font_color": "#FF0000"},
{"start": 2, "end": 4, "font_color": "#00FF00"}
]
})
```
## Testing & Validation
### Using Test Client
```bash
# Run test client
python test_mcp_client.py
```
### Functionality Checklist
- [ ] Server starts successfully
- [ ] Tool list retrieval works
- [ ] Draft creation functionality
- [ ] Text addition functionality
- [ ] Video/audio/image addition
- [ ] Effects and animation functionality
- [ ] Draft saving functionality
## Troubleshooting
### Common Issues
#### 1. "CapCut modules not available"
**Solution**:
- Confirm CapCut application is installed
- Check Python path configuration
- Verify dependency package installation
#### 2. Server startup failure
**Solution**:
- Check virtual environment activation
- Verify configuration file paths
- Review error logs
#### 3. Tool call errors
**Solution**:
- Check parameter format
- Verify media file URLs
- Confirm time range settings
### Debug Mode
```bash
# Enable verbose logging
export DEBUG=1
python mcp_server.py
```
## Best Practices
### Performance Optimization
1. **Media Files**: Use compressed formats, avoid oversized files
2. **Time Management**: Plan element timelines reasonably, avoid overlaps
3. **Memory Usage**: Save drafts promptly, clean temporary files
### Error Handling
1. **Parameter Validation**: Check required parameters before calling
2. **Exception Catching**: Handle network and file errors
3. **Retry Mechanism**: Retry on temporary failures
## API Reference
### Common Parameters
- `draft_id`: Unique draft identifier
- `start/end`: Time range (seconds)
- `width/height`: Project dimensions
- `transform_x/y`: Position coordinates
- `scale_x/y`: Scale ratios
### Response Format
```json
{
"success": true,
"result": {
"draft_id": "dfd_cat_xxx",
"draft_url": "https://..."
},
"features_used": {
"shadow": false,
"background": false,
"multi_style": false
}
}
```
## Changelog
### v1.0.0
- Initial release
- Support for 11 core tools
- Complete MCP protocol implementation
## Technical Support
For questions or suggestions, please contact us through:
- GitHub Issues
- Technical Documentation
- Community Forums
---
*This documentation is continuously updated. Please follow the latest version.*

254
MCP_文档_中文.md Normal file
View File

@@ -0,0 +1,254 @@
# CapCut API MCP 服务器使用文档
## 概述
CapCut API MCP 服务器是一个基于 Model Context Protocol (MCP) 的视频编辑服务,提供了完整的 CapCut 视频编辑功能接口。通过 MCP 协议,您可以轻松地在各种应用中集成专业级的视频编辑能力。
## 功能特性
### 🎬 核心功能
- **草稿管理**: 创建、保存和管理视频项目
- **多媒体支持**: 视频、音频、图片、文本处理
- **高级效果**: 特效、动画、转场、滤镜
- **精确控制**: 时间轴、关键帧、图层管理
### 🛠️ 可用工具 (11个)
| 工具名称 | 功能描述 | 主要参数 |
|---------|----------|----------|
| `create_draft` | 创建新的视频草稿项目 | width, height |
| `add_text` | 添加文字元素 | text, font_size, color, shadow, background |
| `add_video` | 添加视频轨道 | video_url, start, end, transform, volume |
| `add_audio` | 添加音频轨道 | audio_url, volume, speed, effects |
| `add_image` | 添加图片素材 | image_url, transform, animation, transition |
| `add_subtitle` | 添加字幕文件 | srt_path, font_style, position |
| `add_effect` | 添加视觉特效 | effect_type, parameters, duration |
| `add_sticker` | 添加贴纸元素 | resource_id, position, scale, rotation |
| `add_video_keyframe` | 添加关键帧动画 | property_types, times, values |
| `get_video_duration` | 获取视频时长 | video_url |
| `save_draft` | 保存草稿项目 | draft_id |
## 安装配置
### 环境要求
- Python 3.10+
- CapCut 应用 (macOS/Windows)
- MCP 客户端支持
### 依赖安装
```bash
# 创建虚拟环境
python3.10 -m venv venv-mcp
source venv-mcp/bin/activate # macOS/Linux
# 或 venv-mcp\Scripts\activate # Windows
# 安装依赖
pip install -r requirements-mcp.txt
```
### MCP 配置
创建或更新 `mcp_config.json` 文件:
```json
{
"mcpServers": {
"capcut-api": {
"command": "python3.10",
"args": ["mcp_server.py"],
"cwd": "/path/to/CapCutAPI-dev",
"env": {
"PYTHONPATH": "/path/to/CapCutAPI-dev"
}
}
}
}
```
## 使用指南
### 基础工作流程
#### 1. 创建草稿
```python
# 创建 1080x1920 竖屏项目
result = mcp_client.call_tool("create_draft", {
"width": 1080,
"height": 1920
})
draft_id = result["draft_id"]
```
#### 2. 添加内容
```python
# 添加标题文字
mcp_client.call_tool("add_text", {
"text": "我的视频标题",
"start": 0,
"end": 5,
"draft_id": draft_id,
"font_size": 48,
"font_color": "#FFFFFF"
})
# 添加背景视频
mcp_client.call_tool("add_video", {
"video_url": "https://example.com/video.mp4",
"draft_id": draft_id,
"start": 0,
"end": 10,
"volume": 0.8
})
```
#### 3. 保存项目
```python
# 保存草稿
result = mcp_client.call_tool("save_draft", {
"draft_id": draft_id
})
```
### 高级功能示例
#### 文字样式设置
```python
# 带阴影和背景的文字
mcp_client.call_tool("add_text", {
"text": "高级文字效果",
"draft_id": draft_id,
"font_size": 56,
"font_color": "#FFD700",
"shadow_enabled": True,
"shadow_color": "#000000",
"shadow_alpha": 0.8,
"background_color": "#1E1E1E",
"background_alpha": 0.7,
"background_round_radius": 15
})
```
#### 关键帧动画
```python
# 缩放和透明度动画
mcp_client.call_tool("add_video_keyframe", {
"draft_id": draft_id,
"track_name": "video_main",
"property_types": ["scale_x", "scale_y", "alpha"],
"times": [0, 2, 4],
"values": ["1.0", "1.5", "0.5"]
})
```
#### 多样式文本
```python
# 不同颜色的文字段落
mcp_client.call_tool("add_text", {
"text": "彩色文字效果",
"draft_id": draft_id,
"text_styles": [
{"start": 0, "end": 2, "font_color": "#FF0000"},
{"start": 2, "end": 4, "font_color": "#00FF00"}
]
})
```
## 测试验证
### 使用测试客户端
```bash
# 运行测试客户端
python test_mcp_client.py
```
### 功能验证清单
- [ ] 服务器启动成功
- [ ] 工具列表获取正常
- [ ] 草稿创建功能
- [ ] 文本添加功能
- [ ] 视频/音频/图片添加
- [ ] 特效和动画功能
- [ ] 草稿保存功能
## 故障排除
### 常见问题
#### 1. "CapCut modules not available"
**解决方案**:
- 确认 CapCut 应用已安装
- 检查 Python 路径配置
- 验证依赖包安装
#### 2. 服务器启动失败
**解决方案**:
- 检查虚拟环境激活
- 验证配置文件路径
- 查看错误日志
#### 3. 工具调用错误
**解决方案**:
- 检查参数格式
- 验证媒体文件URL
- 确认时间范围设置
### 调试模式
```bash
# 启用详细日志
export DEBUG=1
python mcp_server.py
```
## 最佳实践
### 性能优化
1. **媒体文件**: 使用压缩格式,避免过大文件
2. **时间管理**: 合理规划元素时间轴,避免重叠
3. **内存使用**: 及时保存草稿,清理临时文件
### 错误处理
1. **参数验证**: 调用前检查必需参数
2. **异常捕获**: 处理网络和文件错误
3. **重试机制**: 对临时失败进行重试
## API 参考
### 通用参数
- `draft_id`: 草稿唯一标识符
- `start/end`: 时间范围(秒)
- `width/height`: 项目尺寸
- `transform_x/y`: 位置坐标
- `scale_x/y`: 缩放比例
### 返回格式
```json
{
"success": true,
"result": {
"draft_id": "dfd_cat_xxx",
"draft_url": "https://..."
},
"features_used": {
"shadow": false,
"background": false,
"multi_style": false
}
}
```
## 更新日志
### v1.0.0
- 初始版本发布
- 支持 11 个核心工具
- 完整的 MCP 协议实现
## 技术支持
如有问题或建议,请通过以下方式联系:
- GitHub Issues
- 技术文档
- 社区论坛
---
*本文档持续更新,请关注最新版本。*

View File

@@ -1,19 +1,37 @@
# CapCutAPI
# 通过CapCutAPI连接AI生成的一切 [在线体验](https://www.capcutapi.top)
轻量、灵活、易上手的剪映/CapCutAPI工具构建全自动化视频剪辑/混剪流水线。
直接体验https://www.capcutapi.top
<div align="center">
```
👏👏👏👏 庆祝github 600星送出价值6000点不记名云渲染券17740F41-5ECB-44B1-AAAE-1C458A0EFF43
👏👏👏👏 庆祝github 800星送出价值8000点不记名云渲染券040346B5-8D8F-459E-8EE7-332C0B827117
```
</div>
## 项目概览
**CapCutAPI** 是一款强大的云端剪辑 API,它赋予您对 AI 生成素材(包括图片、音频、视频和文字)的精确控制权。
它提供了精确的编辑能力来拼接原始的 AI 输出,例如给视频变速或将图片镜像反转。这种能力有效地解决了 AI 生成的结果缺乏精确控制,难以复制的问题,让您能够轻松地将创意想法转化为精致的视频。
所有这些功能均旨在对标剪映软件的功能,确保您在云端也能获得熟悉且高效的剪辑体验。
### 核心优势
1. 通过API的方式提供对标剪映/CapCut的剪辑能力。
2. 可以在网页实时预览剪辑结果,无需下载,极大方便工作流开发。
3. 可以下载剪辑结果,并导入到剪映/CapCut中二次编辑。
4. 可以利用API将剪辑结果生成视频实现全云端操作。
## 效果展示
<div align="center">
## 效果演示
**MCP,创建属于自己的剪辑Agent**
[![AI Cut](https://img.youtube.com/vi/fBqy6WFC78E/hqdefault.jpg)](https://www.youtube.com/watch?v=fBqy6WFC78E)
**通过工具将AI生成的图片视频组合起来**
**通过CapCutAPI将AI生成的图片视频组合起来**
[![Airbnb](https://img.youtube.com/vi/1zmQWt13Dx0/hqdefault.jpg)](https://www.youtube.com/watch?v=1zmQWt13Dx0)
@@ -21,154 +39,259 @@
[![Song](https://img.youtube.com/vi/rGNLE_slAJ8/hqdefault.jpg)](https://www.youtube.com/watch?v=rGNLE_slAJ8)
## 项目功能
</div>
本项目是一个基于Python的剪映/CapCut处理工具提供以下核心功能
## 核心功能
### 核心功能
- **草稿文件管理**:创建、读取、修改和保存剪映/CapCut草稿文件
- **素材处理**:支持视频、音频、图片、文本、贴纸等多种素材的添加和编辑
- **特效应用**:支持添加转场、滤镜、蒙版、动画等多种特效
- **API服务**提供HTTP API接口支持远程调用和自动化处理
- **AI集成**集成多种AI服务支持智能生成字幕、文本和图像
| 功能模块 | API | MCP 协议 | 描述 |
|---------|----------|----------|------|
| **草稿管理** | ✅ | ✅ | 创建、保存剪映/CapCut草稿文件 |
| **视频处理** | ✅ | ✅ | 多格式视频导入、剪辑、转场、特效 |
| **音频编辑** | ✅ | ✅ | 音频轨道、音量控制、音效处理 |
| **图像处理** | ✅ | ✅ | 图片导入、动画、蒙版、滤镜 |
| **文本编辑** | ✅ | ✅ | 多样式文本、阴影、背景、动画 |
| **字幕系统** | ✅ | ✅ | SRT 字幕导入、样式设置、时间同步 |
| **特效引擎** | ✅ | ✅ | 视觉特效、滤镜、转场动画 |
| **贴纸系统** | ✅ | ✅ | 贴纸素材、位置控制、动画效果 |
| **关键帧** | ✅ | ✅ | 属性动画、时间轴控制、缓动函数 |
| **媒体分析** | ✅ | ✅ | 视频时长获取、格式检测 |
### 主要API接口
## 快速开始
- `/create_draft`创建草稿
- `/add_video`:添加视频素材到草稿
- `/add_audio`:添加音频素材到草稿
- `/add_image`:添加图片素材到草稿
- `/add_text`:添加文本素材到草稿
- `/add_subtitle`:添加字幕到草稿
- `/add_effect`:添加特效到素材
- `/add_sticker`:添加贴纸到草稿
- `/save_draft`:保存草稿文件
### 1. 系统要求
## 配置说明
- Python 3.10+
- 剪映 或 CapCut 国际版
- FFmpeg
### 配置文件
项目支持通过配置文件进行自定义设置。要使用配置文件:
1. 复制`config.json.example``config.json`
2. 根据需要修改配置项
### 2. 安装部署
```bash
# 1. 克隆项目
git clone https://github.com/sun-guannan/CapCutAPI.git
cd CapCutAPI
# 2. 创建虚拟环境 (推荐)
python -m venv venv-capcut
source venv-capcut/bin/activate # Linux/macOS
# 或 venv-capcut\Scripts\activate # Windows
# 3. 安装依赖
pip install -r requirements.txt # HTTP API 基础依赖
pip install -r requirements-mcp.txt # MCP 协议支持 (可选)
# 4. 配置文件
cp config.json.example config.json
# 根据需要编辑 config.json
```
### 环境配置
#### ffmpeg
本项目依赖于ffmpeg您需要确保系统中已安装ffmpeg并且将其添加到系统的环境变量中。
#### Python 环境
本项目需要 Python 3.8.20 版本,请确保您的系统已安装正确版本的 Python。
#### 安装依赖
安装项目所需的依赖包:
### 3. 启动服务
```bash
pip install -r requirements.txt
python capcut_server.py # 启动HTTP API服务器, 默认端口: 9001
python mcp_server.py # 启动 MCP 协议服务,支持 stdio 通信
```
### 运行服务器
## MCP 集成指南
完成配置和环境设置后,执行以下命令启动服务器:
[MCP 文档](./MCP_文档_中文.md) • [MCP English Guide](./MCP_Documentation_English.md)
### 1. 客户端配置
创建或更新 `mcp_config.json` 配置文件:
```json
{
"mcpServers": {
"capcut-api": {
"command": "python3",
"args": ["mcp_server.py"],
"cwd": "/path/to/CapCutAPI",
"env": {
"PYTHONPATH": "/path/to/CapCutAPI",
"DEBUG": "0"
}
}
}
}
```
### 2. 连接测试
```bash
python capcut_server.py
```
# 测试 MCP 连接
python test_mcp_client.py
服务器启动后,您可以通过 API 接口访问相关功能。
# 预期输出
✅ MCP 服务器启动成功
✅ 获取到 11 个可用工具
✅ 草稿创建测试通过
```
## 使用示例
### 添加视频
### 1. API 示例
添加视频素材
```python
import requests
# 添加背景视频
response = requests.post("http://localhost:9001/add_video", json={
"video_url": "http://example.com/video.mp4",
"video_url": "https://example.com/background.mp4",
"start": 0,
"end": 10,
    "end": 10,
"volume": 0.8,
"transition": "fade_in"
})
print(f"视频添加结果: {response.json()}")
```
创建样式文本
```python
import requests
# 添加标题文字
response = requests.post("http://localhost:9001/add_text", json={
"text": "欢迎使用 CapCutAPI",
"start": 0,
"end": 5,
"font": "思源黑体",
"font_color": "#FFD700",
"font_size": 48,
"shadow_enabled": True,
"background_color": "#000000"
})
print(f"文本添加结果: {response.json()}")
```
可以在`example.py`文件中获取更多示例。
### 2. MCP 协议示例
完整工作流程
```python
# 1. 创建新项目
draft = mcp_client.call_tool("create_draft", {
"width": 1080,
"height": 1920
})
draft_id = draft["result"]["draft_id"]
print(response.json())
```
### 添加文本
```python
import requests
response = requests.post("http://localhost:9001/add_text", json={
"text": "你好,世界!",
# 2. 添加背景视频
mcp_client.call_tool("add_video", {
"video_url": "https://example.com/bg.mp4",
"draft_id": draft_id,
"start": 0,
"end": 3,
"font": "思源黑体",
"font_color": "#FF0000",
"font_size": 30.0
"end": 10,
"volume": 0.6
})
print(response.json())
```
# 3. 添加标题文字
mcp_client.call_tool("add_text", {
"text": "AI 驱动的视频制作",
"draft_id": draft_id,
"start": 1,
"end": 6,
"font_size": 56,
"shadow_enabled": True,
"background_color": "#1E1E1E"
})
### 保存草稿
# 4. 添加关键帧动画
mcp_client.call_tool("add_video_keyframe", {
"draft_id": draft_id,
"track_name": "main",
"property_types": ["scale_x", "scale_y", "alpha"],
"times": [0, 2, 4],
"values": ["1.0", "1.2", "0.8"]
})
# 5. 保存项目
result = mcp_client.call_tool("save_draft", {
"draft_id": draft_id
})
print(f"项目已保存: {result['result']['draft_url']}")
```
高级文本效果
```python
import requests
response = requests.post("http://localhost:9001/save_draft", json={
"draft_id": "123456",
"draft_folder":"your capcut draft folder"
# 多样式彩色文本
mcp_client.call_tool("add_text", {
"text": "彩色文字效果展示",
"draft_id": draft_id,
"start": 2,
"end": 8,
"font_size": 42,
"shadow_enabled": True,
"shadow_color": "#FFFFFF",
"background_alpha": 0.8,
"background_round_radius": 20,
"text_styles": [
{"start": 0, "end": 2, "font_color": "#FF6B6B"},
{"start": 2, "end": 4, "font_color": "#4ECDC4"},
{"start": 4, "end": 6, "font_color": "#45B7D1"}
]
})
print(response.json())
```
也可以用 REST Client 的 ```rest_client_test.http``` 进行http测试只需要安装对应的IDE插件
### 复制草稿到剪映/capcut草稿路径
调用`save_draft`会在服务器当前目录下生成一个`dfd_`开头的文件夹,将他复制到剪映/CapCut草稿目录即可看到生成的草稿
### 3. 下载草稿
调用 `save_draft` 会在`capcut_server.py`当前目录下生成一个 `dfd_` 开头的文件夹,将其复制到剪映/CapCut 草稿目录,即可在应用中看到生成的草稿。
### 更多示例
请参考项目的`example.py`文件,其中包含了更多的使用示例,如添加音频、添加特效等
## 模版
我们汇总了一些模版,放在`pattern`文件夹下
## 社区与支持
## 项目特点
我们欢迎各种形式的贡献!我们的迭代规则:
- 禁止直接向main提交pr
- 可以向dev分支提交pr
- 每周一从dev合并到main分支并发版
- **跨平台支持**同时支持剪映和CapCut国际版
- **自动化处理**:支持批量处理和自动化工作流
- **丰富的API**提供全面的API接口方便集成到其他系统
- **灵活的配置**:通过配置文件实现灵活的功能定制
- **AI增强**集成多种AI服务提升视频制作效率
## 进群交流
![image](https://github.com/user-attachments/assets/d09b0325-d3fe-4e1e-a458-d3342e63c038)
![交流群](https://github.com/user-attachments/assets/00a15f32-0cb2-408a-821c-5e673052238f)
- 反馈问题
- 功能建议
- 最新消息
## 合作
- 你想要利用这个API批量制作**出海**视频吗?
我提供免费的咨询服务帮助你利用这个API制作。
相应的,我要将工作流代码放到这个项目里公开出来。
### 🤝 合作机会
- 有兴趣加入我们?
我们的目标是提供稳定可靠的视频剪辑工具方便融合AI生成的图片/视频/语音。
如果你有兴趣可以先从将工程里的中文翻译成英文开始提交pr我会看到。
更深入的还有MCP剪辑Agent, web剪辑端云渲染这三个模块代码还没有开源出来。
- **出海视频制作**: 想要利用这个API批量制作出海视频吗我提供免费的咨询服务帮助你利用这个API制作。相应的我要将制作的工作流模板放到这个项目中的template目录中**开源**出来。
- 联系方式
微信sguann
抖音:剪映草稿助手
- **加入我们**: 我们的目标是提供稳定可靠的视频剪辑工具方便融合AI生成的图片/视频/语音。如果你有兴趣可以先从将工程里的中文翻译成英文开始提交pr我会看到。更深入的还有MCP剪辑Agent, web剪辑端云渲染这三个模块代码还没有开源出来。
- **联系方式**:
- 微信sguann
- 抖音:剪映草稿助手
## 📈 Star History
<div align="center">
[![Star History Chart](https://api.star-history.com/svg?repos=sun-guannan/CapCutAPI&type=Date)](https://www.star-history.com/#sun-guannan/CapCutAPI&Date)
![GitHub repo size](https://img.shields.io/github/repo-size/sun-guannan/CapCutAPI?style=flat-square)
![GitHub code size](https://img.shields.io/github/languages/code-size/sun-guannan/CapCutAPI?style=flat-square)
![GitHub issues](https://img.shields.io/github/issues/sun-guannan/CapCutAPI?style=flat-square)
![GitHub pull requests](https://img.shields.io/github/issues-pr/sun-guannan/CapCutAPI?style=flat-square)
![GitHub last commit](https://img.shields.io/github/last-commit/sun-guannan/CapCutAPI?style=flat-square)
</div>
*Made with ❤️ by the CapCutAPI Community*
</div>

313
README.md
View File

@@ -1,18 +1,36 @@
# CapCutAPI
Open source CapCut API tool.
# Connect everything AI generates via CapCutAPI [Try it online](https://www.capcutapi.top)
Try It: https://www.capcutapi.top
## Project Overview
**CapCutAPI** is a powerful editing API that empowers you to take full control of your AI-generated assets, including images, audio, video, and text. It provides the precision needed to refine and customize raw AI output, such as adjusting video speed or mirroring an image. This capability effectively solves the lack of control often found in AI video generation, allowing you to easily transform your creative ideas into polished videos.
[中文说明](https://github.com/sun-guannan/CapCutAPI/blob/main/README-zh.md)
All these features are designed to mirror the functionalities of the CapCut software, ensuring a familiar and efficient editing experience in the cloud.
## Gallery
Enjoy It! 😀😀😀
**MCP agent**
[中文说明](README-zh.md)
### Advantages
1. **API-Powered Editing:** Access all CapCut/Jianying editing features, including multi-track editing and keyframe animation, through a powerful API.
2. **Real-Time Cloud Preview:** Instantly preview your edits on a webpage without downloads, dramatically improving your workflow.
3. **Flexible Local Editing:** Export projects as drafts to import into CapCut or Jianying for further refinement.
4. **Automated Cloud Generation:** Use the API to render and generate final videos directly in the cloud.
## Demos
<div align="center">
**MCP, create your own editing Agent**
[![AI Cut](https://img.youtube.com/vi/fBqy6WFC78E/hqdefault.jpg)](https://www.youtube.com/watch?v=fBqy6WFC78E)
**Connect AI generated via CapCutAPI**
**Combine AI-generated images and videos using CapCutAPI**
[More](pattern)
[![Airbnb](https://img.youtube.com/vi/1zmQWt13Dx0/hqdefault.jpg)](https://www.youtube.com/watch?v=1zmQWt13Dx0)
@@ -20,132 +38,251 @@ Try It: https://www.capcutapi.top
[![Song](https://img.youtube.com/vi/rGNLE_slAJ8/hqdefault.jpg)](https://www.youtube.com/watch?v=rGNLE_slAJ8)
## Project Features
This project is a Python-based CapCut processing tool that offers the following core functionalities:
</div>
### Core Features
## Key Features
- **Draft File Management**: Create, read, modify, and save CapCut draft files
- **Material Processing**: Support adding and editing various materials such as videos, audios, images, texts, stickers, etc.
- **Effect Application**: Support adding multiple effects like transitions, filters, masks, animations, etc.
- **API Service**: Provide HTTP API interfaces to support remote calls and automated processing
- **AI Integration**: Integrate multiple AI services to support intelligent generation of subtitles, texts, and images
| Feature Module | API | MCP Protocol | Description |
|---------|----------|----------|------|
| **Draft Management** | ✅ | ✅ | Create and save Jianying/CapCut draft files |
| **Video Processing** | ✅ | ✅ | Import, clip, transition, and apply effects to multiple video formats |
| **Audio Editing** | ✅ | ✅ | Audio tracks, volume control, sound effects processing |
| **Image Processing** | ✅ | ✅ | Image import, animation, masks, filters |
| **Text Editing** | ✅ | ✅ | Multi-style text, shadows, backgrounds, animations |
| **Subtitle System** | ✅ | ✅ | SRT subtitle import, style settings, time synchronization |
| **Effects Engine** | ✅ | ✅ | Visual effects, filters, transition animations |
| **Sticker System** | ✅ | ✅ | Sticker assets, position control, animation effects |
| **Keyframes** | ✅ | ✅ | Property animation, timeline control, easing functions |
| **Media Analysis** | ✅ | ✅ | Get video duration, detect format |
### Main API Interfaces
## Quick Start
- `/create_draft`: Create a draft
- `/add_video`: Add video material to the draft
- `/add_audio`: Add audio material to the draft
- `/add_image`: Add image material to the draft
- `/add_text`: Add text material to the draft
- `/add_subtitle`: Add subtitles to the draft
- `/add_effect`: Add effects to materials
- `/add_sticker`: Add stickers to the draft
- `/save_draft`: Save the draft file
### 1\. System Requirements
## Configuration Instructions
- Python 3.10+
- Jianying or CapCut International version
- FFmpeg
### Configuration File
The project supports custom settings through a configuration file. To use the configuration file:
1. Copy `config.json.example` to `config.json`
2. Modify the configuration items as needed
### 2\. Installation and Deployment
```bash
# 1. Clone the project
git clone https://github.com/sun-guannan/CapCutAPI.git
cd CapCutAPI
# 2. Create a virtual environment (recommended)
python -m venv venv-capcut
source venv-capcut/bin/activate # Linux/macOS
# or venv-capcut\Scripts\activate # Windows
# 3. Install dependencies
pip install -r requirements.txt # HTTP API basic dependencies
pip install -r requirements-mcp.txt # MCP protocol support (optional)
# 4. Configuration file
cp config.json.example config.json
# Edit config.json as needed
```
### Environment Configuration
#### ffmpeg
This project depends on ffmpeg. You need to ensure that ffmpeg is installed on your system and added to the system's environment variables.
#### Python Environment
This project requires Python version 3.8.20. Please ensure that the correct version of Python is installed on your system.
#### Install Dependencies
Install the required dependency packages for the project:
### 3\. Start the service
```bash
pip install -r requirements.txt
python capcut_server.py # Start the HTTP API server, default port: 9001
python mcp_server.py # Start the MCP protocol service, supports stdio communication
```
### Run the Server
## MCP Integration Guide
After completing the configuration and environment setup, execute the following command to start the server:
[MCP 中文文档](./MCP_文档_中文.md) • [MCP English Guide](./MCP_Documentation_English.md)
### 1\. Client Configuration
Create or update the `mcp_config.json` configuration file:
```json
{
"mcpServers": {
"capcut-api": {
"command": "python3",
"args": ["mcp_server.py"],
"cwd": "/path/to/CapCutAPI",
"env": {
"PYTHONPATH": "/path/to/CapCutAPI",
"DEBUG": "0"
}
}
}
}
```
### 2\. Connection Test
```bash
python capcut_server.py
```
# Test MCP connection
python test_mcp_client.py
Once the server is started, you can access the related functions through the API interfaces.
# Expected output
✅ MCP server started successfully
✅ Got 11 available tools
✅ Draft creation test passed
```
## Usage Examples
### Adding a Video
### 1\. API Example
Add video material
```python
import requests
# Add background video
response = requests.post("http://localhost:9001/add_video", json={
"video_url": "http://example.com/video.mp4",
"video_url": "https://example.com/background.mp4",
"start": 0,
"end": 10,
    "end": 10,
"volume": 0.8,
"transition": "fade_in"
})
print(f"Video addition result: {response.json()}")
```
Create stylized text
```python
import requests
# Add title text
response = requests.post("http://localhost:9001/add_text", json={
"text": "Welcome to CapCutAPI",
"start": 0,
"end": 5,
    "font": "Source Han Sans",
"font_color": "#FFD700",
"font_size": 48,
"shadow_enabled": True,
"background_color": "#000000"
})
print(f"Text addition result: {response.json()}")
```
More examples can be found in the `example.py` file.
### 2\. MCP Protocol Example
Complete workflow
```python
# 1. Create a new project
draft = mcp_client.call_tool("create_draft", {
"width": 1080,
"height": 1920
})
draft_id = draft["result"]["draft_id"]
print(response.json())
```
### Adding Text
```python
import requests
response = requests.post("http://localhost:9001/add_text", json={
    "text": "Hello, World!",
    "start": 0,
    "end": 3,
    "font": "ZY_Courage",
    "font_color": "#FF0000",
    "font_size": 30.0
})
print(response.json())

# 2. Add background video
mcp_client.call_tool("add_video", {
    "video_url": "https://example.com/bg.mp4",
    "draft_id": draft_id,
    "start": 0,
    "end": 10,
    "volume": 0.6
})
# 3. Add title text
mcp_client.call_tool("add_text", {
"text": "AI-Driven Video Production",
"draft_id": draft_id,
"start": 1,
"end": 6,
"font_size": 56,
"shadow_enabled": True,
"background_color": "#1E1E1E"
})
# 4. Add keyframe animation
mcp_client.call_tool("add_video_keyframe", {
"draft_id": draft_id,
"track_name": "main",
"property_types": ["scale_x", "scale_y", "alpha"],
"times": [0, 2, 4],
"values": ["1.0", "1.2", "0.8"]
})
# 5. Save the project
result = mcp_client.call_tool("save_draft", {
"draft_id": draft_id
})
print(f"Project saved: {result['result']['draft_url']}")
```
### Saving a Draft
Advanced text effects
```python
import requests
response = requests.post("http://localhost:9001/save_draft", json={
"draft_id": "123456",
"draft_folder": "your capcut draft folder"
})
print(response.json())

# Multi-style colored text
mcp_client.call_tool("add_text", {
"text": "Colored text effect demonstration",
"draft_id": draft_id,
"start": 2,
"end": 8,
"font_size": 42,
"shadow_enabled": True,
"shadow_color": "#FFFFFF",
"background_alpha": 0.8,
"background_round_radius": 20,
"text_styles": [
{"start": 0, "end": 2, "font_color": "#FF6B6B"},
{"start": 2, "end": 4, "font_color": "#4ECDC4"},
{"start": 4, "end": 6, "font_color": "#45B7D1"}
]
})
print(response.json())
```
You can also use the `rest_client_test.http` file with the REST Client extension for HTTP testing — just install the corresponding IDE plugin.
### Copying the Draft to CapCut Draft Path
### 3\. Downloading Drafts
Calling `save_draft` will generate a folder starting with `dfd_` in the current working directory of `capcut_server.py`. Copy this folder to the CapCut/JianYing drafts directory to see the generated draft in the application.
### More Examples
## Pattern
Please refer to the `example.py` file in the project, which contains more usage examples such as adding audio and effects.
You can find many ready-made patterns in the `pattern` directory.
## Project Features
## Community & Support
- **Cross-platform Support**: Supports both CapCut China version and CapCut International version
- **Automated Processing**: Supports batch processing and automated workflows
- **Rich APIs**: Provides comprehensive API interfaces for easy integration into other systems
- **Flexible Configuration**: Achieve flexible function customization through configuration files
- **AI Enhancement**: Integrate multiple AI services to improve video production efficiency
We welcome contributions of all forms! Our iteration rules are:
- No direct PRs to main
- PRs can be submitted to the dev branch
- Merges from dev to main and releases will happen every Monday
## Contact Us
### 🤝 Collaboration
- **Video Production**: Want to use this API for batch production of videos with AIGC?
- **Join us**: Our goal is to provide a stable and reliable video editing tool that integrates well with AI-generated images, videos, and audio. If you are interested, submit a PR and I'll see it. For more in-depth involvement, the code for the MCP Editing Agent, web-based editing client, and cloud rendering modules has not been open-sourced yet.
**Contact**: abelchrisnic@gmail.com
## 📈 Star History
<div align="center">
[![Star History Chart](https://api.star-history.com/svg?repos=sun-guannan/CapCutAPI&type=Date)](https://www.star-history.com/#sun-guannan/CapCutAPI&Date)
![GitHub repo size](https://img.shields.io/github/repo-size/sun-guannan/CapCutAPI?style=flat-square)
![GitHub code size](https://img.shields.io/github/languages/code-size/sun-guannan/CapCutAPI?style=flat-square)
![GitHub issues](https://img.shields.io/github/issues/sun-guannan/CapCutAPI?style=flat-square)
![GitHub pull requests](https://img.shields.io/github/issues-pr/sun-guannan/CapCutAPI?style=flat-square)
![GitHub last commit](https://img.shields.io/github/last-commit/sun-guannan/CapCutAPI?style=flat-square)
[![Verified on MSeeP](https://mseep.ai/badge.svg)](https://mseep.ai/app/69c38d28-a97c-4397-849d-c3e3d241b800)
</div>
*Made with ❤️ by the CapCutAPI Community*

View File

@@ -1,12 +1,13 @@
from pyJianYingDraft import trange, Video_scene_effect_type, Video_character_effect_type, CapCut_Video_scene_effect_type, CapCut_Video_character_effect_type, exceptions
import pyJianYingDraft as draft
from typing import Optional, Dict, List, Union
from typing import Optional, Dict, List, Union, Literal
from create_draft import get_or_create_draft
from util import generate_draft_url
from settings import IS_CAPCUT_ENV
def add_effect_impl(
effect_type: str, # Changed to string type
effect_category: Literal["scene", "character"],
start: float = 0,
end: float = 3.0,
draft_id: Optional[str] = None,
@@ -18,6 +19,7 @@ def add_effect_impl(
"""
Add an effect to the specified draft
:param effect_type: Effect type name, will be matched from Video_scene_effect_type or Video_character_effect_type
:param effect_category: Effect category, "scene" or "character", default "scene"
:param start: Start time (seconds), default 0
:param end: End time (seconds), default 3 seconds
:param draft_id: Draft ID, if None or corresponding zip file not found, a new draft will be created
@@ -38,20 +40,35 @@ def add_effect_impl(
duration = end - start
t_range = trange(f"{start}s", f"{duration}s")
# Dynamically get effect type object
# Select the corresponding effect type based on effect category and environment
effect_enum = None
if IS_CAPCUT_ENV:
# If in CapCut environment, use CapCut effects
effect_enum = CapCut_Video_scene_effect_type[effect_type]
if effect_enum is None:
effect_enum = CapCut_Video_character_effect_type[effect_type]
if effect_category == "scene":
try:
effect_enum = CapCut_Video_scene_effect_type[effect_type]
except:
effect_enum = None
elif effect_category == "character":
try:
effect_enum = CapCut_Video_character_effect_type[effect_type]
except:
effect_enum = None
else:
# Default to using JianYing effects
effect_enum = Video_scene_effect_type[effect_type]
if effect_enum is None:
effect_enum = Video_character_effect_type[effect_type]
if effect_category == "scene":
try:
effect_enum = Video_scene_effect_type[effect_type]
except:
effect_enum = None
elif effect_category == "character":
try:
effect_enum = Video_character_effect_type[effect_type]
except:
effect_enum = None
if effect_enum is None:
raise ValueError(f"Unknown effect type: {effect_type}")
raise ValueError(f"Unknown {effect_category} effect type: {effect_type}")
# Add effect track (only when track doesn't exist)
if track_name is not None:

View File

@@ -80,7 +80,7 @@ def add_subtitle_impl(
raise Exception(f"Failed to download subtitle file: {str(e)}")
elif os.path.isfile(srt_path): # Check if it's a file
try:
with open(srt_path, 'r', encoding='utf-8') as f:
with open(srt_path, 'r', encoding='utf-8-sig') as f:
srt_content = f.read()
except Exception as e:
raise Exception(f"Failed to read local subtitle file: {str(e)}")

View File

@@ -11,44 +11,44 @@ def add_text_impl(
text: str,
start: float,
end: float,
draft_id: str = None,
draft_id: str | None = None, # Python 3.10+ 新语法
transform_y: float = -0.8,
transform_x: float = 0,
font: str = "文轩体", # Wenxuan Font
font: Optional[str] = None,
font_color: str = "#ffffff",
font_size: float = 8.0,
track_name: str = "text_main",
vertical: bool = False, # Whether to display vertically
font_alpha: float = 1.0, # Transparency, range 0.0-1.0
vertical: bool = False,
font_alpha: float = 1.0,
# Border parameters
border_alpha: float = 1.0,
border_color: str = "#000000",
border_width: float = 0.0, # Default no border display
border_width: float = 0.0,
# Background parameters
background_color: str = "#000000",
background_style: int = 1,
background_alpha: float = 0.0, # Default no background display
background_round_radius: float = 0.0, # 背景圆角半径范围0.0-1.0
background_height: float = 0.14, # 背景高度范围0.0-1.0
background_width: float = 0.14, # 背景宽度范围0.0-1.0
background_horizontal_offset: float = 0.5, # 背景水平偏移范围0.0-1.0
background_vertical_offset: float = 0.5, # 背景垂直偏移范围0.0-1.0
# 阴影参数
shadow_enabled: bool = False, # 是否启用阴影
shadow_alpha: float = 0.9, # 阴影透明度范围0.0-1.0
shadow_angle: float = -45.0, # 阴影角度,范围-180.0-180.0
shadow_color: str = "#000000", # 阴影颜色
shadow_distance: float = 5.0, # 阴影距离
shadow_smoothing: float = 0.15, # 阴影平滑度范围0.0-1.0
background_alpha: float = 0.0,
background_round_radius: float = 0.0,
background_height: float = 0.14,
background_width: float = 0.14,
background_horizontal_offset: float = 0.5,
background_vertical_offset: float = 0.5,
# Shadow parameters
shadow_enabled: bool = False,
shadow_alpha: float = 0.9,
shadow_angle: float = -45.0,
shadow_color: str = "#000000",
shadow_distance: float = 5.0,
shadow_smoothing: float = 0.15,
# Bubble effect
bubble_effect_id: Optional[str] = None,
bubble_resource_id: Optional[str] = None,
bubble_effect_id: str | None = None,
bubble_resource_id: str | None = None,
# Text effect
effect_effect_id: Optional[str] = None,
intro_animation: Optional[str] = None, # Intro animation type
intro_duration: float = 0.5, # Intro animation duration (seconds), default 0.5 seconds
outro_animation: Optional[str] = None, # Outro animation type
outro_duration: float = 0.5, # Outro animation duration (seconds), default 0.5 seconds
effect_effect_id: str | None = None,
intro_animation: str | None = None,
intro_duration: float = 0.5,
outro_animation: str | None = None,
outro_duration: float = 0.5,
width: int = 1080,
height: int = 1920,
fixed_width: float = -1, # Text fixed width ratio, default -1 means not fixed
@@ -102,11 +102,14 @@ def add_text_impl(
:return: Updated draft information
"""
# Validate if font is in Font_type
try:
font_type = getattr(Font_type, font)
except:
available_fonts = [attr for attr in dir(Font_type) if not attr.startswith('_')]
raise ValueError(f"Unsupported font: {font}, please use one of the fonts in Font_type: {available_fonts}")
if font is None:
font_type = None
else:
try:
font_type = getattr(Font_type, font)
except:
available_fonts = [attr for attr in dir(Font_type) if not attr.startswith('_')]
raise ValueError(f"Unsupported font: {font}, please use one of the fonts in Font_type: {available_fonts}")
# Validate alpha value range
if not 0.0 <= font_alpha <= 1.0:

View File

@@ -233,7 +233,7 @@ def add_subtitle():
time_offset = data.get('time_offset', 0.0) # Default 0 seconds
# Font style parameters
font = data.get('font', None)
font = data.get('font', "思源粗宋")
font_size = data.get('font_size', 5.0) # Default size 5.0
bold = data.get('bold', False) # Default not bold
italic = data.get('italic', False) # Default not italic
@@ -642,6 +642,7 @@ def add_effect():
# Get required parameters
effect_type = data.get('effect_type') # Effect type name, will match from Video_scene_effect_type or Video_character_effect_type
start = data.get('start', 0) # Start time (seconds), default 0
effect_category = data.get('effect_category', "scene") # Effect category, "scene" or "character", default "scene"
end = data.get('end', 3.0) # End time (seconds), default 3 seconds
draft_id = data.get('draft_id') # Draft ID, if None or corresponding zip file not found, create new draft
track_name = data.get('track_name', "effect_01") # Track name, can be omitted when there is only one effect track
@@ -665,6 +666,7 @@ def add_effect():
# Call add_effect_impl method
draft_result = add_effect_impl(
effect_type=effect_type,
effect_category=effect_category,
start=start,
end=end,
draft_id=draft_id,

View File

@@ -1,6 +1,6 @@
{
"is_capcut_env": true, // Whether to use CapCut environment (true) or JianYing environment (false)
"draft_domain": "https://www.install-ai-guider.top", // Base domain for draft operations
"draft_domain": "https://www.capcutapi.top", // Base domain for draft operations
"port": 9001, // Port number for the local server
"preview_router": "/draft/downloader", // Router path for preview functionality
"is_upload_draft": false, // Whether to upload drafts to remote storage

View File

@@ -9,6 +9,8 @@ import threading
from pyJianYingDraft.text_segment import TextStyleRange, Text_style, Text_border
from util import hex_to_rgb
import shutil
import os
# Base URL of the service, please modify according to actual situation
BASE_URL = f"http://localhost:{PORT}"
@@ -160,7 +162,7 @@ def add_text_impl(text, start, end, font, font_color, font_size, track_name, dra
return make_request("add_text", data)
def add_image_impl(image_url, width, height, start, end, track_name, draft_id=None,
def add_image_impl(image_url, start, end, width=None, height=None, track_name="image_main", draft_id=None,
transform_x=0, transform_y=0, scale_x=1.0, scale_y=1.0, transition=None, transition_duration=None,
mask_type=None, mask_center_x=0.0, mask_center_y=0.0, mask_size=0.5,
mask_rotation=0.0, mask_feather=0.0, mask_invert=False,
@@ -291,6 +293,7 @@ def add_video_impl(video_url, start=None, end=None, width=None, height=None, tra
data = {
"video_url": video_url,
"height": height,
"draft_id": draft_id,
"track_name": track_name,
"transform_y": transform_y,
"scale_x": scale_x,
@@ -325,7 +328,7 @@ def add_video_impl(video_url, start=None, end=None, width=None, height=None, tra
return make_request("add_video", data)
def add_effect(effect_type, start, end, draft_id=None, track_name="effect_01",
params=None, width=1080, height=1920):
params=None, width=1080, height=1920, effect_category=None):
"""API call to add effect"""
data = {
"effect_type": effect_type,
@@ -337,6 +340,9 @@ def add_effect(effect_type, start, end, draft_id=None, track_name="effect_01",
"height": height
}
if effect_category:
data["effect_category"] = effect_category
if draft_id:
data["draft_id"] = draft_id
@@ -364,6 +370,59 @@ def test_effect_01():
return effect_result
def test_effect_02():
"""Test service for adding effects"""
# draft_folder = "/Users/sunguannan/Movies/JianyingPro/User Data/Projects/com.lveditor.draft"
draft_folder = "/Users/sunguannan/Movies/CapCut/User Data/Projects/com.lveditor.draft"
print("\nTest: Adding effects")
# First add video track
image_result = add_video_impl(
video_url="https://pan.superbed.cn/share/1nbrg1fl/jimeng_daweidai.mp4",
start=0,
end=3.0,
target_start=0,
width=1080,
height=1920
)
print(f"Video added successfully! {image_result['output']['draft_id']}")
image_result = add_video_impl(
video_url="https://pan.superbed.cn/share/1nbrg1fl/jimeng_daweidai.mp4",
draft_id=image_result['output']['draft_id'],
start=0,
end=3.0,
target_start=3,
)
print(f"Video added successfully! {image_result['output']['draft_id']}")
# Then add effect
effect_result = add_effect(
effect_type="Like",
effect_category="character", # Explicitly specify as character effect
start=3,
end=6,
draft_id=image_result['output']['draft_id'],
track_name="effect_01"
)
print(f"Effect adding result: {effect_result}")
print(save_draft_impl(effect_result['output']['draft_id'], draft_folder))
source_folder = os.path.join(os.getcwd(), effect_result['output']['draft_id'])
destination_folder = os.path.join(draft_folder, effect_result['output']['draft_id'])
if os.path.exists(source_folder):
print(f"Moving {effect_result['output']['draft_id']} to {draft_folder}")
shutil.move(source_folder, destination_folder)
print("Folder moved successfully!")
else:
print(f"Source folder {source_folder} does not exist")
# Add log to prompt user to find the draft in CapCut
print(f"\n===== IMPORTANT =====\nPlease open CapCut and find the draft named '{effect_result['output']['draft_id']}'\n======================")
# Return the first test result for subsequent operations (if any)
return effect_result
def test_text():
"""Test adding text with various features"""
draft_folder = CAPCUT_DRAFT_FOLDER
@@ -1181,7 +1240,7 @@ def test_audio04():
query_draft_status_impl_polling(audio_result['output']['draft_id'])
save_draft_impl(audio_result['output']['draft_id'], draft_folder)
def add_subtitle_impl(srt, draft_id=None, time_offset=0.0, font_size=5.0,
def add_subtitle_impl(srt, draft_id=None, time_offset=0.0, font_size=5.0, font = "思源粗宋",
bold=False, italic=False, underline=False, font_color="#ffffff",
transform_x=0.0, transform_y=0.0, scale_x=1.0, scale_y=1.0,
vertical=False, track_name="subtitle", alpha=1,
@@ -1194,6 +1253,7 @@ def add_subtitle_impl(srt, draft_id=None, time_offset=0.0, font_size=5.0,
"srt": srt, # Modified parameter name to match server side
"draft_id": draft_id,
"time_offset": time_offset,
"font": font,
"font_size": font_size,
"bold": bold,
"italic": italic,
@@ -2280,36 +2340,37 @@ def test_transition_02():
print("Unable to get draft ID, skipping save operation.")
if __name__ == "__main__":
test01()
test02()
test_effect_01() # Run effect test
test_audio01()
test_audio02()
test_audio03()
test_audio04()
test_image01()
test_image02()
test_image03()
test_image04()
# test_video()
test_video_02()
test_text()
test_video_track01()
test_video_track02()
test_video_track03()
test_video_track04()
test_keyframe()
test_keyframe_02()
# test01()
# test02()
# test_effect_01() # Run effect test
# test_effect_02()
# test_audio01()
# test_audio02()
# test_audio03()
# test_audio04()
# test_image01()
# test_image02()
# test_image03()
# test_image04()
# # test_video()
# test_video_02()
# test_text()
# test_video_track01()
# test_video_track02()
# test_video_track03()
# test_video_track04()
# test_keyframe()
# test_keyframe_02()
test_subtitle_01()
test_subtitle_02()
test_subtitle()
test_stiker_01()
test_stiker_02()
test_stiker_03()
test_transition_01()
test_transition_02()
# test_generate_image01()
# test_generate_image02()
# test_speech_01()
test_mask_01()
test_mask_02()
# test_subtitle_02()
# test_subtitle()
# test_stiker_01()
# test_stiker_02()
# test_stiker_03()
# test_transition_01()
# test_transition_02()
# # test_generate_image01()
# # test_generate_image02()
# # test_speech_01()
# test_mask_01()
# test_mask_02()

View File

@@ -0,0 +1,66 @@
import os
import shutil
import sys
# 添加父目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# 从example.py中导入必要的函数
from example import add_video_impl, add_effect, save_draft_impl
def example_capcut_effect():
"""Test service for adding effects"""
# draft_folder = "/Users/sunguannan/Movies/JianyingPro/User Data/Projects/com.lveditor.draft"
draft_folder = "/Users/sunguannan/Movies/CapCut/User Data/Projects/com.lveditor.draft"
print("\nTest: Adding effects")
# First add video track
image_result = add_video_impl(
video_url="https://pan.superbed.cn/share/1nbrg1fl/jimeng_daweidai.mp4",
start=0,
end=3.0,
target_start=0,
width=1080,
height=1920
)
print(f"Video added successfully! {image_result['output']['draft_id']}")
image_result = add_video_impl(
video_url="https://pan.superbed.cn/share/1nbrg1fl/jimeng_daweidai.mp4",
draft_id=image_result['output']['draft_id'],
start=0,
end=3.0,
target_start=3,
)
print(f"Video added successfully! {image_result['output']['draft_id']}")
# Then add effect
effect_result = add_effect(
effect_type="Like",
effect_category="character", # Explicitly specify as character effect
start=3,
end=6,
draft_id=image_result['output']['draft_id'],
track_name="effect_01"
)
print(f"Effect adding result: {effect_result}")
print(save_draft_impl(effect_result['output']['draft_id'], draft_folder))
source_folder = os.path.join(os.getcwd(), effect_result['output']['draft_id'])
destination_folder = os.path.join(draft_folder, effect_result['output']['draft_id'])
if os.path.exists(source_folder):
print(f"Moving {effect_result['output']['draft_id']} to {draft_folder}")
shutil.move(source_folder, destination_folder)
print("Folder moved successfully!")
else:
print(f"Source folder {source_folder} does not exist")
# Add log to prompt user to find the draft in CapCut
print(f"\n===== IMPORTANT =====\nPlease open CapCut and find the draft named '{effect_result['output']['draft_id']}'\n=======================")
# Return the first test result for subsequent operations (if any)
return effect_result
if __name__ == "__main__":
example_capcut_effect()

12
mcp_config.json Normal file
View File

@@ -0,0 +1,12 @@
{
"mcpServers": {
"capcut-api": {
"command": "python3.10",
"args": ["mcp_server.py"],
"cwd": "/Users/chuham/Downloads/CapCutAPI-dev",
"env": {
"PYTHONPATH": "/Users/chuham/Downloads/CapCutAPI-dev"
}
}
}
}

479
mcp_server.py Normal file
View File

@@ -0,0 +1,479 @@
#!/usr/bin/env python3
"""
CapCut API MCP Server (Complete Version)
完整版本的MCP服务器集成所有CapCut API接口
"""
import sys
import os
import json
import traceback
import io
import contextlib
from typing import Any, Dict, List, Optional
# 添加项目根目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# 导入CapCut API功能
try:
from create_draft import get_or_create_draft
from add_text_impl import add_text_impl
from add_video_track import add_video_track
from add_audio_track import add_audio_track
from add_image_impl import add_image_impl
from add_subtitle_impl import add_subtitle_impl
from add_effect_impl import add_effect_impl
from add_sticker_impl import add_sticker_impl
from add_video_keyframe_impl import add_video_keyframe_impl
from get_duration_impl import get_video_duration
from save_draft_impl import save_draft_impl
from pyJianYingDraft.text_segment import TextStyleRange
CAPCUT_AVAILABLE = True
except ImportError as e:
print(f"Warning: Could not import CapCut modules: {e}", file=sys.stderr)
CAPCUT_AVAILABLE = False
# 完整的工具定义
TOOLS = [
{
"name": "create_draft",
"description": "创建新的CapCut草稿",
"inputSchema": {
"type": "object",
"properties": {
"width": {"type": "integer", "default": 1080, "description": "视频宽度"},
"height": {"type": "integer", "default": 1920, "description": "视频高度"}
}
}
},
{
"name": "add_video",
"description": "添加视频到草稿,支持转场、蒙版、背景模糊等效果",
"inputSchema": {
"type": "object",
"properties": {
"video_url": {"type": "string", "description": "视频URL"},
"draft_id": {"type": "string", "description": "草稿ID"},
"start": {"type": "number", "default": 0, "description": "开始时间(秒)"},
"end": {"type": "number", "description": "结束时间(秒)"},
"target_start": {"type": "number", "default": 0, "description": "目标开始时间(秒)"},
"width": {"type": "integer", "default": 1080, "description": "视频宽度"},
"height": {"type": "integer", "default": 1920, "description": "视频高度"},
"transform_x": {"type": "number", "default": 0, "description": "X轴位置"},
"transform_y": {"type": "number", "default": 0, "description": "Y轴位置"},
"scale_x": {"type": "number", "default": 1, "description": "X轴缩放"},
"scale_y": {"type": "number", "default": 1, "description": "Y轴缩放"},
"speed": {"type": "number", "default": 1.0, "description": "播放速度"},
"track_name": {"type": "string", "default": "main", "description": "轨道名称"},
"volume": {"type": "number", "default": 1.0, "description": "音量"},
"transition": {"type": "string", "description": "转场类型"},
"transition_duration": {"type": "number", "default": 0.5, "description": "转场时长"},
"mask_type": {"type": "string", "description": "蒙版类型"},
"background_blur": {"type": "integer", "description": "背景模糊级别(1-4)"}
},
"required": ["video_url"]
}
},
{
"name": "add_audio",
"description": "添加音频到草稿,支持音效处理",
"inputSchema": {
"type": "object",
"properties": {
"audio_url": {"type": "string", "description": "音频URL"},
"draft_id": {"type": "string", "description": "草稿ID"},
"start": {"type": "number", "default": 0, "description": "开始时间(秒)"},
"end": {"type": "number", "description": "结束时间(秒)"},
"target_start": {"type": "number", "default": 0, "description": "目标开始时间(秒)"},
"volume": {"type": "number", "default": 1.0, "description": "音量"},
"speed": {"type": "number", "default": 1.0, "description": "播放速度"},
"track_name": {"type": "string", "default": "audio_main", "description": "轨道名称"},
"width": {"type": "integer", "default": 1080, "description": "视频宽度"},
"height": {"type": "integer", "default": 1920, "description": "视频高度"}
},
"required": ["audio_url"]
}
},
{
"name": "add_image",
"description": "添加图片到草稿,支持动画、转场、蒙版等效果",
"inputSchema": {
"type": "object",
"properties": {
"image_url": {"type": "string", "description": "图片URL"},
"draft_id": {"type": "string", "description": "草稿ID"},
"start": {"type": "number", "default": 0, "description": "开始时间(秒)"},
"end": {"type": "number", "default": 3.0, "description": "结束时间(秒)"},
"width": {"type": "integer", "default": 1080, "description": "视频宽度"},
"height": {"type": "integer", "default": 1920, "description": "视频高度"},
"transform_x": {"type": "number", "default": 0, "description": "X轴位置"},
"transform_y": {"type": "number", "default": 0, "description": "Y轴位置"},
"scale_x": {"type": "number", "default": 1, "description": "X轴缩放"},
"scale_y": {"type": "number", "default": 1, "description": "Y轴缩放"},
"track_name": {"type": "string", "default": "main", "description": "轨道名称"},
"intro_animation": {"type": "string", "description": "入场动画"},
"outro_animation": {"type": "string", "description": "出场动画"},
"transition": {"type": "string", "description": "转场类型"},
"mask_type": {"type": "string", "description": "蒙版类型"}
},
"required": ["image_url"]
}
},
{
"name": "add_text",
"description": "添加文本到草稿,支持文本多样式、文字阴影和文字背景",
"inputSchema": {
"type": "object",
"properties": {
"text": {"type": "string", "description": "文本内容"},
"start": {"type": "number", "description": "开始时间(秒)"},
"end": {"type": "number", "description": "结束时间(秒)"},
"draft_id": {"type": "string", "description": "草稿ID"},
"font_color": {"type": "string", "default": "#ffffff", "description": "字体颜色"},
"font_size": {"type": "integer", "default": 24, "description": "字体大小"},
"shadow_enabled": {"type": "boolean", "default": False, "description": "是否启用文字阴影"},
"shadow_color": {"type": "string", "default": "#000000", "description": "阴影颜色"},
"shadow_alpha": {"type": "number", "default": 0.8, "description": "阴影透明度"},
"shadow_angle": {"type": "number", "default": 315.0, "description": "阴影角度"},
"shadow_distance": {"type": "number", "default": 5.0, "description": "阴影距离"},
"shadow_smoothing": {"type": "number", "default": 0.0, "description": "阴影平滑度"},
"background_color": {"type": "string", "description": "背景颜色"},
"background_alpha": {"type": "number", "default": 1.0, "description": "背景透明度"},
"background_style": {"type": "integer", "default": 0, "description": "背景样式"},
"background_round_radius": {"type": "number", "default": 0.0, "description": "背景圆角半径"},
"text_styles": {"type": "array", "description": "文本多样式配置列表"}
},
"required": ["text", "start", "end"]
}
},
{
"name": "add_subtitle",
"description": "添加字幕到草稿支持SRT文件和样式设置",
"inputSchema": {
"type": "object",
"properties": {
"srt_path": {"type": "string", "description": "SRT字幕文件路径或URL"},
"draft_id": {"type": "string", "description": "草稿ID"},
"track_name": {"type": "string", "default": "subtitle", "description": "轨道名称"},
"time_offset": {"type": "number", "default": 0, "description": "时间偏移(秒)"},
"font": {"type": "string", "description": "字体"},
"font_size": {"type": "number", "default": 8.0, "description": "字体大小"},
"font_color": {"type": "string", "default": "#FFFFFF", "description": "字体颜色"},
"bold": {"type": "boolean", "default": False, "description": "是否粗体"},
"italic": {"type": "boolean", "default": False, "description": "是否斜体"},
"underline": {"type": "boolean", "default": False, "description": "是否下划线"},
"border_width": {"type": "number", "default": 0.0, "description": "边框宽度"},
"border_color": {"type": "string", "default": "#000000", "description": "边框颜色"},
"background_color": {"type": "string", "default": "#000000", "description": "背景颜色"},
"background_alpha": {"type": "number", "default": 0.0, "description": "背景透明度"},
"transform_x": {"type": "number", "default": 0.0, "description": "X轴位置"},
"transform_y": {"type": "number", "default": -0.8, "description": "Y轴位置"},
"width": {"type": "integer", "default": 1080, "description": "视频宽度"},
"height": {"type": "integer", "default": 1920, "description": "视频高度"}
},
"required": ["srt_path"]
}
},
{
"name": "add_effect",
"description": "添加特效到草稿",
"inputSchema": {
"type": "object",
"properties": {
"effect_type": {"type": "string", "description": "特效类型名称"},
"draft_id": {"type": "string", "description": "草稿ID"},
"start": {"type": "number", "default": 0, "description": "开始时间(秒)"},
"end": {"type": "number", "default": 3.0, "description": "结束时间(秒)"},
"track_name": {"type": "string", "default": "effect_01", "description": "轨道名称"},
"params": {"type": "array", "description": "特效参数列表"},
"width": {"type": "integer", "default": 1080, "description": "视频宽度"},
"height": {"type": "integer", "default": 1920, "description": "视频高度"}
},
"required": ["effect_type"]
}
},
{
"name": "add_sticker",
"description": "添加贴纸到草稿",
"inputSchema": {
"type": "object",
"properties": {
"resource_id": {"type": "string", "description": "贴纸资源ID"},
"draft_id": {"type": "string", "description": "草稿ID"},
"start": {"type": "number", "description": "开始时间(秒)"},
"end": {"type": "number", "description": "结束时间(秒)"},
"transform_x": {"type": "number", "default": 0, "description": "X轴位置"},
"transform_y": {"type": "number", "default": 0, "description": "Y轴位置"},
"scale_x": {"type": "number", "default": 1.0, "description": "X轴缩放"},
"scale_y": {"type": "number", "default": 1.0, "description": "Y轴缩放"},
"alpha": {"type": "number", "default": 1.0, "description": "透明度"},
"rotation": {"type": "number", "default": 0.0, "description": "旋转角度"},
"track_name": {"type": "string", "default": "sticker_main", "description": "轨道名称"},
"width": {"type": "integer", "default": 1080, "description": "视频宽度"},
"height": {"type": "integer", "default": 1920, "description": "视频高度"}
},
"required": ["resource_id", "start", "end"]
}
},
{
"name": "add_video_keyframe",
"description": "添加视频关键帧,支持位置、缩放、旋转、透明度等属性动画",
"inputSchema": {
"type": "object",
"properties": {
"draft_id": {"type": "string", "description": "草稿ID"},
"track_name": {"type": "string", "default": "main", "description": "轨道名称"},
"property_type": {"type": "string", "description": "关键帧属性类型(position_x, position_y, rotation, scale_x, scale_y, uniform_scale, alpha, saturation, contrast, brightness, volume)"},
"time": {"type": "number", "default": 0.0, "description": "关键帧时间点(秒)"},
"value": {"type": "string", "description": "关键帧值"},
"property_types": {"type": "array", "description": "批量模式:关键帧属性类型列表"},
"times": {"type": "array", "description": "批量模式:关键帧时间点列表"},
"values": {"type": "array", "description": "批量模式:关键帧值列表"}
}
}
},
{
"name": "get_video_duration",
"description": "获取视频时长",
"inputSchema": {
"type": "object",
"properties": {
"video_url": {"type": "string", "description": "视频URL"}
},
"required": ["video_url"]
}
},
{
"name": "save_draft",
"description": "保存草稿",
"inputSchema": {
"type": "object",
"properties": {
"draft_id": {"type": "string", "description": "草稿ID"}
}
}
}
]
@contextlib.contextmanager
def capture_stdout():
    """Capture standard output so CapCut API debug prints cannot corrupt
    the JSON-RPC responses this server writes to stdout.

    Yields:
        io.StringIO: buffer that receives everything written to
        ``sys.stdout`` inside the ``with`` block. The original stdout is
        restored on exit, even if an exception is raised.
    """
    buffer = io.StringIO()
    # redirect_stdout handles saving and restoring sys.stdout for us,
    # replacing the manual old_stdout/finally bookkeeping.
    with contextlib.redirect_stdout(buffer):
        yield buffer
def convert_text_styles(text_styles_data):
    """Build a list of TextStyleRange objects from dict-style specs.

    Returns None for empty/missing input, and also None when conversion
    fails (the error is reported on stderr so stdout stays reserved for
    JSON-RPC traffic).
    """
    if not text_styles_data:
        return None
    try:
        return [
            TextStyleRange(
                start=spec.get("start", 0),
                end=spec.get("end", 0),
                font_size=spec.get("font_size"),
                font_color=spec.get("font_color"),
                bold=spec.get("bold", False),
                italic=spec.get("italic", False),
                underline=spec.get("underline", False),
            )
            for spec in text_styles_data
        ]
    except Exception as e:
        print(f"[ERROR] Error converting text_styles: {e}", file=sys.stderr)
        return None
def execute_tool(tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Dispatch a tools/call request to the matching CapCut implementation.

    Returns ``{"success": True, "result": ..., "features_used": ...}`` on
    success, or ``{"success": False, "error": ...}`` on any failure
    (unknown tool, missing modules, or an exception from the impl).
    """
    try:
        print(f"[DEBUG] Executing tool: {tool_name} with args: {arguments}", file=sys.stderr)
        if not CAPCUT_AVAILABLE:
            return {"success": False, "error": "CapCut modules not available"}
        # Tools whose arguments map 1:1 onto an implementation function.
        passthrough = {
            "add_video": add_video_track,
            "add_audio": add_audio_track,
            "add_image": add_image_impl,
            "add_subtitle": add_subtitle_impl,
            "add_effect": add_effect_impl,
            "add_sticker": add_sticker_impl,
            "add_video_keyframe": add_video_keyframe_impl,
        }
        # Swallow stray prints from the CapCut layer so they cannot
        # corrupt the JSON written to stdout.
        with capture_stdout():
            if tool_name == "create_draft":
                draft_id, script = get_or_create_draft(
                    width=arguments.get("width", 1080),
                    height=arguments.get("height", 1920)
                )
                result = {
                    "draft_id": str(draft_id),
                    "draft_url": f"https://www.install-ai-guider.top/draft/downloader?draft_id={draft_id}"
                }
            elif tool_name == "add_text":
                # text_styles arrives as plain dicts; convert to objects.
                # The key is always (re)assigned, possibly to None, to
                # mirror what add_text_impl expects.
                converted = None
                if arguments.get("text_styles"):
                    converted = convert_text_styles(arguments["text_styles"])
                arguments["text_styles"] = converted
                result = add_text_impl(**arguments)
            elif tool_name == "get_video_duration":
                result = {"duration": get_video_duration(arguments["video_url"])}
            elif tool_name == "save_draft":
                saved = save_draft_impl(**arguments)
                if isinstance(saved, dict) and "draft_url" in saved:
                    result = {"draft_url": saved["draft_url"]}
                else:
                    result = {"draft_url": f"https://www.install-ai-guider.top/draft/downloader?draft_id=unknown"}
            elif tool_name in passthrough:
                result = passthrough[tool_name](**arguments)
            else:
                return {"success": False, "error": f"Unknown tool: {tool_name}"}
        is_text = tool_name == "add_text"
        return {
            "success": True,
            "result": result,
            "features_used": {
                "shadow": arguments.get("shadow_enabled", False) if is_text else False,
                "background": bool(arguments.get("background_color")) if is_text else False,
                "multi_style": bool(arguments.get("text_styles")) if is_text else False,
            },
        }
    except Exception as e:
        print(f"[ERROR] Tool execution error: {e}", file=sys.stderr)
        print(f"[ERROR] Traceback: {traceback.format_exc()}", file=sys.stderr)
        return {"success": False, "error": str(e)}
def handle_request(request_data: str) -> Optional[str]:
    """Handle one JSON-RPC 2.0 request line and return the serialized response.

    Supports the MCP methods ``initialize``, ``notifications/initialized``
    (a notification: returns None, no response is sent), ``tools/list`` and
    ``tools/call``. Unknown methods get the standard -32601 "Method not
    found" error. Malformed JSON gets -32700 (parse error) and unexpected
    failures get -32603 (internal error) — previously both used the invalid
    code 0.
    """
    try:
        request = json.loads(request_data.strip())
        print(f"[DEBUG] Received request: {request.get('method', 'unknown')}", file=sys.stderr)
        method = request.get("method")
        if method == "initialize":
            return json.dumps({
                "jsonrpc": "2.0",
                "id": request.get("id"),
                "result": {
                    "protocolVersion": "2024-11-05",
                    "capabilities": {
                        "experimental": {},
                        "tools": {"listChanged": False}
                    },
                    "serverInfo": {
                        "name": "capcut-api",
                        "version": "1.12.3"
                    }
                }
            })
        elif method == "notifications/initialized":
            # Notification: the client expects no response.
            return None
        elif method == "tools/list":
            return json.dumps({
                "jsonrpc": "2.0",
                "id": request.get("id"),
                "result": {"tools": TOOLS}
            })
        elif method == "tools/call":
            tool_name = request["params"]["name"]
            arguments = request["params"].get("arguments", {})
            result = execute_tool(tool_name, arguments)
            return json.dumps({
                "jsonrpc": "2.0",
                "id": request.get("id"),
                "result": {
                    "content": [
                        {
                            "type": "text",
                            "text": json.dumps(result, ensure_ascii=False, indent=2)
                        }
                    ]
                }
            })
        else:
            return json.dumps({
                "jsonrpc": "2.0",
                "id": request.get("id"),
                "error": {"code": -32601, "message": "Method not found"}
            })
    except json.JSONDecodeError as e:
        # Malformed JSON: the JSON-RPC spec reserves -32700 for parse errors.
        print(f"[ERROR] Parse error: {e}", file=sys.stderr)
        return json.dumps({
            "jsonrpc": "2.0",
            "id": None,
            "error": {"code": -32700, "message": f"Parse error: {e}"}
        })
    except Exception as e:
        print(f"[ERROR] Request handling error: {e}", file=sys.stderr)
        print(f"[ERROR] Traceback: {traceback.format_exc()}", file=sys.stderr)
        # -32603 is the JSON-RPC "Internal error" code.
        return json.dumps({
            "jsonrpc": "2.0",
            "id": None,
            "error": {"code": -32603, "message": str(e)}
        })
def main():
    """Serve MCP JSON-RPC over stdio: one request per stdin line, one
    response per stdout line, diagnostics on stderr. Runs until stdin
    closes, an EOFError is raised, or the user hits Ctrl-C."""
    banner = [
        "🚀 Starting CapCut API MCP Server (Complete Version)...",
        f"📋 Available tools: {len(TOOLS)} tools loaded",
        "✨ Features: 视频、音频、图片、文本、字幕、特效、贴纸、关键帧",
        "🔌 Waiting for client connections...",
    ]
    for message in banner:
        print(message, file=sys.stderr)
    try:
        while True:
            try:
                line = sys.stdin.readline()
                if not line:
                    # readline() returns "" only at EOF.
                    print("[DEBUG] EOF received, shutting down", file=sys.stderr)
                    return
                reply = handle_request(line)
                if reply:
                    print(reply)
                    sys.stdout.flush()
            except EOFError:
                print("[DEBUG] EOF exception, shutting down", file=sys.stderr)
                return
            except Exception as err:
                print(f"[ERROR] Server error: {err}", file=sys.stderr)
                print(f"[ERROR] Traceback: {traceback.format_exc()}", file=sys.stderr)
    except KeyboardInterrupt:
        print("[INFO] Server stopped by user", file=sys.stderr)
    except Exception as err:
        print(f"[ERROR] Fatal server error: {err}", file=sys.stderr)
        print(f"[ERROR] Traceback: {traceback.format_exc()}", file=sys.stderr)
if __name__ == "__main__":
    main()

12
pattern/001-words-coze.md Normal file

File diff suppressed because one or more lines are too long

626
pattern/001-words.py Normal file
View File

@@ -0,0 +1,626 @@
import requests
import json
from flask import Flask, request, jsonify, Response
import sys
import time
import json
sys.path.append('/Users/sunguannan/capcutapi')
from example import add_image_impl
PORT=9001 # port of the locally running CapCut API server
BASE_URL = f"http://localhost:{PORT}"
# Local JianyingPro (CapCut China) drafts directory that save_draft writes into.
draft_folder = "/Users/sunguannan/Movies/JianyingPro/User Data/Projects/com.lveditor.draft"
def make_request(endpoint, data, method='POST'):
    """Send ``data`` to ``BASE_URL/endpoint`` and return the parsed JSON reply.

    POST sends the payload as a JSON body; GET sends it as query params.
    Exits the process on any transport or JSON-decoding failure, since this
    script cannot continue without a server response.
    """
    url = f"{BASE_URL}/{endpoint}"
    headers = {'Content-Type': 'application/json'}
    senders = {
        'POST': lambda: requests.post(url, data=json.dumps(data), headers=headers),
        'GET': lambda: requests.get(url, params=data, headers=headers),
    }
    try:
        if method not in senders:
            raise ValueError(f"Unsupported HTTP method: {method}")
        response = senders[method]()
        response.raise_for_status()  # treat HTTP error statuses as fatal
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Request error: {e}")
        sys.exit(1)
    except json.JSONDecodeError:
        print("Unable to parse server response")
        sys.exit(1)
def save_draft_impl(draft_id, draft_folder):
    """Ask the server to materialize draft ``draft_id`` into ``draft_folder``."""
    payload = {"draft_id": draft_id, "draft_folder": draft_folder}
    return make_request("save_draft", payload)
def query_script_impl(draft_id):
    """Fetch the script/timeline JSON of draft ``draft_id`` from the server."""
    return make_request("query_script", {"draft_id": draft_id})
def add_text_impl(text, start, end, font, font_color, font_size, track_name, draft_folder="123", draft_id=None,
                  vertical=False, transform_x=0, transform_y=0, font_alpha=1.0,
                  border_color=None, border_width=0.0, border_alpha=1.0,
                  background_color=None, background_alpha=1.0, background_style=None,
                  background_round_radius=0.0, background_height=0.14, background_width=0.14,
                  background_horizontal_offset=0.5, background_vertical_offset=0.5,
                  shadow_enabled=False, shadow_alpha=0.9, shadow_angle=-45.0,
                  shadow_color="#000000", shadow_distance=5.0, shadow_smoothing=0.15,
                  bubble_effect_id=None, bubble_resource_id=None,
                  effect_effect_id=None,
                  intro_animation=None, intro_duration=0.5,
                  outro_animation=None, outro_duration=0.5,
                  width=1080, height=1920,
                  fixed_width=-1, fixed_height=-1,
                  text_styles=None):
    """API wrapper for the add_text service.

    Builds the request payload from the required text/timing/font fields and
    only includes optional feature groups (border, background, shadow, bubble
    effect, text effect, intro/outro animation, fixed sizing, per-range
    styles) when they are actually enabled, then POSTs it to the local server.

    Notes:
    - ``font_alpha`` is sent to the server under the key ``alpha``.
    - Fixed the original's duplicated ``if draft_id:`` assignment block.
    """
    data = {
        "draft_folder": draft_folder,
        "text": text,
        "start": start,
        "end": end,
        "font": font,
        "font_color": font_color,
        "font_size": font_size,
        "alpha": font_alpha,
        "track_name": track_name,
        "vertical": vertical,
        "transform_x": transform_x,
        "transform_y": transform_y,
    }
    # Border: only sent when a border color is requested.
    if border_color:
        data["border_color"] = border_color
        data["border_width"] = border_width
        data["border_alpha"] = border_alpha
    # Background color/style: only sent when enabled.
    if background_color:
        data["background_color"] = background_color
        data["background_alpha"] = background_alpha
    if background_style:
        data["background_style"] = background_style
    # Background geometry is always sent (mirrors the server defaults).
    data["background_round_radius"] = background_round_radius
    data["background_height"] = background_height
    data["background_width"] = background_width
    data["background_horizontal_offset"] = background_horizontal_offset
    data["background_vertical_offset"] = background_vertical_offset
    # Shadow parameters.
    if shadow_enabled:
        data["shadow_enabled"] = shadow_enabled
        data["shadow_alpha"] = shadow_alpha
        data["shadow_angle"] = shadow_angle
        data["shadow_color"] = shadow_color
        data["shadow_distance"] = shadow_distance
        data["shadow_smoothing"] = shadow_smoothing
    # Bubble / text effects.
    if bubble_effect_id:
        data["bubble_effect_id"] = bubble_effect_id
    if bubble_resource_id:
        data["bubble_resource_id"] = bubble_resource_id
    if effect_effect_id:
        data["effect_effect_id"] = effect_effect_id
    # Intro / outro animations.
    if intro_animation:
        data["intro_animation"] = intro_animation
        data["intro_duration"] = intro_duration
    if outro_animation:
        data["outro_animation"] = outro_animation
        data["outro_duration"] = outro_duration
    # Canvas size.
    data["width"] = width
    data["height"] = height
    # Fixed text-box size (only when explicitly positive).
    if fixed_width > 0:
        data["fixed_width"] = fixed_width
    if fixed_height > 0:
        data["fixed_height"] = fixed_height
    if draft_id:
        data["draft_id"] = draft_id
    # Per-range text styles.
    if text_styles:
        data["text_styles"] = text_styles
    return make_request("add_text", data)
def group_sentences(corrected_srt, threshold=1.0):
    """Group word entries into sentences by silence length.

    A new sentence starts whenever the pause between the previous word's
    ``end`` and the next word's ``start`` exceeds ``threshold`` seconds.
    Returns a list of sentences, each a list of the original word dicts.
    """
    if not corrected_srt:
        return []
    sentences = []
    bucket = [corrected_srt[0]]
    for prev, curr in zip(corrected_srt, corrected_srt[1:]):
        if curr["start"] - prev["end"] > threshold:
            sentences.append(bucket)
            bucket = [curr]
        else:
            bucket.append(curr)
    sentences.append(bucket)
    return sentences
def adjust_sentence_timing(sentences, gap_adjust=1, time_precision=3):
    """Shift sentences left to cap inter-sentence gaps, keeping the originals.

    With ``gap_adjust`` != 0, any gap longer than 1 second between two
    consecutive sentences is compressed to exactly 1 second; with
    ``gap_adjust`` == 0 the natural gaps are kept. Every word dict in the
    result carries the adjusted ``start``/``end`` (rounded to
    ``time_precision`` decimals, or unrounded when it is None) plus
    ``original_start``/``original_end``. The first sentence always keeps
    its original timing.

    Returns [] for empty input (previously raised IndexError).
    """
    if not sentences:
        return []

    def round_time(t):
        # time_precision=None disables rounding entirely.
        return round(t, time_precision) if time_precision is not None else t

    adjusted_sentences = []
    total_offset = 0.0
    prev_end = sentences[0][-1]["end"]
    # The first sentence keeps its original timing verbatim.
    first_sentence = [
        {
            "word": w["word"],
            "start": w["start"],
            "end": w["end"],
            "original_start": w["start"],
            "original_end": w["end"]
        }
        for w in sentences[0]
    ]
    adjusted_sentences.append(first_sentence)
    for i in range(1, len(sentences)):
        sentence = sentences[i]
        curr_start = sentence[0]["start"]
        natural_gap = curr_start - prev_end
        # gap_adjust == 0 keeps natural gaps; otherwise cap each gap at 1s.
        adjusted_gap = natural_gap if gap_adjust == 0 else (1.0 if natural_gap > 1.0 else natural_gap)
        move_amount = natural_gap - adjusted_gap
        total_offset += move_amount
        adjusted_sentence = []
        for w in sentence:
            adjusted_sentence.append({
                "word": w["word"],
                "start": round_time(w["start"] - total_offset),
                "end": round_time(w["end"] - total_offset),
                "original_start": w["start"],
                "original_end": w["end"]
            })
        adjusted_sentences.append(adjusted_sentence)
        prev_end = sentence[-1]["end"]
    return adjusted_sentences
def split_into_paragraphs(sentence, max_words=5, max_chunk_duration=1.5):
    """Break one sentence (list of word dicts) into short display chunks.

    A chunk is closed when it already holds ``max_words`` words, when adding
    the next word would make it span at least ``max_chunk_duration`` seconds,
    or when the next word is not time-continuous with the previous one
    (start differs from the previous end by >= 1 ms).
    """
    chunks = []
    idx, total = 0, len(sentence)
    while idx < total:
        chunk = [sentence[idx]]
        chunk_start = sentence[idx]["start"]
        last_end = sentence[idx]["end"]
        idx += 1
        while idx < total:
            candidate = sentence[idx]
            contiguous = abs(candidate["start"] - last_end) < 0.001
            full = len(chunk) >= max_words
            too_long = (candidate["end"] - chunk_start) >= max_chunk_duration
            if full or too_long or not contiguous:
                break
            chunk.append(candidate)
            last_end = candidate["end"]
            idx += 1
        chunks.append(chunk)
    return chunks
def build_segments_by_mode(
    mode,
    paragraph,
    track_name,
    font,
    font_size,
    highlight_color,
    normal_color,
    transform_x,
    transform_y,
    fixed_width,
    shadow_enabled,
    shadow_color,
    border_color,
    border_width,
    border_alpha,
    background_color,
):
    """Build add_text segment dicts for one paragraph of word entries.

    Supported modes:
      - "word_pop":       one segment per word, each word shown on its own.
      - "word_highlight": full paragraph shown once per word; only the
                          current word uses ``highlight_color``.
      - "sentence_fade":  full paragraph shown once per word; every word
                          spoken so far stays highlighted.
      - "sentence_pop":   a single segment covering the whole paragraph.

    Raises ValueError for an unknown mode. (Cleanup: the four branches now
    share helpers, and leftover debug prints / dead commented-out code were
    removed.)
    """
    border = {"alpha": border_alpha, "color": border_color, "width": border_width}

    def make_segment(text, start, end, text_styles):
        # Keyword arguments shared by every mode's add_text call.
        return {
            "text": text,
            "start": start,
            "end": end,
            "font": font,
            "track_name": track_name,
            "font_color": normal_color,
            "font_size": font_size,
            "transform_x": transform_x,
            "transform_y": transform_y,
            "shadow_enabled": shadow_enabled,
            "fixed_width": fixed_width,
            "text_styles": text_styles,
            "shadow_color": shadow_color,
            "border_color": border_color,
            "border_width": border_width,
            "border_alpha": border_alpha,
            "background_color": background_color,
        }

    def full_border_styles(text):
        # One style range covering every non-space character of the text.
        char_count = len(text.replace(" ", ""))
        return [{"start": 0, "end": char_count, "border": dict(border)}]

    def word_offsets(words):
        # (start, end) character offsets of each word in " ".join(words).
        offsets, cursor = [], 0
        for w in words:
            offsets.append((cursor, cursor + len(w["word"])))
            cursor += len(w["word"]) + 1
        return offsets

    segments = []
    if mode == "word_pop":
        for w in paragraph:
            segments.append(
                make_segment(w["word"], w["start"], w["end"], full_border_styles(w["word"]))
            )
    elif mode in ("word_highlight", "sentence_fade"):
        paragraph_text = " ".join(w["word"] for w in paragraph)
        offsets = word_offsets(paragraph)
        for idx, w in enumerate(paragraph):
            text_styles = []
            for k, (s, e) in enumerate(offsets):
                # word_highlight lights only the current word; sentence_fade
                # additionally keeps every previously spoken word lit.
                lit = (k == idx) if mode == "word_highlight" else (k <= idx)
                color = highlight_color if lit else normal_color
                text_styles.append({
                    "start": s,
                    "end": e,
                    "style": {"color": color, "size": font_size},
                    "border": dict(border),
                })
            segments.append(make_segment(paragraph_text, w["start"], w["end"], text_styles))
    elif mode == "sentence_pop":
        text = " ".join(w["word"] for w in paragraph)
        segments.append(
            make_segment(text, paragraph[0]["start"], paragraph[-1]["end"], full_border_styles(text))
        )
    else:
        raise ValueError(f"未知模式: {mode}")
    return segments
# Sample word-level speech-recognition output used by the demo below: each
# entry is one spoken word with start/end timestamps (seconds) and the
# recognizer's confidence score.
corrected_srt = [{
    "word": "Hello",
    "start": 0.0,
    "end": 0.64,
    "confidence": 0.93917525
},
{
    "word": "I'm",
    "start": 0.64,
    "end": 0.79999995,
    "confidence": 0.9976464
},
{
    "word": "PAWA",
    "start": 0.79999995,
    "end": 1.36,
    "confidence": 0.6848311
},
{
    "word": "Nice",
    "start": 1.36,
    "end": 1.52,
    "confidence": 0.9850389
},
{
    "word": "To",
    "start": 1.52,
    "end": 1.68,
    "confidence": 0.9926886
},
{
    "word": "Meet",
    "start": 1.68,
    "end": 2.08,
    "confidence": 0.9972697
},
{
    "word": "You",
    "start": 2.08,
    "end": 2.72,
    "confidence": 0.9845563
},
{
    "word": "Enjoy",
    "start": 2.72,
    "end": 3.04,
    "confidence": 0.99794894
},
{
    "word": "My",
    "start": 3.04,
    "end": 3.1999998,
    "confidence": 0.9970203
},
{
    "word": "Parttern",
    "start": 3.1999998,
    "end": 3.36,
    "confidence": 0.9970235
},
{
    "word": "Thank",
    "start": 3.36,
    "end": 3.6799998,
    "confidence": 0.98627764
},
{
    "word": "You",
    "start": 3.6799998,
    "end": 4.0,
    "confidence": 0.9939551
},
]
def add_koubo_from_srt(
    corrected_srt,
    track_name,
    mode="word_pop",
    font="ZY_Modern",
    font_size=32,
    highlight_color="#FFD700",
    normal_color="#AAAAAA", max_chunk_duration=1.5, max_words=5,
    gap_adjust=1,
    time_precision=3,
    transform_x=0.5,
    transform_y=0.3,
    fixed_width=-1,
    shadow_enabled=True,
    shadow_color="#000000",
    border_color="#000000",
    border_width=0.5,
    border_alpha=1.0,
    background_color="#000000",
):
    """Unified entry point: render a word-level transcript as styled subtitles.

    Pipeline: group words into sentences, compress long pauses, split each
    sentence into short display chunks, then issue one add_text call per
    generated segment. The draft_id returned by the first successful
    add_text call is attached to all subsequent segments so everything lands
    in the same draft.

    Returns the draft_id, or None when no call returned one.
    """
    sentences = group_sentences(corrected_srt)
    adjusted_sentences = adjust_sentence_timing(sentences, gap_adjust, time_precision)
    all_paragraphs = [split_into_paragraphs(s, max_words, max_chunk_duration) for s in adjusted_sentences]
    draft_id_ret = None
    for sentence_paragraphs in all_paragraphs:
        for paragraph in sentence_paragraphs:
            segments = build_segments_by_mode(
                mode,
                paragraph,
                track_name,
                font,
                font_size,
                highlight_color,
                normal_color,
                transform_x,
                transform_y,
                fixed_width,
                shadow_enabled,
                shadow_color,
                border_color,
                border_width,
                border_alpha,
                background_color,
            )
            for seg in segments:
                if draft_id_ret:
                    seg["draft_id"] = draft_id_ret
                print("seg", seg)
                res = add_text_impl(**seg)
                if draft_id_ret is None and isinstance(res, dict):
                    # Narrowed from a bare ``except:``: only the nested
                    # draft_id lookup may legitimately fail here.
                    try:
                        draft_id_ret = res["output"]["draft_id"]
                    except (KeyError, TypeError):
                        pass
    return draft_id_ret
# Color palette for the demo render (shadow/border/background + text colors).
colors = {
    "shadow_color": "#000000",
    "border_color": "#FFD700",
    "background_color": "#000000",
    "normal_color": "#FFFFFF",
    "highlight_color": "#DA70D6" # purple highlight for the active word
}
# Build the subtitle track from the sample transcript above.
draft_id = add_koubo_from_srt(
    corrected_srt,
    track_name="main_text",
    font_size=15,
    gap_adjust=0,  # 0 = keep natural gaps, 1 = compress gaps longer than 1s
    transform_x=0,
    transform_y=-0.45,
    fixed_width = 0.6,
    mode="word_highlight",
    shadow_enabled=True,
    border_width=10,
    border_alpha=1.0,
    **colors,
    font="ZY_Modern", # set your own font; it must exist in the font library
)
# Overlay a static image for the full 4-second clip, then save the draft
# into the local JianyingPro drafts folder.
add_image_impl(image_url="https://pic1.imgdb.cn/item/689aff2758cb8da5c81e64a2.png", start = 0, end = 4, draft_id=draft_id)
save_result = save_draft_impl(draft_id, draft_folder)
print(save_result)
"""
# 单词高亮
mode="word_highlight"
# 单词跳出
mode="word_pop"
# 句子渐显
mode="sentence_fade"
# 句子跳出
mode="sentence_pop"
"""

606
pattern/002-relationship.py Normal file
View File

@@ -0,0 +1,606 @@
import json
import random
import requests
# Set API keys (placeholders — replace with real credentials before running).
QWEN_API_KEY = "your qwen api key"
PEXELS_API_KEY = "your pexels api key"
CAPCUT_API_KEY = "your capcut api key"
# BUG FIX: a trailing comma previously made LICENSE_KEY a 1-tuple, so
# generate_video() serialized ["your capcut license key"] as the license_key.
LICENSE_KEY = "your capcut license key"
def llm(query = ""):
    """Call the Tongyi Qianwen (Qwen) chat-completions API.

    Parameters:
    query (str): Free-form user hint appended to the fixed prompt; default "".

    Returns:
    dict: {"title": str, "sentences": list[str]} on success.
    None: implicitly, when the HTTP call fails or the model's reply cannot
    be parsed as the expected JSON — callers must handle a None result.
    """
    # Build system prompt (runtime string — kept verbatim).
    system_prompt = """
    * **Context:** You are an AI expert specializing in modern interpersonal relationships and emotional communication. Your knowledge base is built on a deep understanding of popular emotional content on social media, and you excel at interpreting the dynamics and perspectives of relationships in a relaxed, colloquial style.
    * **Objective:** When the user inputs "Give me some random returns", your goal is to **randomly create** advice about male-female emotions, behavioral habits, or relationship guidance. Your content must strictly mimic the unique style shown in the examples below.
    * **Style:** Your generated content should have the following characteristics:
    * **Structure:** Use a list format, with each point being a short, independent sentence.
    * **Wording:** Use colloquial language, often using the "When..." sentence pattern to describe a scenario.
    * **Theme:** Content should revolve around "how to understand the other person", "which behaviors are attractive", or "advice for a specific gender".
    * **Tone:** Your tone should be friendly, sincere, slightly teasing, like a friend sharing experiences on social media.
    * **Audience:** Your audience is anyone interested in modern emotional relationships who wants to get advice in a relaxed way.
    * **Response:** When you receive the instruction "Give me some random returns", please **randomly select one** from the following examples as your response. Or, you can **randomly generate** a new one with a completely consistent style, and return it in the same JSON format:
    **Example 1 (How to understand girls):**
    ```json
    {
    "title": "How to understand girls",
    "sentences": [
    "Hands on her stomach (Insecure)",
    "She leans on you (Feels safe)",
    "Covers her smile (Thinks your going to judge)",
    "Stops texting you (Feels like she is annoying you)",
    "Says she is fine (She is everything but fine)",
    "When she hugs you (You mean a lot to her)"
    ]
    }
    ```
    **Example 2 (Tips for girls):**
    ```json
    {
    "title": "Tips for the girls (From the guys)",
    "sentences": [
    "99/100 guys dont know what hip dips are and actually love your stretch marks",
    "We can't tell that you like us by just viewing our story, just message us",
    "When he's out with his boys, let him have this time (this is very important)",
    "'I'm not ready for a relationship' - unless your the luckiest girl in the world your not getting cuffed",
    "As Bruno mars said, 'your perfect just the way you are' so just be you, it'll work"
    ]
    }
    ```
    **Example 3 (Things guys find attractive in girls):**
    ```json
    {
    "title": "Things girls do that guys find attractive",
    "sentences": [
    "Bed hair when it's all messy >>>",
    "When you come work out with us",
    "Your sleepy voice in the morning or after a nap",
    "When you wear our t-shirts as pyjamas",
    "When you have a funny or really bad laugh",
    "When you initiate ...",
    "When your good with animals or animals like you"
    ]
    }
    ```
    """
    # Build user prompt
    user_prompt = f"Randomly create advice about male-female emotions, behavioral habits, or relationship guidance, based on user input: {query}"
    # Prepare request data (OpenAI-compatible endpoint of DashScope).
    url = "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {QWEN_API_KEY}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "qwen-plus",
        "messages": [
            {
                "role": "system",
                "content": system_prompt
            },
            {
                "role": "user",
                "content": user_prompt
            }
        ],
        "temperature": 0.7,
        "max_tokens": 16384,
        # Forces the model to emit a JSON object, parsed below.
        "response_format": {"type": "json_object"}
    }
    try:
        # Send HTTP request
        response = requests.post(url, headers=headers, json=data, timeout=30)
        # Check response status
        if response.status_code == 200:
            response_data = response.json()
            # Extract content from response
            if 'choices' in response_data and len(response_data['choices']) > 0:
                content = response_data['choices'][0]['message']['content']
                try:
                    # Parse JSON response
                    result = json.loads(content)
                    # Ensure result contains necessary fields
                    if "title" in result and "sentences" in result:
                        return result
                except json.JSONDecodeError:
                    pass  # If JSON parsing fails, will return predefined example
        # If API call fails or parsing fails, print error message
        # NOTE(review): execution also reaches this print when the reply was
        # HTTP 200 but lacked a valid title/sentences payload.
        print(f"Error: {response.status_code}, {response.text if hasattr(response, 'text') else 'No response text'}")
    except Exception as e:
        # Catch all possible exceptions
        print(f"Exception occurred: {str(e)}")
def search_pexels_videos(query="twilight", min_duration=10, orientation="portrait", per_page=15):
    """
    Call Pexels API to search for videos
    Parameters:
    query (str): Search keyword, default is "twilight"
    min_duration (int): Minimum video duration (seconds), default is 10 seconds
    orientation (str): Video orientation, default is "portrait"
    per_page (int): Number of results per page, default is 15
    Returns:
    str: direct download URL of one randomly chosen portrait video file,
    or [] (empty list) when nothing matched or the request failed.
    NOTE(review): the success and failure return types differ (str vs list);
    callers rely only on truthiness.
    """
    url = "https://api.pexels.com/videos/search"
    headers = {
        "Authorization": PEXELS_API_KEY
    }
    params = {
        "query": query,
        "orientation": orientation,
        "per_page": per_page,
        "min_duration": min_duration
    }
    try:
        response = requests.get(url, headers=headers, params=params)
        if response.status_code == 200:
            data = response.json()
            videos = []
            for video in data.get("videos", []):
                # Get video file information
                video_files = video.get("video_files", [])
                # Keep only clearly portrait files (height/width > 1.7, i.e. close to 9:16)
                portrait_videos = [file for file in video_files
                                   if file.get("width") and file.get("height") and
                                   file.get("height") / file.get("width") > 1.7]  # portrait orientation
                if portrait_videos:
                    # Select highest quality video file (largest pixel area)
                    best_quality = max(portrait_videos, key=lambda x: x.get("width", 0) * x.get("height", 0))
                    videos.append({
                        "id": video.get("id"),
                        "url": video.get("url"),
                        "image": video.get("image"),
                        "duration": video.get("duration"),
                        "user": video.get("user", {}).get("name"),
                        "video_url": best_quality.get("link"),
                        "width": best_quality.get("width"),
                        "height": best_quality.get("height"),
                        "file_type": best_quality.get("file_type")
                    })
            if videos:
                random_video = random.choice(videos)
                return random_video['video_url']
            return []
        else:
            print(f"Error: {response.status_code}, {response.text}")
            return []
    except Exception as e:
        print(f"Exception occurred: {str(e)}")
        return []
def create_capcut_draft(width=1080, height=1920):
    """Create a new CapCut draft via the hosted API.

    Returns {"success": True, "draft_id": ..., "draft_url": ...} on success;
    otherwise {"success": False, "error": ...} (plus the raw response text
    for non-200 HTTP statuses).
    """
    url = "https://open.capcutapi.top/cut_jianying/create_draft"
    headers = {
        "Authorization": f"Bearer {CAPCUT_API_KEY}",
        "Content-Type": "application/json"
    }
    try:
        response = requests.post(url, headers=headers, json={"width": width, "height": height})
        if response.status_code != 200:
            return {
                "success": False,
                "error": f"HTTP Error: {response.status_code}",
                "response": response.text
            }
        result = response.json()
    except Exception as e:
        return {"success": False, "error": str(e)}
    if not result.get("success"):
        return {"success": False, "error": result.get("error", "Unknown error")}
    output = result.get("output", {})
    return {
        "success": True,
        "draft_id": output.get("draft_id"),
        "draft_url": output.get("draft_url")
    }
def add_video_to_draft(draft_id, video_url):
    """Append ``video_url`` to draft ``draft_id``, trimmed to 10 seconds.

    Returns {"success": True, "draft_id": ..., "draft_url": ...} on success;
    otherwise {"success": False, "error": ...}.
    """
    url = "https://open.capcutapi.top/cut_jianying/add_video"
    headers = {
        "Authorization": f"Bearer {CAPCUT_API_KEY}",
        "Content-Type": "application/json"
    }
    payload = {
        "video_url": video_url,
        "draft_id": draft_id,
        "end": 10  # Set video duration to 10 seconds
    }
    try:
        response = requests.post(url, headers=headers, json=payload)
        if response.status_code != 200:
            return {
                "success": False,
                "error": f"HTTP Error: {response.status_code}",
                "response": response.text
            }
        result = response.json()
    except Exception as e:
        return {"success": False, "error": str(e)}
    if not result.get("success"):
        return {"success": False, "error": result.get("error", "Unknown error")}
    output = result.get("output", {})
    return {
        "success": True,
        "draft_id": output.get("draft_id"),
        "draft_url": output.get("draft_url")
    }
def add_text_to_draft(draft_id, text, font="ZY_Starry",
                      font_color="#FFFFFF",
                      background_color="#000000",
                      background_alpha=0.5,
                      background_style=2,
                      background_round_radius=10,
                      transform_y=0,
                      transform_x=0,
                      font_size=10.0,
                      fixed_width=0.6,
                      track_name="text_main"):
    """
    Call CapCut API to add text to draft.
    The text always spans seconds 0-10 of the timeline (hard-coded below).
    Parameters:
    draft_id (str): Draft ID
    text (str): Text content
    font (str): Font, default is "ZY_Starry"
    font_color (str): Font color, default is white
    background_color (str): Background color, default is black
    background_alpha (float): Background transparency, default is 0.5
    background_style (int): Background style preset, default is 2
    background_round_radius (int): Background corner radius, default is 10
    transform_y (float): Y-axis position offset, default is 0
    transform_x (float): X-axis position offset, default is 0
    font_size (float): Font size, default is 10.0
    fixed_width (float): Fixed text-box width ratio, default is 0.6
    track_name (str): Target text track name, default is "text_main"
    Returns:
    dict: Dictionary containing draft ID and download URL, or error message if failed
    """
    url = "https://open.capcutapi.top/cut_jianying/add_text"
    headers = {
        "Authorization": f"Bearer {CAPCUT_API_KEY}",
        "Content-Type": "application/json"
    }
    data = {
        "text": text,
        "start": 0,
        "end": 10,
        "draft_id": draft_id,
        "font": font,
        "font_color": font_color,
        "font_size": font_size,
        "transform_y": transform_y,
        "transform_x": transform_x,
        "fixed_width": fixed_width,
        "background_color": background_color,
        "background_alpha": background_alpha,
        "background_style": background_style,
        "background_round_radius": background_round_radius,
        "track_name": track_name
    }
    try:
        response = requests.post(url, headers=headers, json=data)
        if response.status_code == 200:
            result = response.json()
            if result.get("success"):
                return {
                    "success": True,
                    "draft_id": result.get("output", {}).get("draft_id"),
                    "draft_url": result.get("output", {}).get("draft_url")
                }
            else:
                return {
                    "success": False,
                    "error": result.get("error", "Unknown error")
                }
        else:
            return {
                "success": False,
                "error": f"HTTP Error: {response.status_code}",
                "response": response.text
            }
    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
def generate_video(draft_id, resolution="720P", framerate="24"):
    """Kick off server-side rendering of draft ``draft_id``.

    Uses the module-level LICENSE_KEY for authorization of the render.
    Returns {"success": True, "task_id": ...} when the render task was
    accepted; otherwise {"success": False, "error": ...}.
    """
    url = "https://open.capcutapi.top/cut_jianying/generate_video"
    headers = {
        "Authorization": f"Bearer {CAPCUT_API_KEY}",
        "Content-Type": "application/json"
    }
    payload = {
        "draft_id": draft_id,
        "license_key": LICENSE_KEY,
        "resolution": resolution,
        "framerate": framerate
    }
    try:
        response = requests.post(url, headers=headers, json=payload)
        if response.status_code != 200:
            return {
                "success": False,
                "error": f"HTTP Error: {response.status_code}",
                "response": response.text
            }
        result = response.json()
    except Exception as e:
        return {"success": False, "error": str(e)}
    if not result.get("success"):
        return {"success": False, "error": result.get("error", "Unknown error")}
    return {
        "success": True,
        "task_id": result.get("output", {}).get("task_id")
    }
def check_task_status(task_id):
    """Query the render task's current state.

    Returns {"success": True, "status": ..., "progress": ..., "result": ...,
    "error": ...} where "result" holds the finished video URL; otherwise
    {"success": False, "error": ...}.
    """
    url = "https://open.capcutapi.top/cut_jianying/task_status"
    headers = {
        "Authorization": f"Bearer {CAPCUT_API_KEY}",
        "Content-Type": "application/json"
    }
    try:
        response = requests.post(url, headers=headers, json={"task_id": task_id})
        if response.status_code != 200:
            return {
                "success": False,
                "error": f"HTTP Error: {response.status_code}",
                "response": response.text
            }
        result = response.json()
    except Exception as e:
        return {"success": False, "error": str(e)}
    if not result.get("success"):
        return {"success": False, "error": result.get("error", "Unknown error")}
    output = result.get("output", {})
    return {
        "success": True,
        "status": output.get("status"),
        "progress": output.get("progress"),
        "result": output.get("result"),  # rendered video URL when finished
        "error": output.get("error")
    }
# Example usage: generate text content, assemble a draft, render it, and
# poll the render task until it finishes.
if __name__ == "__main__":
    # Call LLM function and print result.
    # NOTE(review): llm() returns None on failure; result["title"] below
    # would then raise TypeError — confirm desired behavior.
    result = llm()
    print(json.dumps(result, indent=2, ensure_ascii=False))
    # 1. Create draft
    draft_result = create_capcut_draft()
    print("Draft creation result:", json.dumps(draft_result, indent=2, ensure_ascii=False))
    if draft_result.get("success"):
        draft_id = draft_result.get("draft_id")
        # 2. Search Pexels videos (returns a URL string, or [] on failure)
        video_url = search_pexels_videos()
        print("Pexels video URL:", video_url)
        # 3. Add video to draft
        if video_url:
            add_result = add_video_to_draft(draft_result.get("draft_id"), video_url)
            print("Add video result:", json.dumps(add_result, indent=2, ensure_ascii=False))
        # 5. Add title text
        title_result = add_text_to_draft(
            draft_id=draft_id,
            text=result["title"],
            font="ZY_Starry",  # Use starry font
            font_color="#FFFFFF",  # White font
            background_color="#000000",  # Black background
            background_alpha=1,  # Background transparency
            background_style=1,
            background_round_radius=10,
            transform_y=0.7,  # Located at the top of the screen (1 is top edge, -1 is bottom edge)
            transform_x=0,  # Horizontally centered
            font_size=13.0,  # Larger font
            track_name = "title",
            fixed_width=0.6
        )
        print("Add title result:", json.dumps(title_result, indent=2, ensure_ascii=False))
        # 6. Add sentence text
        sentence_count = len(result["sentences"])
        for i, sentence in enumerate(result["sentences"]):
            # Calculate vertical position - evenly distributed in the middle of the screen
            transform_y = 0.5 - (1.1 * (i + 1) / (sentence_count + 1))
            # Determine horizontal alignment - odd sentences left-aligned, even sentences right-aligned
            if i % 2 == 0:  # Odd sentences (counting from 0)
                transform_x = -0.5  # Left-aligned
            else:  # Even sentences
                transform_x = 0.5  # Right-aligned
            sentence_result = add_text_to_draft(
                draft_id=draft_id,
                text=sentence,
                font="ZY_Fantasy",  # Use fantasy font
                font_color="#FFFFFF",  # White font
                transform_y=transform_y,  # Vertical position
                transform_x=transform_x,  # Horizontal position (left-right alignment)
                background_alpha=0,
                font_size=7.0,  # Smaller font
                fixed_width=0.3,
                track_name=f"text_{i}"
            )
            print(f"Add sentence {i+1} result:", json.dumps(sentence_result, indent=2, ensure_ascii=False))
        # 7. Render video
        print("\nStarting video rendering...")
        generate_result = generate_video(draft_id)
        print("Video rendering request result:", json.dumps(generate_result, indent=2, ensure_ascii=False))
        if generate_result.get("success"):
            task_id = generate_result.get("task_id")
            print(f"Task ID: {task_id}, starting to poll task status...")
            # 8. Poll task status every 5 seconds until SUCCESS/FAILED or timeout
            import time
            max_attempts = 30  # Maximum 30 polling attempts
            attempt = 0
            while attempt < max_attempts:
                status_result = check_task_status(task_id)
                print(f"Poll count {attempt+1}, status:", json.dumps(status_result, indent=2, ensure_ascii=False))
                if not status_result.get("success"):
                    print("Failed to check task status:", status_result.get("error"))
                    break
                status = status_result.get("status")
                if status == "SUCCESS":
                    print("\nVideo rendering successful!")
                    print("Video URL:", status_result.get("result"))
                    break
                elif status == "FAILED":
                    print("\nVideo rendering failed:", status_result.get("error"))
                    break
                # Wait 5 seconds before checking again
                print("Waiting 5 seconds before checking again...")
                time.sleep(5)
                attempt += 1
            if attempt >= max_attempts:
                print("\nPolling timeout, please check task status manually later")

13
pattern/README.md Normal file
View File

@@ -0,0 +1,13 @@
# Pattern Gallery
## 001-words.py
[source](001-words.py)
[![Words](https://img.youtube.com/vi/HLSHaJuNtBw/hqdefault.jpg)](https://www.youtube.com/watch?v=HLSHaJuNtBw)
## 002-relationship.py
[source](002-relationship.py)
[![Relationship](https://img.youtube.com/vi/f2Q1OI_SQZo/hqdefault.jpg)](https://www.youtube.com/watch?v=f2Q1OI_SQZo)

View File

@@ -107,7 +107,7 @@
"name": "",
"new_version": "110.0.0",
"relationships": [],
"render_index_track_mode_on": false,
"render_index_track_mode_on": true,
"retouch_cover": null,
"source": "default",
"static_cover_image_path": "",

View File

@@ -270,7 +270,6 @@ class Script_file:
raise NameError("'%s' 类型的轨道已存在, 请为新轨道指定名称以避免混淆" % track_type)
track_name = track_type.name
if track_name in [track.name for track in self.tracks.values()]:
print("名为 '%s' 的轨道已存在" % track_name)
return self
render_index = track_type.value.render_index + relative_index

45
pyproject.toml Normal file
View File

@@ -0,0 +1,45 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "capcut-api"
version = "1.0.0"
description = "Open source CapCut API tool with MCP support"
readme = "README.md"
requires-python = ">=3.10"
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"requests>=2.28.0",
"Pillow>=9.0.0",
"numpy>=1.21.0",
"opencv-python>=4.6.0",
"ffmpeg-python>=0.2.0",
"pydantic>=2.0.0",
"fastapi>=0.100.0",
"uvicorn[standard]>=0.23.0",
]
[project.optional-dependencies]
mcp = [
"mcp>=1.0.0",
"aiohttp>=3.8.0",
"websockets>=11.0",
"jsonrpc-base>=2.2.0",
"jsonrpc-websocket>=3.1.0",
"jsonrpc-async>=2.1.0",
]
[project.urls]
Homepage = "https://github.com/ashreo/CapCutAPI"
Repository = "https://github.com/ashreo/CapCutAPI.git"
Issues = "https://github.com/ashreo/CapCutAPI/issues"

4
requirements-mcp.txt Normal file
View File

@@ -0,0 +1,4 @@
# MCP相关依赖
mcp>=1.0.0
aiohttp>=3.8.0
pydantic>=2.0.0

View File

@@ -3,3 +3,4 @@ psutil
flask
requests
oss2
json5

View File

@@ -3,7 +3,7 @@
"""
import os
import json
import json5 # 替换原来的json模块
# 配置文件路径
CONFIG_FILE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "config.json")
@@ -30,7 +30,8 @@ MP4_OSS_CONFIG=[]
if os.path.exists(CONFIG_FILE_PATH):
try:
with open(CONFIG_FILE_PATH, "r", encoding="utf-8") as f:
local_config = json.load(f)
# 使用json5.load替代json.load
local_config = json5.load(f)
# 更新是否是国际版
if "is_capcut_env" in local_config:
@@ -60,6 +61,6 @@ if os.path.exists(CONFIG_FILE_PATH):
if "mp4_oss_config" in local_config:
MP4_OSS_CONFIG = local_config["mp4_oss_config"]
except (json.JSONDecodeError, IOError):
except Exception as e:
# 配置文件加载失败,使用默认配置
pass

339
test_mcp_client.py Normal file
View File

@@ -0,0 +1,339 @@
#!/usr/bin/env python3
"""
CapCut API MCP 测试客户端 (Complete Version)
测试完整版本的MCP服务器包含所有CapCut API接口
"""
import subprocess
import json
import time
import sys
def send_request(process, request_data):
    """Send one JSON-RPC request over the server's stdin and return the
    parsed JSON response read from its stdout.

    Returns None on any failure (write error, empty response, or a
    response line that is not valid JSON).
    """
    try:
        payload = json.dumps(request_data, ensure_ascii=False)
        print(f"发送请求: {payload}")

        # Ship the request as a single line and force it past the buffer.
        process.stdin.write(payload + "\n")
        process.stdin.flush()

        # Block until the server answers with exactly one line of JSON.
        raw_line = process.stdout.readline()
        if not raw_line.strip():
            print("❌ 收到空响应")
            return None

        try:
            parsed = json.loads(raw_line.strip())
        except json.JSONDecodeError as e:
            print(f"❌ JSON解析错误: {e}")
            print(f"原始响应: {raw_line}")
            return None

        print(f"收到响应: {json.dumps(parsed, ensure_ascii=False, indent=2)}")
        return parsed
    except Exception as e:
        print(f"❌ 发送请求时出错: {e}")
        return None
def send_notification(process, notification_data):
    """Write a JSON-RPC notification to the server's stdin.

    Fire-and-forget: notifications never receive a response, so nothing
    is read back and nothing is returned.
    """
    try:
        line = json.dumps(notification_data, ensure_ascii=False)
        print(f"发送通知: {line}")
        process.stdin.write(line + "\n")
        process.stdin.flush()
    except Exception as e:
        print(f"❌ 发送通知时出错: {e}")
def main():
    """End-to-end smoke test for the CapCut MCP server.

    Launches ``mcp_server.py`` as a subprocess and drives it over
    JSON-RPC on stdin/stdout: initialize, list tools, then exercise the
    draft / text / video / audio / image / duration / save tools in order.
    All results are reported via stdout prints; the server is always torn
    down in the ``finally`` block.
    """
    print("🚀 CapCut API MCP 测试客户端 (Complete Version)")
    print("🎯 测试所有CapCut API接口功能")
    print("=" * 60)

    # Bind up-front so the finally block is safe even if Popen itself raises
    # (previously `process` was unbound there, turning a launch failure into
    # a NameError).
    process = None
    try:
        process = subprocess.Popen(
            [sys.executable, "mcp_server.py"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            bufsize=1,  # line-buffered; bufsize=0 is not supported for text-mode pipes
        )
        print("✅ MCP服务器已启动 (mcp_server.py)")

        time.sleep(1)  # give the server a moment to start accepting requests

        # 1. Initialize the MCP session.
        init_request = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "capabilities": {
                    "tools": {},
                    "resources": {}
                },
                "clientInfo": {
                    "name": "CapCut-Test-Client-Complete",
                    "version": "1.0.0"
                }
            }
        }
        response = send_request(process, init_request)
        if response and "result" in response:
            print("✅ 初始化成功")
        else:
            print("❌ 初始化失败")
            return

        # Per the MCP handshake, tell the server initialization is complete.
        init_notification = {
            "jsonrpc": "2.0",
            "method": "notifications/initialized",
            "params": {}
        }
        send_notification(process, init_notification)

        print("\n=== 📋 获取工具列表 ===")
        # 2. List the tools the server exposes.
        tools_request = {
            "jsonrpc": "2.0",
            "id": 2,
            "method": "tools/list",
            "params": {}
        }
        response = send_request(process, tools_request)
        if response and "result" in response:
            tools = response["result"]["tools"]
            print(f"✅ 成功获取 {len(tools)} 个工具:")
            for tool in tools:
                print(f"{tool['name']}: {tool['description']}")
        else:
            print("❌ 获取工具列表失败")
            return

        print("\n=== 🎬 测试核心功能 ===\n")

        # 3. Create a draft; its id is reused by every following call.
        print("📝 测试创建草稿")
        create_draft_request = {
            "jsonrpc": "2.0",
            "id": 3,
            "method": "tools/call",
            "params": {
                "name": "create_draft",
                "arguments": {
                    "width": 1080,
                    "height": 1920
                }
            }
        }
        response = send_request(process, create_draft_request)
        if response and "result" in response:
            print("✅ 创建草稿成功")
            # Tool results arrive as JSON text inside content[0]; parse it
            # to extract the draft_id for the subsequent tests.
            draft_data = json.loads(response["result"]["content"][0]["text"])
            draft_id = draft_data["result"]["draft_id"]
            print(f"📋 草稿ID: {draft_id}")
        else:
            print("❌ 创建草稿失败")
            draft_id = None

        # 4. Add text with per-range styles, shadow and background.
        print("\n📝 测试添加文本(多样式)")
        add_text_request = {
            "jsonrpc": "2.0",
            "id": 4,
            "method": "tools/call",
            "params": {
                "name": "add_text",
                "arguments": {
                    "text": "Hello CapCut API!",
                    "start": 0,
                    "end": 5,
                    "draft_id": draft_id,
                    "font_color": "#ff0000",
                    "font_size": 32,
                    "shadow_enabled": True,
                    "shadow_color": "#000000",
                    "shadow_alpha": 0.8,
                    "background_color": "#ffffff",
                    "background_alpha": 0.5,
                    "text_styles": [
                        {
                            "start": 0,
                            "end": 5,
                            "font_size": 36,
                            "font_color": "#00ff00",
                            "bold": True
                        },
                        {
                            "start": 6,
                            "end": 12,
                            "font_size": 28,
                            "font_color": "#0000ff",
                            "italic": True
                        }
                    ]
                }
            }
        }
        response = send_request(process, add_text_request)
        if response and "result" in response:
            print("✅ 添加文本成功")
        else:
            print("❌ 添加文本失败")

        # 5. Add a video clip with a transition and volume control.
        print("\n🎬 测试添加视频")
        add_video_request = {
            "jsonrpc": "2.0",
            "id": 5,
            "method": "tools/call",
            "params": {
                "name": "add_video",
                "arguments": {
                    "video_url": "https://example.com/video.mp4",
                    "draft_id": draft_id,
                    "start": 0,
                    "end": 10,
                    "target_start": 5,
                    "transition": "fade",
                    "volume": 0.8
                }
            }
        }
        response = send_request(process, add_video_request)
        if response and "result" in response:
            print("✅ 添加视频成功")
        else:
            print("❌ 添加视频失败")

        # 6. Add an audio clip.
        print("\n🎵 测试添加音频")
        add_audio_request = {
            "jsonrpc": "2.0",
            "id": 6,
            "method": "tools/call",
            "params": {
                "name": "add_audio",
                "arguments": {
                    "audio_url": "https://example.com/audio.mp3",
                    "draft_id": draft_id,
                    "start": 0,
                    "end": 15,
                    "volume": 0.6
                }
            }
        }
        response = send_request(process, add_audio_request)
        if response and "result" in response:
            print("✅ 添加音频成功")
        else:
            print("❌ 添加音频失败")

        # 7. Add an image with intro/outro animations.
        print("\n🖼️ 测试添加图片")
        add_image_request = {
            "jsonrpc": "2.0",
            "id": 7,
            "method": "tools/call",
            "params": {
                "name": "add_image",
                "arguments": {
                    "image_url": "https://example.com/image.jpg",
                    "draft_id": draft_id,
                    "start": 10,
                    "end": 15,
                    "intro_animation": "fade_in",
                    "outro_animation": "fade_out"
                }
            }
        }
        response = send_request(process, add_image_request)
        if response and "result" in response:
            print("✅ 添加图片成功")
        else:
            print("❌ 添加图片失败")

        # 8. Query a remote video's duration.
        print("\n⏱️ 测试获取视频时长")
        get_duration_request = {
            "jsonrpc": "2.0",
            "id": 8,
            "method": "tools/call",
            "params": {
                "name": "get_video_duration",
                "arguments": {
                    "video_url": "https://example.com/video.mp4"
                }
            }
        }
        response = send_request(process, get_duration_request)
        if response and "result" in response:
            print("✅ 获取视频时长成功")
        else:
            print("❌ 获取视频时长失败")

        # 9. Persist the draft.
        print("\n💾 测试保存草稿")
        save_draft_request = {
            "jsonrpc": "2.0",
            "id": 9,
            "method": "tools/call",
            "params": {
                "name": "save_draft",
                "arguments": {
                    "draft_id": draft_id
                }
            }
        }
        response = send_request(process, save_draft_request)
        if response and "result" in response:
            print("✅ 保存草稿成功")
        else:
            print("❌ 保存草稿失败")

        print("\n🎉 所有测试完成CapCut API MCP服务器功能验证成功")
        print("\n✅ 已验证的功能:")
        print(" • 草稿管理 (创建、保存)")
        print(" • 文本处理 (多样式、阴影、背景)")
        print(" • 视频处理 (添加、转场、音量控制)")
        print(" • 音频处理 (添加、音量控制)")
        print(" • 图片处理 (添加、动画效果)")
        print(" • 工具信息 (时长获取)")

    except Exception as e:
        print(f"❌ 测试过程中出错: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Tear the server down; process is None when Popen itself failed.
        if process is not None:
            try:
                process.terminate()
                process.wait(timeout=5)
            except Exception:
                # terminate() was ignored or wait() timed out — force kill.
                process.kill()
            print("🔴 MCP服务器已关闭")
# Run the full MCP smoke test when executed as a script.
if __name__ == "__main__":
    main()

View File

@@ -6,7 +6,7 @@ import os
import hashlib
import functools
import time
from settings.local import DRAFT_DOMAIN, PREVIEW_ROUTER
from settings.local import DRAFT_DOMAIN, PREVIEW_ROUTER, IS_CAPCUT_ENV
def hex_to_rgb(hex_color: str) -> tuple:
"""Convert hexadecimal color code to RGB tuple (range 0.0-1.0)"""
@@ -80,4 +80,4 @@ def timing_decorator(func_name):
return decorator
def generate_draft_url(draft_id):
return f"{DRAFT_DOMAIN}{PREVIEW_ROUTER}?draft_id={draft_id}"
return f"{DRAFT_DOMAIN}{PREVIEW_ROUTER}?draft_id={draft_id}&is_capcut={1 if IS_CAPCUT_ENV else 0}"