[project]
name = "indextts"
version = "2.0.0"
description = "IndexTTS2: A Breakthrough in Emotionally Expressive and Duration-Controlled Auto-Regressive Zero-Shot Text-to-Speech"
authors = [{ name = "Bilibili IndexTTS Team" }]
license = "LicenseRef-Bilibili-IndexTTS"
license-files = ["LICEN[CS]E*", "INDEX_MODEL_LICENSE*"]
readme = "README.md"
classifiers = [
    "Development Status :: 5 - Production/Stable",

    "Intended Audience :: Science/Research",
    "Intended Audience :: Developers",

    "Topic :: Scientific/Engineering",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",

    "Natural Language :: English",
    "Natural Language :: Chinese (Simplified)",

    "Programming Language :: Python :: 3",

    "Operating System :: OS Independent",
]
requires-python = ">=3.10"
dependencies = [
    # IMPORTANT: Always run `uv lock` or `uv lock --upgrade` to resolve dependencies
    # and update the lockfile after editing anything below.
    # WARNING: Ensure that you don't have a local `uv.toml` which overrides PyPI
    # while generating the lockfile: https://github.com/astral-sh/uv/issues/15741
    "accelerate==1.8.1",
    "cn2an==0.5.22",
    "cython==3.0.7",
    "descript-audiotools==0.7.2",
    "einops>=0.8.1",
    "ffmpeg-python==0.2.0",
    "g2p-en==2.1.0",
    "jieba==0.42.1",
    "json5==0.10.0",
    "keras==2.9.0",
    "librosa==0.10.2.post1",
    "matplotlib==3.8.2",
    "modelscope==1.27.0",
    "munch==4.0.0",
    "numba==0.58.1",
    "numpy==1.26.2",
    "omegaconf>=2.3.0",
    "opencv-python==4.9.0.80",
    "pandas==2.3.2",
    "safetensors==0.5.2",
    "sentencepiece>=0.2.1",
    "tensorboard==2.9.1",
    "textstat>=0.7.10",
    "tokenizers==0.21.0",
    "torch==2.8.*",
    "torchaudio==2.8.*",
    "tqdm>=4.67.1",
    "transformers==4.52.1",

# Use "wetext" on Windows/Mac, otherwise "WeTextProcessing" on Linux.
|
|
"wetext>=0.0.9; sys_platform != 'linux'",
|
|
"WeTextProcessing; sys_platform == 'linux'",
|
|
]
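
# A possible workflow after editing the dependency list above (a sketch, assuming
# the standard `uv` CLI; exact flags may differ between uv versions):
#   uv lock    # re-resolve dependencies and regenerate uv.lock
#   uv sync    # install the locked dependency set into the project's .venv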

[project.optional-dependencies]
# To install WebUI support, run `uv sync --extra webui` (or `--all-extras`).
webui = [
    "gradio==5.45.0",
]
# To install DeepSpeed support, run `uv sync --extra deepspeed` (or `--all-extras`).
deepspeed = [
    "deepspeed==0.17.1",
]
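
# Multiple extras can be requested in one command (a usage sketch; `--extra` can be
# repeated, or replaced with `--all-extras`):
#   uv sync --extra webui --extra deepspeed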

[project.urls]
Homepage = "https://github.com/index-tts/index-tts"
Repository = "https://github.com/index-tts/index-tts.git"

[project.scripts]
# Set the installed binary names and entry points.
indextts = "indextts.cli:main"
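
# After installation, the `indextts` command invokes the `main()` function of the
# `indextts.cli` module, per the entry-point spec above.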

[build-system]
# How to build the project as a CLI tool or PyPI package.
# NOTE: Use `uv tool install -e .` to install the package as a CLI tool.
requires = ["hatchling >= 1.27.0"]
build-backend = "hatchling.build"
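
# For distributable artifacts, any standards-based build frontend should work with
# the hatchling backend; for example (a sketch, assuming a recent uv):
#   uv build    # writes an sdist and a wheel into dist/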

[tool.uv]
# Disable build isolation when building DeepSpeed from source.
# NOTE: This is *necessary* so that DeepSpeed builds directly within our `.venv`
# and finds our CUDA-enabled version of PyTorch, which DeepSpeed *needs* during
# its compilation to determine which GPU support to compile in. It also avoids
# downloading a separate, generic PyTorch build just for the compilation step.
no-build-isolation-package = ["deepspeed"]
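
# A related workflow note (a sketch, not verified against every uv version): with
# build isolation disabled, PyTorch must already be installed in `.venv` before
# DeepSpeed compiles, so sync the base dependencies first and then add the extra:
#   uv sync
#   uv sync --extra deepspeed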

[tool.uv.sources]
# Install PyTorch with CUDA support on Linux/Windows (CUDA is not available on macOS).
# NOTE: We must explicitly request these packages as `dependencies` above; the CUDA
# builds will not be selected for packages that are only transitive dependencies.
torch = [
    { index = "pytorch-cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]
torchaudio = [
    { index = "pytorch-cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]
torchvision = [
    { index = "pytorch-cuda", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]

[[tool.uv.index]]
name = "pytorch-cuda"
# Use PyTorch wheels built for the NVIDIA CUDA Toolkit 12.8.
# Available versions: https://pytorch.org/get-started/locally/
url = "https://download.pytorch.org/whl/cu128"
# Only use this index when explicitly requested by `tool.uv.sources`.
explicit = true
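
# To target a different CUDA toolkit (or a CPU-only install), only the URL above
# should need to change; for example (a sketch using other official PyTorch wheel
# indexes):
#   url = "https://download.pytorch.org/whl/cu126"   # CUDA 12.6 builds
#   url = "https://download.pytorch.org/whl/cpu"     # CPU-only builds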