
Commit 26fdaaf

[WIP] Update project dependencies
1 parent 74ef1bd commit 26fdaaf

1 file changed

pyproject.toml

Lines changed: 122 additions & 3 deletions
@@ -3,24 +3,142 @@ requires = [
     "setuptools>=77.0",
     "wheel>=0.45.1",
     "pip>=25.3",
+    "pybind11==2.13.6",
 ]
 build-backend = "setuptools.build_meta"

 [project]
 name = "flag_scale"
 version = "1.0.0"
-description = "FlagScale is a comprehensive toolkit designed to support the entire lifecycle of large models, developed with the backing of the Beijing Academy of Artificial Intelligence (BAAI)."
+description = "FlagScale is a comprehensive toolkit designed to support the entire lifecycle of large models."
 readme = "README.md"
-license = {text = "Apache-2.0"}
+license = { text = "Apache-2.0" }
 requires-python = ">=3.10"
-dynamic = ["dependencies"]
+dependencies = [
+    "aiohttp==3.13.3",  # Apache-2.0, MIT. Async HTTP framework.
+    "click==8.3.1",  # BSD-3. Command line processing.
+    "compressed-tensors==0.13.0",  # Apache-2.0. For compression tasks.
+    "hydra-core==1.3.2",  # MIT. Configuration management.
+    "llmcompressor==0.9.0",  # Apache-2.0. LLM compressor for (post-)training.
+    "numpy==2.4.1",  # BSD-3. Array computing.
+    "omegaconf==2.3.0",  # BSD. Configuration library.
+    "openai==2.15.0",  # Apache-2.0. Official library for the OpenAI API.
+    "Pillow==12.1.0",  # MIT-CMU. Aka. 'PIL'. Image library.
+    "PyYAML==6.0.3",  # MIT. YAML processor. Aka. 'yaml'.
+    "packaging==25.0",  # Apache-2.0, BSD. Version parsing/comparison util.
+    "psutil==7.2.1",  # BSD-3. Process and system monitoring.
+    "pydantic==2.12.5",  # MIT. Data validation.
+    "torch==2.9.1",  # BSD-3. Tensor & DNN framework.
+    "torchvision==0.24.1",  # BSD. Image and video for Torch deep learning.
+    "tqdm==4.67.1",  # MIT. Progress meter.
+    "transformers==4.57.6",  # Apache-2.0.
+    "webdataset==1.0.2",  # BSD-3. Data storage and I/O.
+]

 [project.urls]
 Homepage = "https://github.com/flagos-ai/FlagScale"

 [project.scripts]
 flagscale = "flag_scale.flagscale.cli:flagscale"

+[project.optional-dependencies]
+
+training = [
+    "accelerate==1.12.0",  # Apache-2.0. PyTorch utility for training loops.
+    "av==16.1.0",  # BSD-3. Video training.
+    "datasets==4.5.0",  # Apache-2.0. HuggingFace library for datasets.
+    "deepspeed==0.18.4",  # Apache-2.0. LLaVA-OneVision and checkpointing.
+    "einops==0.8.1",  # MIT. Deep learning operations.
+    "fsspec==2026.1.0",  # BSD-3. Filesystem specification used in video training tools.
+    "huggingface-hub==1.3.2",  # Apache-2.0. Client library for the huggingface.co hub.
+    "llava-torch==1.2.2.post1",  # Apache-2.0. Visual instruction tuning.
+    "nvidia-resiliency-ext==0.5.0",  # Apache-2.0. NVIDIA resiliency extension.
+    "pandas==2.3.3",  # BSD. Data analysis package.
+    "pyarrow==23.0.0",  # Apache-2.0. Library for Apache Arrow.
+    "pytorch_lightning==2.6.0",  # Apache-2.0. PyTorch wrapper.
+    "regex==2026.1.15",  # Apache-2.0. Regular expressions.
+    "safetensors==0.7.0",  # Apache-2.0. Model file processing.
+    "sentencepiece==0.2.1",  # Apache-2.0. Text tokenizer and detokenizer.
+    "tiktoken==0.12.0",  # MIT. BPE tokenizer for OpenAI models.
+    "timm==1.0.24",  # Apache-2.0. Image models, used in legacy training.
+    "tokenizers==0.22.2",  # Apache-2.0.
+    "torchcodec==0.9.1",  # BSD-3. Video decoder for PyTorch.
+    "transformer-engine==2.11.0",  # Apache-2.0.
+    "wandb==0.24.0",  # MIT. Weights & Biases library.
+]
+
+training_addon = [
+    # TODO(Qiming): Confirm that `accimage` is outdated and can be removed.
+    # "accimage",
+    "faiss==1.5.3",  # BSD. Search and clustering of dense vectors, for Megatron legacy.
+    "flag_gems==4.2.0",  # Apache-2.0.
+    "flagcx==0.8.0",  # Apache-2.0.
+    "flash_attn==2.8.3",  # BSD. Fast and memory-efficient exact attention.
+    "flash_mla==1.0.0.dev0",  # Unknown. Megatron Qwen3 vision.
+    "nvidia-modelopt==0.41.0",  # Apache-2.0. NVIDIA model optimizer.
+    "peft==0.18.1",  # Apache-2.0. Parameter-efficient fine-tuning library.
+    # TODO(Qiming): handle the `apex` dependency.
+    # https://github.com/NVIDIA/apex # BSD-3. Mixed precision and distributed training.
+    # TODO(Qiming): Check the 'flashattn_hopper' package referenced in
+    # `flagscale/models/megatron/qwen3_vl/vision_attention.py`.
+]
+
+inference = [
+    "diffusers==0.36.0",  # Apache-2.0. Diffusion models for inference & training.
+    "fastapi==0.128.0",  # MIT. Web framework for serve-mode runs.
+    "jsonlines==4.0.0",  # BSD. Used by the Aquila model only.
+    "matplotlib==3.10.8",  # Python Software Foundation License. Visualization for serving.
+    "msgpack==1.1.2",  # Apache-2.0. MessagePack serializer. Used for serving.
+    "pynvml==13.0.1",  # BSD-3. Utility for the NVIDIA Management Library (NVML).
+    "pyzmq==27.1.0",  # BSD-3. Python binding for ZeroMQ. Aka. 'zmq'.
+    "quart==0.20.0",  # MIT. ASGI web framework.
+    "ray==2.53.0",  # Apache-2.0. Framework for distributed apps.
+    "scikit_learn==1.8.0",  # BSD-3. Aka. 'sklearn'. Machine learning and data mining, for visual serving.
+    "scipy==1.17.0",  # BSD. Fundamental algorithms, used in serve mode.
+    "sglang==0.5.7",  # Apache-2.0. Serving framework for LLMs and VLMs.
+    "six==1.17.0",  # MIT. Python 2 and 3 compatibility, used in the tokenizer.
+    "vllm==0.13.0",  # Apache-2.0.
+]
+
+robotics = [
+    "draccus==0.11.5",  # MIT. Configuration management framework, for pi0, pi05.
+    "epath==0.7",  # MIT. Dynamic import of parent files, used in robotrain.
+    "Flask==3.1.2",  # BSD-3. Web framework.
+    "flask-cors==6.0.2",  # MIT. Flask extension.
+    "qwen_vl_utils==0.0.14",  # Apache-2.0. Qwen vision-language model utils - PyTorch.
+    "redis==7.1.0",  # MIT. Client library for Redis.
+    "Requests==2.32.5",  # Apache-2.0. HTTP library.
+    "sentence-transformers==5.2.0",  # Apache-2.0. Embedding, retrieval, reranking.
+    "typing-extensions==4.15.0",  # PSF-2.0. Type hints for Python 3.9+.
+    # TODO(Qiming): handle the following case.
+    # git+https://github.com/huggingface/transformers.git@fix/lerobot_openpi
+]
+
+lint = [
+    "pre-commit>=4.2.0",  # MIT. Pre-commit hooks.
+    # TODO(Qiming): Check if `black` is actually used.
+    # "black==24.4.2",  # MIT. Code formatter.
+    # TODO(Qiming): Check if `flake8` is actually used.
+    # "flake8==7.1.0",  # MIT. Modular source code checker.
+    "isort==5.13.2",  # MIT. Sorting Python imports. Referenced by pre-commit.
+    # TODO(Qiming): Determine if `pylint` should be removed.
+    # "pylint==4.0.4",  # GPL-2.0. Static checker.
+    "ruff==0.14.13",  # MIT. Linter and code formatter. Referenced by pre-commit.
+    "typos==1.42.1",  # MIT. Source code spelling checker.
+]
+
+test = [
+    "pytest==9.0.2",  # MIT. Testing framework.
+    "pytest-mock==3.15.1",  # MIT. Mock wrapper for pytest.
+    # "pytest-asyncio==1.3.0",  # Apache-2.0, usage pattern "@pytest.mark.asyncio"
+    # "pytest-cov==7.0.0",  # MIT, usage pattern "pytest --cov=foo dir/"
+    # "coverage==7.13.1",
+]
+
+util = [
+    "modelscope==1.34.0",  # Apache-2.0. ModelScope client, also used as a library in the pi0 example.
+]
+
 [tool.ruff]
 line-length = 100

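For context on the `[project.scripts]` entry above: it maps the `flagscale` console command to `flag_scale.flagscale.cli:flagscale`, and the new optional-dependency groups are installed as extras (for example, `pip install .[training,lint]`). The sketch below only illustrates the shape of such an entry point, assuming a Click-based CLI because `click` is pinned in the core dependencies; it is not the actual FlagScale implementation, and the `train` subcommand is hypothetical.

import click


@click.group()
def flagscale():
    """Illustrative console command of the kind wired up via [project.scripts]."""


@flagscale.command()
@click.argument("config", type=click.Path(exists=True))
def train(config):
    """Hypothetical subcommand; the real subcommands live in flag_scale.flagscale.cli."""
    click.echo(f"Would launch training with {config}")


if __name__ == "__main__":
    flagscale()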
@@ -83,6 +201,7 @@ select = [
     "Q003",
     "Q004",
 ]
+
 ignore = [
     "E402",  # Module level import not at top of file
     "E501",  # Line too long

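The `E402` entry in the ruff `ignore` list above (module-level import not at top of file) typically covers files that must adjust the environment or `sys.path` before importing heavy modules. The snippet below is a generic illustration of that pattern, not code taken from FlagScale; without the global ignore, each late import would need a per-line `# noqa: E402`.

import os
import sys

# Path and environment setup that must run before the heavyweight import below.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "third_party"))
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")

import torch  # E402: module-level import not at top of file; permitted by the ruff config above.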