wheel
streamlit
ddgs
gradio>=5.0.0
torch>=2.8.0
transformers>=4.53.3
spaces
sentencepiece
accelerate
vllm>=0.6.0
# llm-compressor is optional: only needed for quantizing models, not for loading pre-quantized AWQ checkpoints.
# vLLM has native AWQ support built in (see the usage sketch at the end of this file).
# llmcompressor>=0.1.0  # commented out - not needed for loading pre-quantized models
autoawq
flash-attn>=2.5.0
timm
compressed-tensors
bitsandbytes
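
# Usage sketch (comment only, not a dependency): loading a pre-quantized AWQ checkpoint
# directly with vLLM's built-in AWQ support, which is why llm-compressor stays commented out.
# The model ID below is a placeholder assumption, not one pinned by this project:
#
#   from vllm import LLM
#
#   llm = LLM(model="<org>/<model>-AWQ", quantization="awq")
#   print(llm.generate("Hello")[0].outputs[0].text)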