Update start.sh
Browse files
start.sh
CHANGED
|
@@ -10,7 +10,8 @@ unzip llama_cpp.zip > /dev/null 2>&1
|
|
| 10 |
|
| 11 |
echo "✅ Booting up llama server..."
|
| 12 |
|
| 13 |
-
wget -O model.gguf https://huggingface.co/lmstudio-community/Qwen3-4B-
|
|
|
|
| 14 |
# wget -O model.gguf https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-F16.gguf?download=true > /dev/null 2>&1
|
| 15 |
./build/bin/llama-server -m model.gguf --port 8000 --host 0.0.0.0 --threads 2 --ctx-size 4096 --mlock --jinja
|
| 16 |
|
|
|
|
echo "✅ Booting up llama server..."

# Fetch the model weights. The URL must be quoted: it contains '?', which is a
# shell glob character when unquoted. -q keeps wget silent on success, but —
# unlike the old '> /dev/null 2>&1' — a failed download now aborts the script
# instead of silently starting llama-server against a missing/corrupt file.
wget -q -O model.gguf \
  "https://huggingface.co/lmstudio-community/Qwen3-4B-Thinking-2507-GGUF/resolve/main/Qwen3-4B-Thinking-2507-Q6_K.gguf?download=true" \
  || { echo "❌ model download failed" >&2; exit 1; }

# Alternative models (swap the wget line above to use one of these):
# wget -q -O model.gguf "https://huggingface.co/lmstudio-community/Qwen3-4B-Instruct-2507-GGUF/resolve/main/Qwen3-4B-Instruct-2507-Q6_K.gguf?download=true"
# wget -q -O model.gguf "https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-F16.gguf?download=true"

# Serve the model over HTTP on all interfaces.
# --mlock pins the weights in RAM; --jinja enables the chat-template engine.
./build/bin/llama-server -m model.gguf --port 8000 --host 0.0.0.0 --threads 2 --ctx-size 4096 --mlock --jinja