Commit
·
230ceb3
1
Parent(s):
0b80443
Add models to eval queue
Browse files
- HuggingFaceTB/SmolLM3-3B-Base_eval_request_False_bfloat16_Original.json +1 -0
- HuggingFaceTB/SmolLM3-3B_eval_request_False_bfloat16_Original.json +1 -0
- Qwen/Qwen3-4B-Base_eval_request_False_bfloat16_Original.json +1 -0
- Qwen/Qwen3-4B_eval_request_False_bfloat16_Original.json +1 -0
- Qwen/Qwen3-8B-Base_eval_request_False_bfloat16_Original.json +1 -0
- Qwen/Qwen3-8B_eval_request_False_bfloat16_Original.json +1 -0
- allenai/OLMo-2-1124-7B-Instruct_eval_request_False_bfloat16_Original.json +1 -0
- allenai/OLMo-2-1124-7B_eval_request_False_bfloat16_Original.json +1 -0
- google/gemma-3-4b-it_eval_request_False_bfloat16_Original.json +1 -0
- google/gemma-3-4b-pt_eval_request_False_bfloat16_Original.json +1 -0
- scripts/generate.py +1 -1
- scripts/models.csv +11 -7
HuggingFaceTB/SmolLM3-3B-Base_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "HuggingFaceTB/SmolLM3-3B-Base", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 3.08, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
HuggingFaceTB/SmolLM3-3B_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "HuggingFaceTB/SmolLM3-3B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "instruction-tuned", "likes": 0, "params": 3.08, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
Qwen/Qwen3-4B-Base_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "Qwen/Qwen3-4B-Base", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 4.02, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
Qwen/Qwen3-4B_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "Qwen/Qwen3-4B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "instruction-tuned", "likes": 0, "params": 4.02, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
Qwen/Qwen3-8B-Base_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "Qwen/Qwen3-8B-Base", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 8.19, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
Qwen/Qwen3-8B_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "Qwen/Qwen3-8B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "instruction-tuned", "likes": 0, "params": 8.19, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
allenai/OLMo-2-1124-7B-Instruct_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "allenai/OLMo-2-1124-7B-Instruct", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "instruction-tuned", "likes": 0, "params": 7.3, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
allenai/OLMo-2-1124-7B_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "allenai/OLMo-2-1124-7B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 7.3, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
google/gemma-3-4b-it_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "google/gemma-3-4b-it", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "instruction-tuned", "likes": 0, "params": 4.3, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
google/gemma-3-4b-pt_eval_request_False_bfloat16_Original.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"model": "google/gemma-3-4b-pt", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "PENDING", "submitted_time": "2025-07-25T13:14:02Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 4.3, "license": "custom", "architecture": "", "sender": "mariagrandury"}
|
scripts/generate.py
CHANGED
|
@@ -40,7 +40,7 @@ def generate_requests(selection: str):
|
|
| 40 |
elif selection == "instructed":
|
| 41 |
df = df[df["model_type"] == "instruction-tuned"]
|
| 42 |
elif selection == "todo":
|
| 43 |
-
df = df[df["status"] == "
|
| 44 |
|
| 45 |
for _, row in df.iterrows():
|
| 46 |
status, model_id, precision, model_type, params = row
|
|
|
|
| 40 |
elif selection == "instructed":
|
| 41 |
df = df[df["model_type"] == "instruction-tuned"]
|
| 42 |
elif selection == "todo":
|
| 43 |
+
df = df[df["status"] == "To do"]
|
| 44 |
|
| 45 |
for _, row in df.iterrows():
|
| 46 |
status, model_id, precision, model_type, params = row
|
scripts/models.csv
CHANGED
|
@@ -30,6 +30,8 @@ Done,utter-project/EuroLLM-9B,bfloat16,pretrained,"February 10, 2025",9.15
|
|
| 30 |
Done,utter-project/EuroLLM-9B-Instruct,bfloat16,instruction-tuned,"February 10, 2025",9.15
|
| 31 |
Not started,HuggingFaceTB/SmolLM2-1.7B,bfloat16,pretrained,,1.71
|
| 32 |
Test,HuggingFaceTB/SmolLM2-1.7B-Instruct,bfloat16,instruction-tuned,,1.71
|
|
|
|
|
|
|
| 33 |
Done,CohereForAI/aya-expanse-8b,float16,pretrained,"December 3, 2024",8.03
|
| 34 |
Done,Qwen/Qwen2.5-1.5B,bfloat16,pretrained,,1.54
|
| 35 |
Done,Qwen/Qwen2.5-1.5B-Instruct,bfloat16,instruction-tuned,,1.54
|
|
@@ -67,16 +69,16 @@ Not started,Qwen/Qwen3-32B-AWQ,float16,instruction-tuned,,5.73
|
|
| 67 |
,unsloth/DeepSeek-R1-Distill-Qwen-14B-bnb-4bit,bfloat16,instruction-tuned,,8.37
|
| 68 |
Test,google/gemma-3-1b-it,bfloat16,instruction-tuned,,1
|
| 69 |
Not started,google/gemma-3-1b-pt,bfloat16,pretrained,,1
|
| 70 |
-
|
| 71 |
-
|
| 72 |
Not started,Qwen/Qwen3-0.6B,bfloat16,instruction-tuned,,0.753
|
| 73 |
Not started,Qwen/Qwen3-0.6B-Base,bfloat16,instruction-tuned,,0.753
|
| 74 |
Test,Qwen/Qwen3-1.7B,bfloat16,instruction-tuned,,2.03
|
| 75 |
Not started,Qwen/Qwen3-1.7B-Base,bfloat16,instruction-tuned,,2.03
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
Done,bertin-project/bertin-gpt-j-6B,float32,pretrained,"December 11, 2024",6.06
|
| 81 |
,Qwen/Qwen2-7B,bfloat16,pretrained,"December 11, 2024",7
|
| 82 |
In progress,tiiuae/falcon-7b,bfloat16,pretrained,,7
|
|
@@ -117,4 +119,6 @@ Not started,TheBloke/Llama-2-13B-chat-GPTQ,float16,instruction-tuned,,2.03
|
|
| 117 |
,TheBloke/Llama-2-70B-GPTQ,float16,,,9.1
|
| 118 |
,TheBloke/Mixtral-8x7B-v0.1-GPTQ,bfloat16,,,6.09
|
| 119 |
,HiTZ/latxa-70b-v1.2,bfloat16,pretrained,"November 27, 2024",70
|
| 120 |
-
,sandbox-ai/Llama-3.1-Tango-70b,float16,instruction-tuned,,70
|
|
|
|
|
|
|
|
|
| 30 |
Done,utter-project/EuroLLM-9B-Instruct,bfloat16,instruction-tuned,"February 10, 2025",9.15
|
| 31 |
Not started,HuggingFaceTB/SmolLM2-1.7B,bfloat16,pretrained,,1.71
|
| 32 |
Test,HuggingFaceTB/SmolLM2-1.7B-Instruct,bfloat16,instruction-tuned,,1.71
|
| 33 |
+
To do,HuggingFaceTB/SmolLM3-3B,bfloat16,instruction-tuned,,3.08
|
| 34 |
+
To do,HuggingFaceTB/SmolLM3-3B-Base,bfloat16,pretrained,,3.08
|
| 35 |
Done,CohereForAI/aya-expanse-8b,float16,pretrained,"December 3, 2024",8.03
|
| 36 |
Done,Qwen/Qwen2.5-1.5B,bfloat16,pretrained,,1.54
|
| 37 |
Done,Qwen/Qwen2.5-1.5B-Instruct,bfloat16,instruction-tuned,,1.54
|
|
|
|
| 69 |
,unsloth/DeepSeek-R1-Distill-Qwen-14B-bnb-4bit,bfloat16,instruction-tuned,,8.37
|
| 70 |
Test,google/gemma-3-1b-it,bfloat16,instruction-tuned,,1
|
| 71 |
Not started,google/gemma-3-1b-pt,bfloat16,pretrained,,1
|
| 72 |
+
To do,google/gemma-3-4b-it,bfloat16,instruction-tuned,,4.3
|
| 73 |
+
To do,google/gemma-3-4b-pt,bfloat16,pretrained,,4.3
|
| 74 |
Not started,Qwen/Qwen3-0.6B,bfloat16,instruction-tuned,,0.753
|
| 75 |
Not started,Qwen/Qwen3-0.6B-Base,bfloat16,instruction-tuned,,0.753
|
| 76 |
Test,Qwen/Qwen3-1.7B,bfloat16,instruction-tuned,,2.03
|
| 77 |
Not started,Qwen/Qwen3-1.7B-Base,bfloat16,instruction-tuned,,2.03
|
| 78 |
+
To do,Qwen/Qwen3-4B,bfloat16,instruction-tuned,,4.02
|
| 79 |
+
To do,Qwen/Qwen3-4B-Base,bfloat16,pretrained,,4.02
|
| 80 |
+
To do,Qwen/Qwen3-8B,bfloat16,instruction-tuned,,8.19
|
| 81 |
+
To do,Qwen/Qwen3-8B-Base,bfloat16,pretrained,,8.19
|
| 82 |
Done,bertin-project/bertin-gpt-j-6B,float32,pretrained,"December 11, 2024",6.06
|
| 83 |
,Qwen/Qwen2-7B,bfloat16,pretrained,"December 11, 2024",7
|
| 84 |
In progress,tiiuae/falcon-7b,bfloat16,pretrained,,7
|
|
|
|
| 119 |
,TheBloke/Llama-2-70B-GPTQ,float16,,,9.1
|
| 120 |
,TheBloke/Mixtral-8x7B-v0.1-GPTQ,bfloat16,,,6.09
|
| 121 |
,HiTZ/latxa-70b-v1.2,bfloat16,pretrained,"November 27, 2024",70
|
| 122 |
+
,sandbox-ai/Llama-3.1-Tango-70b,float16,instruction-tuned,,70
|
| 123 |
+
To do,allenai/OLMo-2-1124-7B,bfloat16,pretrained,,7.3
|
| 124 |
+
To do,allenai/OLMo-2-1124-7B-Instruct,bfloat16,instruction-tuned,,7.3
|