Commit 5b6a1f9 · verified · committed by cs2764 · 1 parent: eea72a5

Upload MLX converted model with quantization settings

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. README.md +41 -0
  2. chat_template.jinja +46 -0
  3. config.json +0 -0
  4. configuration_deepseek.py +212 -0
  5. generation_config.json +4 -0
  6. model-00001-of-00096.safetensors +3 -0
  7. model-00002-of-00096.safetensors +3 -0
  8. model-00003-of-00096.safetensors +3 -0
  9. model-00004-of-00096.safetensors +3 -0
  10. model-00005-of-00096.safetensors +3 -0
  11. model-00006-of-00096.safetensors +3 -0
  12. model-00007-of-00096.safetensors +3 -0
  13. model-00008-of-00096.safetensors +3 -0
  14. model-00009-of-00096.safetensors +3 -0
  15. model-00010-of-00096.safetensors +3 -0
  16. model-00011-of-00096.safetensors +3 -0
  17. model-00012-of-00096.safetensors +3 -0
  18. model-00013-of-00096.safetensors +3 -0
  19. model-00014-of-00096.safetensors +3 -0
  20. model-00015-of-00096.safetensors +3 -0
  21. model-00016-of-00096.safetensors +3 -0
  22. model-00017-of-00096.safetensors +3 -0
  23. model-00018-of-00096.safetensors +3 -0
  24. model-00019-of-00096.safetensors +3 -0
  25. model-00020-of-00096.safetensors +3 -0
  26. model-00021-of-00096.safetensors +3 -0
  27. model-00022-of-00096.safetensors +3 -0
  28. model-00023-of-00096.safetensors +3 -0
  29. model-00024-of-00096.safetensors +3 -0
  30. model-00025-of-00096.safetensors +3 -0
  31. model-00026-of-00096.safetensors +3 -0
  32. model-00027-of-00096.safetensors +3 -0
  33. model-00028-of-00096.safetensors +3 -0
  34. model-00029-of-00096.safetensors +3 -0
  35. model-00030-of-00096.safetensors +3 -0
  36. model-00031-of-00096.safetensors +3 -0
  37. model-00032-of-00096.safetensors +3 -0
  38. model-00033-of-00096.safetensors +3 -0
  39. model-00034-of-00096.safetensors +3 -0
  40. model-00035-of-00096.safetensors +3 -0
  41. model-00036-of-00096.safetensors +3 -0
  42. model-00037-of-00096.safetensors +3 -0
  43. model-00038-of-00096.safetensors +3 -0
  44. model-00039-of-00096.safetensors +3 -0
  45. model-00040-of-00096.safetensors +3 -0
  46. model-00041-of-00096.safetensors +3 -0
  47. model-00042-of-00096.safetensors +3 -0
  48. model-00043-of-00096.safetensors +3 -0
  49. model-00044-of-00096.safetensors +3 -0
  50. model-00045-of-00096.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,41 @@
+ ---
+ license: other
+ license_name: modified-mit
+ library_name: transformers
+ base_model: moonshotai/Kimi-K2-Instruct-0905
+ tags:
+ - mlx
+ ---
+
+ # cs2764/Kimi-K2-Instruct-0905-mlx-mixed_3_4
+
+ The model [cs2764/Kimi-K2-Instruct-0905-mlx-mixed_3_4](https://huggingface.co/cs2764/Kimi-K2-Instruct-0905-mlx-mixed_3_4) was converted to MLX format from [moonshotai/Kimi-K2-Instruct-0905](https://huggingface.co/moonshotai/Kimi-K2-Instruct-0905) using mlx-lm version **0.28.0**.
+
+ ## Quantization Details
+
+ This model was converted with the following quantization settings:
+
+ - **Quantization strategy**: mixed_3_4 (mixed precision)
+ - **Average bits per weight**: 3.663
+
+ ## Use with mlx
+
+ ```bash
+ pip install mlx-lm
+ ```
+
+ ```python
+ from mlx_lm import load, generate
+
+ model, tokenizer = load("cs2764/Kimi-K2-Instruct-0905-mlx-mixed_3_4")
+
+ prompt = "hello"
+
+ if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
+     messages = [{"role": "user", "content": prompt}]
+     prompt = tokenizer.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
+ ```
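The Quantization Details above name the recipe but not the command that produced it. For readers who want to reproduce a conversion like this one, a plausible sketch follows, assuming mlx-lm 0.28.0 exposes the `mixed_3_4` recipe through `mlx_lm.convert`'s `--quant-predicate` flag (the flag value is inferred from the stated settings, not taken from this commit):

```bash
# Sketch only: mixed 3/4-bit quantization averaging ~3.663 bits/weight,
# matching the settings stated in the README. Assumes the mixed_3_4
# recipe is available as a --quant-predicate choice in this mlx-lm version.
pip install "mlx-lm==0.28.0"

mlx_lm.convert \
    --hf-path moonshotai/Kimi-K2-Instruct-0905 \
    --mlx-path Kimi-K2-Instruct-0905-mlx-mixed_3_4 \
    -q \
    --quant-predicate mixed_3_4
```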
chat_template.jinja ADDED
@@ -0,0 +1,46 @@
+ {%- if tools -%}
+ <|im_system|>tool_declare<|im_middle|>
+ # Tools
+ {{ tools | tojson }}<|im_end|>
+ {%- endif -%}
+ {%- for message in messages -%}
+ {%- if loop.first and messages[0]['role'] != 'system' -%}
+ <|im_system|>system<|im_middle|>You are Kimi, an AI assistant created by Moonshot AI.<|im_end|>
+ {%- endif -%}
+
+ {%- set role_name = message.get('name') or message['role'] -%}
+ {%- if message['role'] == 'user' -%}
+ <|im_user|>{{role_name}}<|im_middle|>
+ {%- elif message['role'] == 'assistant' -%}
+ <|im_assistant|>{{role_name}}<|im_middle|>
+ {%- else -%}
+ <|im_system|>{{role_name}}<|im_middle|>
+ {% endif %}
+
+ {%- if message['role'] == 'assistant' and message.get('tool_calls') -%}
+ {%- if message['content'] -%}{{ message['content'] }}{%- endif -%}
+ <|tool_calls_section_begin|>
+ {%- for tool_call in message['tool_calls'] -%}
+ {%- set formatted_id = tool_call['id'] -%}
+ <|tool_call_begin|>{{ formatted_id }}<|tool_call_argument_begin|>{% if tool_call['function']['arguments'] is string %}{{ tool_call['function']['arguments'] }}{% else %}{{ tool_call['function']['arguments'] | tojson }}{% endif %}<|tool_call_end|>
+ {%- endfor -%}
+ <|tool_calls_section_end|>
+ {%- elif message['role'] == 'tool' -%}
+ ## Return of {{ message.tool_call_id }}
+ {{ message['content'] }}
+ {%- elif message['content'] is string -%}
+ {{ message['content'] }}
+ {%- elif message['content'] is not none -%}
+ {% for content in message['content'] -%}
+ {% if content['type'] == 'image' or 'image' in content or 'image_url' in content -%}
+ <|media_start|>image<|media_content|><|media_pad|><|media_end|>
+ {% else -%}
+ {{ content['text'] }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ <|im_end|>
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ <|im_assistant|>assistant<|im_middle|>
+ {%- endif -%}
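To see what this template actually emits, here is a minimal sketch that renders a tool-enabled conversation to a string. It assumes the tokenizer bundled with this repo loads through transformers with `trust_remote_code=True`; the `get_weather` tool schema is purely illustrative:

```python
# Minimal sketch: render one user turn plus a tool declaration through
# the template above and print the raw prompt string, so the
# <|im_system|>/<|im_user|>/<|im_middle|> markers are visible.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "cs2764/Kimi-K2-Instruct-0905-mlx-mixed_3_4", trust_remote_code=True
)

# Illustrative tool schema (not part of the repo).
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

messages = [{"role": "user", "content": "What's the weather in Paris?"}]

prompt = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Expected shape, per the template: a tool_declare system block, the
# default Kimi system prompt, the user turn, then the assistant header.
```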
config.json ADDED
The diff for this file is too large to render. See raw diff
 
configuration_deepseek.py ADDED
@@ -0,0 +1,212 @@
+ # Copied from https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/main/configuration_deepseek.py
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+ class DeepseekV3Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a configuration similar to that of DeepSeek-V3.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 129280):
+             Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
+             `input_ids` passed when calling [`DeepseekV3Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         moe_intermediate_size (`int`, *optional*, defaults to 1407):
+             Dimension of the MoE representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_nextn_predict_layers (`int`, *optional*, defaults to 1):
+             Number of next-n prediction layers in the DeepseekV3 model.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         n_shared_experts (`int`, *optional*, defaults to None):
+             Number of shared experts; None means a dense model.
+         n_routed_experts (`int`, *optional*, defaults to None):
+             Number of routed experts; None means a dense model.
+         routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+             Scaling factor for routed experts.
+         topk_method (`str`, *optional*, defaults to `greedy`):
+             Top-k method used in the routing gate.
+         n_group (`int`, *optional*, defaults to None):
+             Number of groups for routed experts.
+         topk_group (`int`, *optional*, defaults to None):
+             Number of selected groups for each token (the experts selected for a token are restricted to `topk_group` groups).
+         num_experts_per_tok (`int`, *optional*, defaults to None):
+             Number of selected experts; None means a dense model.
+         moe_layer_freq (`int`, *optional*, defaults to 1):
+             The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+         first_k_dense_replace (`int`, *optional*, defaults to 0):
+             Number of dense layers at the start of the network (embed -> k dense layers -> moe -> ... -> moe -> lm_head).
+         norm_topk_prob (`bool`, *optional*, defaults to False):
+             Whether to normalize the weights of the routed experts.
+         scoring_func (`str`, *optional*, defaults to 'softmax'):
+             Method of computing expert weights.
+         aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+             Auxiliary loss weight coefficient.
+         seq_aux (`bool`, *optional*, defaults to True):
+             Whether to compute the auxiliary loss for each individual sample.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by mean-pooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the RMS normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/value attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the word embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import DeepseekV3Model, DeepseekV3Config
+
+     >>> # Initializing a Deepseek-V3 style configuration
+     >>> configuration = DeepseekV3Config()
+
+     >>> # Initializing a model from that configuration
+     >>> model = DeepseekV3Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "deepseek_v3"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=129280,
+         hidden_size=7168,
+         intermediate_size=18432,
+         moe_intermediate_size=2048,
+         num_hidden_layers=61,
+         num_nextn_predict_layers=1,
+         num_attention_heads=128,
+         num_key_value_heads=128,
+         n_shared_experts=1,
+         n_routed_experts=256,
+         ep_size=1,
+         routed_scaling_factor=2.5,
+         kv_lora_rank=512,
+         q_lora_rank=1536,
+         qk_rope_head_dim=64,
+         v_head_dim=128,
+         qk_nope_head_dim=128,
+         topk_method='noaux_tc',
+         n_group=8,
+         topk_group=4,
+         num_experts_per_tok=8,
+         moe_layer_freq=1,
+         first_k_dense_replace=3,
+         norm_topk_prob=True,
+         scoring_func='sigmoid',
+         aux_loss_alpha=0.001,
+         seq_aux=True,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=0,
+         eos_token_id=1,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.moe_intermediate_size = moe_intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_nextn_predict_layers = num_nextn_predict_layers
+         self.num_attention_heads = num_attention_heads
+         self.n_shared_experts = n_shared_experts
+         self.n_routed_experts = n_routed_experts
+         self.ep_size = ep_size
+         self.routed_scaling_factor = routed_scaling_factor
+         self.kv_lora_rank = kv_lora_rank
+         self.q_lora_rank = q_lora_rank
+         self.qk_rope_head_dim = qk_rope_head_dim
+         self.v_head_dim = v_head_dim
+         self.qk_nope_head_dim = qk_nope_head_dim
+         self.topk_method = topk_method
+         self.n_group = n_group
+         self.topk_group = topk_group
+         self.num_experts_per_tok = num_experts_per_tok
+         self.moe_layer_freq = moe_layer_freq
+         self.first_k_dense_replace = first_k_dense_replace
+         self.norm_topk_prob = norm_topk_prob
+         self.scoring_func = scoring_func
+         self.aux_loss_alpha = aux_loss_alpha
+         self.seq_aux = seq_aux
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
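For orientation, a small sketch of how the bundled config class above can be exercised directly; the printed values are the `__init__` defaults shown in this file, which may differ from this repo's own config.json:

```python
# Minimal sketch: instantiate the bundled config and inspect the MoE
# routing fields defined above. Run from the repo directory so the
# local module resolves. Printed values are the __init__ defaults.
from configuration_deepseek import DeepseekV3Config

config = DeepseekV3Config()
print(config.model_type)             # deepseek_v3
print(config.n_routed_experts)       # 256 routed experts
print(config.num_experts_per_tok)    # 8 experts selected per token
print(config.first_k_dense_replace)  # first 3 layers stay dense
print(config.topk_method)            # noaux_tc grouped routing
```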
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_length": 131072,
+   "eos_token_id": 163586
+ }
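These two defaults (a 131072-token maximum length and EOS token id 163586) are picked up automatically at load time. A quick sketch of inspecting them with transformers, assuming a standard install:

```python
# Sketch: read the generation defaults shipped in this file.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained(
    "cs2764/Kimi-K2-Instruct-0905-mlx-mixed_3_4"
)
print(gen_config.max_length)    # 131072
print(gen_config.eos_token_id)  # 163586
```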
model-00001-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87f3ef20955f70c7cb78d9f42001f5ee5588ab5d8f77332bcbd529bde26ca619
+ size 3258501003
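The weight shards appear in this diff as Git LFS pointer files: `version` names the pointer spec, `oid sha256:` is the hash of the real payload, and `size` is its byte count. After downloading the actual shards, each can be checked against its pointer; a minimal sketch (the local path is a placeholder):

```python
# Sketch: verify a downloaded shard against the sha256 recorded in its
# LFS pointer. The path below is hypothetical; point it at wherever the
# real (multi-GB) shard was downloaded.
import hashlib

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Stream in 1 MiB chunks so multi-GB shards don't exhaust memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "87f3ef20955f70c7cb78d9f42001f5ee5588ab5d8f77332bcbd529bde26ca619"
assert sha256_of("model-00001-of-00096.safetensors") == expected
```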
model-00002-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d76f59470f0ae35d2b1252265f1fe265e6c06c22b37f1effccf4dc8dd83b65f
+ size 5284823576
model-00003-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e48d8996b46fed83432a1312cea21ae3245bf2eeec79721a78648223ee3a058
+ size 5355710527
model-00004-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6048f10ff8a065ae43556a9eeadee4cad9dba51cd4532df679dcd4a65487f84
+ size 5355709979
model-00005-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4534cb7e3d3fb97f5ca549357e10358cc0a195c09b2f212d86f0532c0907b2a8
+ size 2818572949
model-00006-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8aa6fed623dd6ad6bee32c03d49ad3383c0a002bf37ceb1d40dcd7d652bf810d
+ size 5355710021
model-00007-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f76feb9e0438473428484d4852e6d580bac41bcc1e3a178759b4e09a34804b1c
+ size 2818572951
model-00008-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2708b80e5731bf73f6a43c3f27eeb5f9d02d3854722640740eeb2e4fb8e33f80
+ size 5355710021
model-00009-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de3d1653f2bce5116d11303afd5ed7f4215b4fe671a6ed86be83325c89757b7b
+ size 2818572949
model-00010-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca0a0dcfef0df847a5fe4dfbf89433f34baee80fd06de2b0497f940eeb651d96
+ size 5355710021
model-00011-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29d8a7490b05baffc7f597b2fd8f858f2c56729f57616309b24606dd7ba420c7
+ size 2818572949
model-00012-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfbed0c9786885ac23c9a05373c2cc39f95c0541b508fcc59875ec30a014cbc5
+ size 5355710021
model-00013-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f766c8af0e6b77c8319d1eb3a52a4725f354420f5f322d664bc5f571b87b770
+ size 5353875409
model-00014-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:871aced1cb37bbd170db0498ad522f7e97e3a25958ed1612011dec8ff0a92735
+ size 4932502294
model-00015-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa5df5c96feca014af053c85678008db5785079d7b9fd5dd94470903ef93b83d
+ size 5001553645
model-00016-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:666e82502ef8d09c4080c2b4ba14bcaaa96f3cc763dd2991c77170a633d1728f
+ size 5284823574
model-00017-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f1ade3ecf83ae8e4726e5366934b8f40cc85728bb53590dd7d6771cd5d918f0
+ size 5355710548
model-00018-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c2aef948115b3506c71c0ea3b94ca860b2cc32fa332b812e43a9c997b4425a9
+ size 5001553741
model-00019-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80f46478acbfc26149990b39e0ab462e93ed42db163e5d8f3e431255e3feb79a
+ size 5001553669
model-00020-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5d71bbd1688d613830861362a0689854cee1e662f95fcc6f9f860b5a97405e4
+ size 4932502300
model-00021-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c76fdf996750c7733fb6964c6561467b81b499c18faad8b7386c5b913408c8c
+ size 5355709991
model-00022-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fea1443b6948ea60cd39f48345e2498a26e59009cbdc33cc09ad35b3cd29441e
+ size 5353875461
model-00023-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6adee5935d7d64b9c6591f27b3d150585e67d4991bd22e9883abe07da05cfc09
+ size 4932502296
model-00024-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72e713f1be191959b01b240c7b20927a38342e48e351044e35a75b8a0d6cf1ee
+ size 5001553743
model-00025-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cd85be9e004e01729ec4f7509ea9234646ddeba84100785d3c65f6a808a5807
+ size 5284823580
model-00026-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0c18dd455c3b2e363fa676ee19cd2e56d56e0a6dddd132641d5df2540fc5a30
+ size 5355710561
model-00027-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22207cd722c3ca17004306803ba75a5921dd8a808467ca33343685396efa5dbb
+ size 5001553721
model-00028-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87d018a5d66aff946ea5199ab70706ec0b2a45580b17f86d00d397ef569e8f7e
+ size 5001553679
model-00029-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:039a471eb296a8f7563f420374c791bee2a4dc5250200d1372b1856eebd01ca9
+ size 4932502302
model-00030-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b788180d969a6f919bd12ba1cc0f649496b7f3c6234dc01c2e28da5d108b558
+ size 5355710055
model-00031-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:560eba1b85d42cfeaa28b6515a2ecfbf6aed9c8556f8f8c4cc9d221bcc8f3947
+ size 5353875469
model-00032-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19b47f227df1f8864d376bf328130c29f850e470c90825597d99a476f567c267
+ size 4932502296
model-00033-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b59d61c5c3ae4b9c809d0ad2137332fde9dba9bcd46825d08991ffc958fdcbfd
+ size 5001553733
model-00034-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ff666cdff62d81b01c8de5f0e1bdf4da97c7693e45bdb46ef833d0f0baef73e
+ size 5284823580
model-00035-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ca51ef4ea507a99a5a3a8d2c6d4b5504c6909341a7299009de43e06844bd033
+ size 5355710563
model-00036-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf20b331301b671baac9b8fba26377bcb73abd6d41d21bc5ff1fe42b8d743530
+ size 5001553767
model-00037-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93746eee7242c802332bd58c8c6fc1db9b62b296ea5450901f73f58fdda9db79
+ size 5001553707
model-00038-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ede59b7aaa9a3f602764a344677689c86c950c4b43666a5190a32d3316370675
+ size 4932502294
model-00039-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e7c25c527743764ac9f8d6a3f4749d4041c1f4beb009481d984feb92234eb59
+ size 5355709967
model-00040-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:745e0ca0e1a4893c2039ef34cc01b6a31ca93aea65c6b860ca6efe1a4de594f8
+ size 5353875471
model-00041-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1b6d4d796546a75f789bb562fea4171ea24a54ff156d61dcaf248a2fe1ec308
+ size 4932502300
model-00042-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ad642b27965f167e48b79417e2f57e919b299b6f672e49869c7682a04ce4ae1
+ size 5001553767
model-00043-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f69e8e8f7457c4818a7a58215e61825161ca5321e6332b6e9bee4924f06dc4f
+ size 5284823580
model-00044-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f57d9695857769296a31c58b574d430a92bb16b727a934e2f4079badc1842d3
+ size 5355710563
model-00045-of-00096.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6bb7ab1137126edec962838975a25f3d32e52d4f8149fa6736d9d3a53e8608c
+ size 5001553707