{ "architectures": [ "Sam3Model" ], "detr_decoder_config": { "box_rpb_mode": "log", "dropout": 0.1, "hidden_act": "relu", "hidden_dropout": 0.0, "hidden_size": 16, "initializer_range": 0.02, "intermediate_size": 32, "layer_norm_eps": 1e-06, "model_type": "sam3_detr_decoder", "num_attention_heads": 2, "num_layers": 6, "num_queries": 200, "use_presence_token": true }, "detr_encoder_config": { "dropout": 0.1, "hidden_act": "relu", "hidden_dropout": 0.0, "hidden_size": 16, "initializer_range": 0.02, "intermediate_size": 32, "layer_norm_eps": 1e-06, "model_type": "sam3_detr_encoder", "num_attention_heads": 2, "num_layers": 6 }, "dtype": "float32", "geometry_encoder_config": { "dropout": 0.1, "hidden_act": "relu", "hidden_dropout": 0.0, "hidden_size": 16, "initializer_range": 0.02, "intermediate_size": 32, "layer_norm_eps": 1e-06, "model_type": "sam3_geometry_encoder", "num_attention_heads": 2, "num_layers": 3, "roi_size": 7 }, "initializer_range": 0.02, "mask_decoder_config": { "dropout": 0.0, "hidden_size": 16, "initializer_range": 0.02, "intermediate_size": 32, "layer_norm_eps": 1e-06, "model_type": "sam3_mask_decoder", "num_attention_heads": 2, "num_upsampling_stages": 3 }, "model_type": "sam3", "text_config": { "attention_dropout": 0.0, "hidden_act": "gelu", "hidden_size": 16, "initializer_factor": 1.0, "initializer_range": 0.02, "intermediate_size": 32, "layer_norm_eps": 1e-05, "max_position_embeddings": 32, "model_type": "clip_text_model", "num_attention_heads": 2, "num_hidden_layers": 2, "projection_dim": 16, "vocab_size": 49408 }, "transformers_version": "5.0.0.dev0", "vision_config": { "backbone_config": { "_name_or_path": "", "add_cross_attention": false, "architectures": null, "attention_dropout": 0.0, "bos_token_id": null, "chunk_size_feed_forward": 0, "cross_attention_hidden_size": null, "decoder_start_token_id": null, "dtype": null, "eos_token_id": null, "finetuning_task": null, "fpn_hidden_size": 16, "global_attn_indexes": [ 1, 3, 5, 7 ], "hidden_act": "gelu", "hidden_dropout": 0.0, "hidden_size": 16, "id2label": { "0": "LABEL_0", "1": "LABEL_1" }, "image_size": 1008, "initializer_range": 0.02, "intermediate_size": 32, "is_decoder": false, "is_encoder_decoder": false, "label2id": { "LABEL_0": 0, "LABEL_1": 1 }, "layer_norm_eps": 1e-06, "layer_scale_init_value": null, "model_type": "sam3_vit_model", "num_attention_heads": 2, "num_channels": 3, "num_hidden_layers": 8, "output_attentions": false, "output_hidden_states": false, "pad_token_id": null, "patch_size": 14, "prefix": null, "pretrain_image_size": 336, "problem_type": null, "qkv_bias": true, "return_dict": true, "rope_theta": 10000.0, "sep_token_id": null, "task_specific_params": null, "tie_encoder_decoder": false, "tie_word_embeddings": true, "tokenizer_class": null, "window_size": 24 }, "backbone_feature_sizes": [ [ 288, 288 ], [ 144, 144 ], [ 72, 72 ] ], "fpn_hidden_size": 16, "fpn_kernel_size": 2, "fpn_stride": 2, "hidden_act": "gelu", "initializer_range": 0.02, "layer_norm_eps": 1e-06, "model_type": "sam3_vision_model", "num_feature_levels": 3, "scale_factors": [ 4.0, 2.0, 1.0, 0.5 ] } }