kenji-endo-0.1 / config.json
{
  "_checkpoint_path": "../checkpoints_baby_BUONO/last-v2.ckpt",
  "_matformer_config_dict": {
    "_checkpoint_path": "../checkpoints_baby_BUONO/last-v2.ckpt",
    "_model_class": "Autoregressive_Model",
    "_tokenizer_name": "sapienzanlp/Minerva-350M-base-v1.0",
    "attention_type": [],
    "bias": false,
    "block_size_for_attention": 128,
    "bos_token_id": 1,
    "compile_flexattn": false,
    "custom_layers": {},
    "decoder": null,
    "default_layer": {
      "attn_impl": "flash",
      "ffn_activation": "swiglu",
      "hooks": {},
      "normalization": "rmsnorm",
      "normalization_position": "post",
      "positional_encoding": "alibi",
      "sliding_window_size": null
    },
    "encoder": null,
    "entropy": null,
    "eos_token_id": 2,
    "ffn_factor": 3.0,
    "has_entropy_model": null,
    "has_text_autoencoder": null,
    "hidden_size": 768,
    "is_causal": true,
    "mask_token_id": null,
    "masked_substitution_rate": null,
    "max_position_embeddings": 1024,
    "model_class": null,
    "name": "BabyLM",
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "num_labels": 2,
    "pad_token_id": 0,
    "rms_norm_eps": 1e-06,
    "rope_theta": 10000.0,
    "sliding_type": null,
    "tie_word_embeddings": false,
    "training_objective": "autoregressive",
    "vocab_size": 32777
  },
  "_model_class": "Autoregressive_Model",
  "_tokenizer_name": "sapienzanlp/Minerva-350M-base-v1.0",
  "attention_type": [],
  "auto_map": {
    "AutoConfig": "modeling_matformer.MatformerConfig",
    "AutoModel": "modeling_matformer.MatformerModel",
    "AutoModelForCausalLM": "modeling_matformer.MatformerForCausalLM"
  },
  "bias": false,
  "block_size_for_attention": 128,
  "bos_token_id": 1,
  "compile_flexattn": false,
  "custom_layers": {},
  "decoder": null,
  "default_layer": {
    "attn_impl": "flash",
    "ffn_activation": "swiglu",
    "hooks": {},
    "normalization": "rmsnorm",
    "normalization_position": "post",
    "positional_encoding": "alibi",
    "sliding_window_size": null
  },
  "encoder": null,
  "entropy": null,
  "eos_token_id": 2,
  "ffn_factor": 3.0,
  "has_entropy_model": null,
  "has_text_autoencoder": null,
  "hidden_size": 768,
  "is_causal": true,
  "mask_token_id": null,
  "masked_substitution_rate": null,
  "max_position_embeddings": 1024,
  "model_class": null,
  "model_type": "matformer",
  "name": "BabyLM",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "rms_norm_eps": 1e-06,
  "rope_theta": 10000.0,
  "sliding_type": null,
  "training_objective": "autoregressive",
  "transformers_version": "4.57.1",
  "use_cache": true,
  "vocab_size": 32777
}
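
Because `auto_map` routes `AutoConfig` / `AutoModel` / `AutoModelForCausalLM` to custom classes in `modeling_matformer.py`, loading this checkpoint requires `trust_remote_code=True`. Below is a minimal loading sketch; the repo id `mrinaldi/kenji-endo-0.1` is inferred from the page path and may differ, and the tokenizer fallback simply follows the config's `_tokenizer_name`.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "mrinaldi/kenji-endo-0.1"  # assumption: inferred from the page path

# The config's _tokenizer_name points at Minerva-350M; use it directly in case
# the repo does not ship its own tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("sapienzanlp/Minerva-350M-base-v1.0")

# trust_remote_code=True is required by the auto_map entries, which point at
# the custom Matformer classes shipped in modeling_matformer.py.
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

prompt = "Il gatto"
inputs = tokenizer(prompt, return_tensors="pt")
# max_position_embeddings is 1024, so keep prompt + generated tokens within that.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```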
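The architecture fields also pin down a rough parameter budget. The sketch below estimates it under standard decoder-only assumptions that the config suggests but does not spell out: untied input/output embeddings (`tie_word_embeddings` is false), bias-free Q/K/V/O projections (`bias` is false), a SwiGLU FFN with gate/up/down matrices of intermediate size `ffn_factor * hidden_size`, and no learned position table (the default layer uses ALiBi).

```python
# Rough parameter estimate from the config values; the layer layout below is
# an assumption, not read from modeling_matformer.py.
hidden, layers, vocab, ffn_factor = 768, 12, 32777, 3.0

embed = vocab * hidden                    # input embedding matrix
lm_head = vocab * hidden                  # untied output head
attn = 4 * hidden * hidden                # Q, K, V, O projections, no bias
ffn_inner = int(ffn_factor * hidden)      # 2304
ffn = 3 * hidden * ffn_inner              # SwiGLU gate, up, down matrices
norms = 2 * hidden                        # two RMSNorm scales per layer

total = embed + lm_head + layers * (attn + ffn + norms)
print(f"~{total / 1e6:.0f}M parameters")  # roughly 142M under these assumptions
```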