{
  "dataset": {
    "align_stage_components": [
      "download/llava-laion-cc-sbu-558k/chat.json",
      "download/llava-laion-cc-sbu-558k"
    ],
    "dataset_id": "llava-v15",
    "dataset_resampled": true,
    "dataset_root_dir": "data",
    "finetune_stage_components": [
      "/local/home/weizhiwang/data/MAmmoTH-VL-Instruct-12M/mammoth_si_10M_simple.jsonl",
      "/share/edc/home/weizhiwang/data/MAmmoTH-VL-Instruct-12M/single_image_data"
    ],
    "max_num_images": 6,
    "min_num_images": 1,
    "train_num_samples": 200000,
    "type": "llava-v15",
    "workers": 4
  },
  "model": {
    "align_epochs": 1,
    "align_global_batch_size": 256,
    "align_learning_rate": 0.001,
    "align_lr_scheduler_type": "linear-warmup+cosine-decay",
    "align_max_grad_norm": 1.0,
    "align_max_steps": null,
    "align_per_device_batch_size": 16,
    "align_train_strategy": "fsdp-shard-grad-op",
    "align_warmup_ratio": 0.03,
    "align_weight_decay": 0.0,
    "arch_specifier": "full-align+729-avgpool",
    "enable_gradient_checkpointing": true,
    "enable_mixed_precision_training": true,
    "finetune_epochs": 1,
    "finetune_global_batch_size": 128,
    "finetune_learning_rate": 2e-05,
    "finetune_lr_scheduler_type": "linear-warmup+cosine-decay",
    "finetune_max_grad_norm": 1.0,
    "finetune_max_steps": null,
    "finetune_per_device_batch_size": 2,
    "finetune_train_strategy": "fsdp-full-shard",
    "finetune_warmup_ratio": 0.03,
    "finetune_weight_decay": 0.1,
    "image_resize_strategy": "resize-naive",
    "llm_backbone_id": "qwen2.5-1.5b-instruct",
    "llm_max_length": 4096,
    "model_id": "qwen2.5-1.5b-instruct-continue-training-ccs-datacomp-mlm-filter-mammoth-10m",
    "pretrain_epochs": 1,
    "pretrain_global_batch_size": 512,
    "pretrain_learning_rate": 5e-05,
    "pretrain_lr_scheduler_type": "linear-warmup+cosine-decay",
    "pretrain_max_grad_norm": 1.0,
    "pretrain_max_steps": null,
    "pretrain_per_device_batch_size": 16,
    "pretrain_train_strategy": "fsdp-full-shard",
    "pretrain_warmup_ratio": 0.03,
    "pretrain_weight_decay": 0.01,
    "reduce_in_full_precision": false,
    "type": "one-stage+7b",
    "vision_backbone_id": "siglip-vit-so400m-384px"
  },
  "mount_path": "Qwen",
  "pretrained_checkpoint": "/local/home/weizhiwang/checkpoints/obelics+qwen2.5-1.5b-instruct-continue-training-ccs-datacomp-mlm-filter+stage-pretrain+x7/checkpoints/latest-checkpoint.pt",
  "run_id": "qwen2.5-1.5b-instruct-continue-training-ccs-datacomp-mlm-filter-mammoth-10m+stage-finetune+x7",
  "run_root_dir": "/share/edc/home/weizhiwang/checkpoints",
  "seed": 7,
  "stage": "finetune",
  "trackers": [
    "jsonl"
  ],
  "wandb_entity": null,
  "wandb_project": "mmpretrain"
}