---
# GPTQ one-shot quantization recipe (llm-compressor / compressed-tensors).
# Reconstructed into block YAML: the original file had all mappings collapsed
# onto a single line, which is not parseable YAML.
default_stage:
  default_modifiers:
    GPTQModifier:
      # Quantization scheme definitions, keyed by group name.
      config_groups:
        config_group:
          # Module types this scheme applies to.
          targets: [Linear]
          # Weight quantization: 4-bit symmetric int, group-wise over
          # groups of 128 columns.
          weights:
            num_bits: 4
            type: int
            symmetric: true
            group_size: 128
            strategy: group
            block_structure: null
            dynamic: false
            # Activation-order (act-order) weighting during GPTQ.
            actorder: weight
            observer: minmax
            observer_kwargs: {}
          # Activations are left unquantized (weight-only quantization).
          input_activations: null
          output_activations: null
          format: null
      # Modifier-level target selection (same as the scheme above).
      targets: [Linear]
      # Modules excluded from quantization. Entries prefixed with 're:' are
      # regular expressions matched against module names.
      # NOTE(review): 're:.*.dense$' has an unescaped '.' (matches any char)
      # and 're:.*shared_experts*$' repeats the literal 's' — presumably
      # '.*\.dense$' and '.*shared_experts.*$' were intended; confirm before
      # changing, as the current patterns still match the likely targets.
      ignore: [lm_head, 're:.*mlp.gate$', 're:.*shared_experts*$', 're:.*.dense$']
      # Quantize layers one at a time to bound memory usage.
      sequential_update: true
      # Number of columns processed per GPTQ update step.
      block_size: 128
      # Fraction of the mean Hessian diagonal added as damping.
      dampening_frac: 0.01
      # Keep Hessians on the accelerator rather than offloading to CPU.
      offload_hessians: false