{ "model_type": "stable_diffusion", "quantization": "8-bit", "architectures": [ "StableDiffusionPipeline" ], "torch_dtype": "bfloat16", "precision": "8-bit", "base_model": "stabilityai/sdxl-turbo" }