import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

MODEL_ID = "internlm/CapRL-3B"
DEFAULT_PROMPT = "Describe the image in detail."
MAX_NEW_TOKENS = 4096
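
# MAX_NEW_TOKENS is deliberately generous: CapRL's dense captions routinely run
# to several hundred tokens (the default example below is 826).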

# Default demo content
DEFAULT_IMAGE = "./examples/1909.png"
DEFAULT_CAPTION = (
    "The image is a bar chart from the Pew Research Center that illustrates how older Republicans and Republican leaners view Donald Trump, specifically focusing on how many describe the phrase \"fights for what I believe in\" to describe Trump. The data is based on a survey conducted from February 4-15, 2020, among U.S. adults who identify as Republicans or Republican-leaning independents.\n\n"
    "### Title:\n"
    "Older Republicans especially likely to see Trump as fighting for their beliefs\n\n"
    "### Main Question:\n"
    "Among Republicans and Republican leaners, % who say the phrase 'fights for what I believe in' describes Trump ...\n\n"
    "### Data Breakdown:\n\n"
    "1. **All Rep/Lean Rep (Overall):**\n"
    "   - Very well: 51%\n"
    "   - Fairly well: 36%\n"
    "   - NET: 87%\n\n"
    "2. **Ages 18-29:**\n"
    "   - Very well: 31%\n"
    "   - Fairly well: 45%\n"
    "   - NET: 76%\n\n"
    "3. **30-49:**\n"
    "   - Very well: 41%\n"
    "   - Fairly well: 42%\n"
    "   - NET: 82%\n\n"
    "4. **50-64:**\n"
    "   - Very well: 58%\n"
    "   - Fairly well: 33%\n"
    "   - NET: 92%\n\n"
    "5. **65+:**\n"
    "   - Very well: 68%\n"
    "   - Fairly well: 26%\n"
    "   - NET: 94%\n\n"
    "6. **Postgrad:**\n"
    "   - Very well: 42%\n"
    "   - Fairly well: 38%\n"
    "   - NET: 80%\n\n"
    "7. **College grad:**\n"
    "   - Very well: 45%\n"
    "   - Fairly well: 40%\n"
    "   - NET: 85%\n\n"
    "8. **Some college:**\n"
    "   - Very well: 51%\n"
    "   - Fairly well: 36%\n"
    "   - NET: 87%\n\n"
    "9. **HS or less:**\n"
    "   - Very well: 56%\n"
    "   - Fairly well: 33%\n"
    "   - NET: 89\n\n"
    "10. **Conserv (Conservative):**\n"
    "   - Very well: 63%\n"
    "   - Fairly well: 31%\n"
    "   - NET: 94%\n\n"
    "11. **Mod/Lib (Moderate/Liberal):**\n"
    "   - Very well: 32%\n"
    "   - Fairly well: 44%\n"
    "   - NET: 75\n\n"
    "12. **Republican:**\n"
    "   - Very well: 61%\n"
    "   - Fairly well: 32%\n"
    "   - NET: 93\n\n"
    "13. **Lean Republican:**\n"
    "   - Very well: 36%\n"
    "   - Fairly well: 41%\n"
    "   - NET: 77\n\n"
    "### Notes:\n"
    "- The note at the bottom states that the data is based on Republicans and Republican-leaning independents.\n"
    "- The source is a survey of U.S. adults conducted from February 4-15, 2020.\n\n"
    "### Key Observations:\n"
    "1. Older Republicans (65+) are the most likely to see Trump as someone who \"fights for what I believe in,\" with a net positive percentage of 94.\n"
    "2. Younger age groups (18-29) have the lowest net positive percentage at 76.\n"
    "3. Those with higher educational backgrounds (postgrad and college grad) have slightly lower net positive percentages compared to those with some college education (80 vs. 85).\n"
    "4. Conservatives (63% very well) are the most likely to see Trump this way, followed by Republicans (61%).\n"
    "5. Lean Republicans (36% very well) have the lowest percentage among the leaner categories.\n\n"
    "This detailed description should provide a pure text model with sufficient information to answer any related questions about the image."
)
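
# Precomputed token count for DEFAULT_CAPTION, so the token box is populated
# before the first generation runs.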
DEFAULT_CAPTION_TOKENS = 826


def get_device() -> str:
    return "cuda" if torch.cuda.is_available() else "cpu"


def select_dtype(device: str):
    if device == "cuda":
        if torch.cuda.is_bf16_supported():
            return torch.bfloat16
        return torch.float16
    return torch.float32


def load_model():
    device = get_device()
    dtype = select_dtype(device)
    # Use device_map="auto" for proper GPU allocation with the spaces.GPU decorator
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        MODEL_ID,
        torch_dtype=dtype,
        device_map="auto",
        trust_remote_code=True,
    )
    processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
    return model, processor
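
# Load the model and processor once at import time; every request reuses them.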
MODEL, PROCESSOR = load_model()


@spaces.GPU  # on ZeroGPU Spaces, GPU work must run inside a decorated function
def generate_caption(image: Image.Image):
    if image is None:
        return "", 0
    try:
        # Validate image
        if not isinstance(image, Image.Image):
            return "Error: Invalid image format", 0
        # Downscale overly large images in place to avoid running out of memory
        max_size = 4096
        if image.width > max_size or image.height > max_size:
            image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
        device = MODEL.device
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": DEFAULT_PROMPT},
                ],
            }
        ]
        prompt_text = PROCESSOR.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        inputs = PROCESSOR(
            text=[prompt_text],
            images=[image],
            return_tensors="pt",
        ).to(device)
        generated_ids = MODEL.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            do_sample=False,
        )
        # Strip the prompt tokens so only the newly generated caption is decoded
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        output_text = PROCESSOR.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        caption = output_text[0].strip()
        input_ids = inputs.get("input_ids")
        input_length = input_ids.shape[-1] if input_ids is not None else 0
        total_length = generated_ids.shape[-1]
        num_generated_tokens = max(total_length - input_length, 0)
        return caption, int(num_generated_tokens)
    except torch.cuda.OutOfMemoryError:
        torch.cuda.empty_cache()
        return "Error: Out of GPU memory. Please try with a smaller image.", 0
    except Exception as e:
        return f"Error generating caption: {str(e)}", 0


with gr.Blocks(title="CapRL Image Captioning") as demo:
    gr.Markdown("# 🎨 CapRL for Image Captioning")
    gr.Markdown("### CapRL: Stimulating Dense Image Caption Capabilities via Reinforcement Learning")
    gr.Markdown("✨ Upload an image to generate a detailed caption with CapRL-3B! ✨")
    gr.Markdown(
        """
        📄 <a href="https://arxiv.org/abs/2509.22647">Paper</a> | 📝 <a href="https://github.com/InternLM/CapRL">Github</a> | 🤗 <a href="https://huggingface.co/internlm/CapRL-3B">CapRL-3B Model</a> | 🤗 <a href="https://huggingface.co/yuhangzang/CapRL-InternVL3.5-8B">CapRL-InternVL3.5-8B Model</a> |
        🤗 <a href="https://huggingface.co/datasets/internlm/CapRL-2M">CapRL-2M Dataset</a> |
        🤗 <a href="https://huggingface.co/collections/long-xing1/caprl-68d64ac32ded31596c36e189">CapRL Collection</a> | 📰 <a href="https://huggingface.co/papers/2509.22647">Daily Paper</a> | 💾 <a href="https://huggingface.co/mradermacher/CapRL-3B-GGUF">CapRL-3B-GGUF</a> | 💾 <a href="https://huggingface.co/mradermacher/CapRL-3B-i1-GGUF">CapRL-3B-i1-GGUF</a>
        """
    )
    with gr.Row():
        with gr.Column():
            # Preload a default image to match the provided caption
            image_input = gr.Image(type="pil", label="Input Image", value=Image.open(DEFAULT_IMAGE))
            generate_button = gr.Button("Generate Caption")
        with gr.Column():
            # Show a default caption and its token count on load
            caption_output = gr.Textbox(label="Caption", lines=6, value=DEFAULT_CAPTION)
            token_output = gr.Number(label="Generated Tokens", precision=0, value=DEFAULT_CAPTION_TOKENS)

    generate_button.click(
        fn=generate_caption,
        inputs=image_input,
        outputs=[caption_output, token_output],
        show_progress=True,
    )
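    # Also regenerate automatically whenever a new image is uploaded.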
    image_input.upload(
        fn=generate_caption,
        inputs=image_input,
        outputs=[caption_output, token_output],
        show_progress=True,
    )

    gr.Examples(
        examples=[
            ["./examples/1909.png"],
            ["./examples/44687.jpeg"],
            ["./examples/natural.png"],
        ],
        inputs=image_input,
        outputs=[caption_output, token_output],
        fn=generate_caption,
        cache_examples=True,
        label="📸 Example Images",
    )
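    # With cache_examples=True, the example captions are computed once and then
    # replayed on click instead of re-running the model.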
| gr.Markdown("### Citation") | |
| gr.Markdown("If you find this project useful, please kindly cite:") | |
| citation_text = """@article{xing2025caprl, | |
| title={{CapRL}: Stimulating Dense Image Caption Capabilities via Reinforcement Learning}, | |
| author={Xing, Long and Dong, Xiaoyi and Zang, Yuhang and Cao, Yuhang and Liang, Jianze and Huang, Qidong and Wang, Jiaqi and Wu, Feng and Lin, Dahua}, | |
| journal={arXiv preprint arXiv:2509.22647}, | |
| year={2025} | |
| }""" | |
| gr.Code(value=citation_text, language="markdown", label="BibTeX Citation") | |
| demo.launch() | |