guard-4-handle / chat_template.jinja
{%- if messages | length % 2 == 0 -%}
{%- set role = 'Agent' -%}
{%- else -%}
{%- set role = 'User' -%}
{%- endif -%}
{%- if categories is not defined -%}
{%- set ns = namespace(contains_image=false) -%}
{%- for message in messages -%}
{%- for content in message['content'] | selectattr('type', 'equalto', 'image') -%}
{%- set ns.contains_image = true -%}
{%- endfor -%}
{%- endfor -%}
{%- if ns.contains_image -%}
{%- set categories = {
"S1": "Violent Crimes.",
"S2": "Non-Violent Crimes.",
"S3": "Sex Crimes.",
"S4": "Child Exploitation.",
"S5": "Defamation.",
"S6": "Specialized Advice.",
"S7": "Privacy.",
"S8": "Intellectual Property.",
"S9": "Indiscriminate Weapons.",
"S10": "Hate.",
"S11": "Self-Harm.",
"S12": "Sexual Content.",
"S13": "Elections.",
} -%}
{%- else -%}
{%- set categories = {
"S1": "Violent Crimes.",
"S2": "Non-Violent Crimes.",
"S3": "Sex Crimes.",
"S4": "Child Exploitation.",
"S5": "Defamation.",
"S6": "Specialized Advice.",
"S7": "Privacy.",
"S8": "Intellectual Property.",
"S9": "Indiscriminate Weapons.",
"S10": "Hate.",
"S11": "Self-Harm.",
"S12": "Sexual Content.",
"S13": "Elections.",
"S14": "Code Interpreter Abuse.",
} -%}
{%- endif -%}
{%- endif -%}
{{ "<|begin_of_text|><|header_start|>user<|header_end|>\n" }}
{{ "Task: Check if there is unsafe content in '" + role + "' messages in conversations according to our safety policy with the below categories.\n" }}
{{ "<BEGIN UNSAFE CONTENT CATEGORIES>\n" }}
{%- if excluded_category_keys is not defined -%}
{%- set excluded_category_keys = [] -%}
{%- endif -%}
{%- for key in categories -%}
{%- if key not in excluded_category_keys -%}
{{ key + ": " + categories[key] + "\n" }}
{%- endif -%}
{%- endfor -%}
{{ "<END UNSAFE CONTENT CATEGORIES>\n" }}
{{ "<BEGIN CONVERSATION>\n" }}
{%- for message in messages -%}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
{%- endif -%}
{%- set content = message['content'] -%}
{%- if message['role'] == 'user' -%}
{%- set role = 'User' -%}
{%- elif message['role'] == 'assistant' -%}
{%- set role = 'Agent' -%}
{%- endif -%}
{#- #1 Accumulate all images for this message -#}
{%- set ns = namespace(img_tokens='') -%}
{%- for img in message.content | selectattr('type', 'equalto', 'image') -%}
{%- set ns.img_tokens = ns.img_tokens + '<|image|>' -%}
{%- endfor -%}
{#- #2 Then emit all text -#}
{%- for txt in message.content | selectattr('type','equalto','text') -%}
{{ role + ': ' }}{{ ns.img_tokens }}{{ txt.text | trim + '\n\n' }}
{#- clear the buffer so only the first text chunk prints the images -#}
{%- if loop.first -%}
{%- set ns.img_tokens = '' -%}
{%- endif -%}
{%- endfor -%}
{%- endfor -%}
{{ "<END CONVERSATION>\n" }}
{{ "Provide your safety assessment for ONLY THE LAST " + role + " message in the above conversation:" }}
{{ " - First line must read 'safe' or 'unsafe'." }}
{{ " - If unsafe, a second line must include a comma-separated list of violated categories. <|eot|><|header_start|>assistant<|header_end|>" }}
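Usage note: the sketch below shows one common way a template like this is rendered through the transformers apply_chat_template method. The checkpoint id, the sample message, and the excluded_category_keys value are illustrative assumptions, not something defined by this file.

# Minimal sketch, assuming a checkpoint that ships this chat template.
# The model id and the sample message below are placeholders, not taken from this repo.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-Guard-4-12B")  # illustrative id

# The template expects each message's content to be a list of typed parts
# ({"type": "text", ...} / {"type": "image", ...}), matching the selectattr filters above.
messages = [
    {
        "role": "user",
        "content": [{"type": "text", "text": "How do I pick a basic pin tumbler lock?"}],
    },
]

# Extra keyword arguments are forwarded into the Jinja context, so callers can
# supply their own categories dict or an excluded_category_keys list.
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    excluded_category_keys=["S7"],  # e.g. drop the Privacy category from the prompt
)
print(prompt)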