# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accuracy metric for the Test of Time benchmark by Bahar et al. (2025)."""

import ast
import json
from typing import Literal

import datasets
import evaluate

_CITATION = """\
@InProceedings{huggingface:module,
title = {Test of Time Accuracy},
author = {Auss Abbood},
year={2025}
}
"""

_DESCRIPTION = """\
The Test of Time (ToT) benchmarks expects models format their answers as a JSON with an explanation field and an answer field that follows a predefined format. The metrics extracts JSONs objects from the model's output, retains only the first JSON, drops the explanation field and compares it with the reference answer.
"""


_KWARGS_DESCRIPTION = """
Compares the extracted answer from the model's output with the reference answer.
Args:
    predictions: list of predictions to score. Each prediction should be a string that contains a JSON object (e.g., generated by an LLM).
    references: list of reference answers.
    subset: The subset of the benchmark being evaluated. Must be one of "arithmetic" or "semantic".
    return_average: If True, returns the average accuracy. If False, returns a list of boolean scores (correct/incorrect) for each sample. Defaults to True.
Returns:
    accuracy: The accuracy score (0.0 to 1.0) if return_average=True, or a list of booleans indicating correctness per sample if return_average=False.
Examples:
    >>> import evaluate
    >>> metric = evaluate.load("aauss/test_of_time_accuracy")
    >>> predictions = [
    ...     '{"explanation": "Some explanation...", "unordered_list": ["London"]}',
    ...     ' "Response without opening curly brackets...", "answer": "2005-04-07"}',
    ... ]
    >>> references = [
    ...     '{"unordered_list": ["London"]}',
    ...     "{'answer': '2005-04-07'}",
    ... ]
    >>> results = metric.compute(predictions=predictions, references=references, subset="arithmetic")
    >>> print(results)
    {'accuracy': 0.5}
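    >>> # An illustrative semantic-subset call; this assumes the reference is the
    >>> # bare answer string rather than a JSON object:
    >>> predictions = ['{"explanation": "Some explanation...", "answer": "Monday"}']
    >>> references = ["Monday"]
    >>> results = metric.compute(predictions=predictions, references=references, subset="semantic")
    >>> print(results)
    {'accuracy': 1.0}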
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class TestOfTimeAccuracy(evaluate.Metric):
    """Accuracy metric for the Test of Time benchmark by Bahar et al. (2025)."""

    # Prevent pytest from collecting this class as a test case (its name starts with "Test").
    __test__ = False

    def _info(self):
        return evaluate.MetricInfo(
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            # Homepage of the module for documentation
            # homepage="http://module.homepage",
            # Additional links to the codebase or references
            # codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            # reference_urls=["http://path.to.reference.url/new_module"],
        )

    @staticmethod
    def _extract_first_json_object(s: str) -> dict | None:
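        """Return the first JSON object embedded in ``s``, or ``None`` if there is none.

        Scans the string and attempts to decode a JSON value at each offset,
        returning the first decoded value that is an object (dict). A
        hypothetical input for illustration:

        >>> TestOfTimeAccuracy._extract_first_json_object('noise {"a": 1} tail')
        {'a': 1}
        """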
        decoder = json.JSONDecoder()
        idx, end = 0, len(s)
        while idx < end:
            try:
                obj, next_idx = decoder.raw_decode(s, idx)
                idx = next_idx
                if isinstance(obj, dict):
                    return obj
            except ValueError:
                # No JSON value starts at this offset; advance one character.
                idx += 1
        return None

    @staticmethod
    def _pop_explanation(d):
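        """Drop the free-text "explanation" field so only answer fields are compared.

        >>> TestOfTimeAccuracy._pop_explanation({"explanation": "...", "answer": 3})
        {'answer': 3}
        """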
        if isinstance(d, dict):
            d.pop("explanation", None)
        return d

    @staticmethod
    def _get_answer(d):
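        """Return the "answer" field of a parsed prediction; pass non-dicts through.

        >>> TestOfTimeAccuracy._get_answer({"answer": "Monday"})
        'Monday'
        """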
        if isinstance(d, dict):
            return d.get("answer", None)
        return d

    @staticmethod
    def _parse_label(s):
        """Parses a string that could be a Python dict."""
        try:
            # Safe: only parses literals, does not execute code
            return ast.literal_eval(s)
        except (ValueError, SyntaxError):
            return None

    @staticmethod
    def _sort_unordered_list(d):
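        """Replace a dict carrying an "unordered_list" answer with the sorted list
        itself, so that list-valued answers compare order-insensitively.

        >>> TestOfTimeAccuracy._sort_unordered_list({"unordered_list": ["b", "a"]})
        ['a', 'b']
        """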
        if isinstance(d, dict) and "unordered_list" in d:
            return sorted(d["unordered_list"])
        return d

    @staticmethod
    def _cast_prediction(reference: dict, prediction: dict) -> None | dict:
        """
        Casts the values in the prediction dictionary to match the types
        of the values in the reference dictionary.
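
        For illustration: ``_cast_prediction({"answer": "7"}, {"answer": 7})``
        returns ``{"answer": "7"}``; values are cast to the reference's types and
        keys absent from the reference are dropped.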
        """
        casted_prediction = {}

        try:
            for ref_key, ref_value in reference.items():
                if ref_key not in prediction:
                    return None

                reference_type = type(ref_value)
                pred_value = prediction[ref_key]

                # Special safeguard: Python allows list("abc") -> ['a', 'b', 'c'].
                # We don't want to turn strings into character lists.
                if reference_type == list and not isinstance(pred_value, list):
                    return None

                # This handles int("123") -> 123, float(12) -> 12.0, str(100) -> "100"
                casted_prediction[ref_key] = reference_type(pred_value)

            return casted_prediction

        except (ValueError, TypeError):
            return None

    def _compute(
        self,
        predictions,
        references,
        subset: Literal["arithmetic", "semantic"],
        return_average: bool = True,
    ):
        """Returns the scores"""
        predictions = [self._extract_first_json_object(p) for p in predictions]
        if subset == "semantic":
            # Semantic subset's answers are not JSON objects.
            # Expected answers are always in "answer" field.
            predictions = [self._get_answer(p) for p in predictions]
        elif subset == "arithmetic":
            # Arithmetic subset's answers are JSON objects.
            # Answer fields vary. Thus, remove explanation field.
            predictions = [self._pop_explanation(p) for p in predictions]
            references = [self._parse_label(r) for r in references]
        else:
            raise ValueError(f"Invalid subset: {subset}")
        accuracy = []
        for pred, ref in zip(predictions, references):
            if subset == "arithmetic":
                pred = self._cast_prediction(ref, pred)
                if "unordered_list" in ref:
                    pred = self._sort_unordered_list(pred)
                    ref = self._sort_unordered_list(ref)
            elif subset == "semantic":
                # The semantic answer JSON sometimes has an int as its value
                # while the label is a string, so compare both as strings.
                pred = str(pred)
                ref = str(ref)
            accuracy.append(pred == ref)
        if return_average:
            return {"accuracy": sum(accuracy) / len(accuracy)}
        return {"accuracy": accuracy}