Dataset Viewer
The dataset viewer is not available for this split.
Cannot extract the features (columns) for the split 'train' of the config 'default' of the dataset.
Error code: FeaturesError
Exception: ArrowInvalid
Message: Schema at index 4 was different:
Schema of the record at index 4:
  average_CPS: double
  config: struct<model_name: string, num_fewshot: string, batch_size: int64, model: string, base_model: string, revision: string, multimodal: bool, submitted_time: timestamp[s], num_params_billion: double, language: string>
  tasks: struct with one entry per task (admission-test, faq, hate-speech-detection, lexical-substitution, evalita NER, relation-extraction, sentiment-analysis, summarization-fanpage, text-entailment, word-in-context, MAIA-MC, MAIA-GEN), each of the form
    struct<prompts: list<item: struct<prompt: string, metric: string, value: double, stderr: double>>, average_accuracy: double, best_prompt: double, prompt_id: string, CPS: double, std_accuracy: double>
  where stderr is null-typed for lexical-substitution, evalita NER, relation-extraction, sentiment-analysis, MAIA-MC and MAIA-GEN, and where every field of MAIA-MC and MAIA-GEN other than prompt and metric is null-typed (all values missing).

vs the schema of the other records:
  identical, except that MAIA-MC and MAIA-GEN carry concrete types like the remaining tasks (value: double, average_accuracy: double, best_prompt: double, prompt_id: string, CPS: double, std_accuracy: double; stderr stays null-typed).

In other words, the record at index 4 has no values at all for the MAIA-MC and MAIA-GEN tasks, so PyArrow infers null types for those fields and the resulting batch cannot be concatenated with the batches built from the other records.
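If the goal is simply to read the split despite the viewer failure, one workaround is to bypass Arrow's schema inference and load the raw result files directly. The sketch below is only an illustration under assumptions: it supposes the split is a set of per-submission JSON files and uses a placeholder glob pattern (results/*.json) rather than the dataset's actual layout. pandas.json_normalize flattens the nested config/tasks structs into columns, and the all-missing MAIA-MC / MAIA-GEN values simply become NaN instead of triggering a schema conflict.

    # Workaround sketch (hypothetical paths): read each JSON result file directly
    # instead of letting PyArrow concatenate batches with incompatible schemas.
    import glob
    import json

    import pandas as pd

    records = []
    for path in sorted(glob.glob("results/*.json")):
        with open(path, encoding="utf-8") as f:
            records.append(json.load(f))

    # Flatten nested structs such as config.model_name or tasks.faq.CPS into
    # top-level columns; all-null MAIA-MC / MAIA-GEN fields just become NaN.
    df = pd.json_normalize(records)
    print(df[["config.model_name", "average_CPS"]].head())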
Traceback (most recent call last):
  File "/src/services/worker/src/worker/job_runners/split/first_rows.py", line 243, in compute_first_rows_from_streaming_response
    iterable_dataset = iterable_dataset._resolve_features()
  File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 3496, in _resolve_features
    features = _infer_features_from_batch(self.with_format(None)._head())
  File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2257, in _head
    return next(iter(self.iter(batch_size=n)))
  File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2461, in iter
    for key, example in iterator:
  File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 1952, in __iter__
    for key, pa_table in self._iter_arrow():
  File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 1974, in _iter_arrow
    yield from self.ex_iterable._iter_arrow()
  File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 531, in _iter_arrow
    yield new_key, pa.Table.from_batches(chunks_buffer)
  File "pyarrow/table.pxi", line 5039, in pyarrow.lib.Table.from_batches
  File "pyarrow/error.pxi", line 155, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 92, in pyarrow.lib.check_status
pyarrow.lib.ArrowInvalid: Schema at index 4 was different:
  (schema comparison identical to the error message shown above)
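The failure is easy to reproduce outside the viewer. The following toy sketch (an illustration, not the worker's code) builds two single-column record batches, one float64 and one whose column is entirely null and therefore inferred as null-typed, shows that pa.Table.from_batches rejects the pair exactly as in the traceback, and then makes them concatenable by casting the null-typed batch to the reference schema.

    # Toy reproduction of the ArrowInvalid above, with made-up batches standing in
    # for the dataset's real files.
    import pyarrow as pa

    ref = pa.RecordBatch.from_pydict({"CPS": pa.array([0.5], type=pa.float64())})
    bad = pa.RecordBatch.from_pydict({"CPS": pa.array([None])})  # all-null -> null type

    try:
        pa.Table.from_batches([ref, bad])  # same call that fails in the traceback
    except pa.ArrowInvalid as err:
        print(err)  # "Schema at index 1 was different: ..."

    # Casting null to float64 is allowed (the column stays all-null), after which
    # the two tables share a schema and can be concatenated.
    fixed = pa.Table.from_batches([bad]).cast(ref.schema)
    table = pa.concat_tables([pa.Table.from_batches([ref]), fixed])
    print(table.schema)  # CPS: double

On the dataset side, the corresponding fix would presumably be to make every file expose the same types, for example by writing typed null values for the missing MAIA-MC / MAIA-GEN results so that schema inference agrees across all records.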