Dataset Viewer
The dataset viewer is not available for this split.
Cannot extract the features (columns) for the split 'train' of the config 'default' of the dataset.
Error code: FeaturesError
Exception: ArrowInvalid
Message: Schema at index 1 was different:
_attn_implementation_autoset: bool
acoustic_vae_dim: int64
acoustic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, decoder_depths: null, decoder_n_filters: int64, decoder_ratios: list<item: int64>, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: double, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
decoder_config: struct<attention_dropout: double, hidden_act: string, hidden_size: int64, initializer_range: double, intermediate_size: int64, max_position_embeddings: int64, max_window_layers: int64, model_type: string, num_attention_heads: int64, num_hidden_layers: int64, num_key_value_heads: int64, rms_norm_eps: double, rope_scaling: null, rope_theta: double, sliding_window: null, tie_word_embeddings: bool, torch_dtype: string, use_cache: bool, use_sliding_window: bool, vocab_size: int64>
diffusion_head_config: struct<ddpm_batch_mul: int64, ddpm_beta_schedule: string, ddpm_num_inference_steps: int64, ddpm_num_steps: int64, diffusion_type: string, head_ffn_ratio: double, head_layers: int64, hidden_size: int64, latent_size: int64, model_type: string, prediction_type: string, rms_norm_eps: double, speech_vae_dim: int64>
model_type: string
semantic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: int64, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
semantic_vae_dim: int64
torch_dtype: string
vs
_attn_implementation_autoset: bool
acoustic_vae_dim: int64
acoustic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, decoder_depths: null, decoder_n_filters: int64, decoder_ratios: list<item: int64>, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: double, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
decoder_config: struct<attention_dropout: double, hidden_act: string, hidden_size: int64, initializer_range: double, intermediate_size: int64, max_position_embeddings: int64, max_window_layers: int64, model_type: string, num_attention_heads: int64, num_hidden_layers: int64, num_key_value_heads: int64, rms_norm_eps: double, rope_theta: double, sliding_window: null, tie_word_embeddings: bool, torch_dtype: string, transformers_version: string, use_cache: bool, use_mrope: bool, use_sliding_window: bool, vocab_size: int64>
diffusion_head_config: struct<ddpm_batch_mul: int64, ddpm_beta_schedule: string, ddpm_num_inference_steps: int64, ddpm_num_steps: int64, diffusion_type: string, head_ffn_ratio: double, head_layers: int64, hidden_size: int64, latent_size: int64, model_type: string, prediction_type: string, rms_norm_eps: double, speech_vae_dim: int64>
model_type: string
semantic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: int64, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
semantic_vae_dim: int64
torch_dtype: string
Traceback: Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/split/first_rows.py", line 243, in compute_first_rows_from_streaming_response
iterable_dataset = iterable_dataset._resolve_features()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 3608, in _resolve_features
features = _infer_features_from_batch(self.with_format(None)._head())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2368, in _head
return next(iter(self.iter(batch_size=n)))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2573, in iter
for key, example in iterator:
^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2060, in __iter__
for key, pa_table in self._iter_arrow():
^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2082, in _iter_arrow
yield from self.ex_iterable._iter_arrow()
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 572, in _iter_arrow
yield new_key, pa.Table.from_batches(chunks_buffer)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "pyarrow/table.pxi", line 5039, in pyarrow.lib.Table.from_batches
File "pyarrow/error.pxi", line 155, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 92, in pyarrow.lib.check_status
pyarrow.lib.ArrowInvalid: Schema at index 1 was different:
_attn_implementation_autoset: bool
acoustic_vae_dim: int64
acoustic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, decoder_depths: null, decoder_n_filters: int64, decoder_ratios: list<item: int64>, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: double, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
decoder_config: struct<attention_dropout: double, hidden_act: string, hidden_size: int64, initializer_range: double, intermediate_size: int64, max_position_embeddings: int64, max_window_layers: int64, model_type: string, num_attention_heads: int64, num_hidden_layers: int64, num_key_value_heads: int64, rms_norm_eps: double, rope_scaling: null, rope_theta: double, sliding_window: null, tie_word_embeddings: bool, torch_dtype: string, use_cache: bool, use_sliding_window: bool, vocab_size: int64>
diffusion_head_config: struct<ddpm_batch_mul: int64, ddpm_beta_schedule: string, ddpm_num_inference_steps: int64, ddpm_num_steps: int64, diffusion_type: string, head_ffn_ratio: double, head_layers: int64, hidden_size: int64, latent_size: int64, model_type: string, prediction_type: string, rms_norm_eps: double, speech_vae_dim: int64>
model_type: string
semantic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: int64, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
semantic_vae_dim: int64
torch_dtype: string
vs
_attn_implementation_autoset: bool
acoustic_vae_dim: int64
acoustic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, decoder_depths: null, decoder_n_filters: int64, decoder_ratios: list<item: int64>, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: double, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
decoder_config: struct<attention_dropout: double, hidden_act: string, hidden_size: int64, initializer_range: double, intermediate_size: int64, max_position_embeddings: int64, max_window_layers: int64, model_type: string, num_attention_heads: int64, num_hidden_layers: int64, num_key_value_heads: int64, rms_norm_eps: double, rope_theta: double, sliding_window: null, tie_word_embeddings: bool, torch_dtype: string, transformers_version: string, use_cache: bool, use_mrope: bool, use_sliding_window: bool, vocab_size: int64>
diffusion_head_config: struct<ddpm_batch_mul: int64, ddpm_beta_schedule: string, ddpm_num_inference_steps: int64, ddpm_num_steps: int64, diffusion_type: string, head_ffn_ratio: double, head_layers: int64, hidden_size: int64, latent_size: int64, model_type: string, prediction_type: string, rms_norm_eps: double, speech_vae_dim: int64>
model_type: string
semantic_tokenizer_config: struct<causal: bool, channels: int64, conv_bias: bool, conv_norm: string, corpus_normalize: double, disable_last_norm: bool, encoder_depths: string, encoder_n_filters: int64, encoder_ratios: list<item: int64>, fix_std: int64, layer_scale_init_value: double, layernorm: string, layernorm_elementwise_affine: bool, layernorm_eps: double, mixer_layer: string, model_type: string, pad_mode: string, std_dist_type: string, vae_dim: int64, weight_init_value: double>
semantic_vae_dim: int64
torch_dtype: string
Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
No dataset card yet
Downloads last month: 29