# jbcs2025_experiments_report / create_parquet_files.py
#
# Combine the per-experiment evaluation_results and
# bootstrap_confidence_intervals CSV shards into single Parquet files.
from pathlib import Path

import pandas as pd
import pyarrow  # noqa: F401  # imported only to fail fast if Parquet support is missing


def find_and_group_csvs():
    """Locate the CSV shards for each output split, grouped by split name."""
    base = Path(".")
    groups = {
        "evaluation_results": sorted(base.rglob("evaluation_results.csv")),
        "bootstrap_confidence_intervals": sorted(
            base.rglob("bootstrap_confidence_intervals.csv")
        ),
    }
    for name, paths in groups.items():
        print(f"[INFO] Found {len(paths)} files for '{name}'")
        if not paths:
            print(f"[WARNING] No files found for '{name}'")
    return groups


def combine(paths, out_path):
    """Concatenate CSV shards and write them out as a single Parquet file."""
    if not paths:
        print(f"[SKIP] No files to combine for {out_path}")
        return
    print(f"[INFO] Combining {len(paths)} files into {out_path}")
    dfs = [pd.read_csv(p) for p in paths]
    # Basic schema validation: all shards must share one column layout.
    # The tuple comparison is order-sensitive, so column order must match too.
    cols = {tuple(df.columns) for df in dfs}
    if len(cols) > 1:
        raise ValueError(
            f"[ERROR] {out_path}: header mismatch across shards: {sorted(cols)}"
        )
    combined = pd.concat(dfs, ignore_index=True)
    combined.to_parquet(out_path, engine="pyarrow", index=False)
    print(f"[SUCCESS] Wrote {out_path} with {len(combined)} rows")
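

# Optional round-trip check (a minimal sketch, not called by the original
# script): re-read a written Parquet file and confirm the row count survived.
# The helper name `verify_roundtrip` is hypothetical; pandas.read_parquet with
# the pyarrow engine is a standard pandas call.
def verify_roundtrip(out_path, expected_rows):
    """Re-read a Parquet file and check that it has the expected row count."""
    reread = pd.read_parquet(out_path, engine="pyarrow")
    if len(reread) != expected_rows:
        raise ValueError(
            f"[ERROR] {out_path}: expected {expected_rows} rows, got {len(reread)}"
        )
    print(f"[INFO] Round-trip check passed for {out_path}")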


if __name__ == "__main__":
    groups = find_and_group_csvs()
    combine(
        groups["evaluation_results"],
        "evaluation_results-00000-of-00001.parquet",
    )
    combine(
        groups["bootstrap_confidence_intervals"],
        "bootstrap_confidence_intervals-00000-of-00001.parquet",
    )
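

# Loading the combined splits back (a sketch, assuming the Hugging Face
# `datasets` library is installed and the script was run from the repo root):
#
#     from datasets import load_dataset
#     ds = load_dataset(
#         "parquet",
#         data_files={
#             "evaluation_results": "evaluation_results-00000-of-00001.parquet",
#             "bootstrap_confidence_intervals": (
#                 "bootstrap_confidence_intervals-00000-of-00001.parquet"
#             ),
#         },
#     )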