"""Combine sharded CSV result files into single Parquet files."""

from pathlib import Path

import pandas as pd
import pyarrow  # noqa: F401 -- not used directly; fails fast here if the Parquet engine is missing


def find_and_group_csvs():
    """Recursively collect the CSV shards for each output group under the current directory."""
    base = Path(".")
    groups = {
        "evaluation_results": sorted(base.rglob("evaluation_results.csv")),
        "bootstrap_confidence_intervals": sorted(base.rglob("bootstrap_confidence_intervals.csv")),
    }
    for name, paths in groups.items():
        print(f"[INFO] Found {len(paths)} files for '{name}'")
        if not paths:
            print(f"[WARNING] No files found for '{name}'")
    return groups


def combine(paths, out_path):
    """Concatenate the CSVs in `paths` and write them to `out_path` as Parquet."""
    if not paths:
        print(f"[SKIP] No files to combine for {out_path}")
        return

    print(f"[INFO] Combining {len(paths)} files into {out_path}")
    dfs = [pd.read_csv(p) for p in paths]

    # All shards must share an identical header before they can be safely concatenated.
    cols = {tuple(df.columns) for df in dfs}
    if len(cols) > 1:
        raise ValueError(f"[ERROR] {out_path}: header mismatch across shards")

    combined = pd.concat(dfs, ignore_index=True)
    combined.to_parquet(out_path, engine="pyarrow", index=False)
    print(f"[SUCCESS] Wrote {out_path} with {len(combined)} rows")


if __name__ == "__main__":
    groups = find_and_group_csvs()
    combine(groups["evaluation_results"], "evaluation_results-00000-of-00001.parquet")
    combine(groups["bootstrap_confidence_intervals"], "bootstrap_confidence_intervals-00000-of-00001.parquet")
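

# A quick post-run sanity check (a sketch; the file name assumes the defaults used above):
#
#     import pandas as pd
#     df = pd.read_parquet("evaluation_results-00000-of-00001.parquet")
#     print(df.shape)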