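"""Combine sharded CSV result files into single Parquet files.

Recursively searches the current working directory for evaluation_results.csv
and bootstrap_confidence_intervals.csv shards, checks that each group shares a
single header schema, and writes one combined Parquet file per group.
"""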
import pandas as pd
from pathlib import Path
import pyarrow  # ensures pyarrow is installed for Parquet support


def find_and_group_csvs():
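    """Recursively collect the known result CSVs under the current directory.

    Returns a dict mapping each group name to a sorted list of matching paths.
    """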
    base = Path(".")
    groups = {
        "evaluation_results": sorted(base.rglob("evaluation_results.csv")),
        "bootstrap_confidence_intervals": sorted(base.rglob("bootstrap_confidence_intervals.csv")),
    }
    for name, paths in groups.items():
        print(f"[INFO] Found {len(paths)} files for '{name}'")
        if not paths:
            print(f"[WARNING] No files found for '{name}'")
    return groups


def combine(paths, out_path):
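    """Concatenate the CSV shards in `paths` and write them to `out_path` as Parquet.

    Skips writing when `paths` is empty and raises ValueError if the shards do
    not all share the same column header.
    """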
    if not paths:
        print(f"[SKIP] No files to combine for {out_path}")
        return

    print(f"[INFO] Combining {len(paths)} files into {out_path}")
    dfs = [pd.read_csv(p) for p in paths]

    # Basic schema validation
    cols = {tuple(df.columns) for df in dfs}
    if len(cols) > 1:
        raise ValueError(f"[ERROR] {out_path}: header mismatch across shards")

    combined = pd.concat(dfs, ignore_index=True)
    combined.to_parquet(out_path, engine="pyarrow", index=False)
    print(f"[SUCCESS] Written {out_path} with {len(combined)} rows")


if __name__ == "__main__":
    groups = find_and_group_csvs()
    combine(groups["evaluation_results"], "evaluation_results-00000-of-00001.parquet")
    combine(groups["bootstrap_confidence_intervals"], "boostrap_confidence_intervals-00000-of-00001.parquet")