add title/text columns, enforce same schema as wands
Browse files- build.py +19 -2
- data/test.jsonl.gz +2 -2
- data/train.jsonl.gz +2 -2
build.py
CHANGED
|
@@ -3,6 +3,12 @@ import polars as pl
|
|
| 3 |
import os
|
| 4 |
|
| 5 |
MAPPING = {"E": 3, "S": 2, "C": 1, "I": 0}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
if __name__ == "__main__":
|
| 8 |
parser = argparse.ArgumentParser()
|
|
@@ -29,9 +35,20 @@ if __name__ == "__main__":
|
|
| 29 |
os.path.join(args.source, "shopping_queries_dataset_examples.parquet")
|
| 30 |
)
|
| 31 |
merged = examples.join(products, on=pl.col("product_id"))
|
| 32 |
-
|
| 33 |
-
merged = merged.with_columns(label=pl.col("label").replace(MAPPING).cast(pl.Int32))
|
| 34 |
merged = merged.select(pl.all().exclude("^__index_level_.*$"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
print(f"loaded {len(merged)} source rows")
|
| 36 |
merged = merged.filter(pl.col("split") == args.split)
|
| 37 |
print(f"split filtering done: {len(merged)} rows")
|
|
|
|
| 3 |
import os
|
| 4 |
|
| 5 |
MAPPING = {"E": 3, "S": 2, "C": 1, "I": 0}
|
| 6 |
+
COLUMNS = [
|
| 7 |
+
"product_description",
|
| 8 |
+
"product_bullet_point",
|
| 9 |
+
"product_brand",
|
| 10 |
+
"product_color",
|
| 11 |
+
]
|
| 12 |
|
| 13 |
if __name__ == "__main__":
|
| 14 |
parser = argparse.ArgumentParser()
|
|
|
|
| 35 |
os.path.join(args.source, "shopping_queries_dataset_examples.parquet")
|
| 36 |
)
|
| 37 |
merged = examples.join(products, on=pl.col("product_id"))
|
| 38 |
+
|
|
|
|
| 39 |
merged = merged.select(pl.all().exclude("^__index_level_.*$"))
|
| 40 |
+
merged = merged.with_columns(
|
| 41 |
+
label=pl.col("esci_label").replace(MAPPING).cast(pl.Int32),
|
| 42 |
+
id=pl.col("example_id").cast(pl.String),
|
| 43 |
+
query_id=pl.col("query_id").cast(pl.String),
|
| 44 |
+
product_id=pl.col("product_id").cast(pl.String),
|
| 45 |
+
title=pl.col("product_title"),
|
| 46 |
+
text=pl.concat_str(
|
| 47 |
+
[pl.lit(f"{col}: ") + pl.col(col).fill_null("") for col in COLUMNS],
|
| 48 |
+
separator="\n",
|
| 49 |
+
),
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
print(f"loaded {len(merged)} source rows")
|
| 53 |
merged = merged.filter(pl.col("split") == args.split)
|
| 54 |
print(f"split filtering done: {len(merged)} rows")
|
data/test.jsonl.gz
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bb0db292e2185fea215bab11ef5d583aaf28eb1fc03cbcbca01a0c324009e11c
|
| 3 |
+
size 114812289
|
data/train.jsonl.gz
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cf9ae049edf3e7c233f7349503bc4c92fd50ddedef936a13792bfd9d1a4a0e59
|
| 3 |
+
size 452692076
|