jkh committed · verified
Commit 78792a7 · 1 Parent(s): 9a1ca56

Upload data_processing.py with huggingface_hub

Files changed (1)
  1. data_processing.py +484 -0
data_processing.py ADDED
@@ -0,0 +1,484 @@
#!/usr/bin/env python3
"""
Phase 2: Data Processing for Human Skeletal Muscle Aging Atlas
==============================================================

Processes the H5AD file into HuggingFace-compatible parquet files:
- Expression matrix (sparse -> dense conversion)
- Sample metadata (cell-level information)
- Feature metadata (gene information)
- Dimensionality reduction projections (scVI, UMAP, PCA, t-SNE)
- Unstructured metadata (all additional data)

Requirements:
- Large memory for 183K × 29K matrix processing
- Sparse matrix handling for efficiency
- Proper data type optimization
"""

import logging
import json
import time
from pathlib import Path
from typing import Dict, Any, Optional
import shutil

import numpy as np
import pandas as pd
import scanpy as sc
from scipy import sparse
import pyarrow.parquet as pq
import warnings

# Configure scanpy
sc.settings.verbosity = 3
sc.settings.set_figure_params(dpi=80, facecolor='white')

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

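# Typical invocation (paths are the defaults set in main() below):
#   python data_processing.py
# reads data/SKM_human_pp_cells2nuclei_2023-06-22.h5ad and writes the parquet and
# JSON outputs into processed/.
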
def make_json_serializable(obj: Any) -> Any:
    """Convert numpy arrays and other non-serializable objects for JSON"""
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, dict):
        return {k: make_json_serializable(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [make_json_serializable(i) for i in obj]
    elif isinstance(obj, (np.integer, np.floating)):
        return obj.item()
    else:
        return obj

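# Example (for illustration): make_json_serializable({'a': np.arange(3), 'b': np.float32(1.5)})
# returns {'a': [0, 1, 2], 'b': 1.5}, which json.dump() can handle directly.
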
def log_memory_usage(stage: str, adata: sc.AnnData) -> None:
    """Log memory usage and dataset info"""
    memory_mb = adata.X.data.nbytes / 1024**2 if sparse.issparse(adata.X) else adata.X.nbytes / 1024**2
    logger.info(f"{stage}: Shape {adata.shape}, Memory: {memory_mb:.1f}MB")

def fix_pandas_index_column_bug(parquet_file: Path) -> bool:
    """
    Remove the '__index_level_0__' column from a parquet file

    By default, pandas/PyArrow preserve the DataFrame index when writing parquet;
    an unnamed, non-default index is stored as an extra '__index_level_0__' column.
    This is long-standing upstream behaviour with no planned change, so the column
    is stripped here after writing.

    References:
    - https://github.com/pandas-dev/pandas/issues/51664
    - https://github.com/pola-rs/polars/issues/7291

    Args:
        parquet_file: Path to the parquet file to fix

    Returns:
        bool: True if fix was applied successfully, False otherwise
    """
    logger.info(f"🔧 Checking for pandas __index_level_0__ column in {parquet_file.name}")

    try:
        # Check whether the extra column exists
        pf = pq.ParquetFile(parquet_file)
        schema_names = pf.schema_arrow.names

        if '__index_level_0__' not in schema_names:
            logger.info("✅ No __index_level_0__ column found - file is clean")
            return True

        logger.warning("🐛 Found extra __index_level_0__ column - fixing...")
        logger.info(f"   Current columns: {len(schema_names)} (expected: {len(schema_names)-1})")

        # Create backup
        backup_file = parquet_file.with_suffix('.backup.parquet')
        if not backup_file.exists():
            shutil.copy2(parquet_file, backup_file)
            logger.info(f"📦 Backup created: {backup_file.name}")

        # Apply fix using PyArrow
        table = pq.read_table(parquet_file)

        # Filter out the problematic column
        columns_to_keep = [name for name in table.column_names if name != '__index_level_0__']
        clean_table = table.select(columns_to_keep)

        # Write clean table to temporary file first
        temp_file = parquet_file.with_suffix('.temp.parquet')
        pq.write_table(clean_table, temp_file, compression='snappy')

        # Verify the fix
        temp_pf = pq.ParquetFile(temp_file)
        temp_schema_names = temp_pf.schema_arrow.names

        if '__index_level_0__' not in temp_schema_names:
            # Replace original with fixed version
            shutil.move(temp_file, parquet_file)
            logger.info("✅ Removed pandas __index_level_0__ column")
            logger.info(f"   Column count: {len(schema_names)} → {len(temp_schema_names)}")
            return True
        else:
            # Fix failed, clean up
            temp_file.unlink()
            logger.error("❌ Fix verification failed")
            return False

    except Exception as e:
        logger.error(f"❌ Error fixing pandas index column: {e}")
        return False

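# Alternative approach (a sketch, not used by this pipeline): the extra column can
# be avoided at write time instead of stripped afterwards, at the cost of losing
# the row labels (here: cell barcodes) from the file.
def write_parquet_without_index(df: pd.DataFrame, path: Path) -> None:
    """Hypothetical helper: write a DataFrame to parquet without its index,
    so no '__index_level_0__' column is created in the first place."""
    df.to_parquet(path, compression='snappy', index=False)
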
def process_expression_matrix(adata: sc.AnnData, method: str, output_dir: Path) -> Dict[str, Any]:
    """
    Process and save expression matrix

    Strategy:
    - Check sparsity and memory requirements
    - Densify in one pass if manageable, otherwise densify chunk by chunk
    - Use appropriate data types (float32) for efficiency
    """
    logger.info("Starting expression matrix processing...")
    log_memory_usage("Expression matrix", adata)

    # Calculate memory requirements for dense conversion
    dense_memory_gb = (adata.n_obs * adata.n_vars * 4) / (1024**3)  # float32 = 4 bytes
    nnz = adata.X.nnz if sparse.issparse(adata.X) else np.count_nonzero(adata.X)
    sparsity = 1.0 - (nnz / (adata.n_obs * adata.n_vars))

    logger.info(f"Dense conversion would require: {dense_memory_gb:.2f}GB")
    logger.info(f"Current sparsity: {sparsity:.2%}")

    output_file = output_dir / f"skeletal_muscle_{method}_expression.parquet"

    if dense_memory_gb > 8.0:  # If >8GB, process in chunks
        logger.info("Large matrix detected, processing in chunks...")
        chunk_size = 10000
        chunks = []

        for i in range(0, adata.n_obs, chunk_size):
            end_idx = min(i + chunk_size, adata.n_obs)
            chunk = adata[i:end_idx, :].copy()

            if sparse.issparse(chunk.X):
                chunk_dense = chunk.X.toarray().astype(np.float32)
            else:
                chunk_dense = chunk.X.astype(np.float32)

            chunk_df = pd.DataFrame(
                chunk_dense,
                index=chunk.obs_names,
                columns=chunk.var_names
            )
            chunks.append(chunk_df)
            logger.info(f"Processed chunk {i//chunk_size + 1}/{(adata.n_obs-1)//chunk_size + 1}")

        # Combine chunks (note: the full dense matrix is still assembled in memory
        # before writing; chunking only bounds the size of each densification step)
        expression_df = pd.concat(chunks, axis=0)
        del chunks  # Free memory

    else:
        # Convert to dense in one go
        logger.info("Converting to dense matrix...")
        if sparse.issparse(adata.X):
            expression_data = adata.X.toarray().astype(np.float32)
        else:
            expression_data = adata.X.astype(np.float32)

        expression_df = pd.DataFrame(
            expression_data,
            index=adata.obs_names,
            columns=adata.var_names
        )

    # Save with compression
    logger.info(f"Saving expression matrix: {expression_df.shape}")
    expression_df.to_parquet(output_file, compression='snappy')

    # Strip the extra '__index_level_0__' column pandas writes for the DataFrame index
    fix_success = fix_pandas_index_column_bug(output_file)

    stats = {
        'file': str(output_file),
        'shape': list(expression_df.shape),
        'memory_gb': dense_memory_gb,
        'sparsity_percent': sparsity * 100,
        'dtype': str(expression_df.dtypes.iloc[0]),
        'pandas_index_bug_fixed': fix_success
    }

    logger.info(f"✅ Expression matrix saved: {expression_df.shape}")
    return stats

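# Memory-saving alternative (a sketch, not wired into the pipeline above): stream
# each densified chunk straight to the parquet file with pyarrow.parquet.ParquetWriter,
# so the full dense matrix never has to be assembled in memory. The chunk size and
# function name are illustrative.
def write_expression_matrix_streaming(adata: sc.AnnData, output_file: Path, chunk_size: int = 10000) -> None:
    """Hypothetical helper: densify and write the expression matrix chunk by chunk."""
    import pyarrow as pa

    writer = None
    try:
        for i in range(0, adata.n_obs, chunk_size):
            chunk = adata[i:i + chunk_size, :]
            dense = chunk.X.toarray() if sparse.issparse(chunk.X) else np.asarray(chunk.X)
            chunk_df = pd.DataFrame(dense.astype(np.float32),
                                    index=chunk.obs_names, columns=chunk.var_names)
            table = pa.Table.from_pandas(chunk_df, preserve_index=False)
            if writer is None:
                writer = pq.ParquetWriter(output_file, table.schema, compression='snappy')
            writer.write_table(table)
    finally:
        if writer is not None:
            writer.close()
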
def process_sample_metadata(adata: sc.AnnData, method: str, output_dir: Path) -> Dict[str, Any]:
    """Process and save sample (cell) metadata"""
    logger.info("Processing sample metadata...")

    sample_metadata = adata.obs.copy()

    # Verify critical columns exist
    critical_cols = ['Age_group', 'Sex', 'annotation_level0', 'DonorID', 'batch']
    missing_cols = [col for col in critical_cols if col not in sample_metadata.columns]

    if missing_cols:
        logger.warning(f"Missing critical columns: {missing_cols}")
    else:
        logger.info("✅ All critical metadata columns present")

    # Add standardized age column if needed
    if 'age_numeric' not in sample_metadata.columns and 'Age_group' in sample_metadata.columns:
        # Convert age groups to numeric (use midpoint of range)
        age_mapping = {
            '15-20': 17.5, '20-25': 22.5, '25-30': 27.5, '35-40': 37.5,
            '50-55': 52.5, '55-60': 57.5, '60-65': 62.5, '70-75': 72.5
        }
        sample_metadata['age_numeric'] = sample_metadata['Age_group'].map(age_mapping)
        logger.info("Added numeric age column")

    # Optimize data types
    for col in sample_metadata.columns:
        if sample_metadata[col].dtype == 'object':
            # Convert low-cardinality string columns to category type for efficiency
            if sample_metadata[col].nunique() < len(sample_metadata) * 0.5:
                sample_metadata[col] = sample_metadata[col].astype('category')

    output_file = output_dir / f"skeletal_muscle_{method}_sample_metadata.parquet"
    sample_metadata.to_parquet(output_file, compression='snappy')

    stats = {
        'file': str(output_file),
        'shape': list(sample_metadata.shape),
        'columns': list(sample_metadata.columns),
        'missing_columns': missing_cols,
        'age_groups': sample_metadata['Age_group'].value_counts().to_dict() if 'Age_group' in sample_metadata.columns else {},
        'cell_types': sample_metadata['annotation_level0'].value_counts().head(10).to_dict() if 'annotation_level0' in sample_metadata.columns else {}
    }

    logger.info(f"✅ Sample metadata saved: {sample_metadata.shape}")
    return stats

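# Generalisation (a sketch, not used above): the hard-coded age_mapping could be
# derived from the 'lo-hi' group labels themselves; _age_group_midpoint is a
# hypothetical helper illustrating that.
def _age_group_midpoint(age_group: str) -> float:
    """Hypothetical helper: midpoint of an age-group label, e.g. '20-25' -> 22.5."""
    low, high = age_group.split('-')
    return (float(low) + float(high)) / 2.0
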
def process_feature_metadata(adata: sc.AnnData, method: str, output_dir: Path) -> Dict[str, Any]:
    """Process and save feature (gene) metadata"""
    logger.info("Processing feature metadata...")

    feature_metadata = adata.var.copy()

    # Ensure gene IDs are present
    if 'gene_ids' not in feature_metadata.columns:
        feature_metadata['gene_ids'] = feature_metadata.index
        logger.info("Added gene_ids column from index")

    # Verify gene symbols
    if 'SYMBOL' in feature_metadata.columns:
        n_symbols = feature_metadata['SYMBOL'].notna().sum()
        logger.info(f"Gene symbols available for {n_symbols}/{len(feature_metadata)} genes")

    output_file = output_dir / f"skeletal_muscle_{method}_feature_metadata.parquet"
    feature_metadata.to_parquet(output_file, compression='snappy')

    stats = {
        'file': str(output_file),
        'shape': list(feature_metadata.shape),
        'columns': list(feature_metadata.columns),
        'has_symbols': 'SYMBOL' in feature_metadata.columns,
        'has_ensembl': 'ENSEMBL' in feature_metadata.columns
    }

    logger.info(f"✅ Feature metadata saved: {feature_metadata.shape}")
    return stats

def compute_missing_projections(adata: sc.AnnData) -> Dict[str, bool]:
    """Compute missing dimensionality reductions"""
    logger.info("Checking and computing missing projections...")

    computed = {}

    # Check PCA
    if 'X_pca' not in adata.obsm:
        logger.info("Computing PCA (50 components)...")
        try:
            sc.pp.pca(adata, n_comps=50, svd_solver='arpack')
            computed['X_pca'] = True
            logger.info("✅ PCA computed")
        except Exception as e:
            logger.error(f"PCA computation failed: {e}")
            computed['X_pca'] = False
    else:
        computed['X_pca'] = True
        logger.info("✅ PCA already exists")

    # Check t-SNE
    if 'X_tsne' not in adata.obsm:
        logger.info("Computing t-SNE...")
        try:
            # Use existing neighbors if available, otherwise compute
            if 'neighbors' not in adata.uns:
                logger.info("Computing neighbors for t-SNE...")
                sc.pp.neighbors(adata, n_neighbors=15, n_pcs=40)

            sc.tl.tsne(adata, perplexity=30, n_jobs=8)
            computed['X_tsne'] = True
            logger.info("✅ t-SNE computed")
        except Exception as e:
            logger.error(f"t-SNE computation failed: {e}")
            computed['X_tsne'] = False
    else:
        computed['X_tsne'] = True
        logger.info("✅ t-SNE already exists")

    return computed

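# Note: sc.pp.pca and sc.tl.tsne store their results in adata.obsm['X_pca'] and
# adata.obsm['X_tsne'] respectively, which is where process_projections() below
# picks them up alongside any precomputed X_scVI / X_umap embeddings.
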
def process_projections(adata: sc.AnnData, method: str, output_dir: Path) -> Dict[str, Any]:
    """Process and save all dimensionality reduction projections"""
    logger.info("Processing dimensionality reduction projections...")

    # First compute any missing projections
    computed_status = compute_missing_projections(adata)

    projection_stats = {}
    expected_projections = ['X_scVI', 'X_umap', 'X_pca', 'X_tsne']

    for proj_name in expected_projections:
        if proj_name in adata.obsm:
            proj_data = adata.obsm[proj_name]

            # Convert to DataFrame (columns named e.g. SCVI1, UMAP1, PCA1, TSNE1, ...)
            proj_df = pd.DataFrame(
                proj_data,
                index=adata.obs_names,
                columns=[f"{proj_name.split('_')[1].upper()}{i+1}" for i in range(proj_data.shape[1])]
            )

            # Save projection
            output_file = output_dir / f"skeletal_muscle_{method}_projection_{proj_name}.parquet"
            proj_df.to_parquet(output_file, compression='snappy')

            projection_stats[proj_name] = {
                'file': str(output_file),
                'shape': list(proj_df.shape),
                'computed_now': computed_status.get(proj_name, False)
            }

            logger.info(f"✅ Saved {proj_name}: {proj_df.shape}")
        else:
            logger.warning(f"❌ {proj_name} not available")
            projection_stats[proj_name] = {'available': False}

    return projection_stats

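# Read-back example (a sketch; the file name matches the pattern used above and
# '10x' is the method string set in main()):
def _example_load_umap(output_dir: Path, method: str = "10x") -> pd.DataFrame:
    """Hypothetical helper: load the saved UMAP coordinates; pandas restores the
    cell-ID index that to_parquet() stored with the projection."""
    return pd.read_parquet(output_dir / f"skeletal_muscle_{method}_projection_X_umap.parquet")
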
def process_unstructured_metadata(adata: sc.AnnData, method: str, output_dir: Path) -> Dict[str, Any]:
    """Process and save unstructured metadata (uns)"""
    logger.info("Processing unstructured metadata...")

    try:
        # Make data JSON serializable
        unstructured_data = make_json_serializable(adata.uns)

        output_file = output_dir / f"skeletal_muscle_{method}_unstructured_metadata.json"

        with open(output_file, 'w') as f:
            json.dump(unstructured_data, f, indent=2)

        # Count keys and estimate size
        key_count = len(unstructured_data) if isinstance(unstructured_data, dict) else 0
        file_size_mb = output_file.stat().st_size / (1024**2)

        stats = {
            'file': str(output_file),
            'key_count': key_count,
            'file_size_mb': round(file_size_mb, 2),
            'top_keys': list(unstructured_data.keys())[:10] if isinstance(unstructured_data, dict) else []
        }

        logger.info(f"✅ Unstructured metadata saved: {key_count} keys, {file_size_mb:.1f}MB")
        return stats

    except Exception as e:
        logger.error(f"Failed to process unstructured metadata: {e}")
        return {'error': str(e)}

def main():
    """Main processing function"""
    start_time = time.time()
    logger.info("=== Phase 2: Data Processing Started ===")

    # Paths
    data_file = Path("data/SKM_human_pp_cells2nuclei_2023-06-22.h5ad")
    output_dir = Path("processed")
    output_dir.mkdir(exist_ok=True)

    # Configuration
    method = "10x"  # From exploration results

    # Load data
    logger.info(f"Loading data from {data_file}...")
    try:
        adata = sc.read_h5ad(data_file)
        logger.info(f"✅ Data loaded: {adata.shape}")
        log_memory_usage("Initial", adata)
    except Exception as e:
        logger.error(f"Failed to load data: {e}")
        return

    # Processing results tracking
    processing_results = {
        'dataset_info': {
            'shape': list(adata.shape),
            'method': method,
            'processing_time': None,
            'timestamp': time.strftime('%Y-%m-%d %H:%M:%S')
        }
    }

    try:
        # Task 2.1: Expression Matrix
        logger.info("\n🧬 Task 2.1: Processing Expression Matrix")
        processing_results['expression'] = process_expression_matrix(adata, method, output_dir)

        # Task 2.2: Sample Metadata
        logger.info("\n📊 Task 2.2: Processing Sample Metadata")
        processing_results['sample_metadata'] = process_sample_metadata(adata, method, output_dir)

        # Task 2.3: Feature Metadata
        logger.info("\n🧪 Task 2.3: Processing Feature Metadata")
        processing_results['feature_metadata'] = process_feature_metadata(adata, method, output_dir)

        # Task 2.4: Dimensionality Reductions
        logger.info("\n📈 Task 2.4: Processing Projections")
        processing_results['projections'] = process_projections(adata, method, output_dir)

        # Task 2.5: Unstructured Metadata
        logger.info("\n📋 Task 2.5: Processing Unstructured Metadata")
        processing_results['unstructured'] = process_unstructured_metadata(adata, method, output_dir)

        # Save processing summary (run through make_json_serializable so any
        # numpy scalars in the collected stats do not break json.dump)
        processing_time = time.time() - start_time
        processing_results['dataset_info']['processing_time'] = f"{processing_time:.1f}s"

        summary_file = output_dir / "phase2_processing_summary.json"
        with open(summary_file, 'w') as f:
            json.dump(make_json_serializable(processing_results), f, indent=2)

        logger.info("\n✅ Phase 2 Processing Complete!")
        logger.info(f"⏱️ Total time: {processing_time:.1f}s")
        logger.info(f"📄 Summary saved: {summary_file}")

        # List all created files
        logger.info("\n📁 Created Files:")
        for file_path in output_dir.glob("skeletal_muscle_*.parquet"):
            size_mb = file_path.stat().st_size / (1024**2)
            logger.info(f"   {file_path.name} ({size_mb:.1f}MB)")

        for file_path in output_dir.glob("skeletal_muscle_*.json"):
            size_mb = file_path.stat().st_size / (1024**2)
            logger.info(f"   {file_path.name} ({size_mb:.1f}MB)")

    except Exception as e:
        logger.error(f"Processing failed: {e}")
        processing_results['error'] = str(e)

        # Save error summary
        error_file = output_dir / "phase2_error_summary.json"
        with open(error_file, 'w') as f:
            json.dump(make_json_serializable(processing_results), f, indent=2)

        raise

if __name__ == "__main__":
    main()