Upload folder using huggingface_hub
- README.md +234 -3
- __pycache__/fineweb_processor.cpython-310.pyc +0 -0
- cleanup_fineweb.sh +56 -0
- data_processor.py +287 -0
- fineweb_processor.py +303 -0
- fineweb_train.bin +3 -0
- fineweb_validation.bin +3 -0
README.md
CHANGED
@@ -1,3 +1,234 @@
# FineWeb Educational Dataset - Construction Guide

This document explains how the FineWeb Educational dataset was constructed, sampled, and processed for training DeepSeek language models.

## Dataset Source

**Original Dataset**: [HuggingFaceFW/fineweb-edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)

**Full Dataset Size**: ~4TB (2,410 parquet files)
**Content Type**: High-quality educational web content from Common Crawl

## Sampling Strategy

### Why Sampling?
The full FineWeb dataset is massive (~4TB) and would take days to download and process. We implemented **Strategy 4: Single Parquet File Loading** for efficient processing.

### Sampling Method
- **Files Selected**: First 5 parquet files from the dataset
- **Download Size**: ~10GB (vs 4TB full dataset)
- **Percentage**: ~0.25% of total dataset
- **Rationale**: Sequential loading is most efficient with HuggingFace datasets

### Files Used
```
data/CC-MAIN-2024-42/
├── 000_00000.parquet (~2.3GB)
├── 000_00001.parquet (~2.3GB)
├── 000_00002.parquet (~2.3GB)
├── 000_00003.parquet (~2.3GB)
└── 000_00004.parquet (~2.3GB)
```
## Data Processing Pipeline

### Step 1: Raw Data Loading
```python
# Load 5 parquet files (~10GB total)
data_files = [
    "data/CC-MAIN-2024-42/000_00000.parquet",
    "data/CC-MAIN-2024-42/000_00001.parquet",
    "data/CC-MAIN-2024-42/000_00002.parquet",
    "data/CC-MAIN-2024-42/000_00003.parquet",
    "data/CC-MAIN-2024-42/000_00004.parquet"
]

ds = load_dataset("HuggingFaceFW/fineweb-edu",
                  data_files=data_files,
                  split='train')
```

**Result**: 2,294,208 raw examples loaded

### Step 2: Quality Filtering
```python
def filter_by_length(example):
    text_length = len(example.get('text', ''))
    return (100 <= text_length <= 3000 and   # Length filter
            example.get('score', 0) > 0.6)   # Quality filter

ds = ds.filter(filter_by_length)
```

**Filtering Criteria**:
- **Length**: 100-3000 characters (educational content range)
- **Quality Score**: > 0.6 (high-quality content only)
- **Language**: English content (from language detection)

**Result**: 964,864 high-quality examples (42% of raw data)

### Step 3: Dataset Splitting
```python
# 80/20 train/validation split
train_val = ds.train_test_split(test_size=0.2, seed=42)

ds = {
    "train": train_val["train"],        # 80% for training
    "validation": train_val["test"]     # 20% for validation
}
```

**Final Split**:
- **Training**: 771,891 examples (80%)
- **Validation**: 192,973 examples (20%)

### Step 4: Tokenization & Binary Conversion
```python
# Process each split
for split_name, split_data in ds.items():
    # Tokenize with GPT-2 tokenizer
    tokenized = split_data.map(self.process, ...)

    # Convert to binary format
    all_ids = []
    for example in tokenized:
        all_ids.extend(example['ids'])

    # Save as binary file
    arr = np.array(all_ids, dtype=np.uint16)
    filename = f"fineweb_{split_name}.bin"
    arr.tofile(filename)
```
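To sanity-check a converted split, the flat token stream can be read back with `np.memmap` and a short prefix decoded with the same GPT-2 tokenizer. A minimal sketch, assuming the `.bin` file sits in the current working directory:

```python
import numpy as np
import tiktoken

# Open the binary split without loading the whole file into RAM
tokens = np.memmap("fineweb_train.bin", dtype=np.uint16, mode="r")
print(f"Total tokens: {len(tokens):,}")

# Decode a short prefix to eyeball the <|content|> / <|url|> / <|date|> structure
enc = tiktoken.get_encoding("gpt2")
print(enc.decode(tokens[:200].tolist()))
```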
## Final Dataset Statistics

### File Sizes
- **`fineweb_train.bin`**: 646.95 MB (339,186,828 tokens)
- **`fineweb_validation.bin`**: 161.80 MB (84,832,287 tokens)
- **Total Processed**: 808.75 MB (424,019,115 tokens)

### Content Distribution
- **Training Examples**: 758,265 (after tokenization filtering)
- **Validation Examples**: 189,518 (after tokenization filtering)
- **Total Examples**: 947,783

### Quality Metrics
- **Original Raw Data**: 2,294,208 examples
- **After Quality Filtering**: 964,864 examples (42% retention)
- **After Tokenization**: 947,783 examples (98% of filtered)
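Since tokens are stored as `uint16`, the file sizes above follow directly from the token counts (2 bytes per token); a quick check against the table:

```python
# Each uint16 token occupies 2 bytes on disk
train_tokens = 339_186_828
val_tokens = 84_832_287

print(f"{train_tokens * 2 / 1024**2:.2f} MB")  # 646.95 MB -> fineweb_train.bin
print(f"{val_tokens * 2 / 1024**2:.2f} MB")    # 161.80 MB -> fineweb_validation.bin
```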
## Dataset Structure

### Input Format
Each example contains:
```json
{
  "text": "Main educational content...",
  "url": "https://example.com/article",
  "date": "2024-01-15",
  "language": "en",
  "score": 0.95
}
```

### Processing Output
```python
# Special tokens structure
full_text = (
    f"{self.special_tokens['content_start']} {content} {self.special_tokens['content_end']}"
    f" {self.special_tokens['url_start']} {url} {self.special_tokens['url_end']}"
    f" {self.special_tokens['date_start']} {date} {self.special_tokens['date_end']}"
)
```
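Concretely, each processed example becomes one flat string before tokenization. A hedged illustration with made-up content (the marker strings match `fineweb_processor.py`; because `encode_ordinary` is used, the markers are tokenized as ordinary text rather than as reserved special tokens):

```python
import tiktoken

example_text = (
    "<|content|> Photosynthesis converts light energy into chemical energy... </|content|>"
    " <|url|> https://example.com/article </|url|>"
    " <|date|> 2024-01-15 </|date|>"
)

enc = tiktoken.get_encoding("gpt2")
ids = enc.encode_ordinary(example_text)  # plain BPE encoding, no special-token handling
print(len(ids), ids[:10])
```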
## Usage in Training

### Training Script
```bash
python src/run_fineweb_training.py
```

### Data Loading
```python
from src.data.fineweb_processor import FineWebDataProcessor

processor = FineWebDataProcessor()
train_data = processor.load_binary_data('fineweb_train.bin')
val_data = processor.load_binary_data('fineweb_validation.bin')
```
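Building on the loading snippet above, training batches can then be drawn with the processor's `get_batch` helper; the batch and block sizes below are illustrative, not the project's training configuration:

```python
batch_size, block_size = 8, 1024   # illustrative values; block_size matches the 1024-token context window

# x holds input windows, y the same windows shifted right by one token
x, y = processor.get_batch(train_data, batch_size=batch_size, block_size=block_size)
print(x.shape, y.shape)   # both torch.Size([8, 1024])
```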
## Reproducibility

### Random Seeds
- **Dataset Split**: `seed=42` (reproducible train/val split)
- **Processing**: Deterministic tokenization and filtering

### File Selection
- **Parquet Files**: First 5 files in chronological order
- **Sampling**: Sequential loading (not random sampling)

## Limitations & Considerations

### Sampling Bias
- **Chronological**: Only includes content from specific time periods
- **Geographic**: May be biased toward certain regions/languages
- **Content Type**: Web content may have different characteristics than curated datasets

### Quality Trade-offs
- **Filtering**: Aggressive filtering removes 58% of raw data
- **Length**: 100-3000 character limit may exclude some valuable content
- **Score Threshold**: 0.6 threshold is somewhat arbitrary
## Future Improvements

### Alternative Sampling Strategies
1. **Random Sampling**: Load random parquet files across time periods (see the sketch after this list)
2. **Stratified Sampling**: Ensure representation across different content types
3. **Progressive Loading**: Start small, expand based on training results
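A possible shape for random sampling, sketched under two assumptions: the snapshot keeps the `000_NNNNN.parquet` naming used above, and `num_available` is a placeholder for however many files the `CC-MAIN-2024-42` folder actually contains (check the repository listing before using it):

```python
import random
from datasets import load_dataset

num_available = 100   # hypothetical file count for this snapshot - verify against the repo
num_to_sample = 5     # keep the download comparable to the 5-file sequential sample

random.seed(42)       # reproducible file selection
indices = sorted(random.sample(range(num_available), num_to_sample))
data_files = [f"data/CC-MAIN-2024-42/000_{i:05d}.parquet" for i in indices]

ds = load_dataset("HuggingFaceFW/fineweb-edu", data_files=data_files, split="train")
```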
### Enhanced Filtering
1. **Content Classification**: Filter by educational topic/domain
2. **Language Detection**: Better multilingual support
3. **Quality Metrics**: More sophisticated quality scoring
## Technical Details

### Tokenizer
- **Type**: GPT-2 tokenizer (50,257-token vocabulary)
- **Special Tokens**: Custom marker strings for content structure
- **Context Window**: 1024 tokens (DeepSeek architecture)

### Processing Pipeline
- **Parallel Processing**: 8 processes for tokenization
- **Memory Management**: Efficient streaming for large datasets
- **Error Handling**: Graceful fallback for malformed examples

### Storage Format
- **Binary Format**: `.bin` files for efficient loading
- **Data Type**: `uint16` (65,536 token limit, sufficient for the GPT-2 vocabulary)
- **Compression**: None (trade-off between size and loading speed)
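The `uint16` choice is safe because every GPT-2 token id is below the 65,536 ceiling; a two-line check:

```python
import numpy as np
import tiktoken

enc = tiktoken.get_encoding("gpt2")
assert enc.n_vocab == 50257                         # GPT-2 vocabulary size
assert enc.n_vocab - 1 <= np.iinfo(np.uint16).max   # largest id (50,256) fits in uint16
```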
## Dataset Citation

If you use this processed dataset, please cite:

```bibtex
@dataset{fineweb_edu_processed,
  title={FineWeb Educational Dataset - 10GB Sampled (0.25% of Full Dataset)},
  author={Your Name},
  year={2024},
  url={https://huggingface.co/datasets/your-username/fineweb-edu-10gb-5parquet-processed},
  note={Processed subset of HuggingFaceFW/fineweb-edu for DeepSeek training}
}
```

## Contact & Support

For questions about this dataset construction:
- **Repository**: [Tiny-Deepseek](https://github.com/your-username/Tiny-Deepseek)
- **Issues**: GitHub Issues for technical problems
- **Discussions**: GitHub Discussions for general questions

---

*This dataset was constructed as part of the Tiny-Deepseek project for training efficient language models on educational content.*
__pycache__/fineweb_processor.cpython-310.pyc
ADDED
Binary file (8.97 kB)
cleanup_fineweb.sh
ADDED
@@ -0,0 +1,56 @@
#!/bin/bash

echo "🗑️ FineWeb Dataset Cleanup Script"
echo "=================================="

# Check current usage
echo "📊 Current FineWeb cache usage:"
du -sh /root/.cache/huggingface/hub/datasets--HuggingFaceFW--fineweb-edu 2>/dev/null || echo "No hub cache found"
du -sh /root/.cache/huggingface/datasets/HuggingFaceFW___fineweb-edu 2>/dev/null || echo "No datasets cache found"

echo ""
echo "🧹 Cleaning up FineWeb cached data..."

# Remove the hub cache (downloaded files)
if [ -d "/root/.cache/huggingface/hub/datasets--HuggingFaceFW--fineweb-edu" ]; then
    echo "Removing hub cache..."
    rm -rf /root/.cache/huggingface/hub/datasets--HuggingFaceFW--fineweb-edu
    echo "✅ Hub cache removed"
else
    echo "ℹ️ No hub cache found"
fi

# Remove the datasets cache (processed data)
if [ -d "/root/.cache/huggingface/datasets/HuggingFaceFW___fineweb-edu" ]; then
    echo "Removing datasets cache..."
    rm -rf /root/.cache/huggingface/datasets/HuggingFaceFW___fineweb-edu
    echo "✅ Datasets cache removed"
else
    echo "ℹ️ No datasets cache found"
fi

# Remove lock files
if [ -d "/root/.cache/huggingface/hub/.locks/datasets--HuggingFaceFW--fineweb-edu" ]; then
    echo "Removing lock files..."
    rm -rf /root/.cache/huggingface/hub/.locks/datasets--HuggingFaceFW--fineweb-edu
    echo "✅ Lock files removed"
fi

# Also clean up any processed binary files in our data directory
if [ -f "src/data/fineweb_train.bin" ] || [ -f "src/data/fineweb_validation.bin" ] || [ -f "src/data/fineweb_finetune.bin" ]; then
    echo "Removing processed FineWeb binary files..."
    rm -f src/data/fineweb_*.bin
    echo "✅ Binary files removed"
fi

echo ""
echo "🎉 FineWeb cleanup completed!"
echo "💾 Space freed: ~77GB"
echo ""
echo "📋 Summary:"
echo "- Removed HuggingFace hub cache"
echo "- Removed datasets cache"
echo "- Removed lock files"
echo "- Removed processed binary files"
echo ""
echo "ℹ️ You can now run the processor again with the smaller dataset size."
data_processor.py
ADDED
@@ -0,0 +1,287 @@
"""
Data Processor for DeepSeek Children's Stories Model
Handles dataset loading, preprocessing, and tokenization for children's story generation
"""

import tiktoken
import os
import numpy as np
from datasets import load_dataset
from tqdm.auto import tqdm
import torch
from typing import Dict, List, Optional

def load_encoder_decoder():
    """Load the encoder and decoder for text processing"""
    enc = tiktoken.get_encoding("gpt2")
    return enc, enc

class DeepSeekDataProcessor:
    def __init__(self, config=None):
        # Initialize tokenizer with GPT-2 encoding
        self.enc = tiktoken.get_encoding("gpt2")

        # Special tokens for story structure (optimized for children's stories)
        self.special_tokens = {
            "story_start": "<|story|>",
            "story_end": "</|story|>",
            "prompt_start": "<|prompt|>",
            "prompt_end": "</|prompt|>",
            "moral_start": "<|moral|>",
            "moral_end": "</|moral|>",
            "character_start": "<|character|>",
            "character_end": "</|character|>"
        }

        # Ensure data directory exists
        self.data_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
        os.makedirs(self.data_dir, exist_ok=True)
        print(f"Data directory: {self.data_dir}")

        # Configuration for processing
        self.max_length = 1024  # DeepSeek context window
        self.min_length = 50    # Minimum story length

    def preprocess_text(self, text: str) -> str:
        """Preprocess text for children's stories"""
        # Basic text cleaning
        text = text.lower()             # Convert to lowercase for consistency
        text = text.replace('\n', ' ')  # Replace newlines with spaces
        text = ' '.join(text.split())   # Normalize whitespace

        # Remove any inappropriate content markers (basic filtering)
        inappropriate_phrases = ['adult content', 'mature', 'explicit']
        for phrase in inappropriate_phrases:
            if phrase in text:
                return ""

        # Ensure the text is child-friendly
        if len(text) < self.min_length:
            return ""

        return text

    def extract_story_elements(self, example: Dict) -> Dict:
        """Extract story elements for better structure"""
        prompt = self.preprocess_text(example.get('prompt', ''))
        story = self.preprocess_text(example.get('text', ''))

        # Extract potential moral or lesson
        moral = ""
        if 'moral' in example:
            moral = self.preprocess_text(example['moral'])
        elif 'lesson' in example:
            moral = self.preprocess_text(example['lesson'])

        # Extract main character if available
        character = ""
        if 'character' in example:
            character = self.preprocess_text(example['character'])

        return {
            'prompt': prompt,
            'story': story,
            'moral': moral,
            'character': character
        }

    def process(self, example: Dict) -> Dict:
        """Process a single example for DeepSeek model"""
        # Extract story elements
        elements = self.extract_story_elements(example)

        # Skip if no valid content
        if not elements['story'] or not elements['prompt']:
            return {'ids': [], 'len': 0}

        # Create structured text with special tokens
        full_text = (
            f"{self.special_tokens['prompt_start']} {elements['prompt']} {self.special_tokens['prompt_end']} "
        )

        # Add character information if available
        if elements['character']:
            full_text += f"{self.special_tokens['character_start']} {elements['character']} {self.special_tokens['character_end']} "

        # Add the main story
        full_text += f"{self.special_tokens['story_start']} {elements['story']} {self.special_tokens['story_end']}"

        # Add moral if available
        if elements['moral']:
            full_text += f" {self.special_tokens['moral_start']} {elements['moral']} {self.special_tokens['moral_end']}"

        # Tokenize with error handling
        try:
            ids = self.enc.encode_ordinary(full_text)

            # Ensure the sequence isn't too long
            if len(ids) > self.max_length:
                ids = ids[:self.max_length]

            # Skip if too short
            if len(ids) < 20:
                return {'ids': [], 'len': 0}

            out = {'ids': ids, 'len': len(ids)}
            return out

        except Exception as e:
            print(f"Error tokenizing text: {e}")
            return {'ids': [], 'len': 0}

    def prepare_dataset(self) -> Dict:
        """Prepare the Children Stories Collection dataset for DeepSeek training"""
        # Load the Children Stories Collection dataset
        print("Loading Children Stories Collection dataset...")
        ds = load_dataset("ajibawa-2023/Children-Stories-Collection")

        train_bin_path = os.path.join(self.data_dir, "train.bin")
        val_bin_path = os.path.join(self.data_dir, "validation.bin")
        finetune_bin_path = os.path.join(self.data_dir, "finetune.bin")

        print(f"Checking for existing processed files...")

        # Check if all files exist
        if (os.path.exists(train_bin_path) and
            os.path.exists(val_bin_path) and
            os.path.exists(finetune_bin_path)):

            print("Found existing processed files!")
            print(f"Train file: {os.path.getsize(train_bin_path) / (1024*1024):.2f} MB")
            print(f"Validation file: {os.path.getsize(val_bin_path) / (1024*1024):.2f} MB")
            print(f"Finetune file: {os.path.getsize(finetune_bin_path) / (1024*1024):.2f} MB")

            return {
                "train": train_bin_path,
                "validation": val_bin_path,
                "finetune": finetune_bin_path
            }

        print("Processing dataset...")

        # Filter out examples that are too short or too long
        def filter_by_length(example):
            text_length = len(example.get('text', ''))
            return self.min_length <= text_length <= 2000  # Reasonable length for children's stories

        ds = ds.filter(filter_by_length)
        print(f"After filtering: {len(ds['train'])} examples")

        # Split the dataset into train, validation, and finetune sets
        train_val_test = ds["train"].train_test_split(test_size=0.2, seed=42)
        val_finetune = train_val_test["test"].train_test_split(test_size=0.5, seed=42)

        # Create a new dataset dictionary with all splits
        ds = {
            "train": train_val_test["train"],
            "validation": val_finetune["train"],
            "finetune": val_finetune["test"]
        }

        print(f"Dataset split sizes:")
        print(f"Training set: {len(ds['train'])} examples")
        print(f"Validation set: {len(ds['validation'])} examples")
        print(f"Finetune set: {len(ds['finetune'])} examples")

        # Process each split
        for split_name, split_data in ds.items():
            print(f"\nProcessing {split_name} split...")

            # Process the data
            tokenized = split_data.map(
                self.process,
                remove_columns=['text', 'prompt', 'text_token_length'],
                desc=f"tokenizing {split_name} split",
                num_proc=8,
            )

            # Filter out empty sequences
            tokenized = tokenized.filter(lambda x: x['len'] > 0)
            print(f"After processing: {len(tokenized)} valid examples")

            # Save to binary file
            filename = os.path.join(self.data_dir, f"{split_name}.bin")
            print(f"Saving {split_name} split to: {filename}")

            # Calculate total length
            arr_len = np.sum(tokenized['len'], dtype=np.uint64)
            dtype = np.uint16
            arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,))
            total_batches = 1024

            idx = 0
            for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'):
                batch = tokenized.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy')
                arr_batch = np.concatenate(batch['ids'])
                arr[idx : idx + len(arr_batch)] = arr_batch
                idx += len(arr_batch)
            arr.flush()

            # Verify file was created
            if os.path.exists(filename):
                print(f"Successfully created {filename}")
                print(f"File size: {os.path.getsize(filename) / (1024*1024):.2f} MB")
            else:
                raise RuntimeError(f"Failed to create {filename}")

        return {
            "train": train_bin_path,
            "validation": val_bin_path,
            "finetune": finetune_bin_path
        }

    def load_binary_data(self, filepath: str) -> torch.Tensor:
        """Load binary data file as tensor"""
        try:
            data = np.memmap(filepath, dtype=np.uint16, mode='r')
            return torch.from_numpy(data.copy())
        except Exception as e:
            print(f"Error loading data from {filepath}: {e}")
            raise

    def get_batch(self, data: torch.Tensor, batch_size: int, block_size: int) -> tuple:
        """Get a batch of data for training"""
        # Generate random indices
        ix = torch.randint(len(data) - block_size, (batch_size,))

        # Get input sequences
        x = torch.stack([data[i:i+block_size].long() for i in ix])
        # Get target sequences (shifted by 1)
        y = torch.stack([data[i+1:i+1+block_size].long() for i in ix])

        return x, y

    def decode_tokens(self, token_ids: List[int]) -> str:
        """Decode token IDs back to text"""
        try:
            return self.enc.decode(token_ids)
        except Exception as e:
            print(f"Error decoding tokens: {e}")
            return ""

    def encode_text(self, text: str) -> List[int]:
        """Encode text to token IDs"""
        try:
            return self.enc.encode_ordinary(text)
        except Exception as e:
            print(f"Error encoding text: {e}")
            return []


def main():
    """Main function to process the dataset"""
    print("DeepSeek Children's Stories Data Processor")
    print("=" * 50)

    processor = DeepSeekDataProcessor()
    processor.prepare_dataset()

    print("\nData processing completed successfully!")
    print("Files created:")
    print("- src/data/train.bin")
    print("- src/data/validation.bin")
    print("- src/data/finetune.bin")


if __name__ == "__main__":
    main()
fineweb_processor.py
ADDED
@@ -0,0 +1,303 @@
"""
FineWeb Educational Data Processor for DeepSeek Model
Handles dataset loading, preprocessing, and tokenization for educational web content
"""

import tiktoken
import os
import numpy as np
from datasets import load_dataset
from tqdm.auto import tqdm
import torch
from typing import Dict, List, Optional

def load_encoder_decoder():
    """Load the encoder and decoder for text processing"""
    enc = tiktoken.get_encoding("gpt2")
    return enc, enc

class FineWebDataProcessor:
    def __init__(self, config=None):
        # Initialize tokenizer with GPT-2 encoding
        self.enc, self.dec = load_encoder_decoder()

        # Special tokens for educational content structure
        self.special_tokens = {
            "content_start": "<|content|>",
            "content_end": "</|content|>",
            "metadata_start": "<|metadata|>",
            "metadata_end": "</|metadata|>",
            "url_start": "<|url|>",
            "url_end": "</|url|>",
            "date_start": "<|date|>",
            "date_end": "</|date|>"
        }

        # Ensure data directory exists
        self.data_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
        os.makedirs(self.data_dir, exist_ok=True)
        print(f"Data directory: {self.data_dir}")

        # Configuration for processing
        self.max_length = 1024  # DeepSeek context window
        self.min_length = 100   # Minimum content length for educational content

    def preprocess_text(self, text: str) -> str:
        """Preprocess text for educational content"""
        if not text or not isinstance(text, str):
            return ""

        # Basic text cleaning
        text = text.strip()
        text = text.replace('\n', ' ')  # Replace newlines with spaces
        text = ' '.join(text.split())   # Normalize whitespace

        # Remove any inappropriate content markers
        inappropriate_phrases = ['adult content', 'mature', 'explicit', 'nsfw']
        for phrase in inappropriate_phrases:
            if phrase.lower() in text.lower():
                return ""

        # Ensure the text is educational and appropriate
        if len(text) < self.min_length:
            return ""

        return text

    def extract_content_elements(self, example: Dict) -> Dict:
        """Extract content elements for better structure"""
        # Main content
        content = self.preprocess_text(example.get('text', ''))

        # Metadata
        url = example.get('url', '')
        date = example.get('date', '')
        language = example.get('language', '')
        score = example.get('score', 0.0)

        # Only process if we have valid content
        if not content:
            return {'content': '', 'url': '', 'date': '', 'language': '', 'score': 0.0}

        return {
            'content': content,
            'url': url,
            'date': date,
            'language': language,
            'score': score
        }

    def process(self, example: Dict) -> Dict:
        """Process a single example for DeepSeek model"""
        # Extract content elements
        elements = self.extract_content_elements(example)

        # Skip if no valid content
        if not elements['content']:
            return {'ids': [], 'len': 0}

        # Create structured text with special tokens
        full_text = f"{self.special_tokens['content_start']} {elements['content']} {self.special_tokens['content_end']}"

        # Add metadata if available
        if elements['url']:
            full_text += f" {self.special_tokens['url_start']} {elements['url']} {self.special_tokens['url_end']}"

        if elements['date']:
            full_text += f" {self.special_tokens['date_start']} {elements['date']} {self.special_tokens['date_end']}"

        # Add language and score information
        if elements['language'] and elements['score'] > 0.5:  # Only include if language is detected with confidence
            full_text += f" {self.special_tokens['metadata_start']} Language: {elements['language']}, Quality: {elements['score']:.2f} {self.special_tokens['metadata_end']}"

        # Tokenize with error handling
        try:
            ids = self.enc.encode_ordinary(full_text)

            # Ensure the sequence isn't too long
            if len(ids) > self.max_length:
                ids = ids[:self.max_length]

            # Skip if too short
            if len(ids) < 50:  # Higher minimum for educational content
                return {'ids': [], 'len': 0}

            out = {'ids': ids, 'len': len(ids)}
            return out

        except Exception as e:
            print(f"Error tokenizing text: {e}")
            return {'ids': [], 'len': 0}

    def prepare_dataset(self) -> Dict:
        """Prepare the FineWeb Educational dataset for DeepSeek training"""
        # Clear any existing dataset cache first
        print("Clearing existing dataset cache...")
        try:
            from datasets import clear_cache
            clear_cache()
            print("Dataset cache cleared successfully!")

            # Also try to clear HuggingFace cache directory
            cache_dir = os.path.expanduser("~/.cache/huggingface")
            if os.path.exists(cache_dir):
                print(f"Clearing HuggingFace cache at: {cache_dir}")
                # Note: This is a safety measure - actual clearing happens via clear_cache()
        except Exception as e:
            print(f"Warning: Could not clear cache: {e}")

        # Strategy: Load multiple parquet files for ~10GB download
        print("Loading multiple parquet files from FineWeb Educational dataset (~60GB)...")
        try:
            # Load 30 parquet files for a larger, more diverse dataset
            # This should provide ~60GB of data and prevent overfitting
            data_files = [f"data/CC-MAIN-2024-42/000_{i:05d}.parquet" for i in range(30)]

            print(f"Loading {len(data_files)} parquet files...")
            ds = load_dataset("HuggingFaceFW/fineweb-edu",
                              data_files=data_files,
                              split='train')
            print(f"Successfully loaded {len(ds)} examples from {len(data_files)} parquet files")

        except Exception as e:
            print(f"Failed to load multiple files, trying single file fallback: {e}")
            # Fallback: Single file approach
            ds = load_dataset("HuggingFaceFW/fineweb-edu",
                              data_files="data/CC-MAIN-2024-42/000_00000.parquet",
                              split='train')
            print(f"Successfully loaded {len(ds)} examples from single parquet file (fallback)")

        train_bin_path = os.path.join(self.data_dir, "fineweb_train.bin")
        val_bin_path = os.path.join(self.data_dir, "fineweb_validation.bin")

        print(f"Checking for existing processed files...")

        # Check if both files exist
        if (os.path.exists(train_bin_path) and
            os.path.exists(val_bin_path)):

            print("Found existing processed files!")
            print(f"Train file: {os.path.getsize(train_bin_path) / (1024*1024):.2f} MB")
            print(f"Validation file: {os.path.getsize(val_bin_path) / (1024*1024):.2f} MB")

            return {
                "train": train_bin_path,
                "validation": val_bin_path
            }

        print("Processing dataset...")

        # Filter out examples that are too short or too long
        def filter_by_length(example):
            text_length = len(example.get('text', ''))
            # More selective filtering for larger dataset
            return (self.min_length <= text_length <= 3000 and  # Reasonable length for educational content
                    example.get('score', 0) > 0.6)               # Higher quality threshold

        ds = ds.filter(filter_by_length)
        print(f"After filtering: {len(ds)} examples")

        # Split the dataset into train and validation sets (80/20)
        train_val = ds.train_test_split(test_size=0.2, seed=42)

        # Create a new dataset dictionary with both splits
        ds = {
            "train": train_val["train"],
            "validation": train_val["test"]
        }

        print(f"Dataset split sizes:")
        print(f"Training set: {len(ds['train'])} examples (80%)")
        print(f"Validation set: {len(ds['validation'])} examples (20%)")

        # Process each split
        for split_name, split_data in ds.items():
            print(f"\nProcessing {split_name} split...")

            # Process the data
            tokenized = split_data.map(
                self.process,
                remove_columns=['text', 'id', 'dump', 'url', 'date', 'file_path', 'language', 'language_score', 'token_count', 'score', 'int_score'],
                desc=f"tokenizing {split_name} split",
                num_proc=8,
            )

            # Filter out empty sequences
            tokenized = tokenized.filter(lambda x: x['len'] > 0)
            print(f"After processing: {len(tokenized)} valid examples")

            # Convert to numpy arrays
            all_ids = []
            for example in tokenized:
                all_ids.extend(example['ids'])

            # Convert to numpy array
            arr = np.array(all_ids, dtype=np.uint16)

            # Save to binary file
            filename = os.path.join(self.data_dir, f"fineweb_{split_name}.bin")
            arr.tofile(filename)

            print(f"Saved {split_name} split to {filename}")
            print(f"File size: {os.path.getsize(filename) / (1024*1024):.2f} MB")
            print(f"Number of tokens: {len(arr):,}")

        return {
            "train": train_bin_path,
            "validation": val_bin_path
        }

    def load_binary_data(self, filepath: str) -> torch.Tensor:
        """Load binary data file as tensor"""
        try:
            data = np.memmap(filepath, dtype=np.uint16, mode='r')
            return torch.from_numpy(data.copy())
        except Exception as e:
            print(f"Error loading data from {filepath}: {e}")
            raise

    def get_batch(self, data: torch.Tensor, batch_size: int, block_size: int) -> tuple:
        """Get a batch of data for training"""
        # Generate random indices
        ix = torch.randint(len(data) - block_size, (batch_size,))

        # Get input sequences
        x = torch.stack([data[i:i+block_size].long() for i in ix])
        # Get target sequences (shifted by 1)
        y = torch.stack([data[i+1:i+1+block_size].long() for i in ix])

        return x, y

    def decode_tokens(self, token_ids: List[int]) -> str:
        """Decode token IDs back to text"""
        try:
            return self.enc.decode(token_ids)
        except Exception as e:
            print(f"Error decoding tokens: {e}")
            return ""

    def encode_text(self, text: str) -> List[int]:
        """Encode text to token IDs"""
        try:
            return self.enc.encode_ordinary(text)
        except Exception as e:
            print(f"Error encoding text: {e}")
            return []


def main():
    """Main function to process the FineWeb Educational dataset"""
    print("FineWeb Educational Data Processor")
    print("=" * 50)

    processor = FineWebDataProcessor()
    processor.prepare_dataset()

    print("\nData processing completed successfully!")
    print("Files created:")
    print("- src/data/fineweb_train.bin")
    print("- src/data/fineweb_validation.bin")


if __name__ == "__main__":
    main()
fineweb_train.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6f1f7004ad0903f195c647702bd7d082f91bede6d2ae155ce7c0c9eb2bca5bc2
size 678373656
fineweb_validation.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:943cdfa5cc66bf5482fcc92befb95bc527bb8836c10357f9265a7ecdfb8c40f4
size 169664574