sahancpal committed
Commit 0ea1ad2 · verified · 1 Parent(s): f5ad30a

Create hf_op_trace.json

Files changed (1)
  1. hf_op_trace.json +229 -0
hf_op_trace.json ADDED
@@ -0,0 +1,229 @@
+ """
+ Helper module for parsing HuggingFace tracer data.
+
+ This module contains utilities for loading, processing, and selecting
+ unique inputs from HuggingFace tracer JSON data.
+ """
+
+ import json
+ import logging
+ from typing import Any, Dict, List
+
+ import torch
+
+ logger = logging.getLogger(__name__)
+
+ # Operations that require special handling due to input constraints.
+ # These ops have requirements on inputs that make randomized tensors unsuitable.
+ SPECIAL_CASES = {
+     "embedding.default",  # requires second arg tensor to describe dims of first arg
+     "index.Tensor",  # requires list of tensors with indices within bounds of first arg
+     "meshgrid.indexing",  # requires last argument to be indexing method string
+     "empty_like.default",  # correctness testing doesn't make sense without special handling
+ }
+
+
+ def load_json_data(json_file_path: str) -> Dict[str, Any]:
+     """
+     Load operator data from JSON file.
+
+     Args:
+         json_file_path: Path to JSON file containing operator data
+
+     Returns:
+         Dictionary containing the loaded JSON data
+
+     Raises:
+         FileNotFoundError: If the JSON file doesn't exist
+         json.JSONDecodeError: If the JSON format is invalid
+     """
+     try:
+         with open(json_file_path, "r") as f:
+             return json.load(f)
+     except FileNotFoundError:
+         logger.error(f"JSON file not found: {json_file_path}")
+         raise
+     except json.JSONDecodeError as e:
+         logger.error(f"Invalid JSON format in {json_file_path}: {e}")
+         raise
+
+
+ def calculate_tensor_shape_magnitude(combination: Dict[str, Any]) -> float:
+     """
+     Calculate a magnitude metric for tensor arguments to determine 'largest'.
+
+     Args:
+         combination: Dictionary containing input_shapes and other metadata
+
+     Returns:
+         Total magnitude: the sum, over all tensor-shaped arguments, of the product of their dimensions (i.e., total element count)
+     """
+     total_magnitude = 0.0
+     input_shapes = combination["input_shapes"]
+
+     for shape in input_shapes:
+         if (
+             isinstance(shape, list)
+             and len(shape) > 0
+             and all(isinstance(x, int) for x in shape)
+         ):
+             # Calculate product of dimensions (total tensor size)
+             magnitude = 1
+             for dim in shape:
+                 magnitude *= dim
+             total_magnitude += magnitude
+
+     return total_magnitude
+
+
+ def select_unique_inputs(
+     unique_inputs: List[Dict[str, Any]],
+     dtype,
+     max_popular: int = 5,
+     max_largest: int = 5,
+ ) -> List[Dict[str, Any]]:
+     """
+     Select the most relevant unique inputs based on popularity and size.
+
+     Selects up to max_popular most popular unique_inputs and max_largest
+     largest unique_inputs, ensuring uniqueness by avoiding duplicates.
+
+     Args:
+         unique_inputs: List of unique input combinations
+         dtype: Data type to filter on; only combinations whose tensors use this dtype are kept
+         max_popular: Maximum number of popular inputs to select
+         max_largest: Maximum number of largest inputs to select
+
+     Returns:
+         List of selected unique input combinations
+     """
+
+     # Filter to only those combinations whose tensor dtypes match the specified dtype
+     def _matches_dtype(combo: Dict[str, Any]) -> bool:
+         for tensor_dtype in combo["input_dtypes"]:
+             if tensor_dtype.startswith("torch.") and tensor_dtype != str(dtype):
+                 return False
+         for entry in combo["tensor_lists"].values():
+             for tensor_dtype in entry["dtypes"]:
+                 # all entries in tensor_lists should already be tensor dtypes
+                 if tensor_dtype != str(dtype):
+                     return False
+         return True
+
+     unique_inputs = [combo for combo in unique_inputs if _matches_dtype(combo)]
+
+     # Sort by count (popularity) descending
+     popular_unique_inputs = sorted(
+         unique_inputs, key=lambda x: x["count"], reverse=True
+     )[:max_popular]
+
+     # Sort by magnitude descending
+     largest_unique_inputs = sorted(
+         unique_inputs,
+         key=lambda x: calculate_tensor_shape_magnitude(x),
+         reverse=True,
+     )
+
+     # Create set of selected unique_inputs (using input_shapes as key for uniqueness)
+     selected = {}
+
+     # Add popular unique_inputs first
+     for combo in popular_unique_inputs:
+         key = str(combo["input_shapes"])  # Use string representation as key
+         selected[key] = combo
+
+     # Add largest unique_inputs, skipping duplicates
+     for combo in largest_unique_inputs:
+         key = str(combo["input_shapes"])
+         if key not in selected:
+             selected[key] = combo
+         if len(selected) >= max_popular + max_largest:
+             break
+
+     return list(selected.values())
+
+
+ def create_single_tensor(
+     shape: List[int],
+     dtype_str: str,
+     device: str = "cpu",
+     default_dtype: torch.dtype = torch.float32,
+ ) -> torch.Tensor:
+     """
+     Create a single tensor with the given shape and dtype.
+
+     Args:
+         shape: List of integers representing tensor dimensions
+         dtype_str: String representation of the desired dtype
+         device: Device to create tensor on
+         default_dtype: Fallback dtype if conversion fails
+
+     Returns:
+         PyTorch tensor with specified properties
+     """
+     # Convert dtype string to actual torch dtype
+     torch_dtype = default_dtype
+     if dtype_str and isinstance(dtype_str, str):
+         try:
+             if dtype_str.startswith("torch."):
+                 dtype_name = dtype_str.replace("torch.", "")
+                 torch_dtype = getattr(torch, dtype_name)
+         except AttributeError:
+             logger.warning(
+                 f"Could not convert {dtype_str} to torch dtype, using {torch_dtype}"
+             )
+
+     # Create tensor with appropriate method based on dtype
+     if torch_dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16]:
+         # Floating point types - use randn
+         tensor = torch.randn(shape, dtype=torch_dtype, device=device)
+     elif torch_dtype in [
+         torch.int8,
+         torch.int16,
+         torch.int32,
+         torch.int64,
+         torch.uint8,
+     ]:
+         # Integer types - use randint with reasonable range
+         tensor = torch.randint(0, 10, shape, dtype=torch_dtype, device=device)
+     elif torch_dtype == torch.bool:
+         # Boolean type - use randint and cast to bool
+         tensor = torch.randint(0, 2, shape, dtype=torch.uint8, device=device).bool()
+     elif torch_dtype in [torch.complex64, torch.complex128]:
+         # Complex types - create from real and imaginary parts
+         real_dtype = torch.float32 if torch_dtype == torch.complex64 else torch.float64
+         real_part = torch.randn(shape, dtype=real_dtype, device=device)
+         imag_part = torch.randn(shape, dtype=real_dtype, device=device)
+         tensor = torch.complex(real_part, imag_part)
+     else:
+         raise ValueError(f"Unsupported dtype: {dtype_str}")
+
+     return tensor
+
+
+ def create_tensor_list(
+     tensor_list_metadata: Dict[str, Any],
+     device: str = "cpu",
+     default_dtype: torch.dtype = torch.float32,
+ ) -> List[torch.Tensor]:
+     """
+     Create a list of tensors from tensor list metadata.
+
+     Args:
+         tensor_list_metadata: Dictionary containing length, shapes, and dtypes
+         device: Device to create tensors on
+         default_dtype: Fallback dtype if conversion fails
+
+     Returns:
+         List of PyTorch tensors
+     """
+     length = tensor_list_metadata["length"]
+     shapes = tensor_list_metadata["shapes"]
+     dtypes = tensor_list_metadata["dtypes"]
+
+     tensor_list = []
+     for j in range(length):
+         # Use last shape/dtype if not enough provided
+         shape = shapes[j] if j < len(shapes) else shapes[-1]
+         dtype_str = dtypes[j] if j < len(dtypes) else dtypes[-1]
+         tensor = create_single_tensor(shape, dtype_str, device, default_dtype)
+         tensor_list.append(tensor)
+
+     return tensor_list
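
For context, a minimal usage sketch of how these helpers might be wired together. It is not part of the commit: the module name hf_tracer_parse and the example records are hypothetical; only the keys the committed functions actually read ("count", "input_shapes", "input_dtypes", "tensor_lists", "length", "shapes", "dtypes") are taken from the code above.

    # Usage sketch (hypothetical); assumes the committed helpers are importable
    # as a module named hf_tracer_parse.
    import torch

    from hf_tracer_parse import (
        create_single_tensor,
        create_tensor_list,
        select_unique_inputs,
    )

    # Hypothetical unique-input records, using only the keys the helpers read.
    unique_inputs = [
        {
            "count": 12,  # how often this shape combination appeared in the trace
            "input_shapes": [[8, 128, 768], [768]],
            "input_dtypes": ["torch.float32", "torch.float32"],
            "tensor_lists": {},  # no list-of-tensors arguments in this record
        },
        {
            "count": 3,
            "input_shapes": [[32, 512, 768], [768]],
            "input_dtypes": ["torch.float32", "torch.float32"],
            "tensor_lists": {},
        },
    ]

    # Keep the most popular and the largest float32 combinations.
    selected = select_unique_inputs(unique_inputs, dtype=torch.float32)

    for combo in selected:
        # Materialize one random tensor per recorded positional argument.
        tensors = [
            create_single_tensor(shape, dtype_str)
            for shape, dtype_str in zip(combo["input_shapes"], combo["input_dtypes"])
        ]
        print([t.shape for t in tensors])

    # For operators that take a list of tensors, the trace stores per-list metadata.
    tensor_list = create_tensor_list(
        {"length": 3, "shapes": [[4, 4]], "dtypes": ["torch.float32"]}
    )
    print(len(tensor_list), tensor_list[0].dtype)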