# benchmark_models.py
import logging
import time
from abc import ABC, abstractmethod
from typing import Tuple

import numpy as np
import onnxruntime as ort
import openvino as ov
import torch
import torch.backends.cudnn as cudnn

# Configure logging
logging.basicConfig(filename="model.log", level=logging.INFO)


class Benchmark(ABC):
    """
    Abstract class representing a benchmark.
    """

    def __init__(self, nruns: int = 100, nwarmup: int = 50):
        self.nruns = nruns
        self.nwarmup = nwarmup

    @abstractmethod
    def run(self):
        """
        Abstract method to run the benchmark.
        """
        pass


class PyTorchBenchmark(Benchmark):
    """
    Benchmark the inference speed of a PyTorch model.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        device: str = "cuda",
        input_shape: Tuple[int, int, int, int] = (32, 3, 224, 224),
        dtype: torch.dtype = torch.float32,
        nwarmup: int = 50,
        nruns: int = 100,
    ) -> None:
        """
        Initialize the Benchmark object.

        :param model: The model to be benchmarked.
        :param device: The device to run the benchmark on ("cpu" or "cuda").
        :param input_shape: The shape of the input data.
        :param dtype: The data type to be used in the benchmark (typically torch.float32 or torch.float16).
        :param nwarmup: The number of warmup runs before timing.
        :param nruns: The number of runs for timing.
        """
        super().__init__(nruns, nwarmup)
        self.model = model
        self.device = device
        self.input_shape = input_shape
        self.dtype = dtype
        cudnn.benchmark = True  # Enable cuDNN autotuning for fixed-size inputs

    def run(self):
        """
        Run the benchmark with the given model, input shape, and other parameters.
        Log the average batch time and return it in milliseconds.
        """
        # Prepare input data
        input_data = torch.randn(self.input_shape).to(self.device).to(self.dtype)

        # Warm up
        print("Warm up ...")
        with torch.no_grad():
            for _ in range(self.nwarmup):
                _ = self.model(input_data)
                if self.device == "cuda":
                    torch.cuda.synchronize()

        # Start timing
        print("Start timing ...")
        timings = []
        with torch.no_grad():
            for i in range(1, self.nruns + 1):
                start_time = time.time()
                _ = self.model(input_data)
                if self.device == "cuda":
                    # Wait for queued CUDA kernels to finish before stopping the clock
                    torch.cuda.synchronize()
                end_time = time.time()
                timings.append(end_time - start_time)
                if i % 10 == 0:
                    print(
                        f"Iteration {i}/{self.nruns}, ave batch time {np.mean(timings) * 1000:.2f} ms"
                    )

        avg_time = np.mean(timings) * 1000
        logging.info(f"Average batch time: {avg_time:.2f} ms")
        return avg_time
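

# A minimal usage sketch (hypothetical: assumes torchvision is installed and a
# CUDA device is available; resnet50 stands in for any model under test):
#
#     import torchvision.models as models
#     net = models.resnet50(weights=None).eval().half().to("cuda")
#     avg_ms = PyTorchBenchmark(net, device="cuda", dtype=torch.float16).run()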


class ONNXBenchmark(Benchmark):
    """
    A class used to benchmark the performance of an ONNX model.
    """

    def __init__(
        self,
        ort_session: ort.InferenceSession,
        input_shape: tuple,
        nruns: int = 100,
        nwarmup: int = 50,
    ):
        super().__init__(nruns, nwarmup)
        self.ort_session = ort_session
        self.input_shape = input_shape

    def run(self):
        print("Warming up ...")
        # Force the batch dimension to 1 to match the expected input size of the model.
        input_shape = (1,) + self.input_shape[1:]
        input_data = np.random.randn(*input_shape).astype(np.float32)
        for _ in range(self.nwarmup):  # Warm-up runs
            _ = self.ort_session.run(None, {"input": input_data})

        print("Starting benchmark ...")
        timings = []
        for i in range(1, self.nruns + 1):
            start_time = time.time()
            _ = self.ort_session.run(None, {"input": input_data})
            end_time = time.time()
            timings.append(end_time - start_time)
            if i % 10 == 0:
                print(
                    f"Iteration {i}/{self.nruns}, ave batch time {np.mean(timings) * 1000:.2f} ms"
                )

        avg_time = np.mean(timings) * 1000
        logging.info(f"Average ONNX inference time: {avg_time:.2f} ms")
        return avg_time
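

# A minimal usage sketch (hypothetical: "model.onnx" is a placeholder path, and
# the session's input tensor is assumed to be named "input", as run() expects):
#
#     session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
#     avg_ms = ONNXBenchmark(session, input_shape=(32, 3, 224, 224)).run()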


class OVBenchmark(Benchmark):
    """
    Benchmark the inference speed of an OpenVINO model.
    """

    def __init__(self, model: ov.Model, input_shape: Tuple[int, int, int, int]):
        """
        Initialize the OVBenchmark with the OpenVINO model and the input shape.

        :param model: ov.Model
            The OpenVINO model.
        :param input_shape: Tuple[int, int, int, int]
            The shape of the model input.
        """
        super().__init__()
        self.ov_model = model
        self.core = ov.Core()
        self.compiled_model = None
        self.input_shape = input_shape
        self.dummy_input = np.random.randn(*input_shape).astype(np.float32)

    def warmup(self):
        """
        Compile the OpenVINO model for optimal execution on available hardware.
        """
        self.compiled_model = self.core.compile_model(self.ov_model, "AUTO")

    def inference(self, input_data) -> dict:
        """
        Perform inference on the input data using the compiled OpenVINO model.

        :param input_data: np.ndarray
            The input data for the model.
        :return: dict
            The model's output as a dictionary.
        """
        outputs = self.compiled_model(inputs={"input": input_data})
        return outputs

    def run(self):
        """
        Run the benchmark on the OpenVINO model. It first warms up by compiling
        the model and running untimed inferences, then measures the average
        inference time over a set number of runs.
        """
        # Warm up: compile the model once, then run untimed inferences
        logging.info("Warming up ...")
        self.warmup()
        for _ in range(self.nwarmup):
            _ = self.inference(self.dummy_input)

        # Benchmarking
        total_time = 0.0
        for i in range(1, self.nruns + 1):
            start_time = time.time()
            _ = self.inference(self.dummy_input)
            total_time += time.time() - start_time
            if i % 10 == 0:
                print(
                    f"Iteration {i}/{self.nruns}, ave batch time {total_time / i * 1000:.2f} ms"
                )

        avg_time = total_time / self.nruns
        logging.info(f"Average inference time: {avg_time * 1000:.2f} ms")
        return avg_time * 1000
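

# A minimal usage sketch (hypothetical: "model.xml" is a placeholder for an
# OpenVINO IR file; Core().read_model returns the ov.Model OVBenchmark expects):
#
#     ov_model = ov.Core().read_model("model.xml")
#     avg_ms = OVBenchmark(ov_model, input_shape=(1, 3, 224, 224)).run()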


def benchmark_onnx_model(ort_session: ort.InferenceSession):
    run_benchmark(None, None, None, ort_session, onnx=True)


def benchmark_ov_model(ov_model: ov.Model) -> OVBenchmark:
    ov_benchmark = OVBenchmark(ov_model, input_shape=(1, 3, 224, 224))
    ov_benchmark.run()
    return ov_benchmark


def benchmark_cuda_model(cuda_model: torch.nn.Module, device: str, dtype: torch.dtype):
    run_benchmark(cuda_model, device, dtype)


def run_benchmark(
    model: torch.nn.Module,
    device: str,
    dtype: torch.dtype,
    ort_session: ort.InferenceSession = None,
    onnx: bool = False,
) -> None:
    """
    Run and log the benchmark for the given model, device, and dtype.

    :param model: The model to be benchmarked.
    :param device: The device to run the benchmark on ("cpu" or "cuda").
    :param dtype: The data type to be used in the benchmark (typically torch.float32 or torch.float16).
    :param ort_session: An ONNX Runtime inference session (used when onnx=True).
    :param onnx: If True, benchmark the ONNX session instead of the PyTorch model.
    """
    if onnx:
        logging.info("Running Benchmark for ONNX")
        benchmark = ONNXBenchmark(ort_session, input_shape=(32, 3, 224, 224))
    else:
        logging.info(f"Running Benchmark for {device.upper()} and precision {dtype}")
        benchmark = PyTorchBenchmark(model, device=device, dtype=dtype)
    benchmark.run()
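

# A minimal entry-point sketch (hypothetical wiring: assumes torchvision is
# installed and a CUDA GPU is available; resnet50 is just an example model):
if __name__ == "__main__":
    import torchvision.models as models

    resnet = models.resnet50(weights=None).eval().to("cuda")
    run_benchmark(resnet, device="cuda", dtype=torch.float32)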