def run_benchmark(self):
    """Time the imperative PRelu block (forward + backward) and record the
    per-run average under
    ``self.results["MX_Gluon_Imperative_PRelu_Forward_Backward_Time"]``.
    """
    # Warm-up runs: the timing is discarded so one-time setup cost
    # (lazy initialization, first-call overhead) is excluded.
    block_forward_backward_and_time(block=self.block,
                                    runs=self.warmup,
                                    x=self.data)

    # Timed runs.
    total_time, _ = block_forward_backward_and_time(block=self.block,
                                                    runs=self.runs,
                                                    x=self.data)
    self.results["MX_Gluon_Imperative_PRelu_Forward_Backward_Time"] = (
        total_time / self.runs)
def run_benchmark(self):
    """Time the imperative Lambda (batchify) block (forward + backward) and
    record the per-run average under
    ``self.results["MX_Gluon_Imperative_Lambda_Batchify_Forward_Backward_Time"]``.

    NOTE: ``self.data`` is deliberately passed positionally (first), because
    a ``Lambda(..)`` block does NOT take a named input, so we cannot call
    ``block_forward_backward_and_time(..)`` with ``x=self.data``.
    """
    # Warm-up runs: the timing is discarded so one-time setup cost
    # (lazy initialization, first-call overhead) is excluded.
    block_forward_backward_and_time(self.data,
                                    block=self.block,
                                    runs=self.warmup)

    # Timed runs.
    total_time, _ = block_forward_backward_and_time(self.data,
                                                    block=self.block,
                                                    runs=self.runs)
    self.results[
        "MX_Gluon_Imperative_Lambda_Batchify_Forward_Backward_Time"] = (
            total_time / self.runs)
def run_benchmark(self):
    """Time the imperative Conv2DTranspose block (forward + backward) and
    record the per-run average under
    ``self.results["MX_Gluon_Imperative_Conv2DTranspose_Forward_Backward_Time"]``.

    TODO: Conv2DTranspose performance on CPU is real bad, so on a CPU
    context the benchmark is skipped and the result is recorded as 0.
    """
    # Skip on CPU entirely — see TODO above.
    if self.ctx == mx.cpu():
        print(
            "WARNING: Conv2DTranspose performance on CPU is real bad. Not recommended to run for now!!"
        )
        self.results[
            "MX_Gluon_Imperative_Conv2DTranspose_Forward_Backward_Time"] = 0
        return

    # Warm-up runs: the timing is discarded so one-time setup cost
    # (lazy initialization, first-call overhead) is excluded.
    block_forward_backward_and_time(block=self.block,
                                    runs=self.warmup,
                                    x=self.data)

    # Timed runs.
    total_time, _ = block_forward_backward_and_time(block=self.block,
                                                    runs=self.runs,
                                                    x=self.data)
    self.results[
        "MX_Gluon_Imperative_Conv2DTranspose_Forward_Backward_Time"] = (
            total_time / self.runs)