def _run_and_report_benchmark(self):
  """Runs ImageNet end to end and checks top-1 accuracy against bounds."""
  start_time_sec = time.time()
  stats = imagenet_main.run_imagenet(flags.FLAGS)
  wall_time_sec = time.time() - start_time_sec
  self._report_benchmark(stats,
                         wall_time_sec,
                         top_1_min=0.762,
                         top_1_max=0.766)
def _run_and_report_benchmark(self):
  """Runs ImageNet and reports timing only; accuracy checks are skipped."""
  start_time_sec = time.time()
  stats = imagenet_main.run_imagenet(FLAGS)
  wall_time_sec = time.time() - start_time_sec
  print(stats)
  # Remove accuracy values so the accuracy check is not triggered.
  stats['eval_results'].pop('accuracy', None)
  stats['eval_results'].pop('accuracy_top_5', None)
  self._report_benchmark(stats, wall_time_sec)
def _run_and_report_benchmark(self):
  """Same as above, but assumes the accuracy keys are always present."""
  start_time_sec = time.time()
  stats = imagenet_main.run_imagenet(FLAGS)
  wall_time_sec = time.time() - start_time_sec
  print(stats)
  # Remove values to skip triggering the accuracy check. Unlike pop() with a
  # default, del raises KeyError if the keys are missing.
  del stats['eval_results']['accuracy']
  del stats['eval_results']['accuracy_top_5']
  self._report_benchmark(stats, wall_time_sec)
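# The three variants above differ only in whether accuracy bounds are
# enforced: dropping the accuracy keys from stats['eval_results'] is what
# disables the check. Below is a minimal sketch of what _report_benchmark
# could look like, assuming a tf.test.Benchmark subclass and the metrics
# list accepted by report_benchmark(); the class name and exact metric
# names are illustrative assumptions, not the repo's actual implementation.
import tensorflow as tf


class _SketchBenchmark(tf.test.Benchmark):
  """Hypothetical harness showing how stats could be turned into metrics."""

  def _report_benchmark(self, stats, wall_time_sec,
                        top_1_min=None, top_1_max=None):
    metrics = []
    eval_results = stats.get('eval_results', {})
    if 'accuracy' in eval_results:
      # The benchmark-only variants above remove this key, so no bounded
      # accuracy metric is emitted and the check is skipped.
      metrics.append({'name': 'accuracy_top_1',
                      'value': float(eval_results['accuracy']),
                      'min_value': top_1_min,
                      'max_value': top_1_max})
    if 'accuracy_top_5' in eval_results:
      metrics.append({'name': 'accuracy_top_5',
                      'value': float(eval_results['accuracy_top_5'])})
    # tf.test.Benchmark.report_benchmark records wall time and metrics.
    self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics)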