def _run_and_report_benchmark(self):
  """Executes benchmark and reports result."""
  start_time_sec = time.time()
  stats = cifar_main.run_cifar(flags.FLAGS)
  wall_time_sec = time.time() - start_time_sec

  examples_per_sec_hook = None
  for hook in stats['train_hooks']:
    if isinstance(hook, hooks.ExamplesPerSecondHook):
      examples_per_sec_hook = hook
      break

  eval_results = stats['eval_results']
  extras = {}
  extras['accuracy_top_1'] = self._json_description(
      eval_results['accuracy'].item(), priority=0)
  extras['accuracy_top_5'] = self._json_description(
      eval_results['accuracy_top_5'].item())

  if examples_per_sec_hook:
    exp_per_second_list = examples_per_sec_hook.current_examples_per_sec_list
    # ExamplesPerSecondHook skips the first 10 steps.
    exp_per_sec = sum(exp_per_second_list) / len(exp_per_second_list)
    extras['exp_per_second'] = self._json_description(exp_per_sec)

  self.report_benchmark(
      iters=eval_results['global_step'],
      wall_time=wall_time_sec,
      extras=extras)

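# The _json_description helper used above is not defined in this snippet.
# A minimal sketch of a plausible shape, assuming it serializes a metric
# value (plus an optional priority) into a JSON string so the extras passed
# to report_benchmark() stay plain strings; the real helper may differ.
def _json_description(self, value, priority=None):
  """Returns a JSON string describing one metric (hypothetical sketch)."""
  attributes = {'value': value}
  if priority is not None:
    attributes['priority'] = priority
  return json.dumps(attributes)  # assumes `import json` at module level
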
def _run_and_report_benchmark(self):
  """Executes benchmark and reports result."""
  start_time_sec = time.time()
  stats = cifar_main.run_cifar(flags.FLAGS)
  wall_time_sec = time.time() - start_time_sec

  examples_per_sec_hook = None
  for hook in stats['train_hooks']:
    if isinstance(hook, hooks.ExamplesPerSecondHook):
      examples_per_sec_hook = hook
      break

  eval_results = stats['eval_results']
  metrics = []
  metrics.append({'name': 'accuracy_top_1',
                  'value': eval_results['accuracy'].item()})
  metrics.append({'name': 'accuracy_top_5',
                  'value': eval_results['accuracy_top_5'].item()})

  if examples_per_sec_hook:
    exp_per_second_list = examples_per_sec_hook.current_examples_per_sec_list
    # ExamplesPerSecondHook skips the first 10 steps.
    exp_per_sec = sum(exp_per_second_list) / len(exp_per_second_list)
    metrics.append({'name': 'exp_per_second',
                    'value': exp_per_sec})

  self.report_benchmark(
      iters=eval_results['global_step'],
      wall_time=wall_time_sec,
      metrics=metrics)

def _run_and_report_benchmark(self):
  """Executes benchmark and reports result."""
  start_time_sec = time.time()
  stats = cifar_main.run_cifar(flags.FLAGS)
  wall_time_sec = time.time() - start_time_sec

  self._report_benchmark(stats,
                         wall_time_sec,
                         top_1_min=0.926,
                         top_1_max=0.938)

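# _report_benchmark is shared by several variants and is not shown here.
# A plausible sketch, assuming the top_1_min/top_1_max bounds become a
# metrics entry that the benchmark harness checks (the metrics format is
# the one already used in the variant above); the real helper may report
# additional fields.
def _report_benchmark(self, stats, wall_time_sec, top_1_min, top_1_max):
  """Reports eval results with bounds on top-1 accuracy (sketch)."""
  eval_results = stats['eval_results']
  metrics = [{'name': 'accuracy_top_1',
              'value': eval_results['accuracy'].item(),
              'min_value': top_1_min,
              'max_value': top_1_max}]
  self.report_benchmark(
      iters=eval_results['global_step'],
      wall_time=wall_time_sec,
      metrics=metrics)
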
def resnet56_fp16_2_gpu(self):
  """Test layers FP16 model with Estimator and dist_strat. 2 GPUs."""
  self._setup()
  flags.FLAGS.num_gpus = 2
  flags.FLAGS.data_dir = DATA_DIR
  flags.FLAGS.batch_size = 128
  flags.FLAGS.train_epochs = 182
  flags.FLAGS.model_dir = self._get_model_dir('resnet56_fp16_2_gpu')
  flags.FLAGS.resnet_size = 56
  flags.FLAGS.dtype = 'fp16'
  stats = cifar_main.run_cifar(flags.FLAGS)
  self._fill_report_object(stats)

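# _fill_report_object is not defined in this snippet. A rough sketch of a
# plausible shape, assuming the stats returned by run_cifar expose accuracy
# and global_step at the top level, as in the last variant below; treat
# every name here as an assumption.
def _fill_report_object(self, stats):
  """Forwards run_cifar stats to report_benchmark (hypothetical sketch)."""
  self.report_benchmark(
      iters=stats['global_step'],
      extras={'accuracy_top_1': stats['accuracy'].item()})
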
def _run_and_report_benchmark(self):
  """Executes benchmark and reports result."""
  start_time_sec = time.time()
  stats = cifar_main.run_cifar(flags.FLAGS)
  wall_time_sec = time.time() - start_time_sec

  self.report_benchmark(
      iters=stats['global_step'],
      wall_time=wall_time_sec,
      extras={
          'accuracy_top_1': self._json_description(
              stats['accuracy'].item(), priority=0),
          'accuracy_top_5': self._json_description(
              stats['accuracy_top_5'].item()),
      })