def main(_):
  """Run NCF (Keras) under benchmark and MLPerf compliance logging.

  Raises:
    ValueError: if a TPU is requested — the Keras NCF path does not
      support TPU for now.
  """
  compliance_logging = FLAGS.output_ml_perf_compliance_logging
  with logger.benchmark_context(FLAGS), \
      mlperf_helper.LOGGER(compliance_logging):
    # The MLPerf helper needs to know where this file lives on disk.
    ncf_root = os.path.split(os.path.abspath(__file__))[0]
    mlperf_helper.set_ncf_root(ncf_root)
    if FLAGS.tpu:
      raise ValueError("NCF in Keras does not support TPU for now")
    run_ncf(FLAGS)
def test_benchmark_context_failure(self, mock_config_benchmark_logger):
  """benchmark_context reports FAILURE status when its body raises."""
  fake_logger = mock.MagicMock()
  mock_config_benchmark_logger.return_value = fake_logger
  # Merge the two context managers: the RuntimeError escaping the
  # benchmark context must also satisfy assertRaises.
  with self.assertRaises(RuntimeError), logger.benchmark_context(None):
    raise RuntimeError("training error")
  fake_logger.on_finish.assert_called_once_with(logger.RUN_STATUS_FAILURE)
def main(_):
  """Entry point: run deep speech inside a benchmark logging context."""
  with logger.benchmark_context(flags_obj):
    run_deep_speech(flags_obj)
def main(_):
  """Clean stale model dirs, run training, and log the returned stats."""
  config = flags.FLAGS
  model_helpers.apply_clean(config)
  with logger.benchmark_context(config):
    run_stats = run(config)
    logging.info('Run stats:\n%s', run_stats)
def main(_):
  """Clean stale model dirs, then run training; returns run()'s result."""
  config = flags.FLAGS
  model_helpers.apply_clean(config)
  with logger.benchmark_context(config):
    return run(config)
def main(_):
  """Run the model under a benchmark logging context; returns run()'s result."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    return run(config)
def main(_):
  """Entry point: run the transformer inside a benchmark logging context."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_transformer(config)
def main(_):
  """Entry point: run the movie model inside a benchmark logging context."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_movie(config)
def main(_):
  """Entry point: run wide-deep inside a benchmark logging context."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_wide_deep(config)
def main(_):
  """Configure the benchmark logger, then hand FLAGS to the transformer."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_transformer(config)
def main(_):
  """Entry point: run the retinopathy model under benchmark logging."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_retinopathy(config)
def main(_, ckpt_path="./models/model.ckpt-0060"):
  """Clean stale model dirs and evaluate a trained model checkpoint.

  Args:
    _: unused positional argument (absl-style entry points receive argv).
    ckpt_path: path of the checkpoint to evaluate. Defaults to the
      checkpoint that was previously hard-coded in the body, so existing
      callers (e.g. app.run(main)) see identical behavior.
  """
  model_helpers.apply_clean(flags.FLAGS)
  with logger.benchmark_context(flags.FLAGS):
    evaluate_model(flags.FLAGS, ckpt_path)
def main(_):
  """Entry point: run CIFAR training inside a benchmark logging context."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_cifar(config)
def main(_):
  """Entry point: run ImageNet training inside a benchmark logging context."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_imagenet(config)
def main(_):
  """Run NCF under benchmark/MLPerf logging, then stitch the MLPerf log."""
  compliance_logging = FLAGS.output_ml_perf_compliance_logging
  with logger.benchmark_context(FLAGS), \
      mlperf_helper.LOGGER(compliance_logging):
    # The MLPerf helper needs to know where this file lives on disk.
    ncf_root = os.path.split(os.path.abspath(__file__))[0]
    mlperf_helper.set_ncf_root(ncf_root)
    run_ncf(FLAGS)
    mlperf_helper.stitch_ncf()
def main(_):
  """Entry point: run NCF inside a benchmark logging context."""
  with logger.benchmark_context(FLAGS):
    run_ncf(FLAGS)
def main(_):
  """Run NCF with MLPerf logging (gated by --ml_perf), then stitch the log."""
  with logger.benchmark_context(FLAGS), mlperf_helper.LOGGER(FLAGS.ml_perf):
    # The MLPerf helper needs to know where this file lives on disk.
    ncf_root = os.path.split(os.path.abspath(__file__))[0]
    mlperf_helper.set_ncf_root(ncf_root)
    run_ncf(FLAGS)
    mlperf_helper.stitch_ncf()
def main(_):
  """Entry point: run the Keras model benchmark under benchmark logging."""
  with logger.benchmark_context(FLAGS):
    run_keras_model_benchmark(FLAGS)
def start(_):
  """Run CIFAR training and return (eval_accuracy, model_id)."""
  flags_obj = flags.FLAGS
  with logger.benchmark_context(flags_obj):
    eval_accuracy = run_cifar(flags_obj)
    return eval_accuracy, flags_obj.model_id
def main(_):
  """Run NCF under benchmark and MLPerf compliance logging."""
  compliance_logging = FLAGS.output_ml_perf_compliance_logging
  with logger.benchmark_context(FLAGS), \
      mlperf_helper.LOGGER(compliance_logging):
    # The MLPerf helper needs to know where this file lives on disk.
    ncf_root = os.path.split(os.path.abspath(__file__))[0]
    mlperf_helper.set_ncf_root(ncf_root)
    run_ncf(FLAGS)
def test_benchmark_context(self, mock_config_benchmark_logger):
  """benchmark_context reports SUCCESS status when its body completes."""
  fake_logger = mock.MagicMock()
  mock_config_benchmark_logger.return_value = fake_logger
  with logger.benchmark_context(None):
    tf.compat.v1.logging.info("start benchmarking")
  fake_logger.on_finish.assert_called_once_with(logger.RUN_STATUS_SUCCESS)
def test_benchmark_context(self, mock_config_benchmark_logger):
  """benchmark_context reports SUCCESS status when its body completes."""
  fake_logger = mock.MagicMock()
  mock_config_benchmark_logger.return_value = fake_logger
  with logger.benchmark_context(None):
    logging.info("start benchmarking")
  fake_logger.on_finish.assert_called_once_with(logger.RUN_STATUS_SUCCESS)
def main(_):
  """Entry point: run the retinopathy model under benchmark logging."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_retinopathy(config)
def main(_):
  """Entry point: run wide-deep inside a benchmark logging context."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_wide_deep(config)
def main(_):
  """Run training under benchmark logging; log stats only if run() returned any."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_stats = run(config)
    if run_stats:
      logging.info('Run stats:\n%s', run_stats)
def main(_):
  """Entry point: run CIFAR training inside a benchmark logging context."""
  config = flags.FLAGS
  with logger.benchmark_context(config):
    run_cifar(config)
def main(_):
  """Entry point: run the model inside a benchmark logging context."""
  with logger.benchmark_context(flags_obj):
    run(flags_obj)