def test_benchmark_context(self, mock_config_benchmark_logger):
  mock_logger = mock.MagicMock()
  mock_config_benchmark_logger.return_value = mock_logger
  with logger.benchmark_context(None):
    tf.compat.v1.logging.info("start benchmarking")
  mock_logger.on_finish.assert_called_once_with(logger.RUN_STATUS_SUCCESS)
Example No. 2
def test_benchmark_context_failure(self, mock_config_benchmark_logger):
  mock_logger = mock.MagicMock()
  mock_config_benchmark_logger.return_value = mock_logger
  with self.assertRaises(RuntimeError):
    with logger.benchmark_context(None):
      raise RuntimeError("training error")
  mock_logger.on_finish.assert_called_once_with(logger.RUN_STATUS_FAILURE)
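Taken together, the two tests above pin down the contract of logger.benchmark_context: it configures a benchmark logger from the supplied flags object, yields to the wrapped code, and calls on_finish with RUN_STATUS_SUCCESS when the block exits cleanly or with RUN_STATUS_FAILURE (re-raising the exception) when it fails. Below is a minimal sketch of a context manager with that behavior, not the actual module code: the helper name config_benchmark_logger is inferred from the patched mock's name, and the status string values are assumptions.

import contextlib

RUN_STATUS_SUCCESS = "success"   # assumed value
RUN_STATUS_FAILURE = "failure"   # assumed value


def config_benchmark_logger(flag_obj):
  # Placeholder for the helper the tests patch; returns an object with on_finish().
  del flag_obj

  class _NullLogger:
    def on_finish(self, status):
      print("run status:", status)

  return _NullLogger()


@contextlib.contextmanager
def benchmark_context(flag_obj):
  """Yield to the benchmarked code and report its final run status."""
  benchmark_logger = config_benchmark_logger(flag_obj)
  try:
    yield
    benchmark_logger.on_finish(RUN_STATUS_SUCCESS)   # block exited cleanly
  except Exception:
    benchmark_logger.on_finish(RUN_STATUS_FAILURE)   # block raised; report failure and re-raise
    raise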
Example No. 3
def main(_):
    model_helpers.apply_clean(flags.FLAGS)
    logdir = '/tmp/logs'
    if not os.path.exists(logdir):
        os.makedirs(logdir)
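    # node_num and gpu_num are not defined in this excerpt; they are assumed to be
    # module-level values computed elsewhere in the original script.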
    logname = 'imagenet_strategy_{}_model_{}_node_{}_gpu_{}_patch_{}_proxy_{}'.format(
        flags.FLAGS.autodist_strategy, flags.FLAGS.cnn_model, node_num,
        gpu_num, flags.FLAGS.autodist_patch_tf, flags.FLAGS.proxy)
    logging.get_absl_handler().use_absl_log_file(logname, logdir)
    with logger.benchmark_context(flags.FLAGS):
        run(flags.FLAGS)
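Example No. 3 (and Example No. 4 below) runs the same logging setup before entering the benchmark context: ensure a log directory exists, then route absl log output to a file inside it. The fragment below isolates that pattern; the directory and log name are placeholders.

import os

from absl import logging

logdir = '/tmp/logs'              # placeholder directory
logname = 'my_benchmark_run'      # placeholder log file prefix

os.makedirs(logdir, exist_ok=True)   # one-call equivalent of the exists()/makedirs() pair above
logging.get_absl_handler().use_absl_log_file(logname, logdir)
logging.info('absl log records are now written under %s', logdir)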
Example No. 4
def main(_):
    logdir = '/tmp/logs'
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    logname = 'ncf_strategy_{}_opt_{}_dense_{}'.format(
        FLAGS.autodist_strategy, FLAGS.optimizer, FLAGS.dense_gradient)
    logging.get_absl_handler().use_absl_log_file(logname, logdir)
    with logger.benchmark_context(FLAGS), mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
        mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
        FLAGS.keras_use_ctl = True
        FLAGS.run_eagerly = False
        FLAGS.eval_batch_size = 1000
        FLAGS.dataset = 'ml-20mx16x32'
        FLAGS.train_dataset_path = os.path.join(
            FLAGS.default_data_dir,
            FLAGS.dataset,
            'tfrecord/training_cycle_0/*')
        FLAGS.eval_dataset_path = os.path.join(
            FLAGS.default_data_dir, FLAGS.dataset, 'tfrecord/eval_data/*')
        FLAGS.input_meta_data_path = os.path.join(
            FLAGS.default_data_dir, FLAGS.dataset, 'tfrecord/meta')
        run_ncf(FLAGS)
Example No. 5
def main(_):
  with logger.benchmark_context(flags.FLAGS):
    run_transformer(flags.FLAGS)
Example No. 6
def main(_):
    with logger.benchmark_context(flags.FLAGS):
        run_imagenet(flags.FLAGS)
Example No. 7
def main(_):
    with logger.benchmark_context(flags.FLAGS):
        run_meal(flags.FLAGS)
Example No. 8
def main(_):
    with logger.benchmark_context(flags.FLAGS):
        run_census(flags.FLAGS)
Example No. 9
def main(_):
    with logger.benchmark_context(flags.FLAGS):
        stats = run(flags.FLAGS)
    if stats:
        logging.info('Run stats:\n%s', stats)
Example No. 10
def main(_):
  with logger.benchmark_context(flags.FLAGS):
    run_wide_deep(flags.FLAGS)
Example No. 11
def main(_):
    with logger.benchmark_context(FLAGS):
        run_keras_model_benchmark(FLAGS)
Example No. 12
def main(_):
    with logger.benchmark_context(flags.FLAGS):
        return run(flags.FLAGS)
Example No. 13
def main(_):
    print("============== Main ==============")
    with logger.benchmark_context(flags.FLAGS):
        run_transformer(flags.FLAGS)