Example #1
def _test_run_benchmark(self, params):
  """Tests that run_benchmark() runs successfully with the params."""
  logs = []
  with test_util.monkey_patch(
      all_reduce_benchmark,
      log_fn=test_util.print_and_add_to_list(logs)):
    bench_cnn = benchmark_cnn.BenchmarkCNN(params)
    all_reduce_benchmark.run_benchmark(bench_cnn, num_iters=5)
    self.assertRegexpMatches(logs[-1],
                             '^Average time per step: [0-9.]+$')
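test_util.monkey_patch above swaps module attributes (here, all_reduce_benchmark.log_fn) for the duration of the with block so the test can capture the benchmark's log output in logs. A minimal stand-in with the same shape, assuming the keyword-argument interface seen in the call above (a sketch, not the repo's actual implementation):

import contextlib

@contextlib.contextmanager
def monkey_patch(obj, **kwargs):
  """Temporarily sets attributes on obj, restoring the originals on exit."""
  originals = {name: getattr(obj, name) for name in kwargs}
  for name, value in kwargs.items():
    setattr(obj, name, value)
  try:
    yield
  finally:
    for name, value in originals.items():
      setattr(obj, name, value)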
Example #2
def run_with_test_model(params):
  """Runs tf_cnn_benchmarks with a test model."""
  model = test_util.TestCNNModel()
  inputs = test_util.get_fake_var_update_inputs()
  with test_util.monkey_patch(benchmark_cnn,
                              LOSS_AND_ACCURACY_DIGITS_TO_SHOW=15):
    bench = benchmark_cnn.BenchmarkCNN(params, dataset=test_util.TestDataSet(),
                                       model=model)
    # The test model does not use labels when computing loss, so the label
    # values do not matter as long as it's the right shape.
    labels = np.array([1] * inputs.shape[0])
    bench.input_preprocessor.set_fake_data(inputs, labels)
    bench.run()
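run_with_test_model expects a params object like the one benchmark_cnn.make_params produces (the same constructor used in Example #4 below). A usage sketch with illustrative values only:

# Illustrative parameter values; any tf_cnn_benchmarks flag can be passed as a
# keyword argument to make_params.
params = benchmark_cnn.make_params(
    batch_size=2,
    num_batches=10,
    num_warmup_batches=0,
    distortions=False)
run_with_test_model(params)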
Example #3
def run_with_real_model(params):
  """Runs tf_cnn_benchmarks with a real model."""
  bench = benchmark_cnn.BenchmarkCNN(params)
  bench.print_info()
  preprocessor = get_test_image_preprocessor(bench.batch_size, params)
  if preprocessor is not None:
    # The test image preprocessor requires queue runners. Since this file is
    # used for testing, it is OK to access protected members.
    # pylint: disable=protected-access
    bench.dataset._queue_runner_required = True
    # pylint: enable=protected-access
    bench.input_preprocessor = preprocessor
  bench.run()
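run_with_real_model takes the same kind of params object. When driven from the command line it would typically be paired with the flags-based constructor shown in the later examples; a hedged sketch, assuming the flags have already been parsed (for instance by absl's app.run):

# Build params from the already-parsed tf_cnn_benchmarks flags, finish setup,
# and run the real-model path defined above.
params = benchmark_cnn.make_params_from_flags()
params = benchmark_cnn.setup(params)
run_with_real_model(params)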
Example #4
def testMlPerfCompliance(self):
  string_io = six.StringIO()
  handler = logging.StreamHandler(string_io)
  data_dir = test_util.create_black_and_white_images()
  try:
    mlperf_log.LOGGER.addHandler(handler)
    params = benchmark_cnn.make_params(
        data_dir=data_dir,
        data_name='imagenet',
        batch_size=2,
        num_warmup_batches=0,
        num_batches=2,
        num_eval_batches=3,
        eval_during_training_every_n_steps=1,
        distortions=False,
        weight_decay=0.5,
        optimizer='momentum',
        momentum=0.5,
        stop_at_top_1_accuracy=2.0,
        tf_random_seed=9876,
        ml_perf=True)
    with mlperf.mlperf_logger(use_mlperf_logger=True,
                              model='resnet50_v1.5'):
      bench_cnn = benchmark_cnn.BenchmarkCNN(
          params, model=_MlPerfTestModel())
      bench_cnn.run()
    logs = string_io.getvalue().splitlines()
    log_regexes = Counter()
    for log in logs:
      for regex in self.EXPECTED_LOG_REGEXES:
        if regex.search(log):
          log_regexes[regex] += 1
    if log_regexes != self.EXPECTED_LOG_REGEXES:
      diff_counter = Counter(log_regexes)
      diff_counter.subtract(self.EXPECTED_LOG_REGEXES)
      differences = []
      for regex in (k for k in diff_counter.keys()
                    if diff_counter[k]):
        found_count = log_regexes[regex]
        expected_count = self.EXPECTED_LOG_REGEXES[regex]
        differences.append(
            '  For regex %s: Found %d lines matching but '
            'expected to find %d' %
            (regex.pattern, found_count, expected_count))
      raise AssertionError(
          'Logs did not match expected logs. Differences:\n'
          '%s' % '\n'.join(differences))
  finally:
    mlperf_log.LOGGER.removeHandler(handler)
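The pass/fail logic above tallies, per expected regex, how many MLPerf log lines matched and then diffs that tally against self.EXPECTED_LOG_REGEXES (a Counter mapping compiled regexes to expected match counts). The same idea in isolation, with made-up expectations rather than the test's actual regex set:

import re
from collections import Counter

# Illustrative expectations: each compiled regex maps to the number of log
# lines that should match it.
expected = Counter({
    re.compile(r'run_start'): 1,
    re.compile(r'eval_accuracy'): 3,
})

logs = ['run_start', 'eval_accuracy: 0.1', 'eval_accuracy: 0.2']
found = Counter()
for log in logs:
  for regex in expected:
    if regex.search(log):
      found[regex] += 1

# Subtracting the expectations leaves non-zero counts only where they differ.
diff = Counter(found)
diff.subtract(expected)
for regex in (k for k, v in diff.items() if v):
  print('For regex %s: found %d lines but expected %d'
        % (regex.pattern, found[regex], expected[regex]))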
Example #5
def main(positional_arguments):
    # Command-line arguments like '--distortions False' are equivalent to
    # '--distortions=True False', where False is a positional argument. To prevent
    # this from silently running with distortions, we do not allow positional
    # arguments.
    assert len(positional_arguments) >= 1
    if len(positional_arguments) > 1:
        raise ValueError('Received unknown positional arguments: %s' %
                         positional_arguments[1:])

    params = benchmark_cnn.make_params_from_flags()
    params = benchmark_cnn.setup(params)
    bench = benchmark_cnn.BenchmarkCNN(params)

    tfversion = cnn_util.tensorflow_version_tuple()
    log_fn('TensorFlow:  %i.%i' % (tfversion[0], tfversion[1]))

    run_benchmark(bench, absl_flags.FLAGS.iters_per_step)
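main receives the full argv list, which is why positional_arguments always holds at least one element (the program name). The entry point is not shown in this excerpt; under absl it would typically be wired up roughly like this (a sketch based on how absl.app.run forwards argv, not a copy of the original file):

from absl import app

if __name__ == '__main__':
  # app.run parses the --flags and passes the remaining argv, with argv[0]
  # being the program name, on to main.
  app.run(main)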
Example #6
def main(positional_arguments):
    # Command-line arguments like '--distortions False' are equivalent to
    # '--distortions=True False', where False is a positional argument. To prevent
    # this from silently running with distortions, we do not allow positional
    # arguments.
    assert len(positional_arguments) >= 1
    if len(positional_arguments) > 1:
        raise ValueError('Received unknown positional arguments: %s' %
                         positional_arguments[1:])

    params = benchmark_cnn.make_params_from_flags()
    with mlperf.mlperf_logger(absl_flags.FLAGS.ml_perf_compliance_logging,
                              params.model):
        params = benchmark_cnn.setup(params)
        bench = benchmark_cnn.BenchmarkCNN(params)

        tfversion = cnn_util.tensorflow_version_tuple()
        log_fn('TensorFlow:  %i.%i' % (tfversion[0], tfversion[1]))

        bench.print_info()
        bench.run()
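The mlperf.mlperf_logger context manager only emits compliance logs when --ml_perf_compliance_logging is set. The flag itself is defined elsewhere in the repo; a representative absl definition (the default value and help text here are assumptions, not copied from the source) would look like:

from absl import flags

flags.DEFINE_boolean(
    'ml_perf_compliance_logging', False,  # assumed default
    'Whether to emit MLPerf compliance logging while the benchmark runs.')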