Example #1
def _test_run_benchmark(self, params):
  """Tests that run_benchmark() runs successfully with the params."""
  logs = []
  with test_util.monkey_patch(
      all_reduce_benchmark,
      log_fn=test_util.print_and_add_to_list(logs)):
    bench_cnn = benchmark_cnn.BenchmarkCNN(params)
    all_reduce_benchmark.run_benchmark(bench_cnn, num_iters=5)
    self.assertRegexpMatches(logs[-1],
                             '^Average time per step: [0-9.]+$')
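
Both examples rely on test_util.monkey_patch as a context manager that temporarily overrides attributes on a module and restores them on exit. The sketch below shows that general pattern under the assumption of a simple keyword-argument API; it is illustrative only, not the actual test_util implementation.

import contextlib


@contextlib.contextmanager
def monkey_patch(obj, **kwargs):
  """Temporarily replaces attributes of `obj` with the given values."""
  # Remember the current attribute values so they can be restored on exit.
  originals = {name: getattr(obj, name) for name in kwargs}
  try:
    for name, value in kwargs.items():
      setattr(obj, name, value)
    yield
  finally:
    # Restore the originals even if the body raised an exception.
    for name, value in originals.items():
      setattr(obj, name, value)
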
def run_with_test_model(params):
  """Runs tf_cnn_benchmarks with a test model."""
  model = test_util.TestCNNModel()
  inputs = test_util.get_fake_var_update_inputs()
  with test_util.monkey_patch(benchmark_cnn,
                              LOSS_AND_ACCURACY_DIGITS_TO_SHOW=15):
    bench = benchmark_cnn.BenchmarkCNN(params, dataset=test_util.TestDataSet(),
                                       model=model)
    # The test model does not use labels when computing loss, so the label
    # values do not matter as long as they have the right shape.
    labels = np.array([1] * inputs.shape[0])
    bench.input_preprocessor.set_fake_data(inputs, labels)
    bench.run()
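
A call site for run_with_test_model could look like the following. benchmark_cnn.make_params is used here to build the params object; the specific flag values are assumptions chosen only to illustrate the call, not settings taken from the original tests.

# Hypothetical usage: build a small Params object and run the test model.
params = benchmark_cnn.make_params(
    batch_size=2,
    num_batches=10,
    print_training_accuracy=True)
run_with_test_model(params)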