Example #1
  input_data = np.array(random_input, dtype=np.uint8)

  benchmark_time = timeit.timeit(
      lambda: engine.run_inference(input_data),
      number=iterations)

  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print('%.2f ms (iterations = %d)' % (time_per_inference, iterations))
  return time_per_inference


if __name__ == '__main__':
  args = test_utils.parse_args()
  machine = test_utils.machine_info()
  test_utils.check_cpu_scaling_governor_status()
  # Read references from csv file.
  model_list, reference = test_utils.read_reference(
      'basic_engine_reference_%s.csv' % machine)
  total_models = len(model_list)
  # Put column names in first row.
  results = [('MODEL', 'INFERENCE_TIME')]
  for cnt, model in enumerate(model_list, start=1):
    print('-------------- Model ', cnt, '/', total_models, ' ---------------')
    results.append((model, _run_benchmark_for_model(model)))
  test_utils.save_as_csv(
      'basic_engine_benchmarks_%s_%s.csv' % (
          machine, time.strftime('%Y%m%d-%H%M%S')),
      results)
  test_utils.check_result(reference, results, args.enable_assertion)
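
The snippet above begins mid-function, so the engine setup is not visible. A minimal sketch of how that head might look, assuming the Coral `edgetpu.basic.basic_engine.BasicEngine` API; the `test_data/` model path and the default iteration count are assumptions, not part of the original:

import timeit

import numpy as np
from edgetpu.basic.basic_engine import BasicEngine


def _run_benchmark_for_model(model, iterations=200):
  """Sketch: times raw inference for one model (path and count assumed)."""
  engine = BasicEngine('test_data/' + model)
  # Random bytes sized to the model's flattened input tensor.
  random_input = np.random.randint(
      0, 256, size=engine.required_input_array_size())
  input_data = np.array(random_input, dtype=np.uint8)
  benchmark_time = timeit.timeit(
      lambda: engine.run_inference(input_data), number=iterations)
  return benchmark_time / iterations * 1000  # Milliseconds per inference.
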
Example #2

    learning_rate = 0.01
    batch_size = 100
    print('Start backprop')
    start_time = time.perf_counter()
    model.train_with_sgd(dataset,
                         num_iter,
                         learning_rate,
                         batch_size,
                         print_every=-1)
    training_time = time.perf_counter() - start_time
    print('Backprop time: ', training_time, 's')
    return training_time


if __name__ == '__main__':
    args = test_utils.parse_args()
    machine = test_utils.machine_info()
    # Cases are defined by (num_classes, feature_dim) pairs.
    cases = [[4, 256], [16, 256], [4, 1024], [16, 1024]]
    results = [('CASE', 'TRAINING_TIME(s)')]
    for num_classes, feature_dim in cases:
        print('-------- num_classes=%d / feature_dim=%d --------' %
              (num_classes, feature_dim))
        results.append((":".join(str(i) for i in params),
                        _benchmark_for_training(num_classes, feature_dim)))
    test_utils.save_as_csv(
        'softmax_regression_benchmarks_%s_%s.csv' %
        (machine, time.strftime('%Y%m%d-%H%M%S')), results)
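
For reference, this is roughly the work the timed `train_with_sgd` call performs, written as self-contained NumPy. It is an illustrative sketch of minibatch SGD on a softmax classifier with the same default learning rate and batch size, not the Coral implementation:

import numpy as np


def softmax_sgd(x, y, num_classes, num_iter=500, learning_rate=0.01,
                batch_size=100):
    """Illustrative minibatch SGD for softmax regression."""
    n, d = x.shape
    w = 0.01 * np.random.randn(d, num_classes)
    b = np.zeros(num_classes)
    for _ in range(num_iter):
        idx = np.random.choice(n, batch_size, replace=False)
        xb, yb = x[idx], y[idx]
        logits = xb @ w + b
        logits -= logits.max(axis=1, keepdims=True)  # Numerical stability.
        probs = np.exp(logits)
        probs /= probs.sum(axis=1, keepdims=True)
        probs[np.arange(batch_size), yb] -= 1.0  # d(cross-entropy)/d(logits).
        w -= learning_rate * (xb.T @ probs) / batch_size
        b -= learning_rate * probs.mean(axis=0)
    return w, b

Wrapping a call like this in `time.perf_counter()` reproduces the timing pattern in the benchmark above.
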
Example #3
    print('Image pre-processing time: ', end_time - start_time, 's')
    start_time = end_time
    for class_id, tensors in enumerate(image_list_by_category.values()):
        engine.train(tensors, class_id)
    engine.save_model(output_model_path)
    training_time = time.monotonic() - start_time
    print('Model: ', model)
    print('Data set : ', data_set)
    print('Training time : ', training_time, 's')
    # Remove the trained model file.
    os.remove(output_model_path)
    return training_time


if __name__ == '__main__':
    args = test_utils.parse_args()
    machine = test_utils.machine_info()
    models, reference = test_utils.read_reference(
        'imprinting_reference_%s.csv' % machine)
    model_num = len(models)
    results = [('MODEL', 'DATA_SET', 'TRAINING_TIME(s)')]
    for cnt, name in enumerate(models, start=1):
        # 10 categories, 20 images each.
        data = 'open_image_v4_subset'
        print('---------------- ', cnt, '/', model_num, ' ----------------')
        results.append((name, data, _benchmark_for_training(name, data)))
    test_utils.save_as_csv(
        'imprinting_benchmarks_%s_%s.csv' %
        (machine, time.strftime('%Y%m%d-%H%M%S')), results)
    test_utils.check_result(reference, results, args.enable_assertion)
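
The imprinting snippet is likewise truncated before the engine and training tensors exist. A rough sketch of that setup, assuming the `edgetpu.learn.imprinting.engine.ImprintingEngine` API; the model path, the input size, the `keep_classes` flag, and the random stand-in tensors (in place of the open_image_v4_subset images) are all assumptions:

import time

import numpy as np
from edgetpu.learn.imprinting.engine import ImprintingEngine


def _train_sketch(model_path, num_classes=10, images_per_class=20,
                  input_size=224):
    """Sketch: times weight-imprinting training on random stand-in images."""
    engine = ImprintingEngine(model_path, keep_classes=False)
    tensors_by_category = {
        c: [np.random.randint(0, 256, input_size * input_size * 3,
                              dtype=np.uint8)
            for _ in range(images_per_class)]
        for c in range(num_classes)}
    start_time = time.monotonic()
    for class_id, tensors in enumerate(tensors_by_category.values()):
        engine.train(tensors, class_id)
    return time.monotonic() - start_time
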
Example #4
    engines.append(engine)
    input_data_list.append(input_data)

  benchmark_time = timeit.timeit(
      lambda: _run_inferences(engines, input_data_list),
      number=iterations)

  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print('%.2f ms (iterations = %d)' % (time_per_inference, iterations))
  return time_per_inference


if __name__ == '__main__':
  args = test_utils.parse_args()
  machine = test_utils.machine_info()
  test_utils.check_cpu_scaling_governor_status()
  # Read references from csv file.
  modelsets_list, reference = test_utils.read_reference(
      'cocompilation_reference_%s.csv' % machine)
  total_modelsets = len(modelsets_list)
  # Put column names in first row.
  results = [('MODELS', 'INFERENCE_TIME')]
  for cnt, modelsets in enumerate(modelsets_list, start=1):
    print('-------------- Models ', cnt, '/', total_modelsets, ' ---------------')
    results.append(
        (modelsets,
         _run_benchmark_for_cocompiled_models(modelsets.split(','))))
  test_utils.save_as_csv(
      'cocompilation_benchmarks_%s_%s.csv' % (
          machine, time.strftime('%Y%m%d-%H%M%S')),
      results)
  test_utils.check_result(reference, results, args.enable_assertion)
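
The `_run_inferences` helper is referenced above but not shown. A plausible minimal definition, assuming each engine is a `BasicEngine` paired positionally with its input tensor; running the co-compiled models back to back each iteration is what forces the Edge TPU to switch between them:

def _run_inferences(engines, input_data_list):
  # One pass over all co-compiled models; each call may trigger a
  # model switch on the Edge TPU, which is the cost being measured.
  for engine, input_data in zip(engines, input_data_list):
    engine.run_inference(input_data)
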