Example #1
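The fragment below begins partway through a benchmarking helper; its imports and function header are not shown. As a point of reference, here is a minimal self-contained sketch of the same measurement, assuming the legacy edgetpu Python API (BasicEngine with required_input_array_size() and run_inference()); the function name, iteration count, and random-input generation are illustrative assumptions rather than the original code.

import timeit

import numpy as np
from edgetpu.basic.basic_engine import BasicEngine


def time_single_inference(model_path, iterations=100):
  """Sketch: average run_inference() latency in milliseconds."""
  engine = BasicEngine(model_path)
  # Random uint8 data matching the model's flattened input size.
  input_data = np.random.randint(
      0, 256, size=engine.required_input_array_size(), dtype=np.uint8)
  total = timeit.timeit(lambda: engine.run_inference(input_data),
                        number=iterations)
  return total / iterations * 1000
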
  input_data = np.array(random_input, dtype=np.uint8)

  benchmark_time = timeit.timeit(
      lambda: engine.run_inference(input_data),
      number=iterations)

  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print(time_per_inference, 'ms (iterations = ', iterations, ')')
  return time_per_inference


if __name__ == '__main__':
  args = test_utils.parse_args()
  machine = test_utils.machine_info()
  test_utils.check_cpu_scaling_governor_status()
  # Read references from csv file.
  model_list, reference = test_utils.read_reference(
      'basic_engine_reference_%s.csv' % machine)
  total_models = len(model_list)
  # Put column names in first row.
  results = [('MODEL', 'INFERENCE_TIME')]
  for cnt, model in enumerate(model_list, start=1):
    print('-------------- Model ', cnt, '/', total_models, ' ---------------')
    results.append((model, _run_benchmark_for_model(model)))
  test_utils.save_as_csv(
      'basic_engine_benchmarks_%s_%s.csv' % (
          machine, time.strftime('%Y%m%d-%H%M%S')),
      results)
  test_utils.check_result(reference, results, args.enable_assertion)
Example #2
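The fragment below starts just after the image pre-processing step of a weight-imprinting training benchmark; the engine construction and data loading are not shown. Here is a minimal self-contained sketch of the same timing pattern using synthetic tensors instead of the open_image_v4_subset images; the ImprintingEngine import path, the flattened 224x224x3 input size, and the function name are assumptions.

import time

import numpy as np
from edgetpu.learn.imprinting.engine import ImprintingEngine


def time_imprinting_training(model_path, num_classes=10, images_per_class=20):
    """Sketch: wall-clock time to imprint weights for synthetic categories."""
    engine = ImprintingEngine(model_path)
    # Assumed flattened input size (224x224 RGB); the real benchmark derives
    # this from the model and feeds real, resized images.
    input_size = 224 * 224 * 3
    data_by_class = {
        class_id: [np.random.randint(0, 256, size=input_size, dtype=np.uint8)
                   for _ in range(images_per_class)]
        for class_id in range(num_classes)
    }
    start = time.monotonic()
    for class_id, tensors in data_by_class.items():
        engine.train(tensors, class_id)
    return time.monotonic() - start
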
    print('Image pre-processing time: ', end_time - start_time, 's')
    start_time = end_time
    for class_id, tensors in enumerate(image_list_by_category.values()):
        engine.train(tensors, class_id)
    engine.save_model(output_model_path)
    training_time = time.monotonic() - start_time
    print('Model: ', model)
    print('Data set : ', data_set)
    print('Training time : ', training_time, 's')
    # Remove the model.
    subprocess.call(['rm', output_model_path])
    return training_time


if __name__ == '__main__':
    args = test_utils.parse_args()
    machine = test_utils.machine_info()
    models, reference = test_utils.read_reference(
        'imprinting_reference_%s.csv' % machine)
    model_num = len(models)
    results = [('MODEL', 'DATA_SET', 'TRAINING_TIME')]
    for cnt, name in enumerate(models, start=1):
        # 10 categories, with 20 images each.
        data = 'open_image_v4_subset'
        print('---------------- ', cnt, '/', model_num, ' ----------------')
        results.append((name, data, _benchmark_for_training(name, data)))
    test_utils.save_as_csv(
        'imprinting_benchmarks_%s_%s.csv' %
        (machine, time.strftime('%Y%m%d-%H%M%S')), results)
    test_utils.check_result(reference, results, args.enable_assertion)
Example #3
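The fragment below is the body of a classification-latency helper whose function header is omitted. Here is a minimal self-contained sketch of the same measurement, assuming the legacy edgetpu ClassificationEngine API and loading the image directly with PIL; the function name and default iteration count are illustrative.

import timeit

from PIL import Image
from edgetpu.classification.engine import ClassificationEngine


def time_classification(model_path, image_path, iterations=200):
  """Sketch: average classify_with_image() latency in milliseconds."""
  engine = ClassificationEngine(model_path)
  with Image.open(image_path) as img:
    total = timeit.timeit(
        lambda: engine.classify_with_image(img, threshold=0.4, top_k=10),
        number=iterations)
  return total / iterations * 1000
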
  print('Benchmark for [', model_name, '] on ', image)
  engine = ClassificationEngine(test_utils.test_data_path(model_name))
  iterations = 200 if ('edgetpu' in model_name) else 10

  with test_utils.test_image(image) as img_obj:
    benchmark_time = timeit.timeit(
        lambda: engine.classify_with_image(img_obj, threshold=0.4, top_k=10),
        number=iterations)

  time_per_inference = (benchmark_time / iterations) * 1000
  print(time_per_inference, 'ms (iterations = ', iterations, ')')
  return time_per_inference


if __name__ == '__main__':
  args = test_utils.parse_args()
  images_for_tests = ['cat.bmp', 'cat_720p.jpg', 'cat_1080p.jpg']
  machine = test_utils.machine_info()
  test_utils.check_cpu_scaling_governor_status()
  model_list, reference = test_utils.read_reference(
      'classification_reference_%s.csv' % machine)
  total_models = len(model_list)
  results = [('MODEL', 'IMAGE_NAME', 'INFERENCE_TIME')]
  for cnt, model in enumerate(model_list, start=1):
    print('-------------- Model ', cnt, '/', total_models, ' ---------------')
    for img in images_for_tests:
      results.append((model, img, _run_benchmark_for_model(model, img)))
  test_utils.save_as_csv('classification_benchmarks_%s_%s.csv' %
                         (machine, time.strftime('%Y%m%d-%H%M%S')), results)
  test_utils.check_result(reference, results, args.enable_assertion)
Example #4
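The fragment below starts inside the loop that builds one engine and one random input per co-compiled model. Here is a minimal self-contained sketch of the whole pattern (build every engine first, then time one back-to-back pass over all of them), assuming the legacy BasicEngine API; the function name and iteration count are illustrative.

import timeit

import numpy as np
from edgetpu.basic.basic_engine import BasicEngine


def time_cocompiled_models(model_paths, iterations=100):
  """Sketch: average time (ms) for one inference on each co-compiled model."""
  engines = []
  input_data_list = []
  for path in model_paths:
    engine = BasicEngine(path)
    engines.append(engine)
    input_data_list.append(np.random.randint(
        0, 256, size=engine.required_input_array_size(), dtype=np.uint8))

  def run_all():
    # Running the models back to back exercises the parameter caching that
    # co-compilation sets up on the Edge TPU.
    for engine, data in zip(engines, input_data_list):
      engine.run_inference(data)

  total = timeit.timeit(run_all, number=iterations)
  return total / iterations * 1000
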
    engines.append(engine)
    input_data_list.append(input_data)

  benchmark_time = timeit.timeit(
      lambda: _run_inferences(engines, input_data_list),
      number=iterations)

  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print(time_per_inference, 'ms (iterations = ', iterations, ')')
  return time_per_inference


if __name__ == '__main__':
  args = test_utils.parse_args()
  machine = test_utils.machine_info()
  test_utils.check_cpu_scaling_governor_status()
  # Read references from csv file.
  modelsets_list, reference = test_utils.read_reference(
      'cocompilation_reference_%s.csv' % machine)
  total_modelsets = len(modelsets_list)
  # Put column names in first row.
  results = [('MODELS', 'INFERENCE_TIME')]
  for cnt, modelsets in enumerate(modelsets_list, start=1):
    print('-------------- Models ', cnt, '/', total_modelsets, ' ---------------')
    results.append((modelsets, _run_benchmark_for_cocompiled_models(modelsets.split(','))))
  test_utils.save_as_csv(
      'cocompilation_benchmarks_%s_%s.csv' % (
          machine, time.strftime('%Y%m%d-%H%M%S')),
      results)
  test_utils.check_result(reference, results, args.enable_assertion)
Example #5
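The fragment below is the body of a detection-latency helper whose function header is omitted. Here is a minimal self-contained sketch of the same measurement, assuming the legacy edgetpu DetectionEngine API; the function name and default iteration count are illustrative.

import timeit

from PIL import Image
from edgetpu.detection.engine import DetectionEngine


def time_detection(model_path, image_path, iterations=200):
    """Sketch: average detect_with_image() latency in milliseconds."""
    engine = DetectionEngine(model_path)
    with Image.open(image_path) as img:
        total = timeit.timeit(
            lambda: engine.detect_with_image(img, threshold=0.4, top_k=10),
            number=iterations)
    return total / iterations * 1000
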
    engine = DetectionEngine(test_utils.test_data_path(model_name))
    iterations = 200 if ('edgetpu' in model_name) else 10

    with Image.open(test_utils.test_data_path(image)) as img_obj:
        benchmark_time = timeit.timeit(
            lambda: engine.detect_with_image(img_obj, threshold=0.4, top_k=10),
            number=iterations)

    time_per_inference = (benchmark_time / iterations) * 1000
    return time_per_inference


if __name__ == '__main__':
    args = test_utils.parse_args()
    images_for_tests = ['cat.bmp', 'cat_720p.jpg', 'cat_1080p.jpg']
    machine = test_utils.machine_info()
    test_utils.check_cpu_scaling_governor_status()
    model_list, reference = test_utils.read_reference(
        'detection_reference_%s.csv' % machine)
    total_models = len(model_list)
    results = [('MODEL', 'IMAGE_NAME', 'INFERENCE_TIME')]
    for cnt, model in enumerate(model_list, start=1):
        print('-------------- Model ', cnt, '/', total_models,
              ' ---------------')
        for img in images_for_tests:
            results.append((model, img, _run_benchmark_for_model(model, img)))
    test_utils.save_as_csv(
        'detection_benchmarks_%s_%s.csv' %
        (machine, time.strftime('%Y%m%d-%H%M%S')), results)
    test_utils.check_result(reference, results, args.enable_assertion)