Example #1
import threading

from edgetpu.classification.engine import ClassificationEngine

# `test_utils` is the helper module from the edgetpu test suite; `self`
# refers to the enclosing unittest.TestCase, so this function is meant to
# be defined inside a test method.
def classification_task(num_inferences):
    tid = threading.get_ident()
    print('Thread: %d, %d inferences for classification task' %
          (tid, num_inferences))
    labels = test_utils.read_label_file(
        test_utils.test_data_path('imagenet_labels.txt'))
    model_name = 'mobilenet_v1_1.0_224_quant_edgetpu.tflite'
    engine = ClassificationEngine(
        test_utils.test_data_path(model_name))
    print('Thread: %d, using device %s' % (tid, engine.device_path()))
    with test_utils.test_image('cat.bmp') as img:
        for _ in range(num_inferences):
            # Each call should return exactly one result: 'Egyptian cat'.
            ret = engine.classify_with_image(img, top_k=1)
            self.assertEqual(len(ret), 1)
            self.assertEqual(labels[ret[0][0]], 'Egyptian cat')
    print('Thread: %d, done classification task' % tid)
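The thread-id prints show this task is meant to run concurrently, one engine per thread. A minimal driver sketch, assuming it sits in the same test method so `self` resolves; the thread and inference counts are illustrative:

# Hypothetical driver inside the same test method.
num_threads, num_inferences = 2, 5
workers = [threading.Thread(target=classification_task, args=(num_inferences,))
           for _ in range(num_threads)]
for worker in workers:
    worker.start()
for worker in workers:
    worker.join()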
Example #2
import time

from edgetpu.classification.engine import ClassificationEngine
from edgetpu.detection.engine import DetectionEngine

def run_two_models_one_tpu(classification_model, detection_model, image_name,
                           num_inferences, batch_size):
    """Runs two models alternately using one Edge TPU.

    It runs the classification model `batch_size` times, then switches to the
    detection model for `batch_size` runs, repeating until each model has run
    `num_inferences` times.

    Args:
      classification_model: string, path to classification model.
      detection_model: string, path to detection model.
      image_name: string, path to input image.
      num_inferences: int, number of inferences to run for each model.
      batch_size: int, how many inferences to run on one model before
        switching to the other one.

    Returns:
      double, wall time it takes to finish the job.
    """
    start_time = time.perf_counter()
    engine_a = ClassificationEngine(classification_model)
    # `engine_b` shares the same Edge TPU as `engine_a`.
    engine_b = DetectionEngine(detection_model, engine_a.device_path())
    # `open_image` and `get_input_tensor` are helpers defined elsewhere in
    # the same script; they load the image and resize it for each engine.
    with open_image(image_name) as image:
        # Resized input tensors for `engine_a` and `engine_b`.
        tensor_a = get_input_tensor(engine_a, image)
        tensor_b = get_input_tensor(engine_b, image)

    # Round up so each model reaches at least `num_inferences` runs.
    num_iterations = (num_inferences + batch_size - 1) // batch_size
    for _ in range(num_iterations):
        # `classify_with_input_tensor` and `detect_with_input_tensor` are
        # used on purpose to exclude the image down-scaling cost.
        for _ in range(batch_size):
            engine_a.classify_with_input_tensor(tensor_a, top_k=1)
        for _ in range(batch_size):
            engine_b.detect_with_input_tensor(tensor_b, top_k=1)
    return time.perf_counter() - start_time
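For context, a hedged sketch of how this benchmark might be invoked; the model and image paths below are illustrative placeholders, not files shipped with the snippet:

# Hypothetical invocation; paths are placeholders.
elapsed = run_two_models_one_tpu(
    'mobilenet_v1_1.0_224_quant_edgetpu.tflite',
    'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite',
    'cat.bmp',
    num_inferences=100,
    batch_size=10)
print('Wall time: %.2f s' % elapsed)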
Example #3
    help=".tflite model to be executed")
  parser.add_argument("-l", "--label_file", default="edgetpu/test_data/inat_bird_labels.txt", \
    help="name of file containing labels")
  parser.add_argument("-k", "--top_k", default=5, help="top_k")
  parser.add_argument("-t", "--threshold", default=0.0, help="threshold")
  parser.add_argument("-c", "--loop_counts", default=1, help="loop counts")
  parser.add_argument("-d", "--device_path", help="device_path")
  parser.add_argument("-b", "--input_mean", default=127.5, help="input_mean")
  parser.add_argument("-s", "--input_std", default=127.5, help="input standard deviation")
  args = parser.parse_args()

  if args.device_path:
    engine = ClassificationEngine(args.model_file, device_path=args.device_path)
  else:
    engine = ClassificationEngine(args.model_file)
  print("device path:", engine.device_path())

  input_tensor_shape = engine.get_input_tensor_shape()
  if (input_tensor_shape.size != 4 or input_tensor_shape[3] != 3 or
                  input_tensor_shape[0] != 1):
    raise RuntimeError('Invalid input tensor shape! Expected: [1, height, width, 3]')
  _, height, width, _ = input_tensor_shape

  img = Image.open(args.image)
  img = img.resize((width, height))

  input_tensor = np.asarray(img).flatten()

  if floating_model:
    input_tensor = (np.float32(input_tensor) - args.input_mean) / args.input_std
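The listing cuts off before the inference itself; a hedged sketch of how the prepared tensor would typically be fed to the engine, assuming the legacy `classify_with_input_tensor` API and a label file with "id name" lines:

  # Hedged continuation: read labels and run inference `loop_counts` times.
  labels = {}
  with open(args.label_file) as f:
    for line in f:
      idx, name = line.strip().split(maxsplit=1)
      labels[int(idx)] = name

  for _ in range(args.loop_counts):
    for label_id, score in engine.classify_with_input_tensor(
        input_tensor, threshold=args.threshold, top_k=args.top_k):
      print('%s: %.5f' % (labels.get(label_id, str(label_id)), score))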