Example #1
    def test_debug_info(self):
        engine = BasicEngine(
            test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'))
        # Check model's input format.
        input_tensor_shape = engine.get_input_tensor_shape()
        self.assertListEqual([1, 224, 224, 3], input_tensor_shape.tolist())
        self.assertEqual(224 * 224 * 3, engine.required_input_array_size())

        # Check model's output.
        output_tensors_sizes = engine.get_all_output_tensors_sizes()
        self.assertListEqual([1001], output_tensors_sizes.tolist())
        self.assertEqual(1, engine.get_num_of_output_tensors())
        self.assertEqual(1001, engine.get_output_tensor_size(0))
        self.assertEqual(1001, engine.total_output_array_size())

        # Check SSD model.
        ssd_engine = BasicEngine(
            test_utils.test_data_path(
                'mobilenet_ssd_v1_coco_quant_postprocess.tflite'))
        # Check model's input format.
        input_tensor_shape = ssd_engine.get_input_tensor_shape()
        self.assertListEqual([1, 300, 300, 3], input_tensor_shape.tolist())
        self.assertEqual(300 * 300 * 3, ssd_engine.required_input_array_size())

        # Check model's output.
        output_tensors_sizes = ssd_engine.get_all_output_tensors_sizes()
        self.assertListEqual([80, 20, 20, 1], output_tensors_sizes.tolist())
        self.assertEqual(4, ssd_engine.get_num_of_output_tensors())
        self.assertEqual(80, ssd_engine.get_output_tensor_size(0))
        self.assertEqual(20, ssd_engine.get_output_tensor_size(1))
        self.assertEqual(20, ssd_engine.get_output_tensor_size(2))
        self.assertEqual(1, ssd_engine.get_output_tensor_size(3))
        self.assertEqual(121, ssd_engine.total_output_array_size())
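Taken together, these getters describe exactly what run_inference consumes and produces. A minimal standalone sketch (assuming numpy and the same test_utils helpers are importable):

import numpy as np

engine = BasicEngine(
    test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'))
input_data = np.random.randint(
    0, 256, engine.required_input_array_size(), dtype=np.uint8)
latency_ms, result = engine.run_inference(input_data)
# result is a flat array of size engine.total_output_array_size() (1001 here).
print(latency_ms, result.size)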
Example #2
def _run_benchmark_for_model(model_name):
  """Runs benchmark for given model with a random input.

  Args:
    model_name: string, file name of the model.

  Returns:
    float, average inference time in milliseconds.
  """
  iterations = 200 if ('edgetpu' in model_name) else 20
  print('Benchmark for [', model_name, ']')
  print('model path = ', test_utils.test_data_path(model_name))
  engine = BasicEngine(test_utils.test_data_path(model_name))
  print('Shape of input tensor : ', engine.get_input_tensor_shape())

  # Prepare a random generated input.
  input_size = engine.required_input_array_size()
  random_input = test_utils.generate_random_input(1, input_size)

  # Convert it to a numpy.array.
  input_data = np.array(random_input, dtype=np.uint8)

  benchmark_time = timeit.timeit(
      lambda: engine.run_inference(input_data),
      number=iterations)

  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print(time_per_inference, 'ms (iterations = ', iterations, ')')
  return time_per_inference
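A usage sketch: comparing a CPU build against an Edge TPU build of the same model. The _edgetpu file name follows the convention the iterations check above relies on, but is an assumption here:

cpu_ms = _run_benchmark_for_model('mobilenet_v1_1.0_224_quant.tflite')
tpu_ms = _run_benchmark_for_model('mobilenet_v1_1.0_224_quant_edgetpu.tflite')
print('speedup: %.1fx' % (cpu_ms / tpu_ms))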
Example #3
    def test_run_inference_implicit_size_different_types(self):
        engine = BasicEngine(
            test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'))
        input_size = engine.required_input_array_size()
        input_data = test_utils.generate_random_input(1, input_size)
        self._test_inference_with_different_input_types(engine, input_data)
        input_data = test_utils.generate_random_input(1, input_size + 1)
        self._test_inference_with_different_input_types(engine, input_data)
        input_data = test_utils.generate_random_input(1, input_size + 64)
        self._test_inference_with_different_input_types(engine, input_data)
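The helper _test_inference_with_different_input_types is not shown on this page. A plausible sketch, assuming run_inference accepts plain lists, bytes, and numpy uint8 arrays interchangeably:

    def _test_inference_with_different_input_types(self, engine, input_data):
        # Same payload handed over as list, bytes, and numpy array; all
        # three should yield outputs of identical size.
        _, ret = engine.run_inference(input_data)
        _, ret_bytes = engine.run_inference(bytes(input_data))
        _, ret_np = engine.run_inference(np.array(input_data, dtype=np.uint8))
        self.assertEqual(ret.size, ret_bytes.size)
        self.assertEqual(ret.size, ret_np.size)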
Example #4
    def test_inference_with_bad_input_size(self):
        engine = BasicEngine(
            test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'))
        expected_size = engine.required_input_array_size()
        input_data = test_utils.generate_random_input(1, expected_size - 1)
        error_message = None
        try:
            engine.run_inference(input_data, expected_size - 1)
        except AssertionError as e:
            error_message = str(e)
        self.assertEqual(
            'Wrong input size={}, expected={}.'.format(expected_size - 1,
                                                       expected_size),
            error_message)
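The try/except block can be written more compactly with unittest's context manager; an equivalent sketch:

    def test_inference_with_bad_input_size_terse(self):
        engine = BasicEngine(
            test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'))
        expected_size = engine.required_input_array_size()
        input_data = test_utils.generate_random_input(1, expected_size - 1)
        with self.assertRaisesRegex(AssertionError, 'Wrong input size'):
            engine.run_inference(input_data, expected_size - 1)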
Example #5
    def test_run_inference(self):
        for model in test_utils.get_model_list():
            print('Testing model:', model)
            engine = BasicEngine(test_utils.test_data_path(model))
            input_data = test_utils.generate_random_input(
                1, engine.required_input_array_size())
            latency, ret = engine.run_inference(input_data)
            self.assertEqual(ret.size, engine.total_output_array_size())
            # Check debugging functions.
            self.assertLess(math.fabs(engine.get_inference_time() - latency),
                            0.001)
            raw_output = engine.get_raw_output()
            self.assertEqual(ret.size, raw_output.size)
            for i in range(ret.size):
                if math.isnan(ret[i]) and math.isnan(raw_output[i]):
                    continue
                self.assertLess(math.fabs(ret[i] - raw_output[i]), 0.001)
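The element-wise NaN-tolerant comparison at the end is roughly a vectorized one-liner; a sketch using numpy (allclose uses <= plus a relative term, so it is marginally looser than the strict assertLess loop):

            self.assertTrue(
                np.allclose(ret, raw_output, atol=0.001, equal_nan=True))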
Example #6
def run_benchmark(model):
    """Returns average inference time in ms on specified model on random input."""

    print('Benchmark for [%s]' % model)
    print('model path = %s' % test_utils.test_data_path(model))
    engine = BasicEngine(test_utils.test_data_path(model))
    print('input tensor shape = %s' % engine.get_input_tensor_shape())

    iterations = 200 if 'edgetpu' in model else 20
    input_size = engine.required_input_array_size()
    random_input = test_utils.generate_random_input(1, input_size)
    input_data = np.array(random_input, dtype=np.uint8)
    result = 1000 * timeit.timeit(lambda: engine.run_inference(input_data),
                                  number=iterations) / iterations

    print('%.2f ms (iterations = %d)' % (result, iterations))
    return result
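A usage sketch, driving the benchmark over the same model list used in Example #5:

for model in test_utils.get_model_list():
    run_benchmark(model)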
Example #7
def _run_benchmark_for_cocompiled_models(model_names):
  """Runs benchmark for a given model set with random inputs. Models run
  inferences alternately with random inputs. It benchmarks the total time
  running each model once.

  Args:
    model_names: list of string, file names of the models.

  Returns:
    float, average total time in milliseconds to run each model once.
  """
  iterations = 200
  print('Benchmark for ', model_names)

  engines = []
  input_data_list = []
  edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
      edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)

  for model_name in model_names:
    # Run models on a single edgetpu to achieve accurate benchmark results.
    engine = BasicEngine(test_utils.test_data_path(model_name), edge_tpus[0])

    # Prepare a random generated input.
    input_size = engine.required_input_array_size()
    random_input = test_utils.generate_random_input(1, input_size)

    # Convert it to a numpy.array.
    input_data = np.array(random_input, dtype=np.uint8)

    engines.append(engine)
    input_data_list.append(input_data)

  benchmark_time = timeit.timeit(
      lambda: _run_inferences(engines, input_data_list),
      number=iterations)

  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print(time_per_inference, 'ms (iterations = ', iterations, ')')
  return time_per_inference
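_run_inferences is referenced above but not defined on this page. A plausible sketch matching how it is called:

def _run_inferences(engines, input_data_list):
  # One benchmark iteration: each co-compiled engine runs a single
  # inference on its prepared input.
  for engine, input_data in zip(engines, input_data_list):
    engine.run_inference(input_data)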

Example #8
import time

import cv2
import numpy as np

from edgetpu.basic.basic_engine import BasicEngine


def label_to_color_image(label):
    """Maps a 2-D array of class labels to an RGB color image."""
    if label.ndim != 2:
        raise ValueError('Expected a 2-D input label')
    colormap = create_pascal_label_colormap()
    if np.max(label) >= len(colormap):
        raise ValueError('label value too large.')
    return colormap[label]
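# create_pascal_label_colormap() is not defined in this snippet. One common
# implementation, borrowed from the DeepLab demo code (an assumption here),
# builds the 256-entry PASCAL VOC colormap bitwise:
def create_pascal_label_colormap():
    colormap = np.zeros((256, 3), dtype=int)
    ind = np.arange(256, dtype=int)
    for shift in reversed(range(8)):
        for channel in range(3):
            colormap[:, channel] |= ((ind >> channel) & 1) << shift
        ind >>= 3
    return colormap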


engine = BasicEngine(
    '/usr/share/edgetpu/examples/models/deeplabv3_mnv2_pascal_quant_edgetpu.tflite'
)
_, height, width, _ = engine.get_input_tensor_shape()
input_size = engine.required_input_array_size()  # flattened input length (not used below)

cam = cv2.VideoCapture(2)
while True:
    ret, frame = cam.read()
    if not ret:  # camera frame not available, try again
        continue
    frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_NEAREST)

    start = time.time()
    _, result = engine.run_inference(frame[:, :, ::-1].flatten())  # BGR to RGB, flattened
    result = np.reshape(result, (height, width))
    end = time.time()
    result = label_to_color_image(result.astype(int)).astype(np.uint8)

    print(1 / (end - start))  # frames per second for inference + reshape
    cv2.imshow("Result", result)
    if cv2.waitKey(1) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
Example #9
class FaceEmbeddingEngine:
    ''' class FaceEmbeddingEngine

        Purpose: generate embeddings for images of faces
    '''
    def __init__(self, embedding_model):
        ''' function constructor

        Constructor for FaceEmbeddingEngine

        Args:
        embedding_model (FaceEmbeddingModelEnum): The model to use for generating
                        embeddings for face images

        Returns:
            None
        '''

        # We only want to import these modules at run-time since
        # they will only be installed on certain platforms.
        # pylint: disable=import-outside-toplevel, import-error

        self.embedding_model = embedding_model
        self.required_image_shape = get_image_dimensions_for_embedding_model(
            embedding_model) + (3, )  # append 3 channels for RGB

        if self.embedding_model == FaceEmbeddingModelEnum.CELEBRITY_KERAS:
            print("Using Celebrity trained Keras model for face embeddings")
            from keras.models import load_model
            self.face_embedding_engine = load_model(
                FACE_EMBEDDING_CELEBRITY_KERAS_MODEL_PATH, compile=False)
        elif self.embedding_model == FaceEmbeddingModelEnum.CELEBRITY_TFLITE:
            print("Using Celebrity trained tflite model for face embeddings")
            from edgetpu.basic.basic_engine import BasicEngine
            self.face_embedding_engine = BasicEngine(
                FACE_EMBEDDING_CELEBRITY_TFLITE_MODEL_PATH)
            print("Embedding model input tensor shape: {}".format(
                self.face_embedding_engine.get_input_tensor_shape()))
            print("Embedding model input size: {}".format(
                self.face_embedding_engine.required_input_array_size()))
        else:
            raise Exception(
                "Invalid embedding model: {}".format(embedding_model))

    def get_embedding_model(self):
        ''' function get_embedding_model

        Get the embedding model being used by this instance of the FaceEmbeddingEngine

        Args:
            None

        Returns:
            The FaceEmbeddingModelEnum being used by this instance of FaceEmbeddingEngine
        '''
        return self.embedding_model

    # get the face embedding for one face
    def get_embedding(self, face_pixels):
        ''' function get_embedding

        Generate an embedding for the given face

        Args:
            face_pixels (cv2 image): The image of the face to generate the
                            embedding for. The dimensions of the image must
                            match the dimensions required by the selected
                            embedding model.

        Returns:
            A numpy array with the embedding that was generated
        '''

        # Confirm we're using a proper sized image to generate the embedding with
        if face_pixels.shape != self.required_image_shape:
            raise Exception(
                "Invalid shape: {} for embedding model: {}".format(
                    face_pixels.shape, self.embedding_model))

        # scale pixel values
        face_pixels = face_pixels.astype('float32')
        # standardize pixel values across channels (global)
        mean, std = face_pixels.mean(), face_pixels.std()
        face_pixels = (face_pixels - mean) / std
        # transform face into one sample
        sample = expand_dims(face_pixels, axis=0)

        # get embedding
        if self.embedding_model == FaceEmbeddingModelEnum.CELEBRITY_KERAS:
            embeddings = self.face_embedding_engine.predict(sample)
            result = embeddings[0]
        else:
            sample = sample.flatten()
            # scale so the maximum value is 255 before the uint8 cast
            sample *= 255.0 / sample.max()
            # convert to uint8
            sample = sample.astype(np_uint8)
            # run_inference returns (latency, output); keep only the output
            embeddings = self.face_embedding_engine.run_inference(sample)
            result = embeddings[1]

        return result
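A usage sketch; the image path and the resize step are assumptions, since get_embedding insists on the exact input shape of the selected model:

import cv2

embedder = FaceEmbeddingEngine(FaceEmbeddingModelEnum.CELEBRITY_TFLITE)
face = cv2.imread('face.jpg')  # hypothetical pre-cropped face image
height, width, _ = embedder.required_image_shape
face = cv2.resize(face, (width, height))  # cv2.resize takes (width, height)
embedding = embedder.get_embedding(face)
print('embedding size:', embedding.size)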