Example 1
def test_resize():
    config = _good_config()
    img_detect = TFImageDetection(**config)
    _dir = os.path.dirname(os.path.abspath(__file__))
    img_path = os.path.join(_dir, 'background.jpg')
    image = Image.open(img_path)
    orig_width = image.size[0]
    assert orig_width == 1280
    orig_height = image.size[1]
    assert orig_height == 720
    new_size = (300, 300)
    new_image = img_detect.resize(image=image, desired_size=new_size)
    new_width = new_image.size[0]
    assert new_width == new_size[0]
    new_height = new_image.size[1]
    assert new_height == new_size[1]
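These test snippets are excerpts from a single pytest module, so the imports they rely on are not shown. A minimal header they assume might look like the sketch below; the import path for TFImageDetection is hypothetical and should point at wherever the class lives in the project under test.

import os

import pytest
from PIL import Image

# hypothetical import path; adjust to the actual package layout of the project under test
from your_project.image_detection import TFImageDetection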
Example 2
def test_inference_init_good_config():
    config = _good_config()
    img_detect = TFImageDetection(**config)
    assert img_detect
    assert img_detect._tfengine
    assert img_detect._tfengine._model_tflite_path.endswith('.tflite')
    assert img_detect._tfengine._model_edgetpu_path.endswith('.tflite')
    assert img_detect._tfengine.confidence_threshold == 0.654
    assert img_detect._tfengine.top_k == 123
    assert img_detect._tfengine.is_quantized
    assert img_detect._tfengine._model_labels_path.endswith('.txt')
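The assertions in Example 2 pin down what the shared _good_config() helper has to return: a model section with .tflite paths, a .txt labels file, top_k of 123, and confidence_threshold of 0.654. A minimal sketch under those assumptions follows; the file names and the placement of the EdgeTPU path under model['edgetpu'] are guesses, while the keys and values come from the tests above.

def _good_config():
    # sketch of the fixture used throughout these examples; file names are hypothetical,
    # only the keys and the asserted values (0.654, 123, .tflite, .txt) are taken from the tests
    _dir = os.path.dirname(os.path.abspath(__file__))
    return {
        'model': {
            'tflite': os.path.join(_dir, 'ai_models', 'detection.tflite'),
            'edgetpu': os.path.join(_dir, 'ai_models', 'detection_edgetpu.tflite'),
        },
        'labels': os.path.join(_dir, 'ai_models', 'labels.txt'),
        'top_k': 123,
        'confidence_threshold': 0.654,
    }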
Example 3
def test_inference_init_bad_config():
    config = {
        'model': {
            'tflite': 'some_bad_tflite_model',
        },
        'labels': 'no_labels',
        'top_k': 123,
        'confidence_threshold': 654,
    }
    with pytest.raises(AssertionError):
        TFImageDetection(**config)
Example 4
def test_model_inputs():
    """Verify against known model inputs."""
    config = _good_config()
    img_detect = TFImageDetection(**config)
    tfe = img_detect._tfengine
    samples = tfe.input_details[0]['shape'][0]
    assert samples == 1
    height = tfe.input_details[0]['shape'][1]
    assert height == 300
    width = tfe.input_details[0]['shape'][2]
    assert width == 300
    colors = tfe.input_details[0]['shape'][3]
    assert colors == 3
Example 5
def test_model_outputs():
    """Verify against known model outputs."""
    config = _good_config()
    img_detect = TFImageDetection(**config)
    tfe = img_detect._tfengine
    assert tfe.output_details[0]['shape'][0] == 1
    scores = tfe.output_details[0]['shape'][1]
    assert scores == 20
    assert tfe.output_details[1]['shape'][0] == 1
    boxes = tfe.output_details[1]['shape'][1]
    assert boxes == 20
    assert tfe.output_details[2]['shape'][0] == 1
    labels = tfe.output_details[2]['shape'][1]
    assert labels == 20
    num = tfe.output_details[3]['shape'][0]
    assert num == 1
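The tensor shapes asserted in Examples 4 and 5 can also be inspected directly with the TensorFlow Lite interpreter, without going through TFImageDetection. The sketch below uses the public tf.lite API; the model path is hypothetical.

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='ai_models/detection.tflite')  # hypothetical path
interpreter.allocate_tensors()

# a single input tensor shaped [1, 300, 300, 3]: batch, height, width, RGB channels
for detail in interpreter.get_input_details():
    print(detail['index'], detail['shape'], detail['dtype'])

# four output tensors, each with a leading batch dimension of 1
for detail in interpreter.get_output_details():
    print(detail['index'], detail['shape'], detail['dtype'])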
Example 6
def test_inference_init_no_config():
    with pytest.raises(AssertionError):
        TFImageDetection()
Example 7
def test_load_labels():
    config = _good_config()
    img_detect = TFImageDetection(**config)
    labels = img_detect._labels
    assert labels[0] == 'person'
    assert labels[15] == 'bird'
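Example 7 implies a label map where index 0 is 'person' and index 15 is 'bird'. A loader consistent with those assertions is sketched below, assuming one label per line; the real _labels attribute may be built differently, for example from explicit "<id> <name>" pairs.

def load_labels(labels_path):
    # assumption: plain text, one label per line, line number == class id
    with open(labels_path, encoding='utf-8') as labels_file:
        return [line.strip() for line in labels_file if line.strip()]

labels = load_labels('ai_models/labels.txt')  # hypothetical path
assert labels[0] == 'person'
assert labels[15] == 'bird'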
Example 8
def test_receive_next_sample():
    config = _good_config()
    img_detect = TFImageDetection(**config)
    # no action expected from the abstract method
    img_detect.receive_next_sample(image=None)
Example 9
    def DetectPosesInImage(self, img):
        """
        Detects poses in a given image.

        :Parameters:
        ----------
        img : PIL.Image
            Input Image for AI model detection.

        :Returns:
        -------
        poses:
            A list of Pose objects with keypoints and confidence scores
        PIL.Image
            Resized image fitting the AI model input tensor.
        """

        _tensor_input_size = (self._tensor_image_width,
                              self._tensor_image_height)

        # thumbnail is a proportionately resized image
        thumbnail = TFImageDetection.thumbnail(image=img,
                                               desired_size=_tensor_input_size)
        # convert the thumbnail into an image with the exact size of the
        # input tensor, preserving proportions by padding with a solid
        # color as needed
        template_image = TFImageDetection.resize(
            image=thumbnail, desired_size=_tensor_input_size)

        # add a batch dimension: the model expects input shape [1, height, width, channels]
        template_input = np.expand_dims(template_image.copy(), axis=0)
        floating_model = self._tfengine.input_details[0]['dtype'] == np.float32

        if floating_model:
            # floating point (non-quantized) models expect inputs normalized to [-1, 1]
            template_input = (np.float32(template_input) - 127.5) / 127.5

        self.tf_interpreter().set_tensor(
            self._tfengine.input_details[0]['index'], template_input)
        self.tf_interpreter().invoke()

        template_output_data = self.tf_interpreter().get_tensor(
            self._tfengine.output_details[0]['index'])
        template_offset_data = self.tf_interpreter().get_tensor(
            self._tfengine.output_details[1]['index'])

        template_heatmaps = np.squeeze(template_output_data)
        template_offsets = np.squeeze(template_offset_data)

        kps = self.parse_output(template_heatmaps, template_offsets, 0.3)

        poses = []

        keypoint_dict = {}
        cnt = 0

        for point_i in range(kps.shape[0]):
            x, y = kps[point_i, 1], kps[point_i, 0]
            prob = self.sigmoid(kps[point_i, 3])

            if prob > 0.60:
                cnt += 1
            # record every keypoint; cnt only counts the ones above the confidence cutoff
            keypoint = Keypoint(KEYPOINTS[point_i], [x, y], prob)
            keypoint_dict[KEYPOINTS[point_i]] = keypoint
            # draw on image and save it for debugging
            draw = ImageDraw.Draw(template_image)
            draw.line(((0, 0), (x, y)), fill='red')

        # overall pose score: fraction of the 17 keypoints detected above the confidence cutoff
        pose_scores = cnt / 17
        poses.append(Pose(keypoint_dict, pose_scores))
        # DEBUG: save template_image for debugging
        # DEBUG: timestr = int(time.monotonic()*1000)
        # DEBUG: template_image.save(f'tmp-template-image-time-{timestr}-keypoints-{cnt}.jpg', format='JPEG')
        return poses, thumbnail
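DetectPosesInImage relies on two helpers not shown here, self.sigmoid and self.parse_output, and on Keypoint, Pose, and KEYPOINTS definitions assumed to exist alongside this method. Assuming self.sigmoid is the standard logistic function, the 0.60 probability cutoff used above corresponds to a raw keypoint score of roughly +0.405, as the runnable sketch below shows.

import numpy as np

def sigmoid(x):
    # standard logistic function; maps a raw keypoint score onto (0, 1)
    return 1.0 / (1.0 + np.exp(-x))

print(sigmoid(0.0))    # 0.5
print(sigmoid(0.405))  # ~0.60, the probability cutoff used in the keypoint loop

The method returns the poses list together with the proportionately resized thumbnail, so a caller gets both the detected keypoints and a resized copy of the input image to relate them back to.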