def test_resize():
    config = _good_config()
    img_detect = TFImageDetection(**config)
    # load a known 1280x720 test image located next to this test module
    _dir = os.path.dirname(os.path.abspath(__file__))
    img_path = os.path.join(_dir, 'background.jpg')
    image = Image.open(img_path)
    orig_width = image.size[0]
    assert orig_width == 1280
    orig_height = image.size[1]
    assert orig_height == 720
    # resize to the target size and verify the new dimensions
    new_size = (300, 300)
    new_image = img_detect.resize(image=image, desired_size=new_size)
    new_width = new_image.size[0]
    assert new_width == new_size[0]
    new_height = new_image.size[1]
    assert new_height == new_size[1]
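
For context, the resize behavior exercised above (and relied on by the pose detection example below) can be approximated by a letterbox-style helper: scale the image proportionally, then pad it with a solid color to the exact target size. The sketch below is only an illustration using Pillow; the helper name, padding color, and centering choice are assumptions, not the project's actual implementation.

from PIL import Image


def pad_resize(image, desired_size=(300, 300)):
    # minimal letterbox-style sketch: scale proportionally, then pad
    # with a solid color to the exact target size (helper name, padding
    # color and centering are illustrative assumptions)
    scaled = image.copy()
    # thumbnail() resizes in place, preserving the aspect ratio
    scaled.thumbnail(desired_size)
    # paste the scaled image centered on a solid background of the exact target size
    background = Image.new('RGB', desired_size, (0, 0, 0))
    offset = ((desired_size[0] - scaled.size[0]) // 2,
              (desired_size[1] - scaled.size[1]) // 2)
    background.paste(scaled, offset)
    return background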
Example #2
    def DetectPosesInImage(self, img):
        """
        Detects poses in a given image.

        :Parameters:
        ----------
        img : PIL.Image
            Input Image for AI model detection.

        :Returns:
        -------
        poses:
            A list of Pose objects with keypoints and confidence scores
        PIL.Image
            Resized image fitting the AI model input tensor.
        """

        _tensor_input_size = (self._tensor_image_width,
                              self._tensor_image_height)

        # thumbnail is a proportionately resized image
        thumbnail = TFImageDetection.thumbnail(image=img,
                                               desired_size=_tensor_input_size)
        # pad the thumbnail to the exact input tensor size,
        # preserving proportions by filling the margins with a solid color as needed
        template_image = TFImageDetection.resize(
            image=thumbnail, desired_size=_tensor_input_size)

        # add a batch dimension as expected by the model input tensor
        template_input = np.expand_dims(template_image.copy(), axis=0)
        floating_model = self._tfengine.input_details[0]['dtype'] == np.float32

        if floating_model:
            # normalize pixel values to the [-1, 1] range for floating point models
            template_input = (np.float32(template_input) - 127.5) / 127.5

        self.tf_interpreter().set_tensor(
            self._tfengine.input_details[0]['index'], template_input)
        self.tf_interpreter().invoke()

        # the first output tensor holds keypoint heatmaps, the second holds offset vectors
        template_output_data = self.tf_interpreter().get_tensor(
            self._tfengine.output_details[0]['index'])
        template_offset_data = self.tf_interpreter().get_tensor(
            self._tfengine.output_details[1]['index'])

        template_heatmaps = np.squeeze(template_output_data)
        template_offsets = np.squeeze(template_offset_data)

        # parse keypoint coordinates and raw scores from the heatmaps and offsets
        kps = self.parse_output(template_heatmaps, template_offsets, 0.3)

        poses = []

        keypoint_dict = {}
        cnt = 0

        for point_i in range(kps.shape[0]):
            x, y = kps[point_i, 1], kps[point_i, 0]
            prob = self.sigmoid(kps[point_i, 3])

            # count keypoints detected with high confidence;
            # the count feeds the overall pose score below
            if prob > 0.60:
                cnt += 1

            # keep every keypoint with its confidence, regardless of threshold
            keypoint = Keypoint(KEYPOINTS[point_i], [x, y], prob)
            keypoint_dict[KEYPOINTS[point_i]] = keypoint
            # draw on image and save it for debugging
            draw = ImageDraw.Draw(template_image)
            draw.line(((0, 0), (x, y)), fill='red')

        # overall pose score: fraction of the 17 keypoints detected with high confidence
        pose_score = cnt / 17
        poses.append(Pose(keypoint_dict, pose_score))
        # DEBUG: save template_image for debugging
        # DEBUG: timestr = int(time.monotonic()*1000)
        # DEBUG: template_image.save(f'tmp-template-image-time-{timestr}-keypoints-{cnt}.jpg', format='JPEG')
        return poses, thumbnail
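
For illustration, a hypothetical usage sketch of the method above. The detector class name, its constructor arguments, and the Pose/Keypoint attribute names are assumptions inferred from how Pose and Keypoint are constructed in the code, not a documented API.

from PIL import Image

# hypothetical setup; PoseDetector and its constructor arguments are assumptions
detector = PoseDetector(model='posenet_mobilenet_v1.tflite')

image = Image.open('person.jpg')
poses, thumbnail = detector.DetectPosesInImage(image)

for pose in poses:
    # Pose was built as Pose(keypoint_dict, score), so an overall score
    # and a keypoint mapping are assumed to be exposed as attributes
    print('pose score:', pose.score)
    for label, keypoint in pose.keypoints.items():
        # Keypoint was built as Keypoint(label, [x, y], prob);
        # the attribute names below are assumptions
        print(label, keypoint.xy, keypoint.score)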