Example #1
    def test_size_constructor2(self):
        """Check if the constructor can handle the singleton case, that is
        initializing from a `Size` object should return the same
        `Size` object.
        """
        size = Size(3, 4)
        size2 = Size(size)

        self.assertEqual(size, size2)
        self.assertIs(size, size2)
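The `assertIs` check only passes if the constructor returns the identical object when handed a `Size`. A minimal sketch of such a pass-through, assuming `Size` is a tuple subclass (hypothetical; the real implementation may differ):

# Hypothetical sketch: returning the argument itself from __new__
# makes `Size(size) is size` hold, as the test above expects.
class Size(tuple):
    def __new__(cls, *args):
        if len(args) == 1 and isinstance(args[0], cls):
            return args[0]                 # singleton case: no copy made
        if len(args) == 1:
            args = tuple(args[0])          # unpack a (width, height) pair
        return super().__new__(cls, args)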
Example #2
    def test_size_equal(self):
        """Test size equality between different types.
        """

        # equality of size objects
        size1 = Size(3, 4)
        size2 = Size(3, 4)
        self.assertEqual(size1, size2)

        # equality of size objects and tuples
        size_tuple = (3, 4)
        self.assertEqual(size1, size_tuple)
        self.assertEqual(size_tuple, size1)

        # equality of size objects and lists
        size_list = [3, 4]
        self.assertEqual(size1, size_list)
        self.assertEqual(size_list, size1)
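Note that a plain tuple would not pass the list comparisons (`(3, 4) == [3, 4]` is `False` in Python), so `Size` has to bring its own `__eq__`. A hedged sketch of such a method, continuing the hypothetical tuple-subclass sketch from Example #1:

# Hypothetical sketch: compare against any two-element sequence, so
# that Size(3, 4) == [3, 4] holds (plain tuples reject lists).
class Size(tuple):
    def __eq__(self, other) -> bool:
        try:
            width, height = other          # unpack a 2-element iterable
        except (TypeError, ValueError):
            return NotImplemented
        return self[0] == width and self[1] == height

    __hash__ = tuple.__hash__              # stay hashable despite __eq__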
Example #3
    def test_size_type(self):
        """Test :py:class:`Size` type.
        """
        size_tuple = (3, 4)
        size_list = [3, 4]
        size = Size(3, 4)

        self.assertNotIsInstance(size_tuple, Size)
        self.assertNotIsInstance(size_list, Size)
        self.assertIsInstance(size, Size)
Example #4
    def test_sizelike_type(self):
        """Test :py:class:`Sizelike` type.
        """
        size_tuple = (3, 4)
        size_list = [3, 4]
        size_str = '3x4'
        size = Size(3, 4)

        # TypeError: Subscripted generics cannot be used with class
        # and instance checks
        sizelike_types = \
            tuple(get_origin(arg) or arg for arg in get_args(Sizelike))
        self.assertIsInstance(size_tuple, sizelike_types)
        self.assertIsInstance(size_list, sizelike_types)
        self.assertIsInstance(size, sizelike_types)
        self.assertIsInstance(size_str, sizelike_types)
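The `get_origin`/`get_args` workaround is needed because `Sizelike` is presumably a subscripted `Union`, and subscripted generics cannot be passed to `isinstance()`. A self-contained sketch with a hypothetical `Sizelike` definition:

from typing import List, Tuple, Union, get_args, get_origin

class Size(tuple):                         # stand-in for the real class
    pass

# Hypothetical definition: everything the Size constructor accepts.
Sizelike = Union[Size, Tuple[int, int], List[int], str]

# get_origin() maps subscripted generics back to their runtime classes
# (Tuple[int, int] -> tuple, List[int] -> list); plain classes such as
# Size and str have no origin and pass through via `or arg`.
sizelike_types = tuple(get_origin(arg) or arg for arg in get_args(Sizelike))
assert isinstance((3, 4), sizelike_types)
assert isinstance('3x4', sizelike_types)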
Example #5
class TestImage(unittest.TestCase):
    """Test the :py:class:`Image` type.
    """

    example_image_filename = 'examples/dog.jpg'
    example_image_size = Size(1546, 1213)

    def test_supported_formats(self):
        """Test supported image formats.
        """
        self.assertIn('array', Image.supported_formats())
        self.assertIn('image', Image.supported_formats())

    @unittest.skipUnless(os.path.isfile(example_image_filename),
                         reason="Example image file is not available")
    def test_image_creation(self):
        """Test creation of `Image`.
        """
        image = Image(self.example_image_filename)
        self.assertEqual(image.size(), self.example_image_size)

    @unittest.skipIf(importlib.util.find_spec("PyQt5") is None or
                     not os.path.isfile(example_image_filename),
                     reason="PyQt is not available")
    def test_qt(self):
        """Test :py:class:`Size` type.
        """
        module = importlib.import_module('qtgui.widgets.image')
        self.assertIn('qimage', Image.supported_formats())

        image = Image(self.example_image_filename)
        qimage = Image.as_qimage(image)
        self.assertIsInstance(qimage, module.QImage)

    @unittest.skipIf(importlib.util.find_spec("PIL") is None or
                     not os.path.isfile(example_image_filename),
                     reason="Python Image Library (PIL/pillow)"
                     " is not available")
    def test_pillow(self):
        """Test :py:class:`Size` type.
        """
        module = importlib.import_module('dltb.thirdparty.pil')
        self.assertIn('pil', Image.supported_formats())

        image = Image(self.example_image_filename)
        pil = Image.as_pil(image)
        self.assertIsInstance(pil, module.PIL.Image.Image)
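Taken together, the tests suggest a pattern in which importing a third-party adapter module registers the corresponding image format. A hedged usage sketch (the import path `dltb.base.image` is an assumption, not confirmed by the tests):

# Usage sketch, assuming pillow is installed and the example file exists.
import importlib
from dltb.base.image import Image          # assumed import path

importlib.import_module('dltb.thirdparty.pil')  # registers the 'pil' format
print(Image.supported_formats())                # now includes 'pil'

image = Image('examples/dog.jpg')
pil_image = Image.as_pil(image)                 # a PIL.Image.Image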
Example #6
import skimage.transform

def warp(image: Imagelike, transformation: np.ndarray,
         size: Sizelike) -> np.ndarray:
    """Warp an image by applying a transformation.
    """
    image = Image.as_array(image)
    size = Size(size)
    output_shape = (size[1], size[0])  # skimage expects (rows, columns)
    # further argument: order (int, optional):
    #    The order of interpolation. The order has to be in the range 0-5:
    #        0: Nearest-neighbor
    #        1: Bi-linear (default)
    #        2: Bi-quadratic
    #        3: Bi-cubic
    #        4: Bi-quartic
    #        5: Bi-quintic
    # Call skimage's warp explicitly, rather than recursing into this
    # function by accident.
    warped = skimage.transform.warp(image, transformation,
                                    output_shape=output_shape,
                                    preserve_range=True)
    warped = warped.astype(image.dtype)
    return warped
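A usage sketch: `skimage.transform.warp` accepts a 3x3 homogeneous matrix as the inverse coordinate map, so e.g. the parameter matrix of a `SimilarityTransform` can be passed straight through (assumes scikit-image and numpy; the image values are made up):

import numpy as np
from skimage.transform import SimilarityTransform

# Shift the image content by (10, 20) pixels and crop/pad the result to
# 640x480.  The matrix is the *inverse* map, hence the negative shift.
image = np.random.randint(0, 256, (1213, 1546, 3), dtype=np.uint8)
shift = SimilarityTransform(translation=(-10, -20))
warped = warp(image, shift.params, size=(640, 480))
print(warped.shape, warped.dtype)          # (480, 640, 3) uint8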
Example #7
    def test_size_constructor(self):
        """Test the constructor with different aruments.
        """

        # two arguments
        size = Size(3, 4)
        self.assertEqual(size.width, 3)
        self.assertEqual(size.height, 4)

        # pair of arguments
        size_tuple = (3, 4)
        size = Size(size_tuple)
        self.assertEqual(size.width, size_tuple[0])
        self.assertEqual(size.height, size_tuple[1])

        # list of two arguments
        size_list = [3, 4]
        size = Size(size_list)
        self.assertEqual(size.width, size_list[0])
        self.assertEqual(size.height, size_list[1])

        size_str1 = '3x4'
        size = Size(size_str1)
        self.assertEqual(size.width, 3)
        self.assertEqual(size.height, 4)

        size_str2 = '3,4'
        size = Size(size_str2)
        self.assertEqual(size.width, 3)
        self.assertEqual(size.height, 4)

        size = Size(5)
        self.assertEqual(size.width, 5)
        self.assertEqual(size.height, 5)

        size = Size('5')
        self.assertEqual(size.width, 5)
        self.assertEqual(size.height, 5)
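A hedged sketch of parsing logic that would cover all of the cases exercised above (hypothetical helper; the real constructor may be organized differently):

import re

# Hypothetical helper covering Size(3, 4), Size((3, 4)), Size([3, 4]),
# Size('3x4'), Size('3,4'), Size(5) and Size('5').
def _parse_size(*args) -> tuple:
    if len(args) == 2:                     # Size(width, height)
        return int(args[0]), int(args[1])
    arg = args[0]
    if isinstance(arg, str):               # '3x4', '3,4' or '5'
        arg = [int(part) for part in re.split('[x,]', arg)]
    if isinstance(arg, (tuple, list)):
        if len(arg) == 2:                  # a (width, height) pair
            return int(arg[0]), int(arg[1])
        arg = arg[0]
    return int(arg), int(arg)              # scalar: a square size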
Example #8
class Detector(DetectorBase):
    """Torch-based implementation of the MTCNN detector from the
    face.evoLVe repository [1].

    [1] https://github.com/ZhaoJ9014/face.evoLVe
    """
    internal_arguments: Tuple[str, ...] = ('pil', )

    # Default reference facial points for crop_size = (96, 112).  (This
    # is taken from the face.evoLVe repository,
    # applications/align/align_trans.py.  Note: according to the
    # comments in that file, these landmarks are for a "facial points
    # crop_size = (112, 112)"; however, the crop size is then
    # specified as (96, 112), which actually seems to be more
    # appropriate.)  The coordinates are of the form (x, y), with the
    # origin located at the bottom left corner of the image.
    _reference_landmarks = Landmarks(points=np.asarray([
        [30.29459953, 51.69630051],  # left mouth
        [65.53179932, 51.50139999],  # right mouth
        [48.02519989, 71.73660278],  # nose
        [33.54930115, 92.3655014],  # left eye
        [62.72990036, 92.20410156]  # right eye
    ]))

    _reference_size = Size(96, 112)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._face_evolve_repository = \
            'https://github.com/ZhaoJ9014/face.evoLVe'
        self._face_evolve_directory = config.github_directory / 'face.evoLVe'
        self._module_detector = None

    def _prepared(self) -> bool:
        return (self._module_detector is not None) and super()._prepared()

    def _preparable(self) -> bool:
        return self._face_evolve_directory.is_dir() and super()._preparable()

    def _prepare(self) -> None:
        super()._prepare()

        # (1) load the model
        align_code_directory = \
            self._face_evolve_directory / 'applications' / 'align'
        self._module_detector = \
            Importer.import_module_from('detector',
                                        directory=align_code_directory)

    def _preprocess_data(self, data: Data, **kwargs):
        print("Preprocessing data")  # FIXME: is not called
        return PIL.Image.fromarray(data.array)

    def _preprocess(self, *args, **kwargs) -> Data:
        context = super()._preprocess(*args, **kwargs)
        context.add_attribute('pil', PIL.Image.fromarray(context.input_image))
        return context

    def _detect(self, image: PIL.Image.Image, **kwargs) -> Metadata:
        """Apply the MTCNN detector to detect faces in the given image.

        Arguments
        ---------
        image:
            The image in which to detect faces.  Expected is an RGB
            image with np.uint8 data.

        Returns
        -------
        metadata: Metadata
            A Metadata structure in which BoundingBoxes and
            FacialLandmarks are provided, annotated with a numeric 'id'
            and a 'confidence' value.
        """
        # FIXME[hack]:
        if isinstance(image, np.ndarray):
            image = PIL.Image.fromarray(image)
        #
        # (1) Run the MTCNN detector
        #
        try:
            # FIXME[question]: what is going on here? can this be done
            # in prepare?
            align_code_directory = \
                self._face_evolve_directory / 'applications' / 'align'
            prev_cwd = os.getcwd()
            os.chdir(align_code_directory)
            LOG.info("MTCNN: detecting facess ...")
            bounding_boxes, landmarks = \
                self._module_detector.detect_faces(image)
            LOG.info("MTCNN: ... found %d faces.", len(bounding_boxes))
        finally:
            os.chdir(prev_cwd)

        #
        # (2) Create Metadata
        #
        self.detect_boxes = True
        self.detect_landmarks = True

        detections = Metadata(
            description='Detections by the Torch MTCNN detector')
        for face_id, (bbox, mark) in enumerate(zip(bounding_boxes, landmarks)):
            confidence = bbox[4]

            if self.detect_boxes:
                # The bounding boxes are reported as a 5-tuple:
                # (x1, y1, x2, y2, confidence)
                detections.add_region(BoundingBox(x1=bbox[0],
                                                  y1=bbox[1],
                                                  x2=bbox[2],
                                                  y2=bbox[3]),
                                      confidence=confidence,
                                      id=face_id)

            if self.detect_landmarks:
                # Landmarks are reported as an array of length 10,
                # consisting of 5 consecutive x-coordinates followed by
                # the 5 corresponding y-coordinates.  The order of these
                # pairs is: left_mouth, right_mouth, nose, left_eye,
                # right_eye.  That is, the array holds
                #    [left_mouth_x, right_mouth_x, nose_x,
                #     left_eye_x, right_eye_x,
                #     left_mouth_y, right_mouth_y, nose_y,
                #     left_eye_y, right_eye_y]
                points = mark.reshape(2, 5).T.astype(int)
                detections.add_region(Landmarks(points=points),
                                      confidence=confidence,
                                      id=face_id)

        return detections
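The `_reference_landmarks` and `_reference_size` above suggest that the detected landmarks are used to align faces into a normalized 96x112 crop. A hedged sketch of such an alignment step (hypothetical helper built on scikit-image; not part of the class above):

import numpy as np
from skimage.transform import SimilarityTransform, warp

# Hypothetical sketch: estimate the similarity transform that maps the
# five detected landmark points onto the reference landmarks, then warp
# the face into the (96, 112) reference frame.
def align_face(image: np.ndarray, detected: np.ndarray,
               reference: np.ndarray, size=(96, 112)) -> np.ndarray:
    transform = SimilarityTransform()
    transform.estimate(detected, reference)      # maps src -> dst
    aligned = warp(image, transform.inverse,     # warp expects the inverse
                   output_shape=(size[1], size[0]), preserve_range=True)
    return aligned.astype(image.dtype)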