def test_transform_set_select_extract_max_size(self):
    """The max_size plugin extracts set 2 with five 299px-tall transforms."""
    with deepstar_path():
        src = os.path.dirname(os.path.realpath(
            __file__)) + '/../../support/video_0001.mp4'  # noqa

        shutil.copyfile(src, VideoFile.path('video_0001.mp4'))
        VideoModel().insert('test', 'video_0001.mp4')
        DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

        self.mock_transform_set()

        with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
            transform_set_id = MaxSizeTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {})  # noqa

        self.assertEqual(transform_set_id, 2)

        # db
        self.assertEqual(TransformSetModel().select(2),
                         (2, 'max_size', 1, 1))

        rows = TransformModel().list(2)
        self.assertEqual(len(rows), 5)
        expected = [[6, 2, 1, 0], [7, 2, 2, 0], [8, 2, 3, 0],
                    [9, 2, 4, 0], [10, 2, 5, 0]]
        for row, want in zip(rows, expected):
            fields = list(row)
            json.loads(fields.pop(3))  # metadata column must be valid JSON
            self.assertEqual(fields, want)

        # files
        p1 = TransformSetSubDir.path(2)

        # each extracted transform image is 299 pixels tall
        for transform_id in (6, 7, 8, 9, 10):
            self.assertEqual(
                cv2.imread(TransformFile.path(p1, transform_id,
                                              'jpg')).shape[0],
                299)
    def mock_transform_set(self):
        """Create transform set 1 with five transforms copied from the
        support images (varying sizes and sharpness)."""
        support = os.path.dirname(os.path.realpath(
            __file__)) + '/../../support'  # noqa

        images = [
            support + '/image_0001.jpg',
            support + '/image_0003.jpg',  # 200-blurry
            support + '/image_0004.jpg',  # 200-sharp
            support + '/image_0005.jpg',  # 650-blurry
            support + '/image_0006.jpg',  # 650-sharp
        ]

        TransformSetModel().insert('face', 1, None)

        p1 = TransformSetSubDir.path(1)
        os.mkdir(p1)

        transform_model = TransformModel()

        # one transform per support image, frame IDs 1..5
        for frame_id, image in enumerate(images, start=1):
            transform_model.insert(1, frame_id, '{}', 0)
            shutil.copy(image, TransformFile.path(p1, frame_id, 'jpg'))
# Ejemplo n.º 3 — scraper artifact separating unrelated snippets
    def _pad(self, transform_set_path, transform_id, frame_id, metadata,
             target_set_id, size):
        """
        This method pads a transform to a square of side ``size`` with black
        (zero) pixels below and to the right of the source image.

        :param str transform_set_path: The transform set path.
        :param int transform_id: The transform ID.
        :param int frame_id: The frame ID.
        :param metadata: Metadata for the transform (JSON-serialized before
            insertion).
        :param int target_set_id: The new transform set ID.
        :param int size: The size to which to pad. The source image is
            presumably no larger than this in either dimension — the slice
            assignment below would raise otherwise (TODO confirm callers
            guarantee this).
        :rtype: None
        """

        # TransformFile.path is called statically elsewhere in this module;
        # use the consistent static form here as well.
        transform_path = TransformFile.path(transform_set_path, transform_id,
                                            'jpg')
        img = cv2.imread(transform_path)
        img_height, img_width = img.shape[:2]

        # Paste the source into the top-left corner of a black square canvas.
        # NumPy slice assignment copies the pixel data, so an explicit
        # img.copy() is redundant.
        img_padded = np.zeros((size, size, 3), dtype=np.uint8)
        img_padded[:img_height, :img_width, :] = img

        target_id = TransformModel().insert(target_set_id, frame_id,
                                            json.dumps(metadata), 0)
        output_path = TransformFile.path(
            TransformSetSubDir.path(target_set_id), target_id, 'jpg')

        # Maximum JPEG quality to minimize recompression loss.
        cv2.imwrite(output_path, img_padded, [cv2.IMWRITE_JPEG_QUALITY, 100])

        debug(
            f'Transform with ID {target_id:08d} at {output_path} extracted '
            f'from transform with ID {transform_id:08d} at {transform_path}',
            4)
    def _resize(self, transform_set_path, transform_id, frame_id, metadata,
                target_set_id, max_size):
        """
        This method resizes a transform so that neither dimension exceeds
        ``max_size``; aspect ratio is preserved and smaller images are
        re-encoded unchanged.

        :param str transform_set_path: The transform set path.
        :param int transform_id: The transform ID.
        :param int frame_id: The frame ID.
        :param metadata: Metadata for the transform (JSON-serialized before
            insertion).
        :param int target_set_id: The new transform set ID.
        :param int max_size: The max size.
        :rtype: None
        """

        # TransformFile.path is called statically elsewhere in this module;
        # use the consistent static form here as well.
        transform_path = TransformFile.path(transform_set_path, transform_id,
                                            'jpg')
        img = cv2.imread(transform_path)
        img_height, img_width = img.shape[:2]

        if img_height > max_size or img_width > max_size:
            # Resize along the larger dimension; imutils.resize preserves the
            # aspect ratio, so the other dimension ends up <= max_size too.
            if img_height > img_width:
                img = imutils.resize(img, height=max_size)
            else:
                img = imutils.resize(img, width=max_size)

        target_id = TransformModel().insert(target_set_id, frame_id,
                                            json.dumps(metadata), 0)
        output_path = TransformFile.path(
            TransformSetSubDir.path(target_set_id), target_id, 'jpg')

        # Maximum JPEG quality to minimize recompression loss.
        cv2.imwrite(output_path, img, [cv2.IMWRITE_JPEG_QUALITY, 100])

        debug(f'Transform with ID {target_id:08d} at {output_path} extracted '
              f'from transform with ID {transform_id:08d} at {transform_path}',
              4)
# Ejemplo n.º 5 — scraper artifact separating unrelated snippets
    def test_select_extract_face(self):
        """The CLI 'extract face' route creates transform set 1 with five
        face transforms."""
        with deepstar_path():
            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                video_route_handler = VideoCommandLineRouteHandler()

                video_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/video_0001.mp4'  # noqa

                video_route_handler.insert_file(video_0001)
                video_route_handler.select_extract([1])

            args = ['main.py', 'select', 'frame_sets', '1', 'extract', 'face']  # noqa
            opts = {}

            route_handler = FrameSetCommandLineRouteHandler()

            # capture the route handler's stdout
            try:
                sys.stdout = StringIO()
                route_handler.handle(args, opts)
                actual = sys.stdout.getvalue().strip()
            finally:
                sys.stdout = sys.__stdout__

            # stdout
            self.assertEqual(actual, 'transform_set_id=1, name=face, fk_frame_sets=1, fk_prev_transform_sets=None')  # noqa

            # db
            self.assertEqual(TransformSetModel().select(1),
                             (1, 'face', 1, None))

            rows = TransformModel().list(1)
            self.assertEqual(len(rows), 5)
            for index, row in enumerate(rows):
                fields = list(row)
                json.loads(fields.pop(3))  # metadata must be valid JSON
                self.assertEqual(fields, [index + 1, 1, index + 1, 0])

            # files
            p1 = TransformSetSubDir.path(1)

            # transforms
            for transform_id in range(1, 6):
                self.assertTrue(
                    os.path.isfile(TransformFile.path(p1, transform_id,
                                                      'jpg')))
# Ejemplo n.º 6 — scraper artifact separating unrelated snippets
    def test_transform_set_select_extract_pad_rejected(self):
        """Rejected transforms are skipped when padding a transform set."""
        with deepstar_path():
            src = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/video_0001.mp4'  # noqa

            shutil.copyfile(src, VideoFile.path('video_0001.mp4'))
            VideoModel().insert('test', 'video_0001.mp4')
            DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

            self.mock_transform_set()

            MaxSizeTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {})  # noqa

            # reject two of the five transforms in set 2
            transform_model = TransformModel()
            transform_model.update(7, rejected=1)
            transform_model.update(9, rejected=1)

            with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
                transform_set_id = PadTransformSetSelectExtractPlugin(
                ).transform_set_select_extract(2, {})  # noqa

            self.assertEqual(transform_set_id, 3)

            # db
            self.assertEqual(TransformSetModel().select(3), (3, 'pad', 1, 2))

            rows = TransformModel().list(3)
            self.assertEqual(len(rows), 3)
            expected = [[11, 3, 1, 0], [12, 3, 3, 0], [13, 3, 5, 0]]
            for row, want in zip(rows, expected):
                fields = list(row)
                json.loads(fields.pop(3))  # metadata must be valid JSON
                self.assertEqual(fields, want)

            # files
            p1 = TransformSetSubDir.path(3)

            # transforms
            for transform_id in (11, 12, 13):
                self.assertIsInstance(
                    cv2.imread(TransformFile.path(p1, transform_id, 'jpg')),
                    np.ndarray)
# Ejemplo n.º 7 — scraper artifact separating unrelated snippets
    def test_transform_set_select_merge_overlay_rejected(self):
        """Rejected transforms are skipped when overlay-merging two sets."""
        with deepstar_path():
            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                video_route_handler = VideoCommandLineRouteHandler()

                video_0001 = os.path.dirname(os.path.realpath(
                    __file__)) + '/../../support/video_0001.mp4'  # noqa

                video_route_handler.insert_file(video_0001)
                video_route_handler.select_extract([1])

                frame_set_route_handler = FrameSetCommandLineRouteHandler()

                # build two identical transform sets to merge
                frame_set_route_handler.select_extract(
                    [1], 'transform_set', {})
                frame_set_route_handler.select_extract(
                    [1], 'transform_set', {})

                # reject one transform in each source set
                transform_model = TransformModel()
                transform_model.update(1, rejected=1)
                transform_model.update(10, rejected=1)

            OverlayTransformSetSelectMergePlugin().transform_set_select_merge(
                [1, 2], {
                    'x1': '0',
                    'y1': '0'
                })  # noqa

            # db
            self.assertEqual(TransformSetModel().select(3),
                             (3, 'overlay', None, None))

            rows = TransformModel().list(3)
            self.assertEqual(len(rows), 4)
            for index, transform_id in enumerate((11, 12, 13, 14)):
                self.assertEqual(rows[index],
                                 (transform_id, 3, None, None, 0))

            # files
            p1 = TransformSetSubDir.path(3)

            # transforms
            for transform_id in (11, 12, 13, 14):
                self.assertIsInstance(
                    cv2.imread(TransformFile.path(p1, transform_id, 'jpg')),
                    np.ndarray)
    def test_transform_set_select_extract_crop(self):
        """Cropping extracts a new transform set of 50x50 images."""
        with deepstar_path():
            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                route_handler = VideoCommandLineRouteHandler()

                video_0001 = os.path.dirname(os.path.realpath(
                    __file__)) + '/../../support/video_0001.mp4'  # noqa

                route_handler.insert_file(video_0001)
                route_handler.select_extract([1])

                FrameSetCommandLineRouteHandler().select_extract(
                    [1], 'transform_set', {})  # noqa

            CropTransformSetSelectExtractPlugin().transform_set_select_extract(
                1, {
                    'x1': '0',
                    'y1': '0',
                    'x2': '50',
                    'y2': '50'
                })  # noqa

            # db
            self.assertEqual(TransformSetModel().select(2),
                             (2, 'crop', 1, 1))

            rows = TransformModel().list(2)
            self.assertEqual(len(rows), 5)
            for index, row in enumerate(rows):
                self.assertEqual(row, (index + 6, 2, index + 1, None, 0))

            # files
            p1 = TransformSetSubDir.path(2)

            # each cropped transform is exactly 50x50 pixels
            for transform_id in range(6, 11):
                self.assertEqual(
                    cv2.imread(TransformFile.path(p1, transform_id,
                                                  'jpg')).shape[:2],
                    (50, 50))
    def test_transform_set_select_merge_overlay_image(self):
        """Overlaying a static image yields a new five-transform set."""
        with deepstar_path():
            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                video_route_handler = VideoCommandLineRouteHandler()

                video_0001 = os.path.dirname(os.path.realpath(
                    __file__)) + '/../../support/video_0001.mp4'  # noqa

                video_route_handler.insert_file(video_0001)
                video_route_handler.select_extract([1])

                FrameSetCommandLineRouteHandler().select_extract(
                    [1], 'transform_set', {})

            image_0007 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/image_0007.png'  # noqa

            OverlayImageTransformSetSelectMergePlugin(
            ).transform_set_select_merge([1], {
                'image-path': image_0007,
                'x1': '0',
                'y1': '0'
            })  # noqa

            # db
            self.assertEqual(TransformSetModel().select(2),
                             (2, 'overlay_image', None, None))

            rows = TransformModel().list(2)
            self.assertEqual(len(rows), 5)
            for index, row in enumerate(rows):
                self.assertEqual(row, (index + 6, 2, None, None, 0))

            # files
            p1 = TransformSetSubDir.path(2)

            # transforms
            for transform_id in range(6, 11):
                self.assertIsInstance(
                    cv2.imread(TransformFile.path(p1, transform_id, 'jpg')),
                    np.ndarray)
    def test_frame_set_select_extract_face(self):
        """MTCNN extraction creates transform set 1 with five face crops."""
        with deepstar_path():
            src = os.path.dirname(os.path.realpath(__file__)) + '/../../support/video_0001.mp4'  # noqa

            shutil.copyfile(src, VideoFile.path('video_0001.mp4'))
            VideoModel().insert('test', 'video_0001.mp4')
            DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

            with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
                transform_set_id = MTCNNFrameSetSelectExtractPlugin().frame_set_select_extract(1, {})  # noqa

            self.assertEqual(transform_set_id, 1)

            # db
            self.assertEqual(TransformSetModel().select(1),
                             (1, 'face', 1, None))

            rows = TransformModel().list(1)
            self.assertEqual(len(rows), 5)
            for index, row in enumerate(rows):
                fields = list(row)
                json.loads(fields.pop(3))  # metadata must be valid JSON
                self.assertEqual(fields, [index + 1, 1, index + 1, 0])

            # files
            p1 = TransformSetSubDir.path(1)

            # transforms
            for transform_id in range(1, 6):
                self.assertIsInstance(
                    cv2.imread(TransformFile.path(p1, transform_id, 'jpg')),
                    np.ndarray)
# Ejemplo n.º 11 — scraper artifact separating unrelated snippets
    def test_transform_set_select_extract_adjust_color_rejected(self):
        """Rejected transforms are skipped when adjusting color."""
        with deepstar_path():
            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                route_handler = VideoCommandLineRouteHandler()

                video_0001 = os.path.dirname(os.path.realpath(
                    __file__)) + '/../../support/video_0001.mp4'  # noqa

                route_handler.insert_file(video_0001)
                route_handler.select_extract([1])

                FrameSetCommandLineRouteHandler().select_extract(
                    [1], 'transform_set', {})  # noqa

                # reject the last of the five transforms
                TransformModel().update(5, rejected=1)

            AdjustColorTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {
                'r': '+10',
                'g': '-10',
                'b': '+10'
            })  # noqa

            # db
            self.assertEqual(TransformSetModel().select(2),
                             (2, 'adjust_color', 1, 1))

            rows = TransformModel().list(2)
            self.assertEqual(len(rows), 4)
            for index, row in enumerate(rows):
                self.assertEqual(row, (index + 6, 2, index + 1, None, 0))

            # files
            p1 = TransformSetSubDir.path(2)

            # transforms
            for transform_id in range(6, 10):
                self.assertTrue(
                    os.path.isfile(TransformFile.path(p1, transform_id,
                                                      'jpg')))
    def transform_set_select_curate(self, transform_set_id, opts):
        """
        This method automatically curates a transform set and rejects
        transforms that are more blurry than the 'max-blur'.

        Blurriness is scored as the variance of the Laplacian of the
        grayscale image; a score below ``max-blur`` causes rejection.

        :param int transform_set_id: The transform set ID.
        :param dict opts: The dict of options. Requires 'max-blur'
            (parsed as float).
        :raises: ValueError
        :rtype: None
        """

        if 'max-blur' not in opts:
            raise ValueError(
                'The max-blur option is required but was not supplied')

        max_blur = float(opts['max-blur'])

        transform_model = TransformModel()
        length = 100
        offset = 0
        p1 = TransformSetSubDir.path(transform_set_id)

        # page through the transform set rather than loading it all at once
        while True:
            transforms = transform_model.list(transform_set_id,
                                              length=length,
                                              offset=offset)
            if not transforms:
                break

            for transform in transforms:
                p2 = TransformFile.path(p1, transform[0], 'jpg')

                debug(f'Curating transform with ID {transform[0]:08d} at {p2}',
                      4)

                image = cv2.imread(p2)

                image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

                h, w = image.shape[:2]

                # The blur score is scale-sensitive; the recommendation is to
                # score images at roughly 500px. imutils.resize preserves the
                # aspect ratio but honors only ONE of width/height (width wins
                # when both are passed), so resize along the larger dimension.
                # Previously `width=500, height=500` was passed, which scaled
                # tall narrow images UP by width instead of down.
                if h > 600 or w > 600:
                    if h >= w:
                        image = imutils.resize(image, height=500)
                    else:
                        image = imutils.resize(image, width=500)

                score = cv2.Laplacian(image, cv2.CV_64F).var()

                if score < max_blur:
                    transform_model.update(transform[0], rejected=1)

                    debug(f'Transform with ID {transform[0]:08d} rejected', 4)

            offset += length
    def list(self, transform_set_id):
        """
        This method lists transforms in the transform collection for a
        transform set.

        :param int transform_set_id: The transform set ID.
        :raises: CommandLineRouteHandlerError
        :rtype: None
        """

        if TransformSetModel().select(transform_set_id) is None:
            raise CommandLineRouteHandlerError(
                f'Transform set with ID {transform_set_id:08d} not found')

        transform_model = TransformModel()

        count = transform_model.count(transform_set_id)

        # header
        debug(f'{count} results', 3)
        debug(
            'id | fk_transform_sets | fk_frames | metadata | rejected | '
            '(width | height)', 3)
        debug(
            '-----------------------------------------------------------'
            '----------------', 3)

        if count == 0:
            return

        length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        offset = 0
        p1 = TransformSetSubDir.path(transform_set_id)

        # page through the transforms, printing one row per transform
        while True:
            batch = transform_model.list(transform_set_id,
                                         length=length,
                                         offset=offset)

            if not batch:
                break

            for transform in batch:
                p2 = TransformFile.path(p1, transform[0], 'jpg')

                # the image is read solely to report its dimensions
                height, width = cv2.imread(p2).shape[:2]

                debug(
                    f'{transform[0]} | {transform[1]} | {transform[2]} | '
                    f'{transform[3]} | {transform[4]} | ({width} | '
                    f'{height})', 3)

            offset += length
    def test_transform_set_select_extract_slice(self):
        """Slicing frames 2..4 yields a three-transform set."""
        with deepstar_path():
            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                route_handler = VideoCommandLineRouteHandler()

                video_0001 = os.path.dirname(os.path.realpath(
                    __file__)) + '/../../support/video_0001.mp4'  # noqa

                route_handler.insert_file(video_0001)
                route_handler.select_extract([1])

                FrameSetCommandLineRouteHandler().select_extract(
                    [1], 'transform_set', {})  # noqa

            SliceTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {
                'start': '2',
                'end': '4'
            })  # noqa

            # db
            self.assertEqual(TransformSetModel().select(2),
                             (2, 'slice', 1, 1))

            rows = TransformModel().list(2)
            self.assertEqual(len(rows), 3)
            for index, row in enumerate(rows):
                self.assertEqual(row, (index + 6, 2, index + 2, None, 0))

            # files
            p1 = TransformSetSubDir.path(2)

            # transforms
            for transform_id in (6, 7, 8):
                self.assertTrue(
                    os.path.isfile(TransformFile.path(p1, transform_id,
                                                      'jpg')))
# Ejemplo n.º 15 — scraper artifact separating unrelated snippets
    def mock_transform_set(self):
        """Create transform set 1 with five transforms alternating between
        the two support images."""
        support = os.path.dirname(os.path.realpath(
            __file__)) + '/../../support'  # noqa

        sources = [support + '/image_0001.jpg', support + '/image_0002.jpg']

        TransformSetModel().insert('face', 1, None)

        p1 = TransformSetSubDir.path(1)
        os.mkdir(p1)

        transform_model = TransformModel()

        # frames 1..5 alternate image_0001, image_0002, image_0001, ...
        for frame_id in range(1, 6):
            transform_model.insert(1, frame_id, '{}', 0)
            shutil.copy(sources[(frame_id - 1) % 2],
                        TransformFile.path(p1, frame_id, 'jpg'))
# Ejemplo n.º 16 — scraper artifact separating unrelated snippets
    def test_transform_set_select_merge_fade(self):
        """Fading between two sets inserts two blended transforms in the
        middle of the merged set."""
        with deepstar_path():
            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                video_route_handler = VideoCommandLineRouteHandler()

                video_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/video_0001.mp4'  # noqa

                video_route_handler.insert_file(video_0001)
                video_route_handler.select_extract([1])

                frame_set_route_handler = FrameSetCommandLineRouteHandler()

                # build two identical transform sets to fade between
                frame_set_route_handler.select_extract(
                    [1], 'transform_set', {})
                frame_set_route_handler.select_extract(
                    [1], 'transform_set', {})

            FadeTransformSetSelectMergePlugin().transform_set_select_merge([1, 2], {'frame-count': '2'})  # noqa

            # db
            self.assertEqual(TransformSetModel().select(3),
                             (3, 'fade', None, None))

            rows = TransformModel().list(3)
            self.assertEqual(len(rows), 8)
            # transforms 14 and 15 are the blended ones with no source frame
            expected_frames = [1, 2, 3, None, None, 3, 4, 5]
            for index, frame in enumerate(expected_frames):
                self.assertEqual(rows[index], (index + 11, 3, frame, None, 0))

            # files
            p1 = TransformSetSubDir.path(3)

            # transforms
            for transform_id in range(11, 19):
                self.assertIsInstance(
                    cv2.imread(TransformFile.path(p1, transform_id, 'jpg')),
                    np.ndarray)
    def frame_set_select_extract(self, frame_set_id, opts):
        """
        This method extracts a frame set to a transform set.

        :param int frame_set_id: The frame set ID.
        :param dict opts: The dict of options.
        :rtype: int
        """

        transform_set_id = TransformSetModel().insert('transform_set',
                                                      frame_set_id)

        p1 = TransformSetSubDir.path(transform_set_id)

        os.mkdir(p1)

        frame_model = FrameModel()
        transform_model = TransformModel()
        length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        offset = 0
        p2 = FrameSetSubDir.path(frame_set_id)

        # page through the non-rejected frames, copying each to the new
        # transform set
        while True:
            batch = frame_model.list(frame_set_id, length=length,
                                     offset=offset, rejected=False)

            if not batch:
                break

            for frame in batch:
                transform_id = transform_model.insert(transform_set_id,
                                                      frame[0], None, 0)

                src = FrameFile.path(p2, frame[0], 'jpg')
                dst = TransformFile.path(p1, transform_id, 'jpg')

                shutil.copy(src, dst)

                debug(f'Transform with ID {transform_id:08d} at {dst} '
                      f'extracted from frame with ID {frame[0]:08d} at '
                      f'{src}', 4)

            offset += length

        return transform_set_id
# Ejemplo n.º 18 — scraper artifact separating unrelated snippets
    def transform_set_select_curate(self, transform_set_id, opts):
        """
        This method automatically curates a transform set and rejects
        transforms with width or height less than 'min-size'.

        :param int transform_set_id: The transform set ID.
        :param dict opts: The dict of options.
        :raises: ValueError
        :rtype: None
        """

        if 'min-size' not in opts:
            raise ValueError(
                'The min-size option is required but was not supplied')

        min_length = int(opts['min-size'])

        transform_model = TransformModel()
        batch_size = 100
        offset = 0
        set_path = TransformSetSubDir.path(transform_set_id)

        # page through the transform set rather than loading it all at once
        while True:
            batch = transform_model.list(transform_set_id,
                                         length=batch_size,
                                         offset=offset)
            if not batch:
                break

            for transform in batch:
                transform_id = transform[0]
                image_path = TransformFile.path(set_path, transform_id, 'jpg')

                debug(f'Curating transform with ID {transform_id:08d} at '
                      f'{image_path}', 4)

                height, width = cv2.imread(image_path).shape[:2]

                if height < min_length or width < min_length:
                    transform_model.update(transform_id, rejected=1)

                    debug(f'Transform with ID {transform_id:08d} rejected', 4)

            offset += batch_size
Ejemplo n.º 19
0
            def image_paths():
                """
                Yield the file path of every non-rejected transform in the
                transform set, paging through the model one batch at a time.

                Relies on transform_set_id, transform_model, length and
                video_path from the enclosing scope.

                :rtype: generator(str)
                """

                offset = 0
                p1 = TransformSetSubDir.path(transform_set_id)

                while True:
                    transforms = transform_model.list(transform_set_id,
                                                      length=length,
                                                      offset=offset,
                                                      rejected=False)

                    if not transforms:
                        break

                    for transform in transforms:
                        image_path = TransformFile.path(
                            p1, transform[0], 'jpg')

                        # The debug line runs after control returns from the
                        # consumer, so it reports the export after the fact.
                        # NOTE(review): presumably the consumer writes the
                        # image to video_path — confirm against the caller.
                        yield image_path

                        debug(
                            f'Transform with ID {transform[0]:08d} at '
                            f'{image_path} exported to {video_path}', 4)

                    offset += length
Ejemplo n.º 20
0
    def test_transform_set_select_extract_pad_size(self):
        with deepstar_path():
            video_0001 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/video_0001.mp4'  # noqa

            shutil.copyfile(video_0001, VideoFile.path('video_0001.mp4'))

            VideoModel().insert('test', 'video_0001.mp4')

            DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

            self.mock_transform_set()

            MaxSizeTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {'max-size': '200'})  # noqa

            with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
                PadTransformSetSelectExtractPlugin(
                ).transform_set_select_extract(2, {'size': '200'})  # noqa

            # db
            self.assertEqual(TransformSetModel().select(3), (3, 'pad', 1, 2))

            transforms = TransformModel().list(3)
            self.assertEqual(len(transforms), 5)

            # transform IDs 11-15 map to frame IDs 1-5 in transform set 3
            for index, transform_id in enumerate(range(11, 16)):
                row = list(transforms[index])
                json.loads(row.pop(3))
                self.assertEqual(row, [transform_id, 3, index + 1, 0])

            # files
            p1 = TransformSetSubDir.path(3)

            # transforms - every padded image must be exactly 200x200
            for transform_id in range(11, 16):
                image = cv2.imread(TransformFile.path(p1, transform_id,
                                                      'jpg'))
                self.assertEqual(image.shape[:2], (200, 200))
    def transform_set_select_extract(self, transform_set_id, opts):
        """
        This method adjusts color for each transform in a transform set.

        :param int transform_set_id: The transform set ID.
        :param dict opts: The dict of options. The r, g and b options each
            take a value formatted as +/- followed by a number
            (e.g. --r=+10); at least one of them must be supplied.
        :raises: ValueError
        :rtype: int
        """

        r = opts.get('r', None)
        g = opts.get('g', None)
        b = opts.get('b', None)

        if r is None and g is None and b is None:
            raise ValueError(
                'The r, g and/or b options are required but were not supplied')

        for channel in [r, g, b]:
            if channel is not None:
                # A value must be a sign followed by digits. The previous
                # pattern ('^[+,\\-]\\d+$') also accepted a comma as the
                # "sign", silently treating e.g. ',10' as a decrement.
                if not re.match(r'^[+-]\d+$', channel):
                    raise ValueError(
                        'A color adjustment option value must be formatted as '
                        '+/- followed by a number (e.g. --r=+10)')

        # Per-channel adjustments in OpenCV's BGR channel order, each as
        # [channel index, magnitude, True to add / False to subtract].
        color_adjustments = []

        for index, channel in enumerate([b, g, r]):
            if channel is not None:
                color_adjustments.append(
                    [index, int(channel[1:]), channel[:1] == '+'])
            else:
                color_adjustments.append(None)

        transform_set_model = TransformSetModel()

        result = transform_set_model.select(transform_set_id)

        transform_set_id_ = TransformSetModel().insert('adjust_color',
                                                       result[2],
                                                       transform_set_id)

        p1 = TransformSetSubDir.path(transform_set_id_)

        os.makedirs(p1)

        p2 = TransformSetSubDir.path(transform_set_id)
        transform_model = TransformModel()
        length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        offset = 0

        while True:
            transforms = transform_model.list(transform_set_id, length=length,
                                              offset=offset, rejected=False)

            if not transforms:
                break

            for transform in transforms:
                transform_id = transform_model.insert(transform_set_id_,
                                                      transform[2],
                                                      transform[3],
                                                      transform[4])

                p3 = TransformFile.path(p2, transform[0], 'jpg')
                p4 = TransformFile.path(p1, transform_id, 'jpg')

                image = cv2.imread(p3)

                # widen to a signed type so adjustments can go below 0 or
                # above 255 without wrapping
                image = image.astype(np.short)

                for color_adjustment in color_adjustments:
                    if color_adjustment is not None:
                        image = adjust_color(image, color_adjustment[0],
                                             color_adjustment[1],
                                             color_adjustment[2])

                cv2.imwrite(p4, image, [cv2.IMWRITE_JPEG_QUALITY, 100])

                debug(f'Transform with ID {transform_id:08d} at {p4} '
                      f'extracted from transform with ID {transform[0]:08d} '
                      f'at {p3}', 4)

            offset += length

        return transform_set_id_
Ejemplo n.º 22
0
    def transform_set_select_extract(self, transform_set_id, opts):
        """
        This method crops each transform in a transform set.

        :param int transform_set_id: The transform set ID.
        :param dict opts: The dict of options (x1, y1, x2 and y2 are all
            required crop coordinates).
        :raises: ValueError
        :rtype: int
        """

        coords = {key: (int(opts[key]) if (key in opts) else None)
                  for key in ('x1', 'y1', 'x2', 'y2')}

        if None in coords.values():
            raise ValueError(
                'The x1, y1, x2 and y2 options are required but were not '
                'supplied')

        x1, y1 = coords['x1'], coords['y1']
        x2, y2 = coords['x2'], coords['y2']

        result = TransformSetModel().select(transform_set_id)

        new_set_id = TransformSetModel().insert('crop', result[2],
                                                transform_set_id)

        dst_dir = TransformSetSubDir.path(new_set_id)

        os.makedirs(dst_dir)

        src_dir = TransformSetSubDir.path(transform_set_id)
        transform_model = TransformModel()
        page_size = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        page_offset = 0

        while True:
            page = transform_model.list(transform_set_id,
                                        length=page_size,
                                        offset=page_offset,
                                        rejected=False)

            if not page:
                break

            for row in page:
                new_id = transform_model.insert(new_set_id, row[2], row[3],
                                                row[4])

                src = TransformFile.path(src_dir, row[0], 'jpg')
                dst = TransformFile.path(dst_dir, new_id, 'jpg')

                # crop via numpy slicing: rows are Y, columns are X
                cropped = cv2.imread(src)[y1:y2, x1:x2]

                cv2.imwrite(dst, cropped, [cv2.IMWRITE_JPEG_QUALITY, 100])

                debug(
                    f'Transform with ID {new_id:08d} at {dst} '
                    f'extracted from transform with ID {row[0]:08d} '
                    f'at {src}', 4)

            page_offset += page_size

        return new_set_id
Ejemplo n.º 23
0
    def transform_set_select_merge(self, transform_set_ids, opts):
        """
        This method merges transform sets overlaying transform set 1 onto
        transform set 2 at a specified position.

        :param list(int) transform_set_ids: The transform set IDs. 'x1' and
            'y1' in opts give the top-left corner at which each transform
            from set 1 is pasted onto the matching transform from set 2.
        :param dict opts: The dict of options.
        :raises: ValueError
        :rtype: int
        """

        if len(transform_set_ids) != 2:
            raise ValueError('Exactly two transform set IDs must be supplied')

        x1 = int(opts['x1']) if ('x1' in opts) else None
        y1 = int(opts['y1']) if ('y1' in opts) else None

        if x1 is None or y1 is None:
            raise ValueError(
                'The x1 and y1 options are required but were not supplied')

        transform_set_id_1 = transform_set_ids[0]
        transform_set_id_2 = transform_set_ids[1]

        transform_model = TransformModel()

        # the sets are paired one-to-one, so their non-rejected counts must
        # match
        transform_set_1_count = transform_model.count(transform_set_id_1,
                                                      rejected=False)
        transform_set_2_count = transform_model.count(transform_set_id_2,
                                                      rejected=False)

        if transform_set_1_count != transform_set_2_count:
            raise ValueError(
                'Both transform sets must have the same number of '
                'non-rejected transforms (be the same length)')

        transform_set_id = TransformSetModel().insert('overlay', None, None)

        p1 = TransformSetSubDir.path(transform_set_id)

        os.makedirs(p1)

        p2 = TransformSetSubDir.path(transform_set_id_1)
        p3 = TransformSetSubDir.path(transform_set_id_2)
        length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        offset = 0

        while True:
            # page through both sets in lockstep; the equal-count check above
            # guarantees transforms_2[i] lines up with transforms_1[i]
            transforms_1 = transform_model.list(transform_set_id_1,
                                                length=length,
                                                offset=offset,
                                                rejected=False)
            transforms_2 = transform_model.list(transform_set_id_2,
                                                length=length,
                                                offset=offset,
                                                rejected=False)

            if not transforms_1:
                break

            for i in range(0, len(transforms_1)):
                transform_id_1 = transforms_1[i][0]
                transform_id_2 = transforms_2[i][0]

                image_path_1 = TransformFile.path(p2, transform_id_1, 'jpg')
                image_path_2 = TransformFile.path(p3, transform_id_2, 'jpg')

                # the merged transform has no source frame or metadata
                transform_id = transform_model.insert(transform_set_id, None,
                                                      None, 0)

                image_path_3 = TransformFile.path(p1, transform_id, 'jpg')

                image_1 = cv2.imread(image_path_1)

                height_1, width_1 = image_1.shape[:2]

                image_2 = cv2.imread(image_path_2)

                # paste image_1 onto image_2 with its top-left at (x1, y1).
                # NOTE(review): if the overlay extends past image_2's edges
                # this slice assignment raises ValueError — confirm callers
                # validate x1/y1 against the image sizes.
                image_2[y1:y1 + height_1, x1:x1 + width_1] = image_1

                cv2.imwrite(image_path_3, image_2,
                            [cv2.IMWRITE_JPEG_QUALITY, 100])

                debug(
                    f'Transforms with ID {transform_id_1:08d} at '
                    f'{image_path_1} and {transform_id_2:08d} at '
                    f'{image_path_2} merged as ID {transform_id:08d} at '
                    f'{image_path_3}', 4)

            offset += length

        return transform_set_id
    def _get_mouth(self, transform_set_path, transform_id, frame_id, metadata,
                   target_set_id):
        """
        This method extracts a square cropping of a mouth.

        :param str transform_set_path: The transform set path.
        :param int transform_id: The transform ID.
        :param int frame_id: The frame ID.
        :param dict metadata: Metadata for the transform. Must contain a
            'face' entry with 'mouth_left'/'mouth_right' keypoints; a 'mouth'
            bounding box is added to it before it is persisted.
        :param int target_set_id: The new transform set ID.
        :rtype: None
        """

        face_pts = metadata.get('face')
        if face_pts is None:
            # no face keypoints for this transform; nothing to extract
            return

        img = cv2.imread(TransformFile().path(transform_set_path, transform_id,
                                              'jpg'))
        img_height, img_width = img.shape[:2]

        # identify the right and left X values for the mouth crop, padding
        # each side by half of offset_percent of the mouth width
        m_right_x = face_pts['mouth_right'][0]
        m_left_x = face_pts['mouth_left'][0]
        mouth_width = m_right_x - m_left_x
        left_x = int(m_left_x - (self._offset_percent * mouth_width / 2))
        right_x = int(m_right_x + (self._offset_percent * mouth_width / 2))
        if left_x < 0:
            left_x = 0
        if right_x > img_width:
            right_x = img_width
        if (right_x - left_x) % 2 != 0:
            # keep the crop width even
            right_x -= 1

        # identify the upper and lower Y values for the mouth crop
        m_right_y = face_pts['mouth_right'][1]
        m_left_y = face_pts['mouth_left'][1]
        mouth_height = abs(m_right_y - m_left_y)
        if mouth_height % 2 != 0:
            mouth_height -= 1

        crop_mouth_height = (right_x - left_x) - mouth_height

        top_y = int(min(m_right_y, m_left_y) - (crop_mouth_height / 2))
        bottom_y = int(max(m_right_y, m_left_y) + (crop_mouth_height / 2))

        # check that we are within frame boundaries
        if top_y < 0:
            top_y = 0
        if bottom_y > img_height:
            bottom_y = img_height

        # guarantee that we extract a square cropping

        # Shrink: trimming the span is always possible, so fall back to
        # moving the bottom edge up unconditionally. The previous guard
        # (elif bottom_y < img_height) could never fire once bottom_y had
        # been clamped to img_height with top_y at 0, looping forever.
        while (bottom_y - top_y) > (right_x - left_x):
            if top_y > 0:
                top_y += 1
            else:
                bottom_y -= 1

        # Grow: if neither edge can move (the crop already spans the full
        # image height), bail out instead of spinning forever; the crop is
        # then as close to square as the frame allows.
        while (bottom_y - top_y) < (right_x - left_x):
            if top_y > 0:
                top_y -= 1
            elif bottom_y < img_height:
                bottom_y += 1
            else:
                break

        mouth_img = img[top_y:bottom_y, left_x:right_x]

        metadata['mouth'] = {'box': [(left_x, top_y), (right_x, bottom_y)]}
        target_id = TransformModel().insert(target_set_id, frame_id,
                                            json.dumps(metadata), 0)
        output_path = TransformFile.path(
            TransformSetSubDir.path(target_set_id), target_id, 'jpg')
        cv2.imwrite(output_path, mouth_img)
Ejemplo n.º 25
0
    def transform_set_select_merge(self, transform_set_ids, opts):
        """
        This method merges one transform set with one image. This is to say
        that one image is overlaid onto every transform in a transform set and
        at a specified position.

        :param list(int) transform_set_ids: The transform set IDs (exactly
            one).
        :param dict opts: The dict of options (image-path, x1 and y1 are
            required).
        :raises: ValueError
        :rtype: int
        """

        if len(transform_set_ids) != 1:
            raise ValueError('Exactly one transform set ID must be supplied')

        overlay_path = opts.get('image-path', None)
        x1 = int(opts['x1']) if ('x1' in opts) else None
        y1 = int(opts['y1']) if ('y1' in opts) else None

        if overlay_path is None or x1 is None or y1 is None:
            raise ValueError(
                'The image-path, x1 and y1 options are required but were not '
                'supplied')

        if not os.path.isfile(overlay_path):
            raise ValueError(
                f'The image path {overlay_path} does not exist or is not a '
                f'file')

        source_set_id = transform_set_ids[0]

        transform_model = TransformModel()

        merged_set_id = TransformSetModel().insert('overlay_image', None,
                                                   None)

        dst_dir = TransformSetSubDir.path(merged_set_id)

        os.makedirs(dst_dir)

        src_dir = TransformSetSubDir.path(source_set_id)
        page_size = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        page_offset = 0

        # read once; IMREAD_UNCHANGED preserves any alpha channel
        overlay = cv2.imread(overlay_path, cv2.IMREAD_UNCHANGED)

        while True:
            page = transform_model.list(source_set_id,
                                        length=page_size,
                                        offset=page_offset,
                                        rejected=False)

            if not page:
                break

            for row in page:
                merged_id = transform_model.insert(merged_set_id, None,
                                                   None, 0)

                src = TransformFile.path(src_dir, row[0], 'jpg')

                merged = overlay_transparent_image(cv2.imread(src), overlay,
                                                   x1, y1)

                dst = TransformFile.path(dst_dir, merged_id, 'jpg')

                cv2.imwrite(dst, merged, [cv2.IMWRITE_JPEG_QUALITY, 100])

                debug(
                    f'{overlay_path} and transform with ID '
                    f'{row[0]:08d} at {src} merged as ID '
                    f'{merged_id:08d} at {dst}', 4)

            page_offset += page_size

        return merged_set_id
    def transform_set_select_extract(self, transform_set_id, opts):
        """
        This method resizes each transform in a transform set.

        :param int transform_set_id: The transform set ID.
        :param dict opts: The dict of options (height and/or width; width
            takes precedence when both are supplied).
        :raises: ValueError
        :rtype: int
        """

        target_height = int(opts['height']) if ('height' in opts) else None
        target_width = int(opts['width']) if ('width' in opts) else None

        if target_height is None and target_width is None:
            raise ValueError(
                'The height or width options are required but were not '
                'supplied')

        result = TransformSetModel().select(transform_set_id)

        new_set_id = TransformSetModel().insert('resize', result[2],
                                                transform_set_id)

        dst_dir = TransformSetSubDir.path(new_set_id)

        os.makedirs(dst_dir)

        src_dir = TransformSetSubDir.path(transform_set_id)
        transform_model = TransformModel()
        page_size = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        page_offset = 0

        while True:
            page = transform_model.list(transform_set_id,
                                        length=page_size,
                                        offset=page_offset,
                                        rejected=False)

            if not page:
                break

            for row in page:
                new_id = transform_model.insert(new_set_id, row[2], row[3],
                                                row[4])

                src = TransformFile.path(src_dir, row[0], 'jpg')
                dst = TransformFile.path(dst_dir, new_id, 'jpg')

                source_image = cv2.imread(src)

                # imutils.resize preserves the aspect ratio; width wins when
                # both dimensions were supplied
                if target_width is not None:
                    resized = imutils.resize(source_image, width=target_width)
                else:
                    resized = imutils.resize(source_image,
                                             height=target_height)

                cv2.imwrite(dst, resized, [cv2.IMWRITE_JPEG_QUALITY, 100])

                debug(
                    f'Transform with ID {new_id:08d} at {dst} '
                    f'extracted from transform with ID {row[0]:08d} '
                    f'at {src}', 4)

            page_offset += page_size

        return new_set_id
 def get(self, transform_set_id, transform_id):
     """Serve a transform's JPEG file from its transform set directory."""

     directory = TransformSetSubDir.path(transform_set_id)
     file_name = TransformFile.name(transform_id, 'jpg')

     return send_from_directory(directory, file_name)
Ejemplo n.º 28
0
    def transform_set_select_merge(self, transform_set_ids, opts):
        """
        This method merges transform sets w/ a fade effect applied.

        The output has three phases: transform set 1 up to its final
        'frame-count' transforms, then a cross-fade of set 1's final
        'frame-count' transforms with set 2's first 'frame-count' transforms,
        then the remainder of transform set 2.

        :param list(int) transform_set_ids: The transform set IDs.
        :param dict opts: The dict of options. 'frame-count' (required) is
            the number of transforms over which to fade.
        :raises: ValueError
        :rtype: int
        """

        if len(transform_set_ids) != 2:
            raise ValueError('Exactly two transform set IDs must be supplied')

        if 'frame-count' not in opts:
            raise ValueError(
                'The frame-count option is required but was not supplied')

        frame_count = int(opts['frame-count'])

        if frame_count < 1:
            raise ValueError('Frame count must be 1 or greater')

        transform_set_id_1 = transform_set_ids[0]
        transform_set_id_2 = transform_set_ids[1]

        transform_model = TransformModel()

        transform_set_1_count = transform_model.count(transform_set_id_1,
                                                      rejected=False)
        transform_set_2_count = transform_model.count(transform_set_id_2,
                                                      rejected=False)

        # each set must have transforms left over outside the faded region
        if transform_set_1_count <= frame_count or \
           transform_set_2_count <= frame_count:
            raise ValueError(
                'Both transform sets must be greater than frame count')

        transform_set_id = TransformSetModel().insert('fade', None, None)

        p1 = TransformSetSubDir.path(transform_set_id)

        os.makedirs(p1)

        p2 = TransformSetSubDir.path(transform_set_id_1)
        p3 = TransformSetSubDir.path(transform_set_id_2)
        length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        offset = 0
        flag = True

        # Phase 1: copy transforms from set 1 until exactly frame_count of
        # them remain. Note that offset advances by one per copied transform
        # (not per page), so it doubles as the running copy count and as the
        # list offset of the next page.
        while flag:
            transforms = transform_model.list(transform_set_id_1,
                                              length=length,
                                              offset=offset,
                                              rejected=False)

            for transform in transforms:
                transform_id = transform_model.insert(transform_set_id,
                                                      transform[2],
                                                      transform[3],
                                                      transform[4])

                p4 = TransformFile.path(p2, transform[0], 'jpg')
                p5 = TransformFile.path(p1, transform_id, 'jpg')

                shutil.copy(p4, p5)

                debug(
                    f'Transform with ID {transform[0]:08d} at {p4} '
                    f'merged as ID {transform_id:08d} at {p5}', 4)

                offset += 1

                if transform_set_1_count - offset == frame_count:
                    flag = False
                    break

        # Phase 2: cross-fade the last frame_count transforms of set 1 with
        # the first frame_count transforms of set 2.
        transforms_1 = transform_model.list(transform_set_id_1,
                                            length=frame_count,
                                            offset=offset,
                                            rejected=False)
        transforms_2 = transform_model.list(transform_set_id_2,
                                            length=frame_count,
                                            offset=0,
                                            rejected=False)

        for i in range(0, frame_count):
            transform_id_1 = transforms_1[i][0]
            transform_id_2 = transforms_2[i][0]

            image_path_1 = TransformFile.path(p2, transform_id_1, 'jpg')
            image_path_2 = TransformFile.path(p3, transform_id_2, 'jpg')

            # faded transforms have no single source frame or metadata
            transform_id = transform_model.insert(transform_set_id, None, None,
                                                  0)

            image_path_3 = TransformFile.path(p1, transform_id, 'jpg')

            image_1 = cv2.imread(image_path_1)
            image_2 = cv2.imread(image_path_2)
            # alpha decreases toward 0.0 so set 1 fades out while set 2
            # fades in
            alpha = 1.0 - float(i + 1) / float(frame_count)
            image_3 = cv2.addWeighted(image_1, alpha, image_2, 1.0 - alpha, 0)

            cv2.imwrite(image_path_3, image_3, [cv2.IMWRITE_JPEG_QUALITY, 100])

            debug(
                f'Transforms with ID {transform_id_1:08d} at {image_path_1} '
                f'and {transform_id_2:08d} at {image_path_2} merged with '
                f'alpha {alpha} as ID {transform_id:08d} at {image_path_3}', 4)

        # Phase 3: copy the remainder of set 2 (everything after the faded
        # prefix). As above, offset advances by one per copied transform.
        offset = frame_count

        while True:
            transforms = transform_model.list(transform_set_id_2,
                                              length=length,
                                              offset=offset,
                                              rejected=False)

            if not transforms:
                break

            for transform in transforms:
                transform_id = transform_model.insert(transform_set_id,
                                                      transform[2],
                                                      transform[3],
                                                      transform[4])

                p4 = TransformFile.path(p3, transform[0], 'jpg')
                p5 = TransformFile.path(p1, transform_id, 'jpg')

                shutil.copy(p4, p5)

                debug(
                    f'Transform with ID {transform[0]:08d} at {p4} '
                    f'merged as ID {transform_id:08d} at {p5}', 4)

                offset += 1

        return transform_set_id
    def transform_set_select_extract(self, transform_set_id, opts):
        """
        This method extracts a slice from a transform set (a subset).

        :param int transform_set_id: The transform set ID.
        :param dict opts: The dict of opts (start and end transform IDs,
            inclusive).
        :raises: ValueError
        :rtype: int
        """

        start = int(opts['start']) if ('start' in opts) else None
        end = int(opts['end']) if ('end' in opts) else None

        if start is None or end is None:
            raise ValueError(
                'The start and end options are required but were not '
                'supplied')

        result = TransformSetModel().select(transform_set_id)

        slice_set_id = TransformSetModel().insert('slice', result[2],
                                                  transform_set_id)

        dst_dir = TransformSetSubDir.path(slice_set_id)

        os.makedirs(dst_dir)

        src_dir = TransformSetSubDir.path(transform_set_id)
        transform_model = TransformModel()
        page_size = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
        page_offset = 0
        done = False

        while not done:
            page = transform_model.list(transform_set_id,
                                        length=page_size,
                                        offset=page_offset,
                                        rejected=False)

            if not page:
                break

            for row in page:
                # the slice bounds are transform IDs, not list positions
                if row[0] < start:
                    continue

                if row[0] > end:
                    done = True
                    break

                new_id = transform_model.insert(slice_set_id, row[2], row[3],
                                                row[4])

                src = TransformFile.path(src_dir, row[0], 'jpg')
                dst = TransformFile.path(dst_dir, new_id, 'jpg')

                shutil.copy(src, dst)

                debug(
                    f'Transform with ID {new_id:08d} at {dst} '
                    f'extracted from transform with ID {row[0]:08d} '
                    f'at {src}', 4)

            page_offset += page_size

        return slice_set_id
Ejemplo n.º 30
0
    def _extract_faces(self, frame_set_path, frame_id, transform_set_path,
                       transform_set_id, detector, offset_percent,
                       min_confidence, debug_):
        """
        This method extracts faces from a frame.

        :param str frame_set_path: The frame set path.
        :param int frame_id: The frame ID.
        :param str transform_set_path: The transform set path.
        :param int transform_set_id: The transform set ID.
        :param MTCNN detector: The detector to use to detect faces.
        :param float offset_percent: Fraction of the detected box's width and
            height added as margin around the face crop (half on each side).
        :param float min_confidence: The minimum confidence value required to
            accept/reject a detected face.
        :param bool debug_: True if should place markers on landmarks else
            False if should not.
        :rtype: None
        """

        frame_path = FrameFile.path(frame_set_path, frame_id, 'jpg')
        img = cv2.imread(frame_path)
        img_height, img_width = img.shape[:2]

        results = detector.detect_faces(img)
        for r in results:
            # skip low-confidence detections
            if r['confidence'] < min_confidence:
                continue

            x, y, width, height = r['box']

            # grow the detection box by offset_percent, clamped to the frame
            adjusted_x = int(max(0, x - (0.5 * width * offset_percent)))
            adjusted_y = int(max(0, y - (0.5 * height * offset_percent)))
            t = x + width + (0.5 * width * offset_percent)
            adjusted_right_x = int(min(img_width, t))
            t = y + height + (0.5 * height * offset_percent)
            adjusted_bottom_y = int(min(img_height, t))

            # translate the facial keypoints into crop-local coordinates
            metadata = {
                'face': {
                    k: [v[0] - adjusted_x, v[1] - adjusted_y]
                    for k, v in r['keypoints'].items()
                }
            }

            transform_id = TransformModel().insert(transform_set_id, frame_id,
                                                   json.dumps(metadata), 0)

            face_crop = img[adjusted_y:adjusted_bottom_y,
                            adjusted_x:adjusted_right_x]
            output_path = TransformFile.path(transform_set_path, transform_id,
                                             'jpg')

            if debug_ is True:
                # draw a red marker on each landmark for visual inspection
                for _, v in metadata['face'].items():
                    cv2.drawMarker(face_crop,
                                   tuple(v), (0, 0, 255),
                                   markerType=cv2.MARKER_DIAMOND,
                                   markerSize=15,
                                   thickness=2)

            cv2.imwrite(output_path, face_crop,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])

            debug(
                f'Transform with ID {transform_id:08d} at {output_path} '
                f'extracted from frame with ID {frame_id:08d} at '
                f'{frame_path}', 4)