def transform_set_select_extract(self, transform_set_id, opts):
    """
    This method extracts just the mouth region from each face transform in a
    transform set.

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of options.
    :rtype: int
    """

    self._offset_percent = int(opts.get('offset-percent', 20)) / 100

    transform_set_path = TransformSetSubDir.path(transform_set_id)

    _, _, frame_set_id, _ = TransformSetModel().select(transform_set_id)

    target_set_id = TransformSetModel().insert(f'{self.name}', frame_set_id,
                                               transform_set_id)

    target_path = TransformSetSubDir.path(target_set_id)

    os.makedirs(target_path)

    result = TransformModel().list(transform_set_id)

    for transform_id, _, frame_id, metadata, rejected in result:
        if rejected == 1:
            continue

        metadata = json.loads(metadata)

        self._get_mouth(transform_set_path, transform_id, frame_id, metadata,
                        target_set_id)

    return target_set_id
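# Usage sketch (illustrative, not part of the original module): how this
# mouth-extraction plugin might be invoked. `mouth_plugin` is a placeholder
# for an instance of this plugin class (the class name is not shown in this
# section); 'offset-percent' falls back to 20 when omitted, as the code above
# shows.
mouth_set_id = mouth_plugin.transform_set_select_extract(
    1, {'offset-percent': '20'})  # returns the new transform set's ID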
def mock_transform_set(self):
    image_0001 = os.path.dirname(os.path.realpath(
        __file__)) + '/../../support/image_0001.jpg'  # noqa
    image_0003 = os.path.dirname(os.path.realpath(
        __file__)) + '/../../support/image_0003.jpg'  # 200-blurry # noqa
    image_0004 = os.path.dirname(os.path.realpath(
        __file__)) + '/../../support/image_0004.jpg'  # 200-sharp # noqa
    image_0005 = os.path.dirname(os.path.realpath(
        __file__)) + '/../../support/image_0005.jpg'  # 650-blurry # noqa
    image_0006 = os.path.dirname(os.path.realpath(
        __file__)) + '/../../support/image_0006.jpg'  # 650-sharp # noqa

    TransformSetModel().insert('face', 1, None)

    p1 = TransformSetSubDir.path(1)

    os.mkdir(p1)

    transform_model = TransformModel()

    transform_model.insert(1, 1, '{}', 0)
    shutil.copy(image_0001, TransformFile.path(p1, 1, 'jpg'))

    transform_model.insert(1, 2, '{}', 0)
    shutil.copy(image_0003, TransformFile.path(p1, 2, 'jpg'))

    transform_model.insert(1, 3, '{}', 0)
    shutil.copy(image_0004, TransformFile.path(p1, 3, 'jpg'))

    transform_model.insert(1, 4, '{}', 0)
    shutil.copy(image_0005, TransformFile.path(p1, 4, 'jpg'))

    transform_model.insert(1, 5, '{}', 0)
    shutil.copy(image_0006, TransformFile.path(p1, 5, 'jpg'))
def test_transform_set_select_extract_max_size(self):
    with deepstar_path():
        video_0001 = os.path.dirname(os.path.realpath(
            __file__)) + '/../../support/video_0001.mp4'  # noqa

        shutil.copyfile(video_0001, VideoFile.path('video_0001.mp4'))

        VideoModel().insert('test', 'video_0001.mp4')

        DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

        self.mock_transform_set()

        with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
            transform_set_id = MaxSizeTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {})  # noqa

        self.assertEqual(transform_set_id, 2)

        # db
        result = TransformSetModel().select(2)
        self.assertEqual(result, (2, 'max_size', 1, 1))

        result = TransformModel().list(2)
        self.assertEqual(len(result), 5)

        t = list(result[0])
        json.loads(t.pop(3))
        self.assertEqual(t, [6, 2, 1, 0])

        t = list(result[1])
        json.loads(t.pop(3))
        self.assertEqual(t, [7, 2, 2, 0])

        t = list(result[2])
        json.loads(t.pop(3))
        self.assertEqual(t, [8, 2, 3, 0])

        t = list(result[3])
        json.loads(t.pop(3))
        self.assertEqual(t, [9, 2, 4, 0])

        t = list(result[4])
        json.loads(t.pop(3))
        self.assertEqual(t, [10, 2, 5, 0])

        # files
        p1 = TransformSetSubDir.path(2)

        # transforms
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 6, 'jpg')).shape[0], 299)  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 7, 'jpg')).shape[0], 299)  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 8, 'jpg')).shape[0], 299)  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 9, 'jpg')).shape[0], 299)  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 10, 'jpg')).shape[0], 299)  # noqa
def _pad(self, transform_set_path, transform_id, frame_id, metadata,
         target_set_id, size):
    """
    This method pads a transform.

    :param str transform_set_path: The transform set path.
    :param int transform_id: The transform ID.
    :param int frame_id: The frame ID.
    :param str metadata: Metadata for the transform.
    :param int target_set_id: The new transform set ID.
    :param int size: The size to which to pad.
    :rtype: None
    """

    transform_path = TransformFile().path(transform_set_path, transform_id,
                                          'jpg')

    img = cv2.imread(transform_path)

    img_height, img_width = img.shape[:2]

    img_padded = np.zeros((size, size, 3), dtype=np.uint8)
    img_padded[:img_height, :img_width, :] = img.copy()

    target_id = TransformModel().insert(target_set_id, frame_id,
                                        json.dumps(metadata), 0)

    output_path = TransformFile.path(
        TransformSetSubDir.path(target_set_id), target_id, 'jpg')

    cv2.imwrite(output_path, img_padded, [cv2.IMWRITE_JPEG_QUALITY, 100])

    debug(
        f'Transform with ID {target_id:08d} at {output_path} extracted '
        f'from transform with ID {transform_id:08d} at {transform_path}', 4)
def _resize(self, transform_set_path, transform_id, frame_id, metadata,
            target_set_id, max_size):
    """
    This method resizes a transform.

    :param str transform_set_path: The transform set path.
    :param int transform_id: The transform ID.
    :param int frame_id: The frame ID.
    :param str metadata: Metadata for the transform.
    :param int target_set_id: The new transform set ID.
    :param int max_size: The max size.
    :rtype: None
    """

    transform_path = TransformFile().path(transform_set_path, transform_id,
                                          'jpg')

    img = cv2.imread(transform_path)

    img_height, img_width = img.shape[:2]

    if img_height > max_size or img_width > max_size:
        if img_height > img_width:
            img = imutils.resize(img, height=max_size)
        else:
            img = imutils.resize(img, width=max_size)

    target_id = TransformModel().insert(target_set_id, frame_id,
                                        json.dumps(metadata), 0)

    output_path = TransformFile.path(
        TransformSetSubDir.path(target_set_id), target_id, 'jpg')

    cv2.imwrite(output_path, img, [cv2.IMWRITE_JPEG_QUALITY, 100])

    debug(f'Transform with ID {target_id:08d} at {output_path} extracted '
          f'from transform with ID {transform_id:08d} at {transform_path}', 4)
def test_select_extract_face(self):
    with deepstar_path():
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            route_handler = VideoCommandLineRouteHandler()

            video_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/video_0001.mp4'  # noqa

            route_handler.insert_file(video_0001)
            route_handler.select_extract([1])

        args = ['main.py', 'select', 'frame_sets', '1', 'extract', 'face']  # noqa
        opts = {}

        route_handler = FrameSetCommandLineRouteHandler()

        try:
            sys.stdout = StringIO()
            route_handler.handle(args, opts)
            actual = sys.stdout.getvalue().strip()
        finally:
            sys.stdout = sys.__stdout__

        # stdout
        self.assertEqual(actual, 'transform_set_id=1, name=face, fk_frame_sets=1, fk_prev_transform_sets=None')  # noqa

        # db
        result = TransformSetModel().select(1)
        self.assertEqual(result, (1, 'face', 1, None))

        result = TransformModel().list(1)
        self.assertEqual(len(result), 5)

        t = list(result[0])
        json.loads(t.pop(3))
        self.assertEqual(t, [1, 1, 1, 0])

        t = list(result[1])
        json.loads(t.pop(3))
        self.assertEqual(t, [2, 1, 2, 0])

        t = list(result[2])
        json.loads(t.pop(3))
        self.assertEqual(t, [3, 1, 3, 0])

        t = list(result[3])
        json.loads(t.pop(3))
        self.assertEqual(t, [4, 1, 4, 0])

        t = list(result[4])
        json.loads(t.pop(3))
        self.assertEqual(t, [5, 1, 5, 0])

        # files
        p1 = TransformSetSubDir.path(1)

        # transforms
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 1, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 2, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 3, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 4, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 5, 'jpg')))
def test_transform_set_select_merge_overlay_rejected(self):
    with deepstar_path():
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            route_handler = VideoCommandLineRouteHandler()

            video_0001 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/video_0001.mp4'  # noqa

            route_handler.insert_file(video_0001)
            route_handler.select_extract([1])

            route_handler = FrameSetCommandLineRouteHandler()
            route_handler.select_extract([1], 'transform_set', {})
            route_handler.select_extract([1], 'transform_set', {})

            transform_model = TransformModel()
            transform_model.update(1, rejected=1)
            transform_model.update(10, rejected=1)

            OverlayTransformSetSelectMergePlugin().transform_set_select_merge(
                [1, 2], {
                    'x1': '0',
                    'y1': '0'
                })  # noqa

        # db
        result = TransformSetModel().select(3)
        self.assertEqual(result, (3, 'overlay', None, None))

        result = TransformModel().list(3)
        self.assertEqual(len(result), 4)
        self.assertEqual(result[0], (11, 3, None, None, 0))
        self.assertEqual(result[1], (12, 3, None, None, 0))
        self.assertEqual(result[2], (13, 3, None, None, 0))
        self.assertEqual(result[3], (14, 3, None, None, 0))

        # files
        p1 = TransformSetSubDir.path(3)

        # transforms
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 11, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 12, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 13, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 14, 'jpg')), np.ndarray)  # noqa
def test_transform_set_select_extract_pad_rejected(self):
    with deepstar_path():
        video_0001 = os.path.dirname(os.path.realpath(
            __file__)) + '/../../support/video_0001.mp4'  # noqa

        shutil.copyfile(video_0001, VideoFile.path('video_0001.mp4'))

        VideoModel().insert('test', 'video_0001.mp4')

        DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

        self.mock_transform_set()

        MaxSizeTransformSetSelectExtractPlugin(
        ).transform_set_select_extract(1, {})  # noqa

        transform_model = TransformModel()
        transform_model.update(7, rejected=1)
        transform_model.update(9, rejected=1)

        with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
            transform_set_id = PadTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(2, {})  # noqa

        self.assertEqual(transform_set_id, 3)

        # db
        result = TransformSetModel().select(3)
        self.assertEqual(result, (3, 'pad', 1, 2))

        result = TransformModel().list(3)
        self.assertEqual(len(result), 3)

        t = list(result[0])
        json.loads(t.pop(3))
        self.assertEqual(t, [11, 3, 1, 0])

        t = list(result[1])
        json.loads(t.pop(3))
        self.assertEqual(t, [12, 3, 3, 0])

        t = list(result[2])
        json.loads(t.pop(3))
        self.assertEqual(t, [13, 3, 5, 0])

        # files
        p1 = TransformSetSubDir.path(3)

        # transforms
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 11, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 12, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 13, 'jpg')), np.ndarray)  # noqa
def transform_set_select_curate(self, transform_set_id, opts):
    """
    This method automatically curates a transform set, rejecting transforms
    that are blurrier than the 'max-blur' threshold (i.e. whose
    variance-of-Laplacian score falls below it).

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of options.
    :raises: ValueError
    :rtype: None
    """

    if 'max-blur' not in opts:
        raise ValueError(
            'The max-blur option is required but was not supplied')

    max_blur = float(opts['max-blur'])

    transform_model = TransformModel()

    length = 100
    offset = 0

    p1 = TransformSetSubDir.path(transform_set_id)

    while True:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset)
        if not transforms:
            break

        for transform in transforms:
            p2 = TransformFile.path(p1, transform[0], 'jpg')

            debug(
                f'Curating transform with ID {transform[0]:08d} at {p2}', 4)

            image = cv2.imread(p2)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            h, w = image.shape[:2]

            # Recommendation is to scale the image down to ~500px before
            # scoring.
            if h > 600 or w > 600:
                # imutils.resize preserves aspect ratio.
                image = imutils.resize(image, width=500, height=500)

            score = cv2.Laplacian(image, cv2.CV_64F).var()

            if score < max_blur:
                transform_model.update(transform[0], rejected=1)

                debug(f'Transform with ID {transform[0]:08d} rejected', 4)

        offset += length
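# Usage sketch (illustrative, not part of the original module): rejecting
# every transform in transform set 1 whose blur score falls below 100.
# `curate_plugin` is a placeholder for an instance of this curation plugin
# class (the class name is not shown in this section), and the threshold
# value is only an example.
curate_plugin.transform_set_select_curate(1, {'max-blur': '100'})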
def test_transform_set_select_extract_crop(self):
    with deepstar_path():
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            route_handler = VideoCommandLineRouteHandler()

            video_0001 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/video_0001.mp4'  # noqa

            route_handler.insert_file(video_0001)
            route_handler.select_extract([1])

            FrameSetCommandLineRouteHandler().select_extract(
                [1], 'transform_set', {})  # noqa

            CropTransformSetSelectExtractPlugin().transform_set_select_extract(
                1, {
                    'x1': '0',
                    'y1': '0',
                    'x2': '50',
                    'y2': '50'
                })  # noqa

        # db
        result = TransformSetModel().select(2)
        self.assertEqual(result, (2, 'crop', 1, 1))

        result = TransformModel().list(2)
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0], (6, 2, 1, None, 0))
        self.assertEqual(result[1], (7, 2, 2, None, 0))
        self.assertEqual(result[2], (8, 2, 3, None, 0))
        self.assertEqual(result[3], (9, 2, 4, None, 0))
        self.assertEqual(result[4], (10, 2, 5, None, 0))

        # files
        p1 = TransformSetSubDir.path(2)

        # transforms
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 6, 'jpg')).shape[:2], (50, 50))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 7, 'jpg')).shape[:2], (50, 50))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 8, 'jpg')).shape[:2], (50, 50))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 9, 'jpg')).shape[:2], (50, 50))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 10, 'jpg')).shape[:2], (50, 50))  # noqa
def test_transform_set_select_merge_overlay_image(self):
    with deepstar_path():
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            route_handler = VideoCommandLineRouteHandler()

            video_0001 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/video_0001.mp4'  # noqa

            route_handler.insert_file(video_0001)
            route_handler.select_extract([1])

            route_handler = FrameSetCommandLineRouteHandler()
            route_handler.select_extract([1], 'transform_set', {})

            image_0007 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/image_0007.png'  # noqa

            OverlayImageTransformSetSelectMergePlugin(
            ).transform_set_select_merge([1], {
                'image-path': image_0007,
                'x1': '0',
                'y1': '0'
            })  # noqa

        # db
        result = TransformSetModel().select(2)
        self.assertEqual(result, (2, 'overlay_image', None, None))

        result = TransformModel().list(2)
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0], (6, 2, None, None, 0))
        self.assertEqual(result[1], (7, 2, None, None, 0))
        self.assertEqual(result[2], (8, 2, None, None, 0))
        self.assertEqual(result[3], (9, 2, None, None, 0))
        self.assertEqual(result[4], (10, 2, None, None, 0))

        # files
        p1 = TransformSetSubDir.path(2)

        # transforms
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 6, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 7, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 8, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 9, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 10, 'jpg')), np.ndarray)  # noqa
def list(self, transform_set_id):
    """
    This method lists transforms in the transform collection for a
    transform set.

    :param int transform_set_id: The transform set ID.
    :raises: CommandLineRouteHandlerError
    :rtype: None
    """

    result = TransformSetModel().select(transform_set_id)
    if result is None:
        raise CommandLineRouteHandlerError(
            f'Transform set with ID {transform_set_id:08d} not found')

    transform_model = TransformModel()

    count = transform_model.count(transform_set_id)

    debug(f'{count} results', 3)
    debug(
        'id | fk_transform_sets | fk_frames | metadata | rejected | '
        '(width | height)', 3)
    debug(
        '-----------------------------------------------------------'
        '----------------', 3)

    if count == 0:
        return

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    p1 = TransformSetSubDir.path(transform_set_id)

    while True:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset)
        if not transforms:
            break

        for transform in transforms:
            p2 = TransformFile.path(p1, transform[0], 'jpg')

            height, width, _ = cv2.imread(p2).shape

            debug(
                f'{transform[0]} | {transform[1]} | {transform[2]} | '
                f'{transform[3]} | {transform[4]} | ({width} | '
                f'{height})', 3)

        offset += length
def transform_set_select_extract(self, transform_set_id, opts):
    """
    This method pads each transform in a transform set.

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of options.
    :rtype: int
    """

    size = int(opts.get('size', 299))

    transform_set_path = TransformSetSubDir.path(transform_set_id)

    _, _, frame_set_id, _ = TransformSetModel().select(transform_set_id)

    target_set_id = TransformSetModel().insert(f'{self.name}', frame_set_id,
                                               transform_set_id)

    target_path = TransformSetSubDir.path(target_set_id)

    os.makedirs(target_path)

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    while True:
        result = TransformModel().list(transform_set_id, length=length,
                                       offset=offset, rejected=False)
        if not result:
            break

        for transform_id, _, frame_id, metadata, rejected in result:
            self._pad(transform_set_path, transform_id, frame_id, metadata,
                      target_set_id, size)

        offset += length

    return target_set_id
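# Usage sketch (illustrative, not part of the original module): invoking the
# pad plugin directly, as the pad tests in this section do. Assumes the class
# is imported as in those tests and that transform set 2 already exists; when
# no 'size' option is supplied the plugin pads to 299x299.
padded_set_id = PadTransformSetSelectExtractPlugin(
).transform_set_select_extract(2, {'size': '200'})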
def mock_transform_set(self):
    image_0001 = os.path.dirname(os.path.realpath(
        __file__)) + '/../../support/image_0001.jpg'  # noqa

    TransformSetModel().insert('face', 1, None)

    p1 = TransformSetSubDir.path(1)

    os.mkdir(p1)

    transform_model = TransformModel()

    for i in range(0, 5):
        transform_model.insert(1, i + 1, '{}', 0)
        shutil.copy(image_0001, TransformFile.path(p1, i + 1, 'jpg'))
def test_frame_set_select_extract_face(self):
    with deepstar_path():
        video_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/video_0001.mp4'  # noqa

        shutil.copyfile(video_0001, VideoFile.path('video_0001.mp4'))

        VideoModel().insert('test', 'video_0001.mp4')

        DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

        with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
            transform_set_id = MTCNNFrameSetSelectExtractPlugin().frame_set_select_extract(1, {})  # noqa

        self.assertEqual(transform_set_id, 1)

        # db
        result = TransformSetModel().select(1)
        self.assertEqual(result, (1, 'face', 1, None))

        result = TransformModel().list(1)
        self.assertEqual(len(result), 5)

        t = list(result[0])
        json.loads(t.pop(3))
        self.assertEqual(t, [1, 1, 1, 0])

        t = list(result[1])
        json.loads(t.pop(3))
        self.assertEqual(t, [2, 1, 2, 0])

        t = list(result[2])
        json.loads(t.pop(3))
        self.assertEqual(t, [3, 1, 3, 0])

        t = list(result[3])
        json.loads(t.pop(3))
        self.assertEqual(t, [4, 1, 4, 0])

        t = list(result[4])
        json.loads(t.pop(3))
        self.assertEqual(t, [5, 1, 5, 0])

        # files
        p1 = TransformSetSubDir.path(1)

        # transforms
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 1, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 2, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 3, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 4, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 5, 'jpg')), np.ndarray)  # noqa
def frame_set_select_extract(self, frame_set_id, opts):
    """
    This method extracts a frame set to a transform set.

    :param int frame_set_id: The frame set ID.
    :param dict opts: The dict of options.
    :rtype: int
    """

    transform_set_id = TransformSetModel().insert('transform_set',
                                                  frame_set_id)

    p1 = TransformSetSubDir.path(transform_set_id)

    os.mkdir(p1)

    frame_model = FrameModel()
    transform_model = TransformModel()

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    p2 = FrameSetSubDir.path(frame_set_id)

    while True:
        frames = frame_model.list(frame_set_id, length=length, offset=offset,
                                  rejected=False)
        if not frames:
            break

        for frame in frames:
            transform_id = transform_model.insert(transform_set_id, frame[0],
                                                  None, 0)

            p3 = FrameFile.path(p2, frame[0], 'jpg')
            p4 = TransformFile.path(p1, transform_id, 'jpg')

            shutil.copy(p3, p4)

            debug(f'Transform with ID {transform_id:08d} at {p4} '
                  f'extracted from frame with ID {frame[0]:08d} at '
                  f'{p3}', 4)

        offset += length

    return transform_set_id
def test_transform_set_select_merge_fade(self):
    with deepstar_path():
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            route_handler = VideoCommandLineRouteHandler()

            video_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/video_0001.mp4'  # noqa

            route_handler.insert_file(video_0001)
            route_handler.select_extract([1])

            route_handler = FrameSetCommandLineRouteHandler()
            route_handler.select_extract([1], 'transform_set', {})
            route_handler.select_extract([1], 'transform_set', {})

            FadeTransformSetSelectMergePlugin().transform_set_select_merge([1, 2], {'frame-count': '2'})  # noqa

        # db
        result = TransformSetModel().select(3)
        self.assertEqual(result, (3, 'fade', None, None))

        result = TransformModel().list(3)
        self.assertEqual(len(result), 8)
        self.assertEqual(result[0], (11, 3, 1, None, 0))
        self.assertEqual(result[1], (12, 3, 2, None, 0))
        self.assertEqual(result[2], (13, 3, 3, None, 0))
        self.assertEqual(result[3], (14, 3, None, None, 0))
        self.assertEqual(result[4], (15, 3, None, None, 0))
        self.assertEqual(result[5], (16, 3, 3, None, 0))
        self.assertEqual(result[6], (17, 3, 4, None, 0))
        self.assertEqual(result[7], (18, 3, 5, None, 0))

        # files
        p1 = TransformSetSubDir.path(3)

        # transforms
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 11, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 12, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 13, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 14, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 15, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 16, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 17, 'jpg')), np.ndarray)  # noqa
        self.assertIsInstance(cv2.imread(TransformFile.path(p1, 18, 'jpg')), np.ndarray)  # noqa
def transform_set_select_curate(self, transform_set_id, opts):
    """
    This method automatically curates a transform set and rejects transforms
    with width or height less than 'min-size'.

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of options.
    :raises: ValueError
    :rtype: None
    """

    if 'min-size' not in opts:
        raise ValueError(
            'The min-size option is required but was not supplied')

    min_length = int(opts['min-size'])

    transform_model = TransformModel()

    length = 100
    offset = 0

    p1 = TransformSetSubDir.path(transform_set_id)

    while True:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset)
        if not transforms:
            break

        for transform in transforms:
            p2 = TransformFile.path(p1, transform[0], 'jpg')

            debug(
                f'Curating transform with ID {transform[0]:08d} at {p2}', 4)

            h, w = cv2.imread(p2).shape[:2]

            if h < min_length or w < min_length:
                transform_model.update(transform[0], rejected=1)

                debug(f'Transform with ID {transform[0]:08d} rejected', 4)

        offset += length
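# Usage sketch (illustrative, not part of the original module): rejecting
# every transform in transform set 1 that is narrower or shorter than 50
# pixels. `curate_plugin` is a placeholder for an instance of this curation
# plugin class (the class name is not shown in this section), and the
# threshold value is only an example.
curate_plugin.transform_set_select_curate(1, {'min-size': '50'})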
def test_transform_set_select_extract_adjust_color_rejected(self):
    with deepstar_path():
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            route_handler = VideoCommandLineRouteHandler()

            video_0001 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/video_0001.mp4'  # noqa

            route_handler.insert_file(video_0001)
            route_handler.select_extract([1])

            FrameSetCommandLineRouteHandler().select_extract(
                [1], 'transform_set', {})  # noqa

            TransformModel().update(5, rejected=1)

            AdjustColorTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {
                'r': '+10',
                'g': '-10',
                'b': '+10'
            })  # noqa

        # db
        result = TransformSetModel().select(2)
        self.assertEqual(result, (2, 'adjust_color', 1, 1))

        result = TransformModel().list(2)
        self.assertEqual(len(result), 4)
        self.assertEqual(result[0], (6, 2, 1, None, 0))
        self.assertEqual(result[1], (7, 2, 2, None, 0))
        self.assertEqual(result[2], (8, 2, 3, None, 0))
        self.assertEqual(result[3], (9, 2, 4, None, 0))

        # files
        p1 = TransformSetSubDir.path(2)

        # transforms
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 6, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 7, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 8, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 9, 'jpg')))
def frame_set_select_extract(self, frame_set_id, opts):
    """
    This method extracts faces from each frame in a frame set.

    :param int frame_set_id: The frame set ID.
    :param dict opts: The dict of opts.
    :rtype: int
    """

    detector = MTCNN()

    offset_percent = 0.2
    min_confidence = 0.9
    debug_ = True if 'debug' in opts else False

    frame_set_path = FrameSetSubDir.path(frame_set_id)

    transform_set_id = TransformSetModel().insert(self.name, frame_set_id)

    transform_set_path = TransformSetSubDir.path(transform_set_id)

    os.makedirs(transform_set_path)

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    while True:
        result = FrameModel().list(frame_set_id, length=length,
                                   offset=offset, rejected=False)
        if not result:
            break

        for frame_id, _, rejected in result:
            self._extract_faces(frame_set_path, frame_id, transform_set_path,
                                transform_set_id, detector, offset_percent,
                                min_confidence, debug_)

        offset += length

    return transform_set_id
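# Usage sketch (illustrative, not part of the original module): extracting
# faces from frame set 1 with this plugin, as test_frame_set_select_extract_face
# above does. Assumes the class is imported as in that test and that frame set
# 1 already exists; passing a 'debug' key in opts enables the plugin's debug
# behavior.
face_set_id = MTCNNFrameSetSelectExtractPlugin().frame_set_select_extract(1, {})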
def test_transform_set_select_extract_slice(self):
    with deepstar_path():
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            route_handler = VideoCommandLineRouteHandler()

            video_0001 = os.path.dirname(os.path.realpath(
                __file__)) + '/../../support/video_0001.mp4'  # noqa

            route_handler.insert_file(video_0001)
            route_handler.select_extract([1])

            FrameSetCommandLineRouteHandler().select_extract(
                [1], 'transform_set', {})  # noqa

            SliceTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(1, {
                'start': '2',
                'end': '4'
            })  # noqa

        # db
        result = TransformSetModel().select(2)
        self.assertEqual(result, (2, 'slice', 1, 1))

        result = TransformModel().list(2)
        self.assertEqual(len(result), 3)
        self.assertEqual(result[0], (6, 2, 2, None, 0))
        self.assertEqual(result[1], (7, 2, 3, None, 0))
        self.assertEqual(result[2], (8, 2, 4, None, 0))

        # files
        p1 = TransformSetSubDir.path(2)

        # transforms
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 6, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 7, 'jpg')))
        self.assertTrue(os.path.isfile(TransformFile.path(p1, 8, 'jpg')))
def image_paths():
    # Nested generator: yields the path of each non-rejected transform image
    # in the transform set, paginating through the transform model. It closes
    # over transform_set_id, transform_model, length and video_path from the
    # enclosing scope.
    offset = 0

    p1 = TransformSetSubDir.path(transform_set_id)

    while True:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset, rejected=False)
        if not transforms:
            break

        for transform in transforms:
            image_path = TransformFile.path(p1, transform[0], 'jpg')

            yield image_path

            debug(
                f'Transform with ID {transform[0]:08d} at '
                f'{image_path} exported to {video_path}', 4)

        offset += length
def delete(self, transform_set_ids):
    """
    This method deletes one or more transform sets from the transform set
    collection.

    :param list(int) transform_set_ids: The transform set IDs.
    :raises: CommandLineRouteHandlerError
    :rtype: None
    """

    transform_set_model = TransformSetModel()

    for transform_set_id in transform_set_ids:
        result = transform_set_model.select(transform_set_id)
        if result is None:
            raise CommandLineRouteHandlerError(
                f'Transform set with ID {transform_set_id:08d} not found')

    for transform_set_id in transform_set_ids:
        transform_set_model.delete(transform_set_id)

        shutil.rmtree(TransformSetSubDir.path(transform_set_id))

        debug(f'Transform set {transform_set_id} was successfully deleted',
              3)
def test_transform_set_select_extract_pad_size(self):
    with deepstar_path():
        video_0001 = os.path.dirname(os.path.realpath(
            __file__)) + '/../../support/video_0001.mp4'  # noqa

        shutil.copyfile(video_0001, VideoFile.path('video_0001.mp4'))

        VideoModel().insert('test', 'video_0001.mp4')

        DefaultVideoSelectExtractPlugin().video_select_extract(1)  # noqa

        self.mock_transform_set()

        MaxSizeTransformSetSelectExtractPlugin(
        ).transform_set_select_extract(1, {'max-size': '200'})  # noqa

        with mock.patch.dict(os.environ, {'MODEL_LIST_LENGTH': '2'}):
            PadTransformSetSelectExtractPlugin(
            ).transform_set_select_extract(2, {'size': '200'})  # noqa

        # db
        result = TransformSetModel().select(3)
        self.assertEqual(result, (3, 'pad', 1, 2))

        result = TransformModel().list(3)
        self.assertEqual(len(result), 5)

        t = list(result[0])
        json.loads(t.pop(3))
        self.assertEqual(t, [11, 3, 1, 0])

        t = list(result[1])
        json.loads(t.pop(3))
        self.assertEqual(t, [12, 3, 2, 0])

        t = list(result[2])
        json.loads(t.pop(3))
        self.assertEqual(t, [13, 3, 3, 0])

        t = list(result[3])
        json.loads(t.pop(3))
        self.assertEqual(t, [14, 3, 4, 0])

        t = list(result[4])
        json.loads(t.pop(3))
        self.assertEqual(t, [15, 3, 5, 0])

        # files
        p1 = TransformSetSubDir.path(3)

        # transforms
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 11, 'jpg')).shape[:2], (200, 200))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 12, 'jpg')).shape[:2], (200, 200))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 13, 'jpg')).shape[:2], (200, 200))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 14, 'jpg')).shape[:2], (200, 200))  # noqa
        self.assertEqual(
            cv2.imread(TransformFile.path(p1, 15, 'jpg')).shape[:2], (200, 200))  # noqa
def transform_set_select_extract(self, transform_set_id, opts):
    """
    This method adjusts color for each transform in a transform set.

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of options.
    :raises: ValueError
    :rtype: int
    """

    r = opts.get('r', None)
    g = opts.get('g', None)
    b = opts.get('b', None)

    if r is None and g is None and b is None:
        raise ValueError(
            'The r, g and/or b options are required but were not supplied')

    for channel in [r, g, b]:
        if channel is not None:
            if not re.match('^[+\\-]\\d+$', channel):
                raise ValueError(
                    'A color adjustment option value must be formatted as '
                    '+/- followed by a number (e.g. --r=+10)')

    color_adjustments = []

    for index, channel in enumerate([b, g, r]):
        if channel is not None:
            color_adjustments.append(
                [index, int(channel[1:]),
                 True if (channel[:1] == '+') else False])
        else:
            color_adjustments.append(None)

    transform_set_model = TransformSetModel()

    result = transform_set_model.select(transform_set_id)

    transform_set_id_ = TransformSetModel().insert('adjust_color', result[2],
                                                   transform_set_id)

    p1 = TransformSetSubDir.path(transform_set_id_)

    os.makedirs(p1)

    p2 = TransformSetSubDir.path(transform_set_id)

    transform_model = TransformModel()

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    while True:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset, rejected=False)
        if not transforms:
            break

        for transform in transforms:
            transform_id = transform_model.insert(transform_set_id_,
                                                  transform[2], transform[3],
                                                  transform[4])

            p3 = TransformFile.path(p2, transform[0], 'jpg')
            p4 = TransformFile.path(p1, transform_id, 'jpg')

            image = cv2.imread(p3)
            image = image.astype(np.short)

            for color_adjustment in color_adjustments:
                if color_adjustment is not None:
                    image = adjust_color(image, color_adjustment[0],
                                         color_adjustment[1],
                                         color_adjustment[2])

            cv2.imwrite(p4, image, [cv2.IMWRITE_JPEG_QUALITY, 100])

            debug(f'Transform with ID {transform_id:08d} at {p4} '
                  f'extracted from transform with ID {transform[0]:08d} '
                  f'at {p3}', 4)

        offset += length

    return transform_set_id_
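# Usage sketch (illustrative, not part of the original module): shifting the
# red and blue channels up and the green channel down by 10, as the
# adjust_color test earlier in this section does. Assumes the class is
# imported as in that test and that transform set 1 already exists.
adjusted_set_id = AdjustColorTransformSetSelectExtractPlugin(
).transform_set_select_extract(1, {'r': '+10', 'g': '-10', 'b': '+10'})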
def transform_set_select_extract(self, transform_set_id, opts):
    """
    This method crops each transform in a transform set.

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of options.
    :raises: ValueError
    :rtype: int
    """

    x1 = int(opts['x1']) if ('x1' in opts) else None
    y1 = int(opts['y1']) if ('y1' in opts) else None
    x2 = int(opts['x2']) if ('x2' in opts) else None
    y2 = int(opts['y2']) if ('y2' in opts) else None

    if x1 is None or y1 is None or x2 is None or y2 is None:
        raise ValueError(
            'The x1, y1, x2 and y2 options are required but were not '
            'supplied')

    transform_set_model = TransformSetModel()

    result = transform_set_model.select(transform_set_id)

    transform_set_id_ = TransformSetModel().insert('crop', result[2],
                                                   transform_set_id)

    p1 = TransformSetSubDir.path(transform_set_id_)

    os.makedirs(p1)

    p2 = TransformSetSubDir.path(transform_set_id)

    transform_model = TransformModel()

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    while True:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset, rejected=False)
        if not transforms:
            break

        for transform in transforms:
            transform_id = transform_model.insert(transform_set_id_,
                                                  transform[2], transform[3],
                                                  transform[4])

            p3 = TransformFile.path(p2, transform[0], 'jpg')
            p4 = TransformFile.path(p1, transform_id, 'jpg')

            image_1 = cv2.imread(p3)
            image_2 = image_1[y1:y2, x1:x2]

            cv2.imwrite(p4, image_2, [cv2.IMWRITE_JPEG_QUALITY, 100])

            debug(
                f'Transform with ID {transform_id:08d} at {p4} '
                f'extracted from transform with ID {transform[0]:08d} '
                f'at {p3}', 4)

        offset += length

    return transform_set_id_
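# Usage sketch (illustrative, not part of the original module): cropping every
# transform in transform set 1 to the 50x50 region anchored at the top-left
# corner, mirroring the crop test earlier in this section. Assumes the class
# is imported as in that test.
cropped_set_id = CropTransformSetSelectExtractPlugin(
).transform_set_select_extract(
    1, {'x1': '0', 'y1': '0', 'x2': '50', 'y2': '50'})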
def transform_set_select_merge(self, transform_set_ids, opts):
    """
    This method merges two transform sets with a fade effect applied.

    :param list(int) transform_set_ids: The transform set IDs.
    :param dict opts: The dict of options.
    :raises: ValueError
    :rtype: int
    """

    if len(transform_set_ids) != 2:
        raise ValueError('Exactly two transform set IDs must be supplied')

    if 'frame-count' not in opts:
        raise ValueError(
            'The frame-count option is required but was not supplied')

    frame_count = int(opts['frame-count'])
    if frame_count < 1:
        raise ValueError('Frame count must be 1 or greater')

    transform_set_id_1 = transform_set_ids[0]
    transform_set_id_2 = transform_set_ids[1]

    transform_model = TransformModel()

    transform_set_1_count = transform_model.count(transform_set_id_1,
                                                  rejected=False)
    transform_set_2_count = transform_model.count(transform_set_id_2,
                                                  rejected=False)

    if transform_set_1_count <= frame_count or \
            transform_set_2_count <= frame_count:
        raise ValueError(
            'Both transform sets must be greater than frame count')

    transform_set_id = TransformSetModel().insert('fade', None, None)

    p1 = TransformSetSubDir.path(transform_set_id)

    os.makedirs(p1)

    p2 = TransformSetSubDir.path(transform_set_id_1)
    p3 = TransformSetSubDir.path(transform_set_id_2)

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    # Copy transforms from the first transform set until only frame_count
    # transforms remain.
    flag = True
    while flag:
        transforms = transform_model.list(transform_set_id_1, length=length,
                                          offset=offset, rejected=False)

        for transform in transforms:
            transform_id = transform_model.insert(transform_set_id,
                                                  transform[2], transform[3],
                                                  transform[4])

            p4 = TransformFile.path(p2, transform[0], 'jpg')
            p5 = TransformFile.path(p1, transform_id, 'jpg')

            shutil.copy(p4, p5)

            debug(
                f'Transform with ID {transform[0]:08d} at {p4} '
                f'merged as ID {transform_id:08d} at {p5}', 4)

            offset += 1

            if transform_set_1_count - offset == frame_count:
                flag = False
                break

    # Cross-fade the last frame_count transforms of the first transform set
    # with the first frame_count transforms of the second transform set.
    transforms_1 = transform_model.list(transform_set_id_1,
                                        length=frame_count, offset=offset,
                                        rejected=False)
    transforms_2 = transform_model.list(transform_set_id_2,
                                        length=frame_count, offset=0,
                                        rejected=False)

    for i in range(0, frame_count):
        transform_id_1 = transforms_1[i][0]
        transform_id_2 = transforms_2[i][0]

        image_path_1 = TransformFile.path(p2, transform_id_1, 'jpg')
        image_path_2 = TransformFile.path(p3, transform_id_2, 'jpg')

        transform_id = transform_model.insert(transform_set_id, None, None, 0)

        image_path_3 = TransformFile.path(p1, transform_id, 'jpg')

        image_1 = cv2.imread(image_path_1)
        image_2 = cv2.imread(image_path_2)

        alpha = 1.0 - float(i + 1) / float(frame_count)

        image_3 = cv2.addWeighted(image_1, alpha, image_2, 1.0 - alpha, 0)

        cv2.imwrite(image_path_3, image_3, [cv2.IMWRITE_JPEG_QUALITY, 100])

        debug(
            f'Transforms with ID {transform_id_1:08d} at {image_path_1} '
            f'and {transform_id_2:08d} at {image_path_2} merged with '
            f'alpha {alpha} as ID {transform_id:08d} at {image_path_3}', 4)

    # Copy the remaining transforms from the second transform set.
    offset = frame_count

    while True:
        transforms = transform_model.list(transform_set_id_2, length=length,
                                          offset=offset, rejected=False)
        if not transforms:
            break

        for transform in transforms:
            transform_id = transform_model.insert(transform_set_id,
                                                  transform[2], transform[3],
                                                  transform[4])

            p4 = TransformFile.path(p3, transform[0], 'jpg')
            p5 = TransformFile.path(p1, transform_id, 'jpg')

            shutil.copy(p4, p5)

            debug(
                f'Transform with ID {transform[0]:08d} at {p4} '
                f'merged as ID {transform_id:08d} at {p5}', 4)

            offset += 1

    return transform_set_id
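# Usage sketch (illustrative, not part of the original module): merging
# transform sets 1 and 2 with a two-frame cross-fade, as the fade test earlier
# in this section does. Assumes the class is imported as in that test and that
# both source sets hold more than two non-rejected transforms.
faded_set_id = FadeTransformSetSelectMergePlugin().transform_set_select_merge(
    [1, 2], {'frame-count': '2'})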
def transform_set_select_extract(self, transform_set_id, opts):
    """
    This method resizes each transform in a transform set.

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of options.
    :raises: ValueError
    :rtype: int
    """

    height = int(opts['height']) if ('height' in opts) else None
    width = int(opts['width']) if ('width' in opts) else None

    if height is None and width is None:
        raise ValueError(
            'The height or width options are required but were not '
            'supplied')

    transform_set_model = TransformSetModel()

    result = transform_set_model.select(transform_set_id)

    transform_set_id_ = TransformSetModel().insert('resize', result[2],
                                                   transform_set_id)

    p1 = TransformSetSubDir.path(transform_set_id_)

    os.makedirs(p1)

    p2 = TransformSetSubDir.path(transform_set_id)

    transform_model = TransformModel()

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    while True:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset, rejected=False)
        if not transforms:
            break

        for transform in transforms:
            transform_id = transform_model.insert(transform_set_id_,
                                                  transform[2], transform[3],
                                                  transform[4])

            p3 = TransformFile.path(p2, transform[0], 'jpg')
            p4 = TransformFile.path(p1, transform_id, 'jpg')

            image_1 = cv2.imread(p3)

            if width is not None:
                image_2 = imutils.resize(image_1, width=width)
            else:
                image_2 = imutils.resize(image_1, height=height)

            cv2.imwrite(p4, image_2, [cv2.IMWRITE_JPEG_QUALITY, 100])

            debug(
                f'Transform with ID {transform_id:08d} at {p4} '
                f'extracted from transform with ID {transform[0]:08d} '
                f'at {p3}', 4)

        offset += length

    return transform_set_id_
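# Usage sketch (illustrative, not part of the original module): resizing every
# transform in transform set 1 to a width of 100 pixels (height follows the
# aspect ratio via imutils.resize, and width takes precedence when both are
# supplied). `resize_plugin` is a placeholder for an instance of this resize
# plugin class; the width value is only an example.
resized_set_id = resize_plugin.transform_set_select_extract(
    1, {'width': '100'})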
def get(self, transform_set_id, transform_id):
    """
    This method serves one transform image (JPEG) from a transform set's
    directory.

    :param int transform_set_id: The transform set ID.
    :param int transform_id: The transform ID.
    :rtype: flask.Response
    """

    return send_from_directory(
        TransformSetSubDir.path(transform_set_id),
        TransformFile.name(transform_id, 'jpg'))
def transform_set_select_extract(self, transform_set_id, opts):
    """
    This method extracts a slice from a transform set (a subset).

    :param int transform_set_id: The transform set ID.
    :param dict opts: The dict of opts.
    :raises: ValueError
    :rtype: int
    """

    start = int(opts['start']) if ('start' in opts) else None
    end = int(opts['end']) if ('end' in opts) else None

    if start is None or end is None:
        raise ValueError(
            'The start and end options are required but were not '
            'supplied')

    transform_set_model = TransformSetModel()

    result = transform_set_model.select(transform_set_id)

    transform_set_id_ = TransformSetModel().insert('slice', result[2],
                                                   transform_set_id)

    p1 = TransformSetSubDir.path(transform_set_id_)

    os.makedirs(p1)

    p2 = TransformSetSubDir.path(transform_set_id)

    transform_model = TransformModel()

    length = int(os.environ.get('MODEL_LIST_LENGTH', '100'))
    offset = 0

    flag = True
    while flag:
        transforms = transform_model.list(transform_set_id, length=length,
                                          offset=offset, rejected=False)
        if not transforms:
            break

        for transform in transforms:
            if transform[0] < start:
                continue

            if transform[0] > end:
                flag = False
                break

            transform_id = transform_model.insert(transform_set_id_,
                                                  transform[2], transform[3],
                                                  transform[4])

            p3 = TransformFile.path(p2, transform[0], 'jpg')
            p4 = TransformFile.path(p1, transform_id, 'jpg')

            shutil.copy(p3, p4)

            debug(
                f'Transform with ID {transform_id:08d} at {p4} '
                f'extracted from transform with ID {transform[0]:08d} '
                f'at {p3}', 4)

        offset += length

    return transform_set_id_
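# Usage sketch (illustrative, not part of the original module): copying
# transforms with IDs 2 through 4 into a new 'slice' transform set, as the
# slice test earlier in this section does. Assumes the class is imported as in
# that test. Note that 'start' and 'end' are compared against transform IDs,
# not positions within the set.
sliced_set_id = SliceTransformSetSelectExtractPlugin(
).transform_set_select_extract(1, {'start': '2', 'end': '4'})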