Example 1
    def process_image(self, filename):
        # TODO: move the model load and the converter creation into a method called on init, but after the arg parsing
        if self.arguments.swap_model:
            face_A, face_B = '/decoder_B.h5', '/decoder_A.h5'
        else:
            face_A, face_B = '/decoder_A.h5', '/decoder_B.h5'

        model_dir = self.arguments.model_dir
        encoder.load_weights(model_dir + "/encoder.h5")
        decoder_A.load_weights(model_dir + face_A)
        decoder_B.load_weights(model_dir + face_B)

        converter = PluginLoader.get_converter("Masked")(autoencoder_B)

        try:
            image = cv2.imread(filename)
            for (idx, face) in enumerate(detect_faces(image)):
                if idx > 0 and self.arguments.verbose:
                    print('- Found more than one face!')
                    self.verify_output = True

                image = converter.patch_image(image, face)
                self.faces_detected += 1

            output_file = self.output_dir / Path(filename).name
            cv2.imwrite(str(output_file), image)
        except Exception as e:
            print('Failed to convert image: {}. Reason: {}'.format(
                filename, e))
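
The TODO above asks for the model load and converter creation to be hoisted out of the per-image path, so weights are not reloaded for every file. A minimal sketch of that refactor, assuming the same module-level globals (encoder, decoder_A, decoder_B, autoencoder_B) and PluginLoader as in the snippet; init_model is a hypothetical method name:

    def init_model(self):
        # Hypothetical init-time hook: called once after argument parsing,
        # so process_image() no longer reloads weights per file.
        face_A, face_B = '/decoder_A.h5', '/decoder_B.h5'
        if self.arguments.swap_model:
            face_A, face_B = face_B, face_A

        model_dir = self.arguments.model_dir
        encoder.load_weights(model_dir + "/encoder.h5")
        decoder_A.load_weights(model_dir + face_A)
        decoder_B.load_weights(model_dir + face_B)
        self.converter = PluginLoader.get_converter("Masked")(autoencoder_B)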
Example 2
    def process(self):
        # Original & LowMem models go with Adjust or Masked converter
        # GAN converter & model must go together
        # Note: GAN prediction outputs a mask + an image, while the others predict only an image
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        if conv_name.startswith("GAN"):
            assert model_name.startswith("GAN"), \
                "GAN converter can only be used with GAN model!"
        else:
            assert not model_name.startswith("GAN"), \
                "GAN model can only be used with GAN converter!"

        model = PluginLoader.get_model(model_name)(get_folder(
            self.arguments.model_dir))
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliably hacky (tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
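
To make the frame-range parsing and the trailing-number regex concrete, here is a small standalone illustration (the argument values are made up):

    import re

    minmax = {"min": 0, "max": float("inf")}
    frame_ranges_arg = ["10-50", "90-max"]  # hypothetical --frame-ranges value

    frame_ranges = [
        tuple(minmax[q] if q in minmax else int(q) for q in v.split("-"))
        for v in frame_ranges_arg
    ]
    print(frame_ranges)  # [(10, 50), (90, inf)]

    # The "last number" regex grabs the final digit run in a filename,
    # which is how the frame index is recovered for range filtering:
    imageidxre = re.compile(r'(\d+)(?!.*\d)')
    print(imageidxre.findall("video1_frame_00042.png"))  # ['00042']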
Example 3
    def process_arguments(self, arguments):
        if not arguments.swap_model:
            self.face_A, self.face_B = ('/decoder_A.h5', '/decoder_B.h5')
        else:
            self.face_A, self.face_B = ('/decoder_B.h5', '/decoder_A.h5')

        model_dir = arguments.model_dir
        encoder.load_weights(model_dir + "/encoder.h5")

        decoder_A.load_weights(model_dir + self.face_A)
        decoder_B.load_weights(model_dir + self.face_B)
        self.converter = PluginLoader.get_converter("Masked")(autoencoder_B)
        super().process_arguments(arguments)
Example 4
    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(model.converter(False),
                                                     trainer=args.trainer,
                                                     blur_size=args.blur_size,
                                                     seamless_clone=args.seamless_clone,
                                                     sharpen_image=args.sharpen_image,
                                                     mask_type=args.mask_type,
                                                     erosion_kernel_size=args.erosion_kernel_size,
                                                     match_histogram=args.match_histogram,
                                                     smooth_mask=args.smooth_mask,
                                                     avg_color_adjust=args.avg_color_adjust)
        return converter
Example 5
    def process(self):
        # Original model goes with Adjust or Masked converter
        # does the LowMem model work with only one of them?
        # it seems to work with both in testing - although Adjust with LowMem
        # looks like a real mess - you can see that it is "working"
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliably hacky (tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example 6
    def process(self):
        # Original model goes with Adjust or Masked converter
        model_name = "Original"  # TODO Pass as argument
        conv_name = "Masked"  # TODO Pass as argument

        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False))

        batch = BackgroundGenerator(self.prepare_images(), 1)
        for item in batch.iterator():
            self.convert(converter, item)
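
Several of these examples wrap prepare_images() in BackgroundGenerator(..., 1) and drain it through .iterator(). The project's implementation isn't shown here, but a minimal sketch of the pattern the name suggests, a worker thread prefetching into a bounded queue, could look like this:

    import queue
    import threading

    class BackgroundGenerator:
        """Prefetch items from a generator on a background thread."""

        def __init__(self, generator, prefetch=1):
            self.queue = queue.Queue(maxsize=prefetch)
            thread = threading.Thread(target=self._run, args=(generator,),
                                      daemon=True)
            thread.start()

        def _run(self, generator):
            for item in generator:
                self.queue.put(item)   # blocks while the queue is full
            self.queue.put(None)       # sentinel: source exhausted

        def iterator(self):
            while True:
                item = self.queue.get()
                if item is None:
                    break
                yield item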
Example 7
    def process(self):
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        if conv_name.startswith("GAN"):
            assert model_name.startswith("GAN"), \
                "GAN converter can only be used with GAN model"
        else:
            assert not model_name.startswith("GAN"), \
                "GAN model can only be used with GAN converter"

        model = PluginLoader.get_model(model_name)(get_folder(self.arguments.model_dir))

        if not model.load(self.arguments.swap_model):
            print("Model Not Found! A valid model must be provided to continue!")
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff
        self.frame_ranges = None

        minmax = {
            "min": 0,
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [tuple(map(lambda q: minmax[q] if q in minmax else int(q), v.split("-"))) for v in self.arguments.frame_ranges]

        # last number regex: pulls the frame index out of the filename
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example 8
    def process(self):
        # Original model goes with Adjust or Masked converter
        # does the LowMem one work with only one?
        model_name = "Original" # TODO Pass as argument
        conv_name = self.arguments.converter
        
        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust
        )

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None
        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0, # never any frames less than 0
            "max": float("inf")
        }
        if self.arguments.frame_ranges:
            self.frame_ranges = [tuple(map(lambda q: minmax[q] if q in minmax else int(q), v.split("-"))) for v in self.arguments.frame_ranges]

        # last number regex. I know regex is hacky, but it's reliably hacky (tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example 9
    def process(self):
        # Original & LowMem models go with Adjust or Masked converter
        # Note: GAN prediction outputs a mask + an image, while the others predict only an image
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter
        self.input_aligned_dir = None

        model = PluginLoader.get_model(model_name)(get_folder(
            self.arguments.model_dir))
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        input_aligned_dir = Path(self.arguments.input_dir) / Path('aligned')
        if self.arguments.input_aligned_dir is not None:
            input_aligned_dir = self.arguments.input_aligned_dir
        try:
            self.input_aligned_dir = [
                Path(path) for path in get_image_paths(input_aligned_dir)
            ]
            if len(self.input_aligned_dir) == 0:
                print(
                    'Aligned directory is empty, no faces will be converted!')
            elif len(self.input_aligned_dir) <= len(self.input_dir) / 3:
                print(
                    'Aligned directory contains far fewer images than the input, '
                    'are you sure this is the right directory?')
        except Exception:
            print(
                'Aligned directory not found. All faces listed in the alignments file will be converted.'
            )

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            trainer=self.arguments.trainer,
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            match_histogram=self.arguments.match_histogram,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliably hacky (tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example 10
    def convert(self, video_file, swap_model=False, duration=None,
                start_time=None, use_gan=False, face_filter=False,
                photos=True, crop_x=None, width=None, side_by_side=False,
                live=False, webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list="0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        if not model.load(swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if not live and not webcam:
                        frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # swap the channels back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if live:
            # generate dummy content for testing /dev/video1
            #ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
            print("Starting live mode. Capturing video from webcam!")
            print("Press q to Quit")
            # connect to webcam
            video_capture = cv2.VideoCapture(0)
            time.sleep(1)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
            print("webcam dimensions = {} x {}".format(width, height))
            
            #video_capture = cv2.VideoCapture('./data/videos/ale.mp4')
            if (webcam):
                # create fake webcam device
                camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
                camera.print_capabilities()
                print("Fake webcam created, try using appear.in on Firefox or  ")
          
            # loop until user clicks 'q' to exit
            while True:
                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (640, 480))
                # flip image, because webcam inverts it and we trained the model the other way!
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                # flip it back
                image = cv2.flip(image, 1)

                if webcam:
                    time.sleep(1 / 30.0)
Example 11
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # swap the channels back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
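
The _convert_helper(get_frame, t) signature above matches what moviepy 1.x's VideoClip.fl expects: a function that receives a frame getter and a timestamp and returns the replacement frame. A minimal self-contained sketch of that hook (file names are made up):

    from moviepy.editor import VideoFileClip  # moviepy 1.x
    import numpy

    def _dim_frame(get_frame, t):
        # fl calls this once per frame; return the new frame as a uint8 array
        frame = get_frame(t)
        return (frame * 0.5).astype(numpy.uint8)  # e.g. darken every frame

    clip = VideoFileClip("input.mp4")   # hypothetical input
    new_clip = clip.fl(_dim_frame)
    new_clip.write_videofile("output.mp4")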
Example 12
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False,
                live=False,
                webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if not live and not webcam:
                        frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # swap the channels back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if live:
            # generate dummy content for testing /dev/video1
            #ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
            print("Starting live mode. Capturing video from webcam!")
            print("Press q to Quit")
            # connect to webcam
            video_capture = cv2.VideoCapture(0)
            time.sleep(1)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
            print("webcam dimensions = {} x {}".format(width, height))

            #video_capture = cv2.VideoCapture('./data/videos/ale.mp4')
            if webcam:
                # create fake webcam device
                camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
                camera.print_capabilities()
                print(
                    "Fake webcam created, try using appear.in on Firefox or  ")

            # loop until user clicks 'q' to exit
            while True:
                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (640, 480))
                # flip image, because webcam inverts it and we trained the model the other way!
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                # flip it back
                image = cv2.flip(image, 1)

                if webcam:
                    time.sleep(1 / 30.0)
                    # firefox needs RGB
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    # chrome and skype UYUV - not working at the moment
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)

                    camera.schedule_frame(image)
                    #print("writing to stream")

                else:
                    cv2.imshow('Video', image)
                    #print("writing to screen")

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            if not os.path.exists(os.path.join(self.OUTPUT_PATH)):
                os.makedirs(self.OUTPUT_PATH)
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
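
The commented-out conversions in the webcam branch point at the real pitfall: OpenCV delivers BGR frames, while consumers of the fake webcam generally expect RGB. A minimal sketch of feeding a v4l2loopback device with pyfakewebcam, assuming /dev/video1 exists as in the example:

    import time
    import cv2
    import pyfakewebcam

    camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
    video_capture = cv2.VideoCapture(0)

    while True:
        ret, frame = video_capture.read()
        if not ret:
            break
        frame = cv2.resize(frame, (640, 480))
        # OpenCV captures BGR; convert so downstream apps see correct colors
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        camera.schedule_frame(rgb)
        time.sleep(1 / 30.0)  # pace the stream at roughly 30 fps
    video_capture.release()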
Example 13
    def convert(self, video_file, swap_model=False, duration=None,
                start_time=None, use_gan=False, face_filter=False,
                photos=True, crop_x=None, width=None, side_by_side=False,
                live=False, webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list="0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        if not model.load(swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if not live and not webcam:
                        frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # swap the channels back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if live:

            print("Starting live mode. Capturing video from webcam!")
            print("Press q to Quit")

            video_capture = cv2.VideoCapture("./data/videos/pair_360p_cut.mp4")
            time.sleep(1)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
            print("video_source dimensions = {} x {}".format(width, height))
                      
            # loop until user clicks 'q' to exit
            while True:

                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (640, 480))

                # flip image, because webcam inverts it and we trained the model the other way!
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)

                # flip it back
                image = cv2.flip(image, 1)

                cv2.imshow('Video', image)

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()
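
A closing note on the "magic incantation" that opens Examples 10-13: ConfigProto and Session are TensorFlow 1.x APIs. If this code were ported to TensorFlow 2.x, the equivalent opt-in to incremental GPU memory allocation would be along these lines:

    import tensorflow as tf

    # TF 2.x replacement for ConfigProto + gpu_options.allow_growth
    for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)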