Example #1
    def processThread(self):
        variant = "Original"  # TODO Pass as argument

        print('Loading data, this may take a while...')
        model = PluginLoader.get_model(variant)(self.arguments.model_dir)
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)
        trainer = PluginLoader.get_trainer(variant)(model, images_A, images_B)

        try:
            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, 1000000):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(epoch,
                                       self.show if save_iteration else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
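A note on the call shape that recurs in every example: `PluginLoader.get_model(variant)` returns a model *class*, and the second set of parentheses instantiates it with the model directory. A minimal sketch of that factory pattern (the `Model` stub and the registry below are illustrative stand-ins, not faceswap's actual plugin code):

    # Minimal sketch of the plugin-factory pattern; Model and the registry
    # are hypothetical stand-ins, not faceswap's real classes.
    class Model:
        def __init__(self, model_dir):
            self.model_dir = model_dir

        def load(self, swapped=False):
            print("loading weights from {} (swapped={})".format(
                self.model_dir, swapped))

    class PluginLoader:
        _models = {"Original": Model}

        @staticmethod
        def get_model(name):
            return PluginLoader._models[name]  # returns the class, not an instance

    model = PluginLoader.get_model("Original")("models/")  # class, then instance
    model.load(swapped=False)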
Example #2
    def load_model(self):
        """ Load the model requested for training """
        model_dir = get_folder(self.args.model_dir)
        model = PluginLoader.get_model(self.trainer_name)(model_dir, self.args.gpus)

        model.load(swapped=False)
        return model
Example #3
    def process(self):
        # Original & LowMem models go with Adjust or Masked converter
        # GAN converter & model must go together
        # Note: GAN prediction outputs a mask + an image, while the other models predict only an image
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        if conv_name.startswith("GAN"):
            assert model_name.startswith("GAN"), \
                "GAN converter can only be used with GAN model!"
        else:
            assert not model_name.startswith("GAN"), \
                "GAN model can only be used with GAN converter!"

        model = PluginLoader.get_model(model_name)(get_folder(
            self.arguments.model_dir))
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax.keys() else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliablyhacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
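For reference, the frame-range parsing above maps the literal tokens "min" and "max" to 0 and infinity and parses everything else as an integer. The same expression run standalone on a sample `--frame-ranges` value (the input list here is hypothetical):

    # Standalone run of the frame-range parsing shown above
    minmax = {"min": 0, "max": float("inf")}
    frame_ranges_arg = ["20-50", "100-max"]  # example CLI input

    frame_ranges = [
        tuple(map(lambda q: minmax[q] if q in minmax else int(q), v.split("-")))
        for v in frame_ranges_arg
    ]
    print(frame_ranges)  # [(20, 50), (100, inf)]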
Example #4
    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print("Model Not Found! A valid model must be provided to continue!")
            exit(1)

        return model
Example #5
    def processThread(self):
        try:
            if self.arguments.allow_growth:
                self.set_tf_allow_growth()

            print('Loading data, this may take a while...')
            # this is so that you can enter case insensitive values for trainer
            trainer = self.arguments.trainer
            trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
            model = PluginLoader.get_model(trainer)(get_folder(
                self.arguments.model_dir))
            model.load(swapped=False)

            images_A = get_image_paths(self.arguments.input_A)
            images_B = get_image_paths(self.arguments.input_B)
            trainer = PluginLoader.get_trainer(trainer)
            trainer = trainer(model, images_A, images_B,
                              self.arguments.batch_size,
                              self.arguments.perceptual_loss)

            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, self.arguments.epochs):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(
                    epoch, self.show if
                    (save_iteration or self.save_now) else None,
                    self.arguments.save_interval)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
        except Exception as e:
            print(e)
            exit(1)
Example #6
File: convert.py Project: zhy5200/faceswap
    def process(self):
        # Original model goes with Adjust or Masked converter
        # does the LowMem one work with only one?
        # seems to work with both in testing - although Adjust with LowMem
        # looks a real mess - you can see that it is "working"
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax.keys() else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliablyhacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
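The "last number" regex `(\d+)(?!.*\d)` matches the final run of digits in a string; the negative lookahead rejects any digit group that is followed by another digit later on. A quick standalone check (the filename is just an example):

    import re

    # (\d+)(?!.*\d): digits with no further digit anywhere to their right,
    # i.e. the last number in the string
    imageidxre = re.compile(r'(\d+)(?!.*\d)')
    print(imageidxre.findall('scene1_frame_00042.png'))  # ['00042']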
Example #7
    def process(self):
        # Original model goes with Adjust or Masked converter
        model_name = "Original"  # TODO Pass as argument
        conv_name = "Masked"  # TODO Pass as argument

        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False))

        batch = BackgroundGenerator(self.prepare_images(), 1)
        for item in batch.iterator():
            self.convert(converter, item)
Example #8
    def processThread(self):
        print("Loading Data..! This may take a while")

        trainer = self.arguments.trainer
        trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
        model = PluginLoader.get_model(trainer)(get_folder(
            self.arguments.model_dir))
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)

        trainer = PluginLoader.get_trainer(trainer)
        trainer = trainer(model,
                          images_A,
                          images_B,
                          batch_size=self.arguments.batch_size)

        try:

            print("Starting. Press Enter to stop Training and Save model")

            for epoch in range(0, 100000):
                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(
                    epoch, self.show if
                    (save_iteration or self.save_now) else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print("Saving model weights has been cancelled...!")
            exit(0)
Example #9
File: train.py Project: nhu2000/faceswap
    def processThread(self):
        if self.arguments.allow_growth:
            self.set_tf_allow_growth()
        
        print('Loading data, this may take a while...')
        # this is so that you can enter case insensitive values for trainer
        trainer = self.arguments.trainer
        trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
        model = PluginLoader.get_model(trainer)(get_folder(self.arguments.model_dir))
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)
        trainer = PluginLoader.get_trainer(trainer)
        trainer = trainer(model, images_A, images_B, batch_size=self.arguments.batch_size)

        try:
            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, self.arguments.epochs):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(epoch, self.show if (save_iteration or self.save_now) else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
        except Exception as e:
            print(e)
            exit(1)
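The `epoch % self.arguments.save_interval == 0` test used in these training loops fires on epoch 0 and then on every multiple of the interval, so the very first step always writes weights. A one-liner to see which epochs trigger a save (the interval value is just an example):

    save_interval = 100  # example value
    print([epoch for epoch in range(301) if epoch % save_interval == 0])
    # [0, 100, 200, 300]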
Example #10
    def process(self):
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        if conv_name.startswith("GAN"):
            assert model_name.startswith("GAN"), "GAN converter can only be used with GAN model"
        else:
            assert not model_name.startswith("GAN"), "GAN model can only be used with GAN converter"

        model = PluginLoader.get_model(model_name)(get_folder(self.arguments.model_dir))

        if not model.load(self.arguments.swap_model):
            print("Model Not Found! A valid model must be provided to continue!")
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff
        self.frame_ranges = None

        minmax = {
            "min": 0,
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(map(lambda q: minmax[q] if q in minmax else int(q), v.split("-")))
                for v in self.arguments.frame_ranges
            ]

        # last number regex
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example #11
    def processThread(self):
        print('Loading data, this may take a while...')
        # this is so that you can enter case insensitive values for trainer
        trainer = self.arguments.trainer
        trainer = trainer if trainer != "Lowmem" else "LowMem"
        model = PluginLoader.get_model(trainer)(self.arguments.model_dir)
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)
        trainer = PluginLoader.get_trainer(trainer)(
            model, images_A, images_B, batch_size=self.arguments.batch_size)

        try:
            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, 1000000):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(
                    epoch, self.show if
                    (save_iteration or self.save_now) else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
Example #12
    def process(self):
        # Original model goes with Adjust or Masked converter
        # does the LowMem one work with only one?
        model_name = "Original" # TODO Pass as argument
        conv_name = self.arguments.converter
        
        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust
        )

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None
        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0, # never any frames less than 0
            "max": float("inf")
        }
        if self.arguments.frame_ranges:
            self.frame_ranges = [tuple(map(lambda q: minmax[q] if q in minmax.keys() else int(q), v.split("-"))) for v in self.arguments.frame_ranges]

        # last number regex. I know regex is hacky, but it's reliablyhacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
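`BackgroundGenerator(self.prepare_images(), 1)` indicates the image pipeline is prefetched on a worker thread so that disk I/O overlaps with conversion. faceswap's own class is not shown on this page; the following is a minimal sketch of that prefetch pattern using a thread and a bounded queue, under the assumption that the constructor takes a generator and a prefetch depth:

    import queue
    import threading

    class BackgroundGenerator:
        """Prefetch items from a generator on a worker thread (sketch)."""
        _SENTINEL = object()

        def __init__(self, generator, max_prefetch=1):
            self.queue = queue.Queue(maxsize=max_prefetch)
            worker = threading.Thread(target=self._run, args=(generator,),
                                      daemon=True)
            worker.start()

        def _run(self, generator):
            for item in generator:
                self.queue.put(item)        # blocks while the buffer is full
            self.queue.put(self._SENTINEL)  # signals end of stream

        def iterator(self):
            while True:
                item = self.queue.get()
                if item is self._SENTINEL:
                    return
                yield item

    # Usage mirroring the convert loop above
    batch = BackgroundGenerator((n * n for n in range(5)), 1)
    for item in batch.iterator():
        print(item)  # 0 1 4 9 16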
Example #13
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
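Note that `_convert_frame` passes `cv2.COLOR_BGR2RGB` in both directions. That is safe because the conversion is a pure channel reversal and therefore its own inverse, as a quick check with a dummy pixel shows:

    import numpy
    import cv2

    pixel = numpy.array([[[10, 20, 30]]], dtype=numpy.uint8)  # one BGR pixel
    once = cv2.cvtColor(pixel, cv2.COLOR_BGR2RGB)   # channels reversed
    twice = cv2.cvtColor(once, cv2.COLOR_BGR2RGB)   # reversed back
    print(once[0, 0], twice[0, 0])  # [30 20 10] [10 20 30]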
Example #14
    def convert(self, video_file, swap_model=False, duration=None,
                start_time=None, use_gan=False, face_filter=False, photos=True,
                crop_x=None, width=None, side_by_side=False, live=False,
                webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list="0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        if not model.load(swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors = True):
            if convert_colors:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if (not live and not webcam):
                        frame = frame.astype(numpy.float32)
            if convert_colors:                    
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if live:

            print("Staring live mode. Capturing video from webcam!")
            print("Press q to Quit")

            video_capture = cv2.VideoCapture("./data/videos/pair_360p_cut.mp4")
            time.sleep(1)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)    # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
            print("video_source dimensions = {} x {}".format(width, height))
                      
            # loop until user clicks 'q' to exit
            while True:

                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (640, 480))
                
                # flip image, because webcam inverts it and we trained the model the other way! 
                frame = cv2.flip(frame,1)
                image = _convert_frame(frame, convert_colors = False)
                
                # flip it back
                image = cv2.flip(image,1)

                cv2.imshow('Video', image)

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()
Example #15
    def convert(self, video_file, swap_model=False, duration=None,
                start_time=None, use_gan=False, face_filter=False, photos=True,
                crop_x=None, width=None, side_by_side=False, live=False,
                webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list="0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        if not model.load(swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors = True):
            if convert_colors:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if (not live and not webcam):
                        frame = frame.astype(numpy.float32)
            if convert_colors:                    
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV
            return frame
        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if (live):
            # generate dummy content for testing /dev/video1
            #ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
            print("Staring live mode. Capturing video from webcam!")
            print("Press q to Quit")
            # connect to webcam
            video_capture = cv2.VideoCapture(0)
            time.sleep(1)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)    # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
            print("webcam dimensions = {} x {}".format(width,height))
            
            #video_capture = cv2.VideoCapture('./data/videos/ale.mp4')
            if (webcam):
                # create fake webcam device
                camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
                camera.print_capabilities()
                print("Fake webcam created, try using appear.in on Firefox or  ")
          
            # loop until user clicks 'q' to exit
            while True:
                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (640, 480))
                # flip image, because webcam inverts it and we trained the model the other way! 
                frame = cv2.flip(frame,1)
                image = _convert_frame(frame, convert_colors = False)
                # flip it back
                image = cv2.flip(image,1)

                
                if (webcam):
                    time.sleep(1/30.0)
Example #16
    def process(self):
        # Original & LowMem models go with Adjust or Masked converter
        # Note: GAN prediction outputs a mask + an image, while the other models predict only an image
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter
        self.input_aligned_dir = None

        model = PluginLoader.get_model(model_name)(get_folder(
            self.arguments.model_dir))
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        input_aligned_dir = Path(self.arguments.input_dir) / Path('aligned')
        if self.arguments.input_aligned_dir is not None:
            input_aligned_dir = self.arguments.input_aligned_dir
        try:
            self.input_aligned_dir = [
                Path(path) for path in get_image_paths(input_aligned_dir)
            ]
            if len(self.input_aligned_dir) == 0:
                print(
                    'Aligned directory is empty, no faces will be converted!')
            elif len(self.input_aligned_dir) <= len(self.input_dir) / 3:
                print(
                    'Aligned directory contains an amount of images much less than the input, are you sure this is the right directory?'
                )
        except:
            print(
                'Aligned directory not found. All faces listed in the alignments file will be converted.'
            )

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            trainer=self.arguments.trainer,
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            match_histogram=self.arguments.match_histogram,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax.keys() else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliablyhacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example #17
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False,
                live=False,
                webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if (not live and not webcam):
                        frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if (live):
            # generate dummy content for testing /dev/video1
            #ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
            print("Staring live mode. Capturing video from webcam!")
            print("Press q to Quit")
            # connect to webcam
            video_capture = cv2.VideoCapture(0)
            time.sleep(1)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)   # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
            print("webcam dimensions = {} x {}".format(width, height))

            #video_capture = cv2.VideoCapture('./data/videos/ale.mp4')
            if (webcam):
                # create fake webcam device
                camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
                camera.print_capabilities()
                print(
                    "Fake webcam created, try using appear.in on Firefox or  ")

            # loop until user clicks 'q' to exit
            while True:
                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (640, 480))
                # flip image, because webcam inverts it and we trained the model the other way!
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                # flip it back
                image = cv2.flip(image, 1)

                if (webcam):
                    time.sleep(1 / 30.0)
                    # firefox needs RGB
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    # chrome and skype UYUV - not working at the moment
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)

                    camera.schedule_frame(image)
                    #print("writing to stream")

                else:
                    cv2.imshow('Video', image)
                    #print("writing to screen")

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            if not os.path.exists(self.OUTPUT_PATH):
                os.makedirs(self.OUTPUT_PATH)
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
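For the video branch, `video.fl(_convert_helper)` relies on moviepy's per-frame filter hook: `clip.fl(func)` calls `func(get_frame, t)` for each timestamp `t` and expects the transformed frame back, which is why `_convert_helper` has that exact signature. A minimal standalone sketch with a trivial transform in place of the face swap (file paths are placeholders):

    from moviepy.editor import VideoFileClip

    def _invert_helper(get_frame, t):
        # fl() supplies the clip's frame accessor and a timestamp;
        # return the transformed frame for time t
        frame = get_frame(t)
        return 255 - frame  # invert colors instead of swapping faces

    clip = VideoFileClip("input.mp4")       # placeholder path
    new_clip = clip.fl(_invert_helper)
    new_clip.write_videofile("output.mp4")  # placeholder path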