Example #1
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)
            
            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:
                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80)
                            .margin(40)
                            .set_duration(clip.duration)
                            .on_color(color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])
                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)                
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video
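
This fragment assumes the moviepy 1.x API and a per-frame conversion hook (_convert_helper, defined in the fuller examples below). A minimal sketch of the imports it relies on, stated as an assumption about the surrounding module:

from moviepy.editor import (VideoFileClip, TextClip,
                            CompositeVideoClip, clips_array)
from moviepy.video.fx.all import crop  # used via video.fx(crop, ...)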
Example #2
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False):
        # Magic incantation so TensorFlow doesn't blow up with an out-of-memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))
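        # Note: tf.ConfigProto and tf.Session are TensorFlow 1.x APIs; on a
        # TensorFlow 2.x port (an assumption, not this code base) the
        # equivalent knob would be tf.config.experimental.set_memory_growth.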

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model not found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
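        # (Kwargs below, as their names suggest: mask-edge blur size, OpenCV
        # seamless cloning, a face-hull-plus-rectangle mask, no erosion, and
        # average-color adjustment of the swapped patch.)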
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_RGB2BGR)  # convert back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
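            # moviepy's clip.fl() calls this hook with (get_frame, t); the
            # converted frame it returns becomes the corresponding output frame.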
            return _convert_frame(get_frame(t))

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80)
                            .margin(40)
                            .set_duration(clip.duration)
                            .on_color(color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
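
A hypothetical driver for this method might look like the following sketch. Only convert() appears in these examples, so the enclosing class name (FaceIt) and its constructor signature are assumptions; the model and video names are borrowed from the comments in Example #3.

# Hypothetical usage sketch; FaceIt and its constructor are assumptions.
faceit = FaceIt('emma_to_jade', 'emma', 'jade')
faceit.convert('emma_360_cut.mp4',
               photos=False,       # convert a video, not a photo directory
               duration=30,        # keep 30 seconds from start_time
               start_time=0,
               side_by_side=True)  # stack original and swapped clips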
Example #3
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False,
                live=False):

        # Magic incantation so TensorFlow doesn't blow up with an out-of-memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"

        # -----------------------------------------------------------
        # FIXING THE BUG with Model loading:
        # model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        # TypeError: __init__() takes exactly 3 arguments (2 given)
        # -----------------------------------------------------------

        # tmp_1 = PluginLoader.get_model(model_name)
        # tmp_1 = PluginLoader._import("Model_LIVE", "Model_LIVE") # this works (though it's a crutch)
        # tmp_2 = Path(self._model_path(use_gan)) # models/emma_to_jade
        # print('\n\n\n{}\n{}\n{}\n{}\n\n\n'.format(tmp_1, type(tmp_1), tmp_2, type(tmp_2)))
        # sys.exit(0)

        # values in faceit_live module:
        # plugins.Model_Original.Model
        # <type 'classobj'>
        # models/emma_to_jade
        # <class 'pathlib.PosixPath'>

        # values here:
        # plugins.Model_Original.Model.Model
        # <type 'classobj'>
        # models/emma_to_jade
        # <class 'pathlib.PosixPath'>
        # -----------------------------------------------------------

        # model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan))) # ==> crash
        model = PluginLoader._import("Model_LIVE", "Model_LIVE")(Path(
            self._model_path(use_gan)))

        # print('\n\n\n{}\n\n\n'.format(self._model_path(use_gan))) # e.g. models/test_2_faces
        # sys.exit(0)

        if not model.load(swap_model):
            print(
                'Model not found! A valid model must be provided to continue!')
            exit(1)

        print('Checkpoint_1 ... Model loaded')

        # -----------------------------------------------------------
        # FIXING THE BUG with Converter loading:
        # -----------------------------------------------------------

        # tmp_1 = PluginLoader.get_converter(converter_name)
        # tmp_1 = PluginLoader._import("Convert", "Convert_Masked_LIVE")
        # print('\n\n\n{}\n{}\n\n\n'.format(tmp_1, type(tmp_1)))
        # sys.exit(0)

        # faceit_live module:
        # plugins.Convert_Masked.Convert
        # <type 'classobj'>

        # here:
        # plugins.Convert_Masked.Convert
        # <type 'classobj'>
        # -----------------------------------------------------------

        # Load converter
        # converter = PluginLoader.get_converter(converter_name) # ==> crash
        converter = PluginLoader._import("Convert", "Convert_Masked_LIVE")
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        print('Checkpoint_2 ... Converter loaded')

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter_LIVE(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            # if convert_colors:
            #     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # color conversion disabled in this live variant

            DEBUG_MODE = 0
            for face in detect_faces_LIVE(frame, "cnn"):

                if DEBUG_MODE:
                    print('Got face!')
                    # print(dir(face)) # image, x, y, w, h, landmarks
                    print('Face geometry: ({},{},{},{})'.format(
                        face.x, face.y, face.w, face.h))
                    print('Face landmarks: {}'.format(face.landmarks))

                    cv2.imshow('Face', face.image)
                    continue

                if (not face_filter) or (face_filter and filter.check(face)):

                    # if 1:
                    #     print(dir(face.landmarks))
                    #     face.landmarks = []

                    frame = converter.patch_image(frame, face)
                    if not live:
                        frame = frame.astype(numpy.float32)

            # if convert_colors:
            #     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # conversion back disabled as well

            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        # ===================================================
        if live:

            print('Starting live mode ...')
            print('Press "Q" to Quit')

            PATH_TO_VIDEO = './data/videos/emma_360_cut.mp4'

            if TEST_2_FACES_FLAG:
                # PATH_TO_VIDEO = './_data/videos/pair_360p_original.mp4'
                PATH_TO_VIDEO = './data/videos/pair_360p_cut.mp4'

            video_capture = cv2.VideoCapture(PATH_TO_VIDEO)

            width = video_capture.get(3)  # float
            height = video_capture.get(4)  # float
            print("video dimensions = {} x {}".format(width, height))

            while True:

                ret, frame = video_capture.read()
                # print(frame.shape, frame.dtype) # (360, 640, 3), uint8

                # Check the read result before touching the frame below
                if not ret:
                    print("RET IS NONE ... QUITTING")
                    video_capture.release()
                    break

                # frame = cv2.resize(frame, (640, 480))

                print('HANDLING NEW FRAME ...')

                if CROP_HALF_OF_FRAME == 'left':
                    # black out the left half of the frame (integer division;
                    # a float slice index raises TypeError on Python 3)
                    frame[:, 0:frame.shape[1] // 2] = 0
                # elif CROP_HALF_OF_FRAME == 'right':
                #     pass

                # Run this block once without try/except so real errors surface:
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                print('GOT AN IMAGE!')
                frame = cv2.flip(frame, 1)
                image = cv2.flip(image, 1)

                try:  # with flip:

                    # flip image, because webcam inverts it and we trained the model the other way!
                    frame = cv2.flip(frame, 1)

                    image = _convert_frame(frame, convert_colors=False)
                    print('GOT AN IMAGE!')

                    # flip it back
                    frame = cv2.flip(frame, 1)
                    image = cv2.flip(image, 1)

                except Exception:

                    try:  # without flip:

                        image = _convert_frame(frame, convert_colors=False)
                        print('GOT AN IMAGE!')

                    except Exception:

                        print("HMM ... CONVERTATION FAILED ... I'M QUIT")
                        continue
                        # video_capture.release()
                        # break

                cv2.imshow('Video', image)
                cv2.imshow('Original', frame)

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    print("KEYBOARD INTERRUPT ... I'M QUIT")
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()
        # ===================================================

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80)
                            .margin(40)
                            .set_duration(clip.duration)
                            .on_color(color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            if not os.path.exists(self.OUTPUT_PATH):
                os.makedirs(self.OUTPUT_PATH)
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
Example #4
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False,
                live=False,
                webcam=False):
        # Magic incantation so TensorFlow doesn't blow up with an out-of-memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model not found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if not live and not webcam:
                        frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_RGB2BGR)  # convert back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if live:
            # generate dummy content for testing /dev/video1
            #ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
            print("Staring live mode. Capturing video from webcam!")
            print("Press q to Quit")
            # connect to webcam
            video_capture = cv2.VideoCapture(0)
            time.sleep(1)

            width = video_capture.get(3)  # float
            height = video_capture.get(4)  # float
            print("webcam dimensions = {} x {}".format(width, height))

            #video_capture = cv2.VideoCapture('./data/videos/ale.mp4')
            if webcam:
                # create fake webcam device
                camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
                camera.print_capabilities()
                print(
                    "Fake webcam created, try using appear.in on Firefox or ...")

            # loop until user clicks 'q' to exit
            while True:
                ret, frame = video_capture.read()
                if not ret:
                    break
                frame = cv2.resize(frame, (640, 480))
                # flip image, because webcam inverts it and we trained the model the other way!
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                # flip it back
                image = cv2.flip(image, 1)

                if webcam:
                    time.sleep(1 / 30.0)
                    # firefox needs RGB
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    # chrome and skype UYUV - not working at the moment
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)

                    camera.schedule_frame(image)
                    #print("writing to stream")

                else:
                    cv2.imshow('Video', image)
                    #print("writing to screen")

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80)
                            .margin(40)
                            .set_duration(clip.duration)
                            .on_color(color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            if not os.path.exists(self.OUTPUT_PATH):
                os.makedirs(self.OUTPUT_PATH)
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
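
The fake-webcam path above presumes /dev/video1 is a v4l2loopback device (typically created with sudo modprobe v4l2loopback devices=1). A self-contained sketch of the pyfakewebcam pattern used here, with the device path and dimensions as assumptions:

# Minimal pyfakewebcam sketch; /dev/video1 must be a v4l2loopback device.
import time

import numpy as np
import pyfakewebcam

camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)

frame = np.zeros((480, 640, 3), dtype=np.uint8)
frame[:, :, 0] = 255  # solid red test frame (pyfakewebcam expects RGB)

while True:
    camera.schedule_frame(frame)
    time.sleep(1 / 30.0)  # pace the stream at roughly 30 fps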