def blackwhite_filter(self):
    self.mediaPlayer.pause()
    self.update_icon()
    clip = VideoFileClip(self.media_path)
    new_clip = clip.fx(vfx.blackwhite)
    self.put_media_to_history()
    new_clip.write_videofile(self.new_video_path())
    self.start_new_file()

def mirror_y(self):
    self.mediaPlayer.pause()
    self.update_icon()
    clip = VideoFileClip(self.media_path)
    new_clip = clip.fx(vfx.mirror_y)
    self.put_media_to_history()
    new_clip.write_videofile(self.new_video_path())
    self.start_new_file()

def change_speed(self):
    self.mediaPlayer.pause()
    self.update_icon()
    speed_coef = float(self.speed_line.text())
    clip = VideoFileClip(self.media_path)
    new_clip = clip.fx(vfx.speedx, speed_coef)
    self.put_media_to_history()
    new_clip.write_videofile(self.new_video_path())
    self.start_new_file()
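# The three methods above share one moviepy pattern: load the clip, apply an
# effect with clip.fx, and write the result to a new file.  A minimal
# standalone sketch of that pattern, assuming moviepy 1.x; the file paths
# here are placeholders, not ones used by the app.
from moviepy.editor import VideoFileClip, vfx

clip = VideoFileClip("input.mp4")                     # hypothetical input path
clip.fx(vfx.blackwhite).write_videofile("bw.mp4")     # grayscale
clip.fx(vfx.mirror_y).write_videofile("flipped.mp4")  # flip along the y axis
clip.fx(vfx.speedx, 2.0).write_videofile("fast.mp4")  # play twice as fast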
def test_FramesMatches_select_scenes(
    filename,
    subclip,
    match_threshold,
    min_time_span,
    nomatch_threshold,
    expected_result,
):
    video_clip = VideoFileClip(filename)
    if subclip is not None:
        video_clip = video_clip.subclip(subclip[0], subclip[1])
    clip = concatenate_videoclips([video_clip.fx(time_mirror), video_clip])
    result = FramesMatches.from_clip(clip, 10, 3, logger=None).select_scenes(
        match_threshold,
        min_time_span,
        nomatch_threshold=nomatch_threshold,
    )
    assert len(result) == len(expected_result)
    assert result == expected_result
def makeClipsBySentence(self, inputData):
    outputPath = "player/media/outputs/"
    processid = inputData[0]
    startindex = inputData[1]
    inputClipPaths = inputData[2]
    duration = inputData[3]
    clips = []
    if not os.path.isdir("player/media/outputs"):
        os.mkdir("player/media/outputs")
    if len(inputClipPaths) != len(duration):
        print("The number of sentences and the number of durations are not equal.")
        return -1

    # Merge clips (sentence units)
    for number in range(len(inputClipPaths)):
        outputFile = outputPath + str(startindex) + ".mp4"
        inputClips = inputClipPaths[number]
        self.mergeClips(inputClips, outputFile, processid)
        startindex += 1
    print("First merge completed")

    startindex = inputData[1]
    # Change durations: retime each merged clip so it lasts exactly as long
    # as the corresponding sentence.
    for number in range(len(duration)):
        inputFile = outputPath + str(startindex) + ".mp4"
        clip = VideoFileClip(inputFile)
        clip = clip.fx(vfx.speedx, final_duration=float(duration[number]))
        clips.append(clip)
        startindex += 1

    startindex = inputData[1]
    # finalVideo = concatenate_videoclips(clips)
    for number in range(len(duration)):
        clips[number].write_videofile(outputPath + "_" + str(startindex) + ".mp4")
        startindex += 1
    print("Change durations completed")
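# makeClipsBySentence relies on a mergeClips helper that is not shown here.
# A purely hypothetical sketch of what such a helper might do, assuming it
# concatenates the per-fragment clips of one sentence into a single file with
# moviepy 1.x; the signature mirrors the call above and processid is unused
# in this sketch.
import os

from moviepy.editor import VideoFileClip, concatenate_videoclips


def mergeClips(inputClips, outputFile, processid):
    # Load every fragment belonging to one sentence and join them end to end.
    parts = [VideoFileClip(path) for path in inputClips]
    merged = concatenate_videoclips(parts)
    merged.write_videofile(outputFile)
    for part in parts:
        part.close()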
def test_FramesMatches_write_gifs(util):
    video_clip = VideoFileClip("media/chaplin.mp4").subclip(0, 0.2)
    clip = concatenate_videoclips([video_clip.fx(time_mirror), video_clip])

    # add a matching frame starting at start < clip.start, which should be ignored
    matching_frames = FramesMatches.from_clip(clip, 10, 3, logger=None)
    matching_frames.insert(0, FramesMatch(-1, -0.5, 0, 0))

    matching_frames = matching_frames.select_scenes(
        1,
        0.01,
        nomatch_threshold=0,
    )

    gifs_dir = os.path.join(util.TMP_DIR, "moviepy_FramesMatches_write_gifs")
    if os.path.isdir(gifs_dir):
        shutil.rmtree(gifs_dir)
    os.mkdir(gifs_dir)
    assert os.path.isdir(gifs_dir)

    matching_frames.write_gifs(clip, gifs_dir, logger=None)

    gifs_filenames = os.listdir(gifs_dir)
    assert len(gifs_filenames) == 7

    for filename in gifs_filenames:
        filepath = os.path.join(gifs_dir, filename)
        assert os.path.isfile(filepath)

        with open(filepath, "rb") as f:
            assert len(f.readline())

        end, start = filename.split(".")[0].split("_")
        end, start = (int(end), int(start))
        assert isinstance(end, int)
        assert isinstance(start, int)

    shutil.rmtree(gifs_dir)
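# For context, the API the two tests above exercise: FramesMatches scans a
# clip for pairs of near-identical frames, select_scenes keeps the pairs that
# can form a seamless loop, and write_gifs renders each selected scene as a
# looping GIF.  A minimal usage sketch, assuming the moviepy development API
# these tests target (select_scenes/write_gifs are not in every release);
# "media/chaplin.mp4" is the same sample file the test uses.
import os

from moviepy.editor import VideoFileClip
from moviepy.video.tools.cuts import FramesMatches

clip = VideoFileClip("media/chaplin.mp4").subclip(0, 3)
matches = FramesMatches.from_clip(clip, 10, 3)  # distance threshold, max duration
scenes = matches.select_scenes(1, 0.5)          # match threshold, min time span
os.makedirs("loop_gifs", exist_ok=True)
scenes.write_gifs(clip, "loop_gifs")            # one looping GIF per scene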
media_path = self._video_path({'name': video_file})
if not photos:
    # Process video; start loading the video clip
    video = VideoFileClip(media_path)

    # If a duration is set, trim clip
    if duration:
        video = video.subclip(start_time, start_time + duration)

    # Resize clip before processing
    if width:
        video = video.resize(width=width)

    # Crop clip if desired
    if crop_x:
        video = video.fx(crop, x2=video.w / 2)

    # Kick off the frame-by-frame conversion
    new_video = video.fl(_convert_helper)

    # Stack clips side by side
    if side_by_side:
        def add_caption(caption, clip):
            text = (TextClip(caption,
                             font='Amiri-regular',
                             color='white',
                             fontsize=80)
                    .margin(40)
                    .set_duration(clip.duration)
                    .on_color(color=(0, 0, 0), col_opacity=0.6))
            return CompositeVideoClip([clip, text])

        video = add_caption("Original", video)
        new_video = add_caption("Swapped", new_video)
        final_video = clips_array([[video], [new_video]])
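# The side-by-side branch above composites a caption over each clip, then
# stacks the two clips vertically.  A standalone sketch of that composition,
# assuming moviepy 1.x with ImageMagick available for TextClip; clip_a and
# clip_b are placeholder clips standing in for the original and swapped video.
from moviepy.editor import (TextClip, CompositeVideoClip, clips_array,
                            ColorClip)

clip_a = ColorClip((640, 360), color=(50, 50, 50), duration=2)
clip_b = ColorClip((640, 360), color=(0, 80, 120), duration=2)


def add_caption(caption, clip):
    # Semi-transparent label composited over the clip for its full duration.
    text = (TextClip(caption, font='Amiri-regular', color='white', fontsize=80)
            .margin(40)
            .set_duration(clip.duration)
            .on_color(color=(0, 0, 0), col_opacity=0.6))
    return CompositeVideoClip([clip, text])


stacked = clips_array([[add_caption("Original", clip_a)],
                       [add_caption("Swapped", clip_b)]])
stacked.write_videofile("side_by_side.mp4", fps=24)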
def convert(self, video_file, swap_model=False, duration=None,
            start_time=None, use_gan=False, face_filter=False, photos=True,
            crop_x=None, width=None, side_by_side=False, live=False):
    # Magic incantation to not have tensorflow blow up with an out of memory error.
    import tensorflow as tf
    import keras.backend.tensorflow_backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    K.set_session(tf.Session(config=config))

    # Load model
    model_name = "Original"
    converter_name = "Masked"
    if use_gan:
        model_name = "GAN"
        converter_name = "GAN"

    # -----------------------------------------------------------
    # FIXING THE BUG with model loading:
    #   model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
    #   TypeError: __init__() takes exactly 3 arguments (2 given)
    # In the faceit_live module get_model resolves to
    # plugins.Model_Original.Model, but here it resolves to
    # plugins.Model_Original.Model.Model, so the class is imported directly
    # below (a crutch, but it works).
    # -----------------------------------------------------------
    model = PluginLoader._import("Model_LIVE", "Model_LIVE")(
        Path(self._model_path(use_gan)))
    if not model.load(swap_model):
        print('Model not found! A valid model must be provided to continue!')
        exit(1)
    print('Checkpoint_1 ... Model loaded')

    # -----------------------------------------------------------
    # FIXING THE BUG with converter loading (same workaround; get_converter
    # resolves to plugins.Convert_Masked.Convert in both modules yet still
    # crashes here, so the converter class is imported directly as well):
    #   converter = PluginLoader.get_converter(converter_name)  # ==> crash
    # -----------------------------------------------------------
    converter = PluginLoader._import("Convert", "Convert_Masked_LIVE")
    converter = converter(model.converter(False),
                          blur_size=8,
                          seamless_clone=True,
                          mask_type="facehullandrect",
                          erosion_kernel_size=None,
                          smooth_mask=True,
                          avg_color_adjust=True)
    print('Checkpoint_2 ... Converter loaded')

    # Load face filter
    filter_person = self._person_a
    if swap_model:
        filter_person = self._person_b
    filter = FaceFilter_LIVE(self._people[filter_person]['faces'])

    # Define conversion method per frame
    def _convert_frame(frame, convert_colors=True):
        # if convert_colors:
        #     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        DEBUG_MODE = 0
        for face in detect_faces_LIVE(frame, "cnn"):
            if DEBUG_MODE:
                print('Got face!')
                # face exposes: image, x, y, w, h, landmarks
                print('Face geometry: ({},{},{},{})'.format(
                    face.x, face.y, face.w, face.h))
                print('Face landmarks: {}'.format(face.landmarks))
                cv2.imshow('Face', face.image)
                continue
            if (not face_filter) or (face_filter and filter.check(face)):
                frame = converter.patch_image(frame, face)
                if not live:
                    frame = frame.astype(numpy.float32)
        # if convert_colors:
        #     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame

    def _convert_helper(get_frame, t):
        return _convert_frame(get_frame(t))

    # ===================================================
    if live:
        print('Starting live mode ...')
        print('Press "Q" to Quit')

        PATH_TO_VIDEO = './data/videos/emma_360_cut.mp4'
        if TEST_2_FACES_FLAG:
            # PATH_TO_VIDEO = './_data/videos/pair_360p_original.mp4'
            PATH_TO_VIDEO = './data/videos/pair_360p_cut.mp4'

        video_capture = cv2.VideoCapture(PATH_TO_VIDEO)
        width = video_capture.get(3)   # float
        height = video_capture.get(4)  # float
        print("video dimensions = {} x {}".format(width, height))

        while 1:
            ret, frame = video_capture.read()  # frame: e.g. (360, 640, 3), uint8
            if not ret:
                print("RET IS NONE ... I'M QUIT")
                video_capture.release()
                break
            print('HANDLING NEW FRAME ...')

            if CROP_HALF_OF_FRAME == 'left':
                frame[:, 0:frame.shape[1] // 2] = 0  # blank out the left half

            # (Run the body of the `try` below without the try/except wrapper
            # to surface actual conversion errors while debugging.)
            try:
                # With flip: the webcam inverts the picture and the model was
                # trained the other way, so flip, convert, then flip back.
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                print('GOT AN IMAGE!')
                frame = cv2.flip(frame, 1)
                image = cv2.flip(image, 1)
            except Exception:
                try:
                    # Without flip:
                    image = _convert_frame(frame, convert_colors=False)
                    print('GOT AN IMAGE!')
                except Exception:
                    print("HMM ... CONVERSION FAILED ... I'M QUIT")
                    continue

            cv2.imshow('Video', image)
            cv2.imshow('Original', frame)

            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("KEYBOARD INTERRUPT ... I'M QUIT")
                video_capture.release()
                break

        cv2.destroyAllWindows()
        exit()
    # ===================================================

    media_path = self._video_path({'name': video_file})
    if not photos:
        # Process video; start loading the video clip
        video = VideoFileClip(media_path)

        # If a duration is set, trim clip
        if duration:
            video = video.subclip(start_time, start_time + duration)

        # Resize clip before processing
        if width:
            video = video.resize(width=width)

        # Crop clip if desired
        if crop_x:
            video = video.fx(crop, x2=video.w / 2)

        # Kick off the frame-by-frame conversion
        new_video = video.fl(_convert_helper)

        # Stack clips side by side
        if side_by_side:
            def add_caption(caption, clip):
                text = (TextClip(caption,
                                 font='Amiri-regular',
                                 color='white',
                                 fontsize=80)
                        .margin(40)
                        .set_duration(clip.duration)
                        .on_color(color=(0, 0, 0), col_opacity=0.6))
                return CompositeVideoClip([clip, text])

            video = add_caption("Original", video)
            new_video = add_caption("Swapped", new_video)
            final_video = clips_array([[video], [new_video]])
        else:
            final_video = new_video

        # Resize clip after processing
        # final_video = final_video.resize(width=(480 * 2))

        # Write video
        if not os.path.exists(os.path.join(self.OUTPUT_PATH)):
            os.makedirs(self.OUTPUT_PATH)
        output_path = os.path.join(self.OUTPUT_PATH, video_file)
        final_video.write_videofile(output_path, rewrite_audio=True)

        # Clean up
        del video
        del new_video
        del final_video
    else:
        # Process a directory of photos
        for face_file in os.listdir(media_path):
            face_path = os.path.join(media_path, face_file)
            image = cv2.imread(face_path)
            image = _convert_frame(image, convert_colors=False)
            cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
def convert(self, video_file, swap_model=False, duration=None,
            start_time=None, use_gan=False, face_filter=False, photos=True,
            crop_x=None, width=None, side_by_side=False):
    # Magic incantation to not have tensorflow blow up with an out of memory error.
    import tensorflow as tf
    import keras.backend.tensorflow_backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    K.set_session(tf.Session(config=config))

    # Load model
    model_name = "Original"
    converter_name = "Masked"
    if use_gan:
        model_name = "GAN"
        converter_name = "GAN"
    model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
    if not model.load(swap_model):
        print('Model not found! A valid model must be provided to continue!')
        exit(1)

    # Load converter
    converter = PluginLoader.get_converter(converter_name)
    converter = converter(model.converter(False),
                          blur_size=8,
                          seamless_clone=True,
                          mask_type="facehullandrect",
                          erosion_kernel_size=None,
                          smooth_mask=True,
                          avg_color_adjust=True)

    # Load face filter
    filter_person = self._person_a
    if swap_model:
        filter_person = self._person_b
    filter = FaceFilter(self._people[filter_person]['faces'])

    # Define conversion method per frame
    def _convert_frame(frame, convert_colors=True):
        if convert_colors:
            # OpenCV delivers BGR; the model expects RGB
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        for face in detect_faces(frame, "cnn"):
            if (not face_filter) or (face_filter and filter.check(face)):
                frame = converter.patch_image(frame, face)
                frame = frame.astype(numpy.float32)
        if convert_colors:
            # swap the channels back for OpenCV (BGR<->RGB is symmetric)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame

    def _convert_helper(get_frame, t):
        return _convert_frame(get_frame(t))

    media_path = self._video_path({'name': video_file})
    if not photos:
        # Process video; start loading the video clip
        video = VideoFileClip(media_path)

        # If a duration is set, trim clip
        if duration:
            video = video.subclip(start_time, start_time + duration)

        # Resize clip before processing
        if width:
            video = video.resize(width=width)

        # Crop clip if desired
        if crop_x:
            video = video.fx(crop, x2=video.w / 2)

        # Kick off the frame-by-frame conversion
        new_video = video.fl(_convert_helper)

        # Stack clips side by side
        if side_by_side:
            def add_caption(caption, clip):
                text = (TextClip(caption,
                                 font='Amiri-regular',
                                 color='white',
                                 fontsize=80)
                        .margin(40)
                        .set_duration(clip.duration)
                        .on_color(color=(0, 0, 0), col_opacity=0.6))
                return CompositeVideoClip([clip, text])

            video = add_caption("Original", video)
            new_video = add_caption("Swapped", new_video)
            final_video = clips_array([[video], [new_video]])
        else:
            final_video = new_video

        # Resize clip after processing
        # final_video = final_video.resize(width=(480 * 2))

        # Write video
        output_path = os.path.join(self.OUTPUT_PATH, video_file)
        final_video.write_videofile(output_path, rewrite_audio=True)

        # Clean up
        del video
        del new_video
        del final_video
    else:
        # Process a directory of photos
        for face_file in os.listdir(media_path):
            face_path = os.path.join(media_path, face_file)
            image = cv2.imread(face_path)
            image = _convert_frame(image, convert_colors=False)
            cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
def convert(self, video_file, swap_model=False, duration=None,
            start_time=None, use_gan=False, face_filter=False, photos=True,
            crop_x=None, width=None, side_by_side=False, live=False,
            webcam=False):
    # Magic incantation to not have tensorflow blow up with an out of memory error.
    import tensorflow as tf
    import keras.backend.tensorflow_backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    K.set_session(tf.Session(config=config))

    # Load model
    model_name = "Original"
    converter_name = "Masked"
    if use_gan:
        model_name = "GAN"
        converter_name = "GAN"
    model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
    if not model.load(swap_model):
        print('Model not found! A valid model must be provided to continue!')
        exit(1)

    # Load converter
    converter = PluginLoader.get_converter(converter_name)
    converter = converter(model.converter(False),
                          blur_size=8,
                          seamless_clone=True,
                          mask_type="facehullandrect",
                          erosion_kernel_size=None,
                          smooth_mask=True,
                          avg_color_adjust=True)

    # Load face filter
    filter_person = self._person_a
    if swap_model:
        filter_person = self._person_b
    filter = FaceFilter(self._people[filter_person]['faces'])

    # Define conversion method per frame
    def _convert_frame(frame, convert_colors=True):
        if convert_colors:
            # OpenCV delivers BGR; the model expects RGB
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        for face in detect_faces(frame, "cnn"):
            if (not face_filter) or (face_filter and filter.check(face)):
                frame = converter.patch_image(frame, face)
                if not live and not webcam:
                    frame = frame.astype(numpy.float32)
        if convert_colors:
            # swap the channels back for OpenCV (BGR<->RGB is symmetric)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame

    def _convert_helper(get_frame, t):
        return _convert_frame(get_frame(t))

    if live:
        # To generate dummy content for testing /dev/video1:
        # ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
        print("Starting live mode. Capturing video from webcam!")
        print("Press q to Quit")

        # connect to webcam
        video_capture = cv2.VideoCapture(0)
        time.sleep(1)

        width = video_capture.get(3)   # float
        height = video_capture.get(4)  # float
        print("webcam dimensions = {} x {}".format(width, height))
        # video_capture = cv2.VideoCapture('./data/videos/ale.mp4')

        if webcam:
            # create fake webcam device
            camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
            camera.print_capabilities()
            print("Fake webcam created, try using appear.in on Firefox or ")

        # loop until the user hits 'q' to exit
        while True:
            ret, frame = video_capture.read()
            if not ret:  # guard against a dropped frame before resizing
                break
            frame = cv2.resize(frame, (640, 480))
            # flip image, because webcam inverts it and we trained the model the other way!
            frame = cv2.flip(frame, 1)
            image = _convert_frame(frame, convert_colors=False)
            # flip it back
            image = cv2.flip(image, 1)

            if webcam:
                time.sleep(1 / 30.0)
                # firefox needs RGB
                # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                # chrome and skype need UYUV - not working at the moment
                # image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
                camera.schedule_frame(image)
                # print("writing to stream")
            else:
                cv2.imshow('Video', image)
                # print("writing to screen")

            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                video_capture.release()
                break

        cv2.destroyAllWindows()
        exit()

    media_path = self._video_path({'name': video_file})
    if not photos:
        # Process video; start loading the video clip
        video = VideoFileClip(media_path)

        # If a duration is set, trim clip
        if duration:
            video = video.subclip(start_time, start_time + duration)

        # Resize clip before processing
        if width:
            video = video.resize(width=width)

        # Crop clip if desired
        if crop_x:
            video = video.fx(crop, x2=video.w / 2)

        # Kick off the frame-by-frame conversion
        new_video = video.fl(_convert_helper)

        # Stack clips side by side
        if side_by_side:
            def add_caption(caption, clip):
                text = (TextClip(caption,
                                 font='Amiri-regular',
                                 color='white',
                                 fontsize=80)
                        .margin(40)
                        .set_duration(clip.duration)
                        .on_color(color=(0, 0, 0), col_opacity=0.6))
                return CompositeVideoClip([clip, text])

            video = add_caption("Original", video)
            new_video = add_caption("Swapped", new_video)
            final_video = clips_array([[video], [new_video]])
        else:
            final_video = new_video

        # Resize clip after processing
        # final_video = final_video.resize(width=(480 * 2))

        # Write video
        if not os.path.exists(os.path.join(self.OUTPUT_PATH)):
            os.makedirs(self.OUTPUT_PATH)
        output_path = os.path.join(self.OUTPUT_PATH, video_file)
        final_video.write_videofile(output_path, rewrite_audio=True)

        # Clean up
        del video
        del new_video
        del final_video
    else:
        # Process a directory of photos
        for face_file in os.listdir(media_path):
            face_path = os.path.join(media_path, face_file)
            image = cv2.imread(face_path)
            image = _convert_frame(image, convert_colors=False)
            cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
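# The webcam branch above pushes converted frames into a v4l2loopback device
# via pyfakewebcam so the swapped feed can be used by video-chat apps.  A
# minimal end-to-end sketch of that pipe, assuming a loopback device exists
# at /dev/video2 (e.g. created with `modprobe v4l2loopback video_nr=2`) and a
# real camera at index 0; both device paths are assumptions, not the app's.
import time

import cv2
import pyfakewebcam

real_cam = cv2.VideoCapture(0)
fake_cam = pyfakewebcam.FakeWebcam('/dev/video2', 640, 480)

while True:
    ret, frame = real_cam.read()
    if not ret:
        break
    frame = cv2.resize(frame, (640, 480))
    # pyfakewebcam expects RGB frames; OpenCV delivers BGR.
    fake_cam.schedule_frame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    time.sleep(1 / 30.0)  # pace output to roughly 30 fps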