def write_remix_video(remix_filenames, video_filename):
    """Concatenate remix clips into one video and restore the original audio.

    Parameters
    ----------
    remix_filenames : list of str
        Paths of the clip files to concatenate, in playback order.
    video_filename : str
        Basename of the source video inside ``VIDEO_DIRECTORY``; the merged
        result is written to ``REMIX_DIRECTORY`` as ``remix-<video_filename>``.
    """
    # Concatenate clips together.
    video_file_clips = [
        VideoFileClip(remix_filename) for remix_filename in remix_filenames
    ]
    try:
        remix_video = concatenate_videoclips(video_file_clips)
        remix_filename = REMIX_DIRECTORY + 'remix-' + video_filename
        # Derive names by splitting on the extension rather than str.replace,
        # which substitutes the first 'mp4'/'.mp4' occurring ANYWHERE in the
        # name and corrupts paths like 'mymp4clip.mp4'.
        stem, ext = os.path.splitext(remix_filename)
        remix_filename_noaudio = stem + '-noaudio' + ext
        remix_video.write_videofile(remix_filename_noaudio)
        audio_filename = (AUDIO_DIRECTORY
                          + os.path.splitext(video_filename)[0] + '.mp3')
        ffmpeg_extract_audio(VIDEO_DIRECTORY + video_filename, audio_filename)
        ffmpeg_merge_video_audio(remix_filename_noaudio, audio_filename,
                                 remix_filename)
        # The silent intermediate is only needed for the merge step.
        os.remove(remix_filename_noaudio)
    finally:
        # moviepy keeps an ffmpeg reader open per clip; release them even
        # if concatenation or writing fails.
        for clip in video_file_clips:
            clip.close()
def merge_audio_and_video(videofname, audiofname, outputfile):
    """Mux *audiofname* onto *videofname*, writing the result to *outputfile*.

    Thin wrapper around moviepy's ffmpeg merge helper.
    """
    logger.debug("Merging video and audio data...")
    merge = ffmpeg_tools.ffmpeg_merge_video_audio
    merge(videofname, audiofname, outputfile)
def merge_audio_video(self):
    """Combine the rendered video with its audio track.

    Fetches the audio clip, muxes it onto the temporary video result, and
    removes the now-redundant audio file.
    """
    print("Merging audio and video.")
    self.get_audio_clip()
    audio_path = self.audiofile
    mv.ffmpeg_merge_video_audio(self.tempresult, audio_path, self.result)
    remove(audio_path)
def set_audio(self):
    """Write the concatenated audio to a .wav and mux it onto the video.

    Produces ``<outfile>_demo.avi`` from ``<outfile>.avi`` plus the freshly
    written ``<outfile>.wav``.
    """
    print("set audio...")
    waveform = self.__audio_concat()
    wav_path = self.outfile + ".wav"
    cis.wavwrite(wav_path, self.afps, waveform)
    ffmpeg_merge_video_audio(video=self.outfile + ".avi",
                             audio=wav_path,
                             output=self.outfile + "_demo.avi")
def ffwd_video(path_in, path_out, checkpoint_dir, device_t='/gpu:0', batch_size=4): video_clip = VideoFileClip(path_in, audio=False) # Create a temporary file to store the audio. fp = tempfile.NamedTemporaryFile(suffix='.aac') temp_audio_file_name = fp.name fp.close() # Create a temporary file to store the video. fp = tempfile.NamedTemporaryFile(suffix='.mp4') temp_video_file_name = fp.name fp.close() # Extract the audio. ffmpeg_tools.ffmpeg_extract_audio(path_in, temp_audio_file_name) video_writer = ffmpeg_writer.FFMPEG_VideoWriter( temp_video_file_name, video_clip.size, video_clip.fps, codec="libx264", preset="medium", audiofile=None, threads=None, ffmpeg_params=["-b:v", "2000k"]) g = tf.Graph() soft_config = tf.compat.v1.ConfigProto(allow_soft_placement=True) soft_config.gpu_options.allow_growth = True with g.as_default(), g.device(device_t), \ tf.compat.v1.Session(config=soft_config) as sess: batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3) img_placeholder = tf.compat.v1.placeholder(tf.float32, shape=batch_shape, name='img_placeholder') preds = src.transform.net(img_placeholder) saver = tf.compat.v1.train.Saver() if os.path.isdir(checkpoint_dir): ckpt = tf.train.get_checkpoint_state(checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: saver.restore(sess, ckpt.model_checkpoint_path) else: raise Exception("No checkpoint found...") else: saver.restore(sess, checkpoint_dir) X = np.zeros(batch_shape, dtype=np.float32) def style_and_write(count): for i in range(count, batch_size): X[i] = X[count - 1] # Use last frame to fill X _preds = sess.run(preds, feed_dict={img_placeholder: X}) for i in range(0, count): video_writer.write_frame( np.clip(_preds[i], 0, 255).astype(np.uint8)) frame_count = 0 # The frame count that written to X for frame in video_clip.iter_frames(): X[frame_count] = frame frame_count += 1 if frame_count == batch_size: style_and_write(frame_count) frame_count = 0 if frame_count != 0: style_and_write(frame_count) 
video_writer.close() # Merge audio and video ffmpeg_tools.ffmpeg_merge_video_audio(temp_video_file_name, temp_audio_file_name, path_out) # Delete temporary files os.remove(temp_video_file_name) os.remove(temp_audio_file_name)