def generate(self, avatars, text, usernames, kwargs):
    name = uuid.uuid4().hex + '.gif'

    @after_this_request
    def remove(response):  # pylint: disable=W0612
        try:
            os.remove(name)
        except (FileNotFoundError, OSError, PermissionError):
            pass
        return response

    clip = VideoFileClip("assets/kowalski/kowalski.gif")
    text = TextClip(text, fontsize=36, method='caption', size=(245, None),
                    align='West', color='black', stroke_color='black',
                    stroke_width=1, font='Verdana').set_duration(clip.duration)
    text = text.set_position((340, 65)).set_duration(clip.duration)
    text = rotate(text, angle=10, resample='bilinear')
    video = CompositeVideoClip([clip, text]).set_duration(clip.duration)
    video.write_gif(name)
    clip.close()
    video.close()
    return send_file(name, mimetype='image/gif')
def make_crab(self, t, u_id):
    """Non-blocking crab rave video generation from the DankMemer bot

    https://github.com/DankMemer/meme-server/blob/master/endpoints/crab.py
    """
    fp = str(cog_data_path(self) / "Verdana.ttf")
    clip = VideoFileClip(str(cog_data_path(self)) + "/template.mp4")
    text = TextClip(t[0], fontsize=48, color="white", font=fp)
    text2 = (
        TextClip("____________________", fontsize=48, color="white", font=fp)
        .set_position(("center", 210))
        .set_duration(15.4)
    )
    text = text.set_position(("center", 200)).set_duration(15.4)
    text3 = (
        TextClip(t[1], fontsize=48, color="white", font=fp)
        .set_position(("center", 270))
        .set_duration(15.4)
    )
    video = CompositeVideoClip(
        [clip, text.crossfadein(1), text2.crossfadein(1), text3.crossfadein(1)]
    ).set_duration(15.4)
    video.write_videofile(
        str(cog_data_path(self)) + f"/{u_id}crabrave.mp4",
        threads=1,
        preset="superfast",
        verbose=False,
        logger=None,
        temp_audiofile=str(cog_data_path(self) / "crabraveaudio.mp3"),
    )
    clip.close()
    video.close()
    return True
def get_output(video_path,
               out_filename,
               label,
               fps=30,
               font_size=20,
               font_color='white',
               resize_algorithm='bicubic',
               use_frames=False):
    """Get demo output using ``moviepy``.

    This function will generate a video or gif file from raw video or frames
    by using ``moviepy``. For more information on some parameters, you can
    refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path or the rawframes directory path.
            If ``use_frames`` is set to True, it should be the rawframes
            directory path. Otherwise, it should be the video file path.
        out_filename (str): Output filename for the generated file.
        label (str): Predicted label of the generated file.
        fps (int): Number of picture frames to read per second. Default: 30.
        font_size (int): Font size of the label. Default: 20.
        font_color (str): Font color of the label. Default: 'white'.
        resize_algorithm (str): The algorithm used for resizing.
            Default: 'bicubic'. For more information, see
            https://ffmpeg.org/ffmpeg-scaler.html.
        use_frames (bool): Whether to use rawframes as input. Default: False.
    """
    try:
        from moviepy.editor import (ImageSequenceClip, TextClip,
                                    VideoFileClip, CompositeVideoClip)
    except ImportError:
        raise ImportError('Please install moviepy to enable output file.')

    if use_frames:
        frame_list = sorted(
            [osp.join(video_path, x) for x in os.listdir(video_path)])
        video_clips = ImageSequenceClip(frame_list, fps=fps)
    else:
        video_clips = VideoFileClip(
            video_path, resize_algorithm=resize_algorithm)
    duration_video_clip = video_clips.duration

    text_clips = TextClip(label, fontsize=font_size, color=font_color)
    text_clips = (
        text_clips.set_position(
            ('right', 'bottom'),
            relative=True).set_duration(duration_video_clip))
    video_clips = CompositeVideoClip([video_clips, text_clips])

    out_type = osp.splitext(out_filename)[1][1:]
    if out_type == 'gif':
        video_clips.write_gif(out_filename)
    else:
        video_clips.write_videofile(out_filename, remove_temp=True)
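# A minimal call sketch for get_output() above (not part of the original
# code): 'demo.mp4', 'data/demo_frames' and the label are placeholder inputs,
# and moviepy plus ImageMagick must be installed for TextClip to render.
get_output('demo.mp4', 'demo_out.gif', label='arm wrestling', font_size=24)

# Rawframe input: the sorted frame files are read back at the given fps.
get_output('data/demo_frames', 'demo_out.mp4', label='arm wrestling',
           fps=24, use_frames=True)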
def render(self, talk, act, exp, move, render_video, dft_exp_dt=0.2):
    if self.cache_dir is not None:
        cache_video = '{}.mp4'.format(
            get_macro_act_key(talk, act, exp, move))
        cache_video = os.path.join(self.cache_dir, cache_video)
        if os.path.exists(cache_video):
            clip = VideoFileClip(cache_video)
            clip.write_videofile(render_video)
            return

    act_clip = self.act_assets[act]
    default_exp_clip = self.exp_assets['null']
    exp_clip = self.exp_assets[exp]

    if talk == '':
        clips = [
            act_clip,
            default_exp_clip.set_position(
                lambda t: (291, 160)).set_duration(dft_exp_dt)
        ]
    else:
        talk_clip = TextClip(talk,
                             font='data/SimHei.ttf',
                             color='green',
                             method='caption',
                             fontsize=30)
        clips = [
            act_clip,
            talk_clip.set_position(('center', 50)),
            default_exp_clip.set_position(
                lambda t: (291, 160)).set_duration(dft_exp_dt)
        ]

    clips.append(
        exp_clip.set_position(lambda t: (291, 160)).set_start(dft_exp_dt))
    ts = dft_exp_dt + exp_clip.duration
    if ts < act_clip.duration:
        clips.append(
            default_exp_clip.set_position(
                lambda t: (291, 160)).set_duration(
                    act_clip.duration - ts).set_start(ts))

    if move != 'null':
        move_clip = self.move_assets[move]
        clips.append(move_clip.set_position(('center', 650)))

    final_clip = CompositeVideoClip(clips).set_duration(act_clip.duration)
    final_clip.write_videofile(render_video)
def generate(self, avatars, text, usernames, kwargs):
    raise BadRequest(
        "Crab endpoint is disabled on flare's imgen instance. Use trustys crab rave cog or host your own imgen."
    )
    name = uuid.uuid4().hex + '.mp4'

    @after_this_request
    def remove(response):  # pylint: disable=W0612
        try:
            os.remove(name)
        except (FileNotFoundError, OSError, PermissionError):
            pass
        return response

    t = text.upper().replace(', ', ',').split(',')
    if len(t) != 2:
        raise BadRequest(
            'You must submit exactly two strings split by comma')
    if (not t[0] and not t[0].strip()) or (not t[1] and not t[1].strip()):
        raise BadRequest('Cannot render empty text')
    clip = VideoFileClip("assets/crab/template.mp4")
    text = TextClip(t[0], fontsize=48, color='white', font='Symbola')
    text2 = TextClip("____________________", fontsize=48, color='white', font='Verdana')\
        .set_position(("center", 210)).set_duration(15.4)
    text = text.set_position(("center", 200)).set_duration(15.4)
    text3 = TextClip(t[1], fontsize=48, color='white', font='Verdana')\
        .set_position(("center", 270)).set_duration(15.4)
    video = CompositeVideoClip([
        clip,
        text.crossfadein(1),
        text2.crossfadein(1),
        text3.crossfadein(1)
    ]).set_duration(15.4)
    video.write_videofile(name, threads=4, preset='superfast', verbose=False)
    clip.close()
    video.close()
    return send_file(name, mimetype='video/mp4')
def gencrabrave(self, t, filename):
    clip = VideoFileClip("crabtemplate.mp4")
    text = TextClip(t[0], fontsize=48, color='white', font='Verdana')
    text2 = TextClip("____________________", fontsize=48, color='white', font='Verdana')\
        .set_position(("center", 210)).set_duration(15.4)
    text = text.set_position(("center", 200)).set_duration(15.4)
    text3 = TextClip(t[1], fontsize=48, color='white', font='Verdana')\
        .set_position(("center", 270)).set_duration(15.4)
    video = CompositeVideoClip([
        clip,
        text.crossfadein(1),
        text2.crossfadein(1),
        text3.crossfadein(1)
    ]).set_duration(15.4)
    video.write_videofile(filename, threads=25, preset='superfast', verbose=False)
    clip.close()
    video.close()
def annotate(chip, txt, txt_color='white', font_size=20):
    txt_clip = TextClip(txt, fontsize=font_size, color=txt_color)
    cvc = CompositeVideoClip(
        [chip, txt_clip.set_position(("center", "bottom"))])
    return cvc.set_duration(chip.duration)
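# A minimal usage sketch for annotate() above (not part of the original
# snippet): "match.mp4", the cut points, and the output name are hypothetical,
# and TextClip needs ImageMagick installed.
from moviepy.editor import VideoFileClip, concatenate_videoclips

video = VideoFileClip("match.mp4")
cuts = [(0, 5), (10, 15)]
annotated = [annotate(video.subclip(start, end), 'Clip {}'.format(i))
             for i, (start, end) in enumerate(cuts, 1)]
concatenate_videoclips(annotated).write_videofile("match_annotated.mp4")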
def matchImg(image, frame):
    print('Frame: %s' % frame)
    already_shown = False
    frame_total_secs = frame * 1.0 / fps
    frame_hour = int(frame_total_secs / (60 * 60))
    frame_min = int((frame_total_secs / 60) % 60)
    frame_sec = frame_total_secs % 60
    frame_time = '%02d:%02d:%04.1f' % (frame_hour, frame_min, frame_sec)

    if dump and not use_frames:
        cv2.imwrite("%s/frame_%s_%s.jpg" % (adirectory, frame, frame_time),
                    image)

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    large_edge = cv2.Canny(gray_image, 50, 200)

    good_matches = []
    for goodimg in good_dict_list:
        goodimgdata = good_dict.get(goodimg)
        small_edge = goodimgdata['edge']
        just_found = goodimgdata['just_found']

        result = cv2.matchTemplate(small_edge, large_edge, method)
        minres = cv2.minMaxLoc(result)
        mn, mx, mnLoc, mxLoc = minres
        # print(mn, mx)
        if method == cv2.TM_SQDIFF_NORMED:
            thresh_pass = mn < thresh
            MPx, MPy = mnLoc
        else:
            thresh_pass = mx >= thresh
            MPx, MPy = mxLoc

        if thresh_pass:
            print('============== MATCH FOUND (%s) ===============' % goodimg)
            if not just_found:
                good_matches.append(goodimg)
                trows, tcols = small_edge.shape[:2]
                edit_image = image.copy()
                cv2.rectangle(edit_image, (MPx, MPy),
                              (MPx + tcols, MPy + trows), (0, 0, 255), 2)
                print("writing frame %s at time %s" % (frame, frame_time))
                cv2.imwrite(
                    "%s/%s_%s_%s.jpg" % (directory, goodimg, frame, frame_time),
                    edit_image)
                if show_image and not already_shown:
                    cv2.imshow('output', edit_image)
                    cv2.waitKey(0)
                    already_shown = True
                    cv2.destroyWindow('output')
                    cv2.imshow('output', edit_image)
            goodimgdata['just_found'] = True
        else:
            goodimgdata['just_found'] = False

    if (generate_videos or aggregate_video) and good_matches:
        sm_start = max(frame_total_secs - predelay, 0)
        sm_end = min(frame_total_secs + postdelay, movielen)
        sub_movie = movie.subclip(sm_start, sm_end)
        match_str = ','.join(good_matches)
        # if caption:
        txt_clip = TextClip(match_str, fontsize=55, color='white')
        txt_clip = txt_clip.set_position(('left', 'top')).set_duration(3)
        sub_movie = CompositeVideoClip([sub_movie, txt_clip])
        if generate_videos:
            print('Writing single video to %s/%s_%s.mp4' %
                  (vdirectory, match_str, frame_time))
            sub_movie.write_videofile("%s/%s_%s.mp4" %
                                      (vdirectory, match_str, frame_time))
        else:
            video_list.append(sub_movie)
    return
# another audio file to clip
background_audio_clip = AudioFileClip(source_audio_path)

# creating text clip
text = '''
This clip shows how to open terminal.
'''
intro_duration = 5
intro_text = TextClip(txt=text, color='white', fontsize=70, size=video_clip.size)
intro_text = intro_text.set_fps(video_clip.fps)
intro_text = intro_text.set_duration(intro_duration)
intro_text = intro_text.set_position('center')
intro_music = background_audio_clip.subclip(t_start=0, t_end=intro_duration)
intro_text = intro_text.set_audio(intro_music)

intro_video_dir = os.path.join(overlay_text_dir, 'intro-video.mp4')
intro_text.write_videofile(intro_video_dir)

# overlaying text on the original video
w, h = video_clip.size
watermark_text = TextClip(txt='CFE', fontsize=30, align='East',
                          color='white', size=(w, 30))
watermark_text = watermark_text.set_fps(video_clip.fps)
def get_output(video_path,
               out_filename,
               label,
               fps=30,
               font_size=20,
               font_color='white',
               target_resolution=None,
               resize_algorithm='bicubic',
               use_frames=False):
    """Get demo output using ``moviepy``.

    This function will generate a video or gif file from raw video or frames
    by using ``moviepy``. For more information on some parameters, you can
    refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path or the rawframes directory path.
            If ``use_frames`` is set to True, it should be the rawframes
            directory path. Otherwise, it should be the video file path.
        out_filename (str): Output filename for the generated file.
        label (str): Predicted label of the generated file.
        fps (int): Number of picture frames to read per second. Default: 30.
        font_size (int): Font size of the label. Default: 20.
        font_color (str): Font color of the label. Default: 'white'.
        target_resolution (None | tuple[int | None]): Set to
            (desired_width, desired_height) to have resized frames. If either
            dimension is None, the frames are resized by keeping the existing
            aspect ratio. Default: None.
        resize_algorithm (str): Supported values include 'bicubic',
            'bilinear', 'neighbor', 'lanczos', etc. Default: 'bicubic'.
            For more information, see https://ffmpeg.org/ffmpeg-scaler.html.
        use_frames (bool): Whether to use rawframes as input. Default: False.
    """
    if video_path.startswith(('http://', 'https://')):
        raise NotImplementedError

    try:
        from moviepy.editor import (ImageSequenceClip, TextClip,
                                    VideoFileClip, CompositeVideoClip)
    except ImportError:
        raise ImportError('Please install moviepy to enable output file.')

    if use_frames:
        frame_list = sorted(
            [osp.join(video_path, x) for x in os.listdir(video_path)])
        video_clips = ImageSequenceClip(frame_list, fps=fps)
    else:
        if target_resolution is not None:
            # revert the order to suit ``VideoFileClip``:
            # (width, height) -> (height, width)
            target_resolution = (target_resolution[1], target_resolution[0])
        video_clips = VideoFileClip(video_path,
                                    target_resolution=target_resolution,
                                    resize_algorithm=resize_algorithm)
    duration_video_clip = video_clips.duration

    text_clips = TextClip(label, fontsize=font_size, color=font_color)
    text_clips = (text_clips.set_position(
        ('right', 'bottom'), relative=True).set_duration(duration_video_clip))
    video_clips = CompositeVideoClip([video_clips, text_clips])

    out_type = osp.splitext(out_filename)[1][1:]
    if out_type == 'gif':
        video_clips.write_gif(out_filename)
    else:
        video_clips.write_videofile(out_filename, remove_temp=True)
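# Hypothetical call sketch for the target_resolution variant above: the caller
# passes (desired_width, desired_height) and the function swaps the order for
# ``VideoFileClip``; width None keeps the aspect ratio at height 480. The
# paths and label are placeholders.
get_output('demo.mp4', 'demo_480p.mp4', label='arm wrestling',
           target_resolution=(None, 480))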
intro_text = intro_text.set_duration(intro_duration)
intro_text = intro_text.set_fps(fps)
intro_text = intro_text.set_pos("center")

# to add audio to your intro:
intro_music = audio_clip.subclip(25, 30)
intro_text = intro_text.set_audio(intro_music)

watermark_size = 50
watermark_text = TextClip(watermark,
                          fontsize=watermark_size,
                          color='black',
                          align='East',
                          size=(w, watermark_size))
watermark_text = watermark_text.set_fps(fps)
watermark_text = watermark_text.set_duration(video_clip.reader.duration)
watermark_text = watermark_text.margin(left=10, right=10, bottom=2, opacity=0)
watermark_text = watermark_text.set_position("bottom")

watermarked_clip = CompositeVideoClip([video_clip, watermark_text],
                                      size=video_clip.size)
watermarked_clip = watermarked_clip.set_duration(video_clip.reader.duration)
watermarked_clip = watermarked_clip.set_fps(fps)
watermarked_clip = watermarked_clip.set_audio(final_audio)

final_clip = concatenate_videoclips([intro_text, watermarked_clip])
final_clip.write_videofile(final_video_path, codec='libx264', audio_codec="aac")
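# The intro/watermark walkthrough above assumes clips and settings roughly
# like these already exist (file names and values are placeholders, not from
# the original tutorial):
from moviepy.editor import VideoFileClip, AudioFileClip, TextClip

video_clip = VideoFileClip("lesson.mp4")
audio_clip = AudioFileClip("background.mp3")
fps = video_clip.fps
w, h = video_clip.size
watermark = "CFE"
final_audio = video_clip.audio
final_video_path = "lesson_branded.mp4"
intro_duration = 5
intro_text = TextClip(txt="This clip shows how to open terminal.",
                      color='white', fontsize=70, size=video_clip.size)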
def render_video(self, res, output_path, audio_level=0.7, bg_color=(0, 0, 0)):
    """
    Renders all added videos into a complete compilation.

    Args:
        res (int, int): Width and height of video.
        output_path (str): Path to write video to.
        audio_level (float): Audio level to normalize all videos around, (0, 1].
        bg_color (int, int, int): Color of background as RGB, [0, 255].

    Raises:
        NotEnoughVideos: There are fewer than two videos provided, or fewer
            than two videos are successfully downloaded.
    """
    if self.video_count < 2:
        raise NotEnoughVideos("Need at least 2 videos for a compilation")

    # Download videos.
    dl = list(self._batch_dl())
    if len(dl) < 2:
        raise NotEnoughVideos(
            "Only {} videos downloaded successfully, need at least 2".format(
                len(dl)))

    # We use this directory when we need to write a video to a file temporarily.
    temp_dir = tempfile.TemporaryDirectory()

    # Load all clips.
    timestamp = 0
    manifest = Manifest()
    clips = []
    w, h = res
    videos_used = 0
    for v, path in dl:
        title = v.title
        author = v.author
        if self._censor is not None:
            title = self._censor.censor(title)
            author = self._censor.censor(author)

        clip = VideoFileClip(path)

        # Adjust audio levels.
        if clip.audio is not None:
            if clip.audio.max_volume() > 0:
                audio = clip.audio.fx(afx.audio_normalize)
                max_volume = clip.audio.max_volume()
                volume_mult = audio_level / max_volume
                clip.set_audio(audio)
                clip = clip.fx(afx.volumex, volume_mult)

        # Resize video.
        cw, ch = clip.size
        size_mult = min(w / cw, h / ch)
        new_size = (cw * size_mult, ch * size_mult)
        clip = clip.resize(newsize=new_size)

        # If the video does not fill the screen, add a background to it.
        # This intends to make the video more visually interesting.
        if clip.size != res:
            ext = os.path.splitext(path)[1]
            # We use time_ns to generate a unique filename.
            temp_vid_path = os.path.join(temp_dir.name, str(time_ns()) + ext)
            clip.write_videofile(
                temp_vid_path,
                ffmpeg_params=[
                    "-lavfi",
                    "[0:v]scale=ih*16/9:-1,boxblur=luma_radius=min(h\,w)/20:luma_power=1:chroma_radius=min(cw\,ch)/20:chroma_power=1[bg];[bg][0:v]overlay=(W-w)/2:(H-h)/2,crop=h=iw*9/16",
                ],
            )
            clip = VideoFileClip(temp_vid_path)

        # Add text.
        try:
            # A title that is too long can cause ImageMagick to fail.
            # Titles longer than 100 characters won't fit on the screen anyway.
            title_slice = title[:100]
            title_clip = TextClip(title_slice,
                                  font="IBM Plex Sans",
                                  fontsize=60,
                                  color="white")
            title_clip = title_clip.set_position(
                (10, 10)).set_duration(clip.duration)
            title_clip_shadow = TextClip(title_slice,
                                         font="IBM Plex Sans",
                                         fontsize=60,
                                         color="black")
            title_clip_shadow = title_clip_shadow.set_position(
                (12, 12)).set_duration(clip.duration)
            author_text = "u/{}".format(author)
            author_clip = TextClip(author_text,
                                   font="IBM Plex Sans",
                                   fontsize=40,
                                   color="grey")
            author_clip = author_clip.set_position(
                (40, 75)).set_duration(clip.duration)
        except OSError as e:
            # This is intended to catch ImageMagick related errors.
            # ImageMagick can fail in unexpected ways, but it happens seldom
            # enough that we can just ignore it.
            # Future versions of Moviepy will likely move away from ImageMagick
            # (https://github.com/Zulko/moviepy/issues/1145#issuecomment-623594679)
            print("Unexpected error: {}".format(e), file=sys.stderr)
            continue

        clip = CompositeVideoClip(
            [clip, title_clip_shadow, title_clip, author_clip], size=res)
        clips.append(clip)

        # Update manifest.
        manifest.add_entry(v, timestamp)
        timestamp += clip.duration
        videos_used += 1

    # Videos might have been skipped due to recoverable errors.
    if videos_used < 2:
        raise NotEnoughVideos(
            "Only {} videos successfully edited, need at least 2".format(
                videos_used))

    final = concatenate_videoclips(clips)
    thread_cnt = multiprocessing.cpu_count()
    final.write_videofile(output_path, threads=thread_cnt)

    # Delete all downloaded videos.
    rmtree(_DOWNLOAD_DIR)

    return manifest
def create_clip(ride):
    clip = VideoFileClip(ride + ".mp4").margin(5)
    txt_clip = TextClip('Track 1, dataset ' + ride)
    txt_clip = txt_clip.set_position((10, 10)).set_duration(160)
    return CompositeVideoClip([clip, txt_clip])
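# Hypothetical usage of create_clip(): "ride_07" stands in for a real ride id,
# so "ride_07.mp4" would need to exist in the working directory.
create_clip("ride_07").write_videofile("ride_07_labelled.mp4")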
def render_video(self, res, output_path, audio_level=0.7, bg_color=(0, 0, 0)):
    """
    Renders all added videos into a complete compilation.

    Args:
        res (int, int): Width and height of video.
        output_path (str): Path to write video to.
        audio_level (float): Audio level to normalize all videos around, (0, 1].
        bg_color (int, int, int): Color of background as RGB, [0, 255].

    Raises:
        NotEnoughVideos: There are fewer than two videos provided, or fewer
            than two videos are successfully downloaded.
    """
    if self.get_video_count() < 2:
        raise NotEnoughVideos("Need at least 2 videos for a compilation")

    # Download videos.
    dl = list(self._batch_dl())
    if len(dl) < 2:
        raise NotEnoughVideos(
            "Only {} videos downloaded successfully, need at least 2".format(
                len(dl)
            )
        )

    # Load all clips.
    timestamp = 0
    manifest = Manifest()
    clips = []
    w, h = res
    for v, path in dl:
        title = v.get_title()
        author = v.get_author()
        if self._censor is not None:
            title = self._censor.censor(title)
            author = self._censor.censor(author)

        clip = VideoFileClip(path)

        # Adjust audio levels.
        if clip.audio is not None:
            if clip.audio.max_volume() > 0:
                audio = clip.audio.fx(afx.audio_normalize)
                max_volume = clip.audio.max_volume()
                volume_mult = audio_level / max_volume
                clip.set_audio(audio)
                clip = clip.fx(afx.volumex, volume_mult)

        # Resize video.
        cw, ch = clip.size
        size_mult = min(w / cw, h / ch)
        new_size = (cw * size_mult, ch * size_mult)
        clip = clip.resize(newsize=new_size).on_color(
            size=res, color=bg_color, pos="center"
        )

        # Add text.
        title_clip = TextClip(
            title, font="IBM Plex Sans", fontsize=60, color="white"
        )
        title_clip = title_clip.set_position((10, 10)).set_duration(clip.duration)
        title_clip_shadow = TextClip(
            title, font="IBM Plex Sans", fontsize=60, color="black"
        )
        title_clip_shadow = title_clip_shadow.set_position((12, 12)).set_duration(
            clip.duration
        )
        author_text = "u/{}".format(author)
        author_clip = TextClip(
            author_text, font="IBM Plex Sans", fontsize=40, color="grey"
        )
        author_clip = author_clip.set_position((40, 75)).set_duration(clip.duration)

        clip = CompositeVideoClip(
            [clip, title_clip_shadow, title_clip, author_clip], size=res
        )
        clips.append(clip)

        # Update manifest.
        manifest.add_entry(v, timestamp)
        timestamp += clip.duration

    final = concatenate_videoclips(clips)
    final.write_videofile(output_path)

    # Delete all downloaded videos.
    rmtree(_DOWNLOAD_DIR)

    return manifest