def _make_round_credits(
        round_credits: RoundCredits,
        round_index: int,
        width: int,
        height: int,
        color: str = 'white',
        stroke_color: str = 'black',
        stroke_width: int = 2,
        font: str = 'Impact-Normal',
        fontsize: int = 60,
        gap: int = 0
) -> Clip:
    """Build a scrolling credits clip for one round.

    Renders the round's audio and video credits as two side-by-side text
    columns, rasterizes them once, and returns a clip that scrolls the
    image upward over the frame.

    :param round_credits: credits (``.audio`` and ``.video`` lists) for the round
    :param round_index: zero-based round number; shown as ``round_index + 1``
    :param width: width in pixels the credits are scaled to
    :param height: frame height, used to derive the scroll speed
    :param color: text fill color
    :param stroke_color: text outline color
    :param stroke_width: text outline width in pixels
    :param font: font name passed to TextClip
    :param fontsize: font size in points
    :param gap: horizontal gap in pixels between the two columns
    :return: the scrolling credits clip, with its transparency mask attached
    """
    texts = []
    # 16 blank rows push the credits below the frame so they scroll
    # in from the bottom rather than starting on-screen.
    texts += [["\n", "\n"]] * 16
    if round_credits.audio != []:
        # First audio credit carries the section heading.
        texts += _make_credit_texts(
            str(round_credits.audio[0]),
            "ROUND {} MUSIC".format(round_index + 1))
        for audio_credit in round_credits.audio[1:]:
            texts += _make_credit_texts(str(audio_credit))
    if round_credits.video != []:
        # First video credit carries the section heading.
        texts += _make_credit_texts(
            str(round_credits.video[0]),
            "ROUND {} VIDEOS".format(round_index + 1))
        for video_credit in round_credits.video[1:]:
            texts += _make_credit_texts(str(video_credit))
    # Trailing blank rows so the last line scrolls fully out of view.
    texts += [["\n", "\n"]] * 2

    # Make two columns for the credits
    left, right = ("".join(t) for t in zip(*texts))
    left, right = [TextClip(txt, color=color, stroke_color=stroke_color,
                            stroke_width=stroke_width, font=font,
                            fontsize=fontsize, align=al)
                   for txt, al in [(left, 'East'), (right, 'West')]]

    # Combine the columns
    cc = CompositeVideoClip([left, right.set_position((left.w + gap, 0))],
                            size=(left.w + right.w + gap, right.h),
                            bg_color=None)

    # Scale to the required size
    scaled = resize(cc, width=width)

    # Transform the whole credit clip into an ImageClip so the scroll is a
    # cheap position animation over a single rasterized frame.
    credits_video = ImageClip(scaled.get_frame(0))
    mask = ImageClip(scaled.mask.get_frame(0), ismask=True)

    # Scroll speed: the full frame height passes in CREDIT_DISPLAY_TIME seconds.
    lines_per_second = height / CREDIT_DISPLAY_TIME

    def scroll(t):
        # Move the image upward linearly with time, horizontally centered.
        return ("center", -lines_per_second * t)

    credits_video = credits_video.set_position(scroll)
    # Duration is however long it takes the whole image to scroll past.
    credits_duration = credits_video.h / lines_per_second
    credits_video = credits_video.set_duration(credits_duration)
    return credits_video.set_mask(mask)
def _render_clip(self, frames):
    """Assemble rendered frames into a single clip with the session audio.

    Each frame becomes a still clip lasting one frame period; the stills
    are concatenated, the audio track is attached, and the final clip is
    pinned to the configured frame rate.
    """
    log = logging.getLogger('logger')
    log.info("Rendering video...")
    frame_time = 1 / self.frame_rate
    # One still image clip per frame, each displayed for a single period.
    stills = [ImageClip(f.img).set_duration(frame_time) for f in frames]
    video = concatenate_videoclips(stills, method="chain")
    video = video.set_audio(AudioFileClip(self.audio.path))
    return video.set_fps(self.frame_rate)
def freeze_at_end(clip, freeze_duration=None, total_duration=None):
    """
    Makes the clip freeze on its last frame.

    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can specify the total duration of the clip
    and the freeze (i.e. the duration of the freeze is automatically
    calculated). If neither is provided, the freeze will have an infinite
    length.
    """
    # NOTE(review): sampling exactly at clip.end may fall past the last real
    # frame — confirm, or sample slightly earlier (cf. the delta-based
    # variant of this helper elsewhere in this file).
    freezed_clip = ImageClip(clip.get_frame(clip.end))
    if total_duration:
        # total_duration takes precedence: derive the freeze length from it.
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    # Overlay the still starting where the original clip ends.
    return CompositeVideoClip([clip, freezed_clip.set_start(clip.end)])
def freeze_at_start(clip, freeze_duration=None, total_duration=None):
    """
    Makes the clip freeze on its first frame (frame at t=0), prepending
    the frozen still before the clip plays.

    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can specify the total duration of the clip
    and the freeze (i.e. the duration of the freeze is automatically
    calculated). If neither is provided, the freeze will have an infinite
    length.
    """
    # NOTE(review): this function is redefined later in the file; this copy
    # is shadowed by the later definition — confirm which one is intended.
    freezed_clip = ImageClip(clip.get_frame(0))
    if total_duration:
        # total_duration takes precedence: derive the freeze length from it.
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    # NOTE(review): relies on a module-level `concatenate` (legacy moviepy
    # API) — confirm it is imported.
    return concatenate([freezed_clip, clip])
def freeze_at_start(clip, freeze_duration=None, total_duration=None):
    """
    Momentarily freeze the clip on its first frame.

    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can specify the total duration of the clip
    and the freeze (the freeze duration is then computed automatically).
    If neither is provided, the freeze will have an infinite length.
    """
    still = ImageClip(clip.get_frame(0))
    if total_duration:
        # A requested total length wins: freeze fills the remainder.
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        still = still.set_duration(freeze_duration)
    # Play the frozen first frame, then the clip itself.
    return concatenate([still, clip])
def freeze_at_end(clip, freeze_duration=None, total_duration=None, delta=0.05):
    """
    Makes the clip freeze on its last frame.

    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can specify the total duration of the clip
    and the freeze (the freeze duration is then computed automatically).
    If neither is provided, the freeze will have an infinite length.

    The clip is frozen on the frame at time (clip.duration - delta), which
    avoids sampling past the final frame.
    """
    still = ImageClip(clip.get_frame(clip.end - delta))
    if total_duration:
        # A requested total length wins: freeze fills the remainder.
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        still = still.set_duration(freeze_duration)
    # Overlay the still so it starts exactly when the clip ends.
    return CompositeVideoClip([clip, still.set_start(clip.end)])
def generate_text_clip(text, number):
    """Render ``text`` over the background image with TTS narration and
    write it to an mp4; returns the output file path.

    The result is cached on disk: if the target file already exists it is
    not re-rendered.

    Relies on names not defined in this function — presumably module-level
    globals: ``name`` (used in the output path), ``background_image`` and
    ``make_tts``; verify against the rest of the module.

    :param text: the text to display and narrate
    :param number: identifier appended to the clip's filename
    :return: path of the generated (or cached) mp4 file
    """
    filename = "tmp/" + name + "/clips/" + name + number + ".mp4"
    if not os.path.exists(filename):
        audio_filename = make_tts(text, number)
        audio = AudioFileClip(audio_filename)
        image = ImageClip(background_image).set_fps(30)
        # Still background shown for exactly the narration's length.
        video = image.set_duration(audio.duration)
        withaudio = video.set_audio(audio)
        # NOTE(review): this yields a *smaller* font for wider frames and
        # barely grows with text length — looks inverted; something like
        # width / len(text) was probably intended. Confirm before changing.
        fontsize = (len(text) + 10) / withaudio.w
        text_clip = TextClip(text, fontsize=fontsize,
                             size=(withaudio.w, withaudio.h)).set_pos("center")
        final_clip = CompositeVideoClip(
            [withaudio, text_clip.set_duration(video.duration)])
        final_clip.write_videofile(filename)
    return filename
def image_to_video(self, image_path: str, destination: str, duration: float) -> None:
    """Write a still image out as a video file.

    :param image_path: path of the source image
    :param destination: path of the video file to write
    :param duration: length of the resulting video in seconds (24 fps)
    """
    still = ImageClip(image_path).set_duration(duration)
    still.write_videofile(destination, fps=24)
def _compile_worker(session_key: str, video_id: str) -> None:
    """Compile a session's clips into one watermarked video and upload it.

    Pipeline: download the session's clips (and optional audio), normalize
    clip sizes, overlay the logo watermark, concatenate, attach audio,
    optionally apply beat-triggered effects, write the file with ffmpeg,
    and push it to cold storage.

    :param session_key: key identifying the session whose media is compiled
    :param video_id: id of the video record the result is saved under
    """
    # Use this for conditional creation
    config = get_nosql_handler().get_video_config(video_id)

    # Fetch clip records and make sure their files exist locally.
    session_clips = get_sql_handler().get_session_clips_by_session_key(session_key)
    for session_clip in session_clips:
        download_session_clip(session_clip, sync=True)

    # At most one audio track per session; first record wins.
    session_audios = get_sql_handler().get_session_audio_by_session_key(session_key)
    session_audio = None
    if session_audios:
        session_audio = session_audios[0]
        download_session_audio(session_audio, sync=True)

    # Create VideoFileClips
    clips = [VideoFileClip(session_clip.local_file_path()) for session_clip in session_clips]
    # NOTE(review): total_duration is computed but never used below.
    total_duration = sum([clip.duration for clip in clips])

    # Make all clips the same size: shrink everything to the smallest
    # width/height found among the clips.
    final_w = min([clip.w for clip in clips])
    final_h = min([clip.h for clip in clips])
    clips = [resize(clip, newsize=(final_w, final_h)) for clip in clips]

    # Adding gamertag and logo to the video
    # gamertag = config.get('gamertag', '')
    # gamertag_position = config.get('gamertag_position', ['right', 'bottom'])
    #
    # if gamertag != '':
    #     gamertag_clip = TextClip(txt='@'+gamertag, fontsize=50, font = 'Comfortaa', color='white')
    #     gamertag_clip = gamertag_clip.set_duration(final.duration)\
    #         .margin(right=8,top = 8, left=8, bottom=8, opacity=0)\
    #         .set_position((gamertag_position[0], gamertag_position[1])

    # === WATERMARK ===
    logo_position = config.get('logo_position', ['left', 'top'])
    logo_clip = ImageClip('./reels/static/reels/reels-logo-white.png')
    # Logo scaled to a fifth of the (normalized) frame height.
    logo_clip = resize(logo_clip, height=final_h / 5)
    try:
        logo_x = (0 if logo_position[0] == 'left' else final_w - logo_clip.w)
        logo_y = (0 if logo_position[1] == 'top' else final_h - logo_clip.h)
    except (KeyError, TypeError):
        # NOTE(review): indexing a list raises IndexError, not KeyError —
        # confirm whether IndexError should be caught here as well.
        # Fallback: bottom-left corner.
        logo_x, logo_y = 0, final_h - logo_clip.h
    logo_clip = logo_clip.set_pos((logo_x, logo_y))
    # Burn the watermark into every clip before concatenation.
    clips = [CompositeVideoClip([clip, logo_clip.set_duration(clip.duration)]) for clip in clips]

    # Concatenate clips
    final = concatenate_videoclips(clips, method="compose")

    # Add audio, only if there is audio
    audio_clip = None
    if session_audio:
        audio_clip = AudioFileClip(session_audio.local_file_path())
        # Trim to the configured window; defaults keep the full track.
        audio_clip = audio_clip \
            .set_start(config.get('audio_start', 0)) \
            .set_end(config.get('audio_end', audio_clip.duration))
        # Attach audio to video, but make it only as long as the videos are
        # TODO: Manage case where videos are longer than audio clip
        final = final.set_audio(audio_clip.set_duration(final.duration))

    # If extra editing is enabled, do so
    if config.get('extras', False) and session_audio and get_file_type(session_audio.local_file_path()) == 'wav':
        # Detect loud onsets in the left channel and apply a one-second
        # visual effect (color inversion or painting) at each detection.
        fs, data = read(session_audio.local_file_path())
        data = data[:, 0]
        # Trim to a whole number of 48000-sample blocks.
        data = data[:len(data) - len(data) % 48000]
        # Mean level per quarter-second window (assumes 48 kHz audio —
        # TODO confirm against the actual sample rate `fs`).
        data2 = np.mean(data.reshape(-1, int(48000 / 4)), axis=1)
        x = np.diff(data2, n=1)
        # Window indices whose level jumps by more than 200.
        secs = np.where(x > 200)[0]
        # Keep onsets at least ~3 s (12 windows) apart.
        t = list(secs[np.where(np.diff(secs) > 12)[0] + 1])
        # NOTE(review): np.diff(secs)[0] raises IndexError when secs has
        # fewer than 2 entries — confirm this path can't receive silence.
        if np.diff(secs)[0] > 12:
            t.insert(0, secs[0])
        # Convert window indices to seconds (4 windows per second).
        for i in range(0, len(t)):
            t[i] /= 4
        for i in t:
            tfreeze = i
            # Skip effects that would run past the end of the video.
            if tfreeze + 1.75 >= final.duration:
                break
            # Split out a 1-second slice, stylize it, and splice it back.
            clip_before = final.subclip(t_end=tfreeze)
            clip_after = final.subclip(t_start=tfreeze + 1)
            clip = final.subclip(t_start=tfreeze, t_end=tfreeze + 1)
            if int(i) % 2 == 0:
                clip = clip.fl_image(invert_colors).crossfadein(0.5).crossfadeout(0.5)
            else:
                clip = clip.fx(vfx.painting, saturation=1.6, black=0.006).crossfadein(0.5).crossfadeout(0.5)
            final = concatenate_videoclips([clip_before, clip, clip_after])
    else:
        pass

    # === Final Saving ===
    video = get_sql_handler().get_video(video_id)
    final.write_videofile(filename=video.local_file_path(), verbose=True, codec="libx264",
                          audio_codec='aac',
                          temp_audiofile=f'temp-audio-{video.video_id}.m4a',
                          remove_temp=True, preset="medium",
                          ffmpeg_params=["-profile:v", "baseline", "-level", "3.0", "-pix_fmt", "yuv420p"])

    # close local files because we don't need them anymore and so they can be removed later
    for clip in clips:
        clip.close()
    if audio_clip:
        audio_clip.close()

    # upload to cold storage
    save_video(video, sync=True, clean=False)