def test_PR_339():
    """Regression test for PR 339: TextClip must build in both render modes."""
    # Caption mode.
    caption = TextClip(txt='foo', color='white', font=FONT, size=(640, 480),
                       method='caption', align='center', fontsize=25)
    caption.close()
    # Label mode.
    label = TextClip(txt='foo', font=FONT, method='label')
    label.close()
def test_PR_306():
    """Regression test for PR 306: ``TextClip.list`` queries behave sanely."""
    # Both queryable attributes must return something.
    assert TextClip.list("font") != []
    assert TextClip.list("color") != []
    # Querying an unknown attribute must raise.
    with pytest.raises(Exception):
        TextClip.list("blah")
    close_all_clips(locals())
def test_PR_306():
    """Regression test for PR 306: ``TextClip.list`` queries.

    Fix: the ``message=`` keyword to ``pytest.raises`` was deprecated in
    pytest 3.8 and removed in pytest 4.0; a bare ``pytest.raises`` gives
    the same check without the broken keyword.
    """
    assert TextClip.list('font') != []
    assert TextClip.list('color') != []
    with pytest.raises(Exception):
        TextClip.list('blah')
def add_info_overlay(clip, size, video, pos, counter, total):
    """Overlay title/URL/timestamp and counter labels onto *clip*.

    ``size`` is a "WIDTHxHEIGHT" string; the clip is resized when its
    dimensions do not already match.
    """
    video_id = video["id"]["videoId"]
    video_title = html.unescape(video["snippet"]["title"])
    video_published_at = video["snippet"]["publishedAt"]
    start, _, end = video["data"]["timestamps"][pos - 1]
    episode_counter = str(pos)
    aligned_counter = str(counter).rjust(len(str(total)))
    width, height = map(int, size.split("x"))

    def make_label(text, vertical):
        # Shared styling for both overlay labels (black on white, left edge).
        return (TextClip(txt=text, fontsize=24, color="black",
                         bg_color="white", align="west")
                .set_duration(clip.duration)
                .set_position(("left", vertical)))

    clip_text_title = make_label(
        f"{video_title}\nhttps://youtube.com/watch?v={video_id}\nTimestamp: {start}\nDate: {video_published_at}",
        "bottom")
    clip_text_counter = make_label(
        f"Episode counter: {episode_counter}\nTotal counter : {aligned_counter}/{total}",
        "top")

    if clip.size != [width, height]:
        clip = clip.fx(vfx.resize, width=width)
    return CompositeVideoClip([clip, clip_text_title, clip_text_counter],
                              size=(width, height))
def test_PR_306():
    """``TextClip.list`` returns data for known attributes, raises otherwise."""
    # Known attributes yield non-empty lists.
    assert TextClip.list('font') != []
    assert TextClip.list('color') != []
    # An unknown attribute is an error.
    with pytest.raises(Exception):
        TextClip.list('blah')
    close_all_clips(locals())
def ts_clip(path):
    """Render a single red text frame whose label is derived from *path*."""
    logger.debug(f'Txt frame with timestamp {path}')
    # e.g. "frames/12_34.png" -> "12.34"
    stem = path.split('/')[-1].split('.')[0]
    label = stem.replace('_', '.')
    clip = TextClip(txt=label, fontsize=20, color="red",
                    font='Ubuntu-Bold', transparent=True)
    return clip.get_frame(0)
def test_list():
    """``TextClip.list`` exposes available fonts and colors as string lists."""
    fonts = TextClip.list("font")
    assert isinstance(fonts, list)
    assert isinstance(fonts[0], str)

    colors = TextClip.list("color")
    assert isinstance(colors, list)
    assert isinstance(colors[0], str)
    # A well-known color must be among them.
    assert "blue" in colors
def test_PR_306():
    """Regression test for PR 306 (skipped on Travis: no ImageMagick there).

    Fix: the ``message=`` keyword to ``pytest.raises`` was deprecated in
    pytest 3.8 and removed in pytest 4.0, so the call would raise a
    TypeError on modern pytest; the keyword is dropped.
    """
    if TRAVIS:
        # Put this back in once we get ImageMagick working on travis-ci.
        return
    assert TextClip.list('font') != []
    assert TextClip.list('color') != []
    with pytest.raises(Exception):
        TextClip.list('blah')
def test_duration():
    """``set_duration`` returns a new clip carrying the requested duration."""
    clip = TextClip('hello world', size=(1280, 720), color='white')
    # Clip transforms are non-mutating, so rebind the result (changed due to #598).
    clip = clip.set_duration(5)
    assert clip.duration == 5
    clip.close()

    clip2 = clip.fx(blink, d_on=1, d_off=1).set_duration(5)
    assert clip2.duration == 5
    close_all_clips(locals())
def test_duration():
    """Check that ``set_duration`` takes effect on TextClips.

    Fix: ``set_duration`` returns a *new* clip (clips are immutable); the
    original code discarded the return value, so ``clip.duration`` stayed
    ``None`` and both asserts failed (see moviepy issue #598).
    """
    # TextClip returns the following error under Travis (issue with Imagemagick):
    # convert.im6: not authorized `@/tmp/tmpWL7I3M.txt' @ error/property.c/InterpretImageProperties/3057.
    # convert.im6: no images defined `PNG32:/tmp/tmpRZVqGQ.png' @ error/convert.c/ConvertImageCommand/3044.
    if TRAVIS:
        return
    clip = TextClip('hello world', size=(1280, 720), color='white')
    clip = clip.set_duration(5)  # rebind: set_duration does not mutate
    assert clip.duration == 5
    clip2 = clip.fx(blink, d_on=1, d_off=1)
    clip2 = clip2.set_duration(5)  # rebind here too
    assert clip2.duration == 5
def test_duration():
    """Check that ``set_duration`` takes effect on TextClips.

    Fix: the first ``set_duration`` was already rebound, but the result of
    ``clip2.set_duration(5)`` was still discarded — clips are immutable, so
    ``clip2.duration`` remained unset and the final assert failed
    (see moviepy issue #598).
    """
    # TextClip returns the following error under Travis (issue with Imagemagick):
    # convert.im6: not authorized `@/tmp/tmpWL7I3M.txt' @ error/property.c/InterpretImageProperties/3057.
    # convert.im6: no images defined `PNG32:/tmp/tmpRZVqGQ.png' @ error/convert.c/ConvertImageCommand/3044.
    if TRAVIS:
        return
    clip = TextClip('hello world', size=(1280, 720), color='white')
    clip = clip.set_duration(5)
    assert clip.duration == 5
    clip2 = clip.fx(blink, d_on=1, d_off=1)
    clip2 = clip2.set_duration(5)  # FIX: rebind, set_duration does not mutate
    assert clip2.duration == 5
def test_PR_339():
    """Regression test for PR 339 using the modern TextClip API."""
    # Caption mode.
    caption = TextClip(
        text="foo",
        color="white",
        font=FONT,
        size=(640, 480),
        method="caption",
        align="center",
        font_size=25,
    )
    caption.close()
    # Label mode.
    label = TextClip(text="foo", font=FONT, method="label")
    label.close()
def add_outro(clips):
    """Append 'thanks' and 'subscribe' title cards and flatten onto black."""
    def title_card(message):
        # Full-screen white caption shown for two seconds.
        return TextClip(message, color='white', fontsize=72,
                        size=VIDEO_SIZE, method='caption').set_duration(2)

    outro = [title_card(THANKS), title_card(SUBSCRIBE)]
    combined = concatenate_videoclips(clips + outro, method='compose')
    return combined.on_color(color=BLACK, col_opacity=1)
def test_PR_339():
    """Regression test for PR 339 (skipped on Travis: ImageMagick issues)."""
    if TRAVIS:
        return
    # Caption mode.
    TextClip(txt='foo', color='white', font="Liberation-Mono",
             size=(640, 480), method='caption', align='center', fontsize=25)
    # Label mode.
    TextClip(txt='foo', font="Liberation-Mono", method='label')
def generate_from_scenes(self, scenes: List[Scene], fps):
    """Build a trailer from *scenes*, capped at ``self.max_length``, and write it.

    Scenes are converted in order; conversion stops once the accumulated
    duration would exceed the configured maximum (the crossing clip is
    dropped entirely). Nothing is written when no scene fits.
    """
    clips = []
    elapsed = 0
    for scene in scenes:
        scene_clip = self._generate_from_scene(scene, fps)
        elapsed += scene_clip.duration
        if elapsed > self.max_length.seconds:
            break
        clips.append(scene_clip)
    if not clips:
        return
    if self.title:
        # Five-second title card matching the first clip's dimensions.
        title_clip = TextClip(self.title, color='white', bg_color='black',
                              fontsize=60,
                              size=(clips[0].w, clips[0].h)).set_duration(5)
        clips.append(title_clip)
    trailer = concatenate_videoclips(clips)
    if os.path.exists(self.out_path):
        os.remove(self.out_path)
    trailer.write_videofile(self.out_path)
def test_subtitles():
    """SubtitlesClip parses an SRT file and composites over a color video.

    Fix: ``final.to_videofile`` is a long-deprecated alias of
    ``write_videofile``; the modern name is used (matching the other
    subtitle tests in this project).
    """
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Travis does not like TextClip... so return for now,
    # but allow regular users to still run the test below.
    if TRAVIS:
        return

    generator = lambda txt: TextClip(txt, font='Liberation-Mono',
                                     size=(800, 600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')
    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]
    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
def __init__(self, subtitles, make_textclip=None, encoding=None):
    """Build a subtitles clip.

    Parameters
    ----------
    subtitles:
        Either a path to a subtitles file (parsed with
        ``file_to_subtitles``) or an already-parsed list of
        ``((t_start, t_end), text)`` pairs.
    make_textclip:
        Optional factory mapping a subtitle string to a TextClip; when
        None, a Georgia-Bold 24pt white-with-black-stroke default is used.
    encoding:
        Text encoding forwarded to ``file_to_subtitles``.
    """
    VideoClip.__init__(self, has_constant_size=False)
    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles, encoding=encoding)

    # subtitles = [(map(cvsecs, tt),txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    # Cache of rendered clips keyed by ((ta, tb), txt), filled lazily.
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(
            txt,
            font="Georgia-Bold",
            fontsize=24,
            color="white",
            stroke_color="black",
            stroke_width=0.5,
        )

    self.make_textclip = make_textclip
    self.start = 0
    # The clip runs until the last subtitle's end time.
    self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
    self.end = self.duration

    def add_textclip_if_none(t):
        """ Will generate a textclip if it hasn't been generated asked
        to generate it yet. If there is no subtitle to show at t, return
        false. """
        # First look among already-rendered clips, then among raw subtitles.
        sub = [
            ((ta, tb), txt)
            for ((ta, tb), txt) in self.textclips.keys()
            if (ta <= t < tb)
        ]
        if not sub:
            sub = [
                ((ta, tb), txt)
                for ((ta, tb), txt) in self.subtitles
                if (ta <= t < tb)
            ]
            if not sub:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def make_frame(t):
        # Active subtitle's frame, or a 1x1 black pixel when none is showing.
        sub = add_textclip_if_none(t)
        return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]])

    def make_mask_frame(t):
        sub = add_textclip_if_none(t)
        return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]])

    self.make_frame = make_frame
    # Probe whether generated textclips carry a mask; only then does the
    # subtitles clip get a mask of its own.
    hasmask = bool(self.make_textclip("T").mask)
    self.mask = VideoClip(make_mask_frame, ismask=True) if hasmask else None
def default_subtitle_generator():
    """Return a factory rendering subtitles in white PingFang on translucent black."""
    def build(txt):
        # Strip embedded newlines so each subtitle renders on a single line.
        return TextClip(txt.replace('\n', ''),
                        font='assets/font/PingFang.ttf',
                        fontsize=45,
                        stroke_width=2,
                        color='white',
                        bg_color='#00000066')
    return build
def test_subtitles(util):
    """SubtitlesClip round-trips SRT data and composites over a color video."""
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).with_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).with_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    def generator(txt):
        # Full-frame white caption anchored to the bottom ('South').
        return TextClip(txt, font=util.FONT, size=(800, 600), font_size=24,
                        method="caption", align="South", color="white")

    subtitles = SubtitlesClip("media/subtitles.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    # Render only half a second to keep the test quick.
    final.subclip(0, 0.5).write_videofile(
        os.path.join(util.TMP_DIR, "subtitles.mp4"), fps=5, logger=None,
    )
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA

    subtitles = SubtitlesClip(MEDIA_SUBTITLES_DATA, generator)
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA
def test_subtitles():
    """SubtitlesClip parses an SRT file and composites over a color video."""
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    def generator(txt):
        # Full-frame white caption anchored to the bottom ('South').
        return TextClip(txt, font=FONT, size=(800, 600), fontsize=24,
                        method='caption', align='South', color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]
    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
    close_all_clips(locals())
def test_if_textclip_crashes_in_caption_mode():
    """Smoke test: constructing a caption-mode TextClip must not raise."""
    clip = TextClip(txt='foo', color='white', size=(640, 480),
                    method='caption', align='center', fontsize=25, font=FONT)
    clip.close()
def __init__(self, subtitles, make_textclip=None):
    """Build a subtitles clip.

    Parameters
    ----------
    subtitles:
        Either a path to a subtitles file (parsed with
        ``file_to_subtitles``) or an already-parsed list of
        ``((t_start, t_end), text)`` pairs.
    make_textclip:
        Optional factory mapping a subtitle string to a TextClip; when
        None, a Georgia-Bold 24pt white-with-black-stroke default is used.
    """
    VideoClip.__init__(self, has_constant_size=False)
    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles)
    # BUG FIX: wrap map() in list(). On Python 3, map() returns a one-shot
    # iterator, so the (ta, tb) time pairs would be consumed by the first
    # unpacking (the max() below) and be empty everywhere afterwards.
    subtitles = [(list(map(cvsecs, tt)), txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    # Cache of rendered clips keyed by ((ta, tb), txt), filled lazily.
    self.textclips = dict()
    if make_textclip is None:
        make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                             fontsize=24, color='white',
                                             stroke_color='black',
                                             stroke_width=0.5)
    self.make_textclip = make_textclip
    self.inicia = 0
    # The clip runs until the last subtitle's end time.
    self.duracion = max([tb for ((ta, tb), txt) in self.subtitles])
    self.fin = self.duracion

    def add_textclip_if_none(t):
        """ Will generate a textclip if it hasn't been generated asked
        to generate it yet. If there is no subtitle to show at t, return
        false. """
        # First look among already-rendered clips, then among raw subtitles.
        sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
               if (ta <= t < tb)]
        if sub == []:
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                   if (ta <= t < tb)]
            if sub == []:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def make_frame(t):
        # Active subtitle's frame, or a 1x1 black pixel when none is showing.
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].get_frame(t) if sub
                else np.array([[[0, 0, 0]]]))

    def make_mask_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].mask.get_frame(t) if sub
                else np.array([[0]]))

    self.make_frame = make_frame
    # Probe whether generated textclips carry a mask; only then does the
    # subtitles clip get a mask of its own.
    hasmask = (self.make_textclip('T').mask is not None)
    self.mask = (VideoClip(make_mask_frame, ismask=True) if hasmask else None)
def generator(txt):
    """Build a white Nunito caption clip; 'South' captions fill *size*,
    any other direction gets a centered 512x25 strip."""
    common = dict(font='Nunito', fontsize=fontsize, color='white',
                  method='caption')
    if direction == 'South':
        return TextClip(txt, align=direction, size=size, **common)
    return TextClip(txt, align='center', size=(512, 25), **common)
def make_textclip(txt):
    """Render *txt* as white 24pt text with a thin black outline."""
    style = {
        "font": FONT,
        "font_size": 24,
        "color": "white",
        "stroke_color": "black",
        "stroke_width": 0.5,
    }
    return TextClip(txt, **style)
def add_text(content, font_size):
    """Burn *content* onto the top of the module-level ``video`` stream
    and write the result to the module-level ``new_file``."""
    overlay = TextClip(content, fontsize=font_size, color='white')
    overlay = overlay.set_position('top').set_duration(video.duration)
    composed = CompositeVideoClip([video, overlay])
    composed.write_videofile(new_file)
def make_textclip(txt):
    """Render *txt* in Georgia-Bold, white 24pt with a thin black outline."""
    style = {
        "font": "Georgia-Bold",
        "font_size": 24,
        "color": "white",
        "stroke_color": "black",
        "stroke_width": 0.5,
    }
    return TextClip(txt, **style)
def test_if_textclip_crashes_in_caption_mode():
    """Smoke test for caption mode (skipped on Travis: ImageMagick issues)."""
    if TRAVIS:
        return
    TextClip(txt='foo', color='white', size=(640, 480),
             method='caption', align='center', fontsize=25)
def test_if_textclip_crashes_in_caption_mode(util):
    """Smoke test: constructing a caption-mode TextClip must not raise."""
    clip = TextClip(
        text="foo",
        color="white",
        size=(640, 480),
        method="caption",
        align="center",
        font_size=25,
        font=util.FONT,
    )
    clip.close()
def generate_text_clip(text, number):
    """Create (or reuse) a captioned TTS clip for *text* and return its path.

    Relies on module-level ``name``, ``background_image`` and ``make_tts``.
    The rendered mp4 is cached on disk; an existing file is reused as-is.
    """
    filename = "tmp/" + name + "/clips/" + name + number + ".mp4"
    if not os.path.exists(filename):
        audio_filename = make_tts(text, number)
        audio = AudioFileClip(audio_filename)
        # Still background matching the narration's duration.
        image = ImageClip(background_image).set_fps(30)
        video = image.set_duration(audio.duration)
        withaudio = video.set_audio(audio)
        # NOTE(review): this produces a tiny fractional font size for any
        # realistic clip width (e.g. 50 / 1280 ~= 0.04); the intent was
        # probably withaudio.w / (len(text) + 10) — confirm before changing.
        fontsize = (len(text) + 10) / withaudio.w
        text_clip = TextClip(text, fontsize=fontsize,
                             size=(withaudio.w, withaudio.h)).set_pos("center")
        final_clip = CompositeVideoClip(
            [withaudio, text_clip.set_duration(video.duration)])
        final_clip.write_videofile(filename)
    return filename
def add_subtitle(video_path, subtitle_path, filename=None):
    """Overlay subtitles from *subtitle_path* at the bottom of *video_path*.

    Writes an mp4; the output name defaults to the input name carrying a
    '[WITH-SUBTITLE] ' prefix.
    """
    def make_caption(txt) -> TextClip:
        # White 45pt Gotham on a translucent black band.
        return TextClip(txt, font='assets/font/GothamMedium.ttf', fontsize=45,
                        color='white', bg_color='#00000066')

    generator: Callable[[Any], TextClip] = make_caption
    subtitle_clip = SubtitlesClip(subtitle_path, generator)
    subtitle = margin(clip=subtitle_clip.set_position(('center', 'bottom')),
                      bottom=35, opacity=0)
    video = VideoFileClip(video_path, audio=True)
    composed_video = CompositeVideoClip([video, subtitle])
    output_filename = filename or replace_extension(
        add_prefix_to_filename(video_path, '[WITH-SUBTITLE] '), '.mp4')
    composed_video.write_videofile(output_filename, threads=2, fps=video.fps)
def _make_round_credits(
        round_credits: RoundCredits,
        round_index: int,
        width: int,
        height: int,
        color: str = 'white',
        stroke_color: str = 'black',
        stroke_width: int = 2,  # FIX: was annotated ``str``; it is a numeric width
        font: str = 'Impact-Normal',
        fontsize: int = 60,
        gap: int = 0
) -> Clip:
    """Build a scrolling two-column credits clip for one round.

    Audio credits are listed under "ROUND <n> MUSIC" and video credits under
    "ROUND <n> VIDEOS"; the rendered text is frozen to an ImageClip and
    scrolled upward at ``height / CREDIT_DISPLAY_TIME`` lines per second.
    """
    texts = []
    # Blank padding so credits start off-screen.
    texts += [["\n", "\n"]] * 16
    if round_credits.audio != []:
        texts += _make_credit_texts(
            str(round_credits.audio[0]),
            "ROUND {} MUSIC".format(round_index + 1))
        for audio_credit in round_credits.audio[1:]:
            texts += _make_credit_texts(str(audio_credit))
    if round_credits.video != []:
        texts += _make_credit_texts(
            str(round_credits.video[0]),
            "ROUND {} VIDEOS".format(round_index + 1))
        for video_credit in round_credits.video[1:]:
            texts += _make_credit_texts(str(video_credit))
    texts += [["\n", "\n"]] * 2

    # Make two columns for the credits
    left, right = ("".join(t) for t in zip(*texts))
    left, right = [TextClip(txt, color=color, stroke_color=stroke_color,
                            stroke_width=stroke_width, font=font,
                            fontsize=fontsize, align=al)
                   for txt, al in [(left, 'East'), (right, 'West')]]

    # Combine the columns
    cc = CompositeVideoClip([left, right.set_position((left.w + gap, 0))],
                            size=(left.w + right.w + gap, right.h),
                            bg_color=None)

    # Scale to the required size
    scaled = resize(cc, width=width)

    # Transform the whole credit clip into an ImageClip
    credits_video = ImageClip(scaled.get_frame(0))
    mask = ImageClip(scaled.mask.get_frame(0), ismask=True)

    lines_per_second = height / CREDIT_DISPLAY_TIME

    def scroll(t):
        # Vertical position as a function of time: scroll upward.
        return ("center", -lines_per_second * t)

    credits_video = credits_video.set_position(scroll)
    credits_duration = credits_video.h / lines_per_second
    credits_video = credits_video.set_duration(credits_duration)
    return credits_video.set_mask(mask)
def create_thumbnail(self):
    """Compose and save a white 1280x720 thumbnail with the logo and this id."""
    logger.info('Creating thumbnail...')
    size = (1280, 720)
    white = (255, 255, 255)
    background = ColorClip(size, white)
    logo = (ImageClip(config.LOGO_PATH)
            .set_duration(1)
            .resize(width=400, height=200)
            .set_pos(('center', 'center')))
    text = (TextClip(txt=str(self.id), size=(500, 500))
            .set_position(('center', 'bottom')))
    CompositeVideoClip([background, logo, text]).save_frame(config.THUMB_PATH)
    logger.info('Thumbnail saved...')