def zoom(clip, screensize, show_full_height=False):
    """Slowly zooms in on an image clip over its duration,
    to make a slideshow feel less static.

    Parameters
    ----------
    clip
        ImageClip (with a duration set) to work on.
    screensize
        Desired (width, height) tuple.
    show_full_height
        Whether the image should be shown at full height. This is useful
        when 4:3 images are shown in a 16:9 video and need to be fully
        visible; otherwise they are shown at full width and the top and
        bottom are cut off.

    Returns
    -------
    VideoClip of the desired size.
    """
    # Tall images need to be resized differently
    if clip.h > clip.w or show_full_height:
        clip_resized = (clip.fx(resize, width=screensize[0] * 2)
                        .fx(resize, lambda t: 1 + 0.02 * t)
                        .set_position(('center', 'center')))
        clip_composited = CompositeVideoClip([clip_resized]) \
            .fx(resize, height=screensize[1])
    else:
        clip_resized = (clip.fx(resize, height=screensize[1] * 2)
                        .fx(resize, lambda t: 1 + 0.02 * t)
                        .set_position(('center', 'center')))
        clip_composited = CompositeVideoClip([clip_resized]) \
            .fx(resize, width=screensize[0])
    vid = CompositeVideoClip(
        [clip_composited.set_position(('center', 'center'))],
        size=screensize)
    return vid
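# A minimal usage sketch for zoom() above, assuming MoviePy 1.x with the
# `resize` fx imported as in the snippet; "photo.jpg" is a hypothetical path.
from moviepy.editor import ImageClip
from moviepy.video.fx.resize import resize

def example_zoom_slide():
    slide = ImageClip("photo.jpg", duration=5)  # still image as a 5 s clip
    slide = zoom(slide, screensize=(1920, 1080), show_full_height=True)
    slide.write_videofile("slide.mp4", fps=24)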
def normalize(v):
    """Fit a clip into a 1280x720 frame, using a blurred, stretched copy of
    the clip as background when the aspect ratios do not match."""
    w, h = v.size
    if w == 1280 and h == 720:
        return v
    if not (1280 / w == 1280 // w and 720 / h == 720 // h):
        backVideo = v.copy()
        backVideo = backVideo.fl_image(_blur)
        backVideo = backVideo.resize(width=1280, height=h * 1280 / w)
        nw = w * (720 / h)
        nh = 720
        if nw > 1280:
            nh = 720 * 1280 / nw
            nw = 1280
        v = v.set_pos('center').resize(width=nw, height=nh)
        if h / w >= 1:
            v = CompositeVideoClip([backVideo, v], size=(1280, 720))
        else:
            v = CompositeVideoClip([v], size=(1280, 720))
    else:
        v = v.set_pos('center').resize(width=w * (720 / h),
                                       height=h * (720 / h))
    return v
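# A minimal sketch of calling normalize() above; "portrait.mp4" is a
# hypothetical file, and the `_blur` helper used by normalize() must be
# defined elsewhere in the original module.
from moviepy.editor import VideoFileClip

def example_normalize():
    clip = VideoFileClip("portrait.mp4")   # e.g. a 1080x1920 phone video
    clip_720p = normalize(clip)            # fitted into a 1280x720 frame
    clip_720p.write_videofile("normalized.mp4")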
def test_compositing_with_same_layers():
    bottom_clip = BitmapClip([["ABC"], ["BCA"]], fps=1)
    top_clip = BitmapClip([["DEF"], ["EFD"]], fps=1)

    composite_clip = CompositeVideoClip([bottom_clip, top_clip])
    reversed_composite_clip = CompositeVideoClip([top_clip, bottom_clip])

    assert composite_clip == top_clip
    assert reversed_composite_clip == bottom_clip
def test_slide_in():
    duration = 0.1
    size = (10, 1)
    fps = 10
    color = (255, 0, 0)

    # left and right sides
    clip = ColorClip(
        color=color,
        duration=duration,
        size=size,
    ).with_fps(fps)

    for side in ["left", "right"]:
        new_clip = CompositeVideoClip([slide_in(clip, duration, side)])

        for t in np.arange(0, duration, duration / fps):
            n_reds, n_reds_expected = (0, int(t * 100))

            if t:
                assert n_reds_expected

            if n_reds_expected == 7:  # skip 7 due to inaccurate frame
                continue

            for r, g, b in new_clip.get_frame(t)[0]:
                if r == color[0] and g == color[1] and b == color[2]:
                    n_reds += 1

            assert n_reds == n_reds_expected

    # top and bottom sides
    clip = ColorClip(
        color=color,
        duration=duration,
        size=(size[1], size[0]),
    ).with_fps(fps)

    for side in ["top", "bottom"]:
        new_clip = CompositeVideoClip([slide_in(clip, duration, side)])

        for t in np.arange(0, duration, duration / fps):
            n_reds, n_reds_expected = (0, int(t * 100))

            if t:
                assert n_reds_expected

            if n_reds_expected == 7:  # skip 7 due to inaccurate frame
                continue

            for row in new_clip.get_frame(t):
                r, g, b = row[0]
                if r == color[0] and g == color[1] and b == color[2]:
                    n_reds += 1

            assert n_reds == n_reds_expected
def on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
    """Place the clip on a colored background.

    Returns a clip made of the current clip overlaid on a color clip of a
    possibly bigger size. Can serve to flatten transparent clips.

    Parameters
    ----------
    size
        Size (width, height) in pixels of the final clip. By default it
        will be the size of the current clip.

    color
        Background color of the final clip ([R, G, B]).

    pos
        Position of the clip in the final clip. 'center' is the default.

    col_opacity
        Parameter in 0..1 indicating the opacity of the colored background.
    """
    from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip

    if size is None:
        size = self.size
    if pos is None:
        pos = "center"

    if col_opacity is not None:
        colorclip = ColorClip(
            size, color=color, duration=self.duration
        ).with_opacity(col_opacity)
        result = CompositeVideoClip([colorclip, self.with_position(pos)])
    else:
        result = CompositeVideoClip(
            [self.with_position(pos)], size=size, bg_color=color
        )

    if (
        isinstance(self, ImageClip)
        and (not hasattr(pos, "__call__"))
        and ((self.mask is None) or isinstance(self.mask, ImageClip))
    ):
        new_result = result.to_ImageClip()
        if result.mask is not None:
            new_result.mask = result.mask.to_ImageClip()
        return new_result.with_duration(result.duration)

    return result
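# A usage sketch for on_color(). The method above uses the
# with_position/with_duration style API, so this example assumes that
# MoviePy version; "logo.png" is a hypothetical image with an alpha channel.
from moviepy import ImageClip

def example_on_color():
    logo = ImageClip("logo.png", transparent=True).with_duration(3)
    # Flatten the transparent logo onto a larger white background
    flat = logo.on_color(size=(640, 360), color=(255, 255, 255))
    flat.write_videofile("logo_on_white.mp4", fps=24)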
def test_set_layer():
    bottom_clip = BitmapClip([["ABC"], ["BCA"], ["CAB"]], fps=1).set_layer(1)
    top_clip = BitmapClip([["DEF"], ["EFD"]], fps=1).set_layer(2)

    composite_clip = CompositeVideoClip([bottom_clip, top_clip])
    reversed_composite_clip = CompositeVideoClip([top_clip, bottom_clip])

    # Make sure that the order of clips makes no difference to the composite clip
    assert composite_clip.subclip(0, 2) == reversed_composite_clip.subclip(0, 2)

    # Make sure that only the 'top' clip is kept
    assert top_clip.subclip(0, 2) == composite_clip.subclip(0, 2)

    # Make sure that it works even when there is only one clip playing at that time
    target_clip = BitmapClip([["DEF"], ["EFD"], ["CAB"]], fps=1)
    assert composite_clip == target_clip
def merge_video_audio(video_path, audio_path, outpath):
    """Merge a video file with an audio track."""
    audioclip = AudioFileClip(str(audio_path))
    videoclip = VideoFileClip(str(video_path))
    videoclip2 = videoclip.set_audio(audioclip)
    video = CompositeVideoClip([videoclip2])
    video.write_videofile(str(outpath), codec='mpeg4', fps=_fps)
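# A minimal usage sketch; the paths are hypothetical and `_fps` is assumed
# to be defined at module level in the original code.
def example_merge():
    merge_video_audio("clip.mp4", "soundtrack.m4a", "merged.mp4")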
def test_subtitles():
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    # Travis does not like TextClip, so return for now,
    # but allow regular users to still run the test below.
    if TRAVIS:
        return

    generator = lambda txt: TextClip(txt, font='Liberation-Mono',
                                     size=(800, 600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.to_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
def add_info_overlay(clip, size, video, pos, counter, total):
    video_id = video["id"]["videoId"]
    video_title = html.unescape(video["snippet"]["title"])
    video_published_at = video["snippet"]["publishedAt"]
    start, _, end = video["data"]["timestamps"][pos - 1]
    episode_counter = str(pos)
    aligned_counter = str(counter).rjust(len(str(total)))
    width, height = map(int, size.split("x"))

    clip_text_title = (TextClip(
        txt=f"{video_title}\nhttps://youtube.com/watch?v={video_id}\n"
            f"Timestamp: {start}\nDate: {video_published_at}",
        fontsize=24,
        color="black",
        bg_color="white",
        align="west",
    ).set_duration(clip.duration).set_position(("left", "bottom")))

    clip_text_counter = (TextClip(
        txt=f"Episode counter: {episode_counter}\n"
            f"Total counter : {aligned_counter}/{total}",
        fontsize=24,
        color="black",
        bg_color="white",
        align="west",
    ).set_duration(clip.duration).set_position(("left", "top")))

    if clip.size != [width, height]:
        clip = clip.fx(vfx.resize, width=width)

    return CompositeVideoClip([clip, clip_text_title, clip_text_counter],
                              size=(width, height))
def test_subtitles(util):
    red = ColorClip((800, 600), color=(255, 0, 0)).with_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).with_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).with_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    generator = lambda txt: TextClip(
        txt,
        font=util.FONT,
        size=(800, 600),
        font_size=24,
        method="caption",
        align="South",
        color="white",
    )

    subtitles = SubtitlesClip("media/subtitles.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.subclip(0, 0.5).write_videofile(
        os.path.join(util.TMP_DIR, "subtitles.mp4"),
        fps=5,
        logger=None,
    )

    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA

    subtitles = SubtitlesClip(MEDIA_SUBTITLES_DATA, generator)
    assert subtitles.subtitles == MEDIA_SUBTITLES_DATA
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green
    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))
    final_clip = CompositeVideoClip(
        [some_background_clip, masked_clip], use_bgclip=True
    ).with_duration(0.2)
    final_clip.write_videofile(os.path.join(TMP_DIR, "afterimage.mp4"), fps=30)
def convert_video(srtfile, xgenerator, invideo, outvideo):
    sub = SubtitlesClip(srtfile, xgenerator)
    # sub.set_position(("center", "bottom"), relative=True)
    myvideo = VideoFileClip(invideo)
    final = CompositeVideoClip(
        [myvideo, sub.set_position((0.2, 0.8), relative=True)])
    final.to_videofile(outvideo, fps=myvideo.fps)
from typing import List

def crossfade(videos: List[VideoClip],
              fade_duration: float = FADE_DURATION) -> VideoClip:
    # Overlap each clip's start with the previous clip's end by fade_duration
    for v_i in range(1, len(videos)):
        videos[v_i] = crossfadein(videos[v_i], fade_duration) \
            .set_start(videos[v_i - 1].end - fade_duration)
    return CompositeVideoClip(videos)
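# A minimal sketch of chaining clips with crossfade() above. Assumes
# MoviePy 1.x, where crossfadein lives in
# moviepy.video.compositing.transitions, and that FADE_DURATION is defined
# by the original module.
from moviepy.editor import ColorClip

def example_crossfade():
    clips = [
        ColorClip((320, 240), color=c, duration=2)
        for c in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
    ]
    movie = crossfade(clips, fade_duration=0.5)  # 5 s total with 0.5 s overlaps
    movie.write_videofile("crossfaded.mp4", fps=24)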
def make_gpx_track_view(width, height, gpx_file=None, gpx_style=None,
                        mapfile=None, maps_cache=None, font_path=None,
                        layer_padding=10, lat=45, lon=15, render_point=False):
    # TODO: at the given point location, render something to mark the start
    map_renderer = MapnikRenderer(map_w=width,
                                  map_h=height,
                                  gpx_file=gpx_file,
                                  gpx_style=gpx_style,
                                  mapfile=mapfile,
                                  maps_cache=maps_cache,
                                  font_path=font_path)
    map_clip, center_coordinate = map_renderer.render_map(
        lat, lon, zoom_to_layer=True, layer_padding=layer_padding)

    if render_point:
        # Composite a current-location point onto the map image
        map_w = map_clip.w
        map_h = map_clip.h
        circle_clip = circle
        circle_clip = circle_clip.set_pos(
            (center_coordinate[0] - radius, center_coordinate[1] - radius))
        # The circle is drawn on a transparent background
        both = CompositeVideoClip([map_clip, circle_clip])
        return both
    else:
        return map_clip
def _concatenate(self, clip: Clip, buffer=5):
    if self._transition == 'crossfadein':
        try:
            before_cross_part = self._final_clip.subclip(
                t_end=self._final_clip.duration - buffer)
            before_buffer = self._final_clip.subclip(
                t_start=self._final_clip.duration - buffer)
            after_cross_part = clip.subclip(
                t_start=self._transition_padding)
            after_buffer = clip.subclip(
                t_end=self._transition_padding).set_start(
                    buffer - self._transition_padding)
        except OSError:
            if buffer >= 60:
                raise Exception('Buffer {} is too big'.format(buffer))
            self._concatenate(clip, buffer=buffer * 2)
            return

        crossfade = CompositeVideoClip([
            before_buffer,
            after_buffer.crossfadein(self._transition_padding)
        ], use_bgclip=True)
        crossfade = crossfade.set_audio(before_buffer.audio)
        self._final_clip = concatenate_videoclips(
            [before_cross_part, crossfade, after_cross_part])
    else:
        self._final_clip = concatenate_videoclips([self._final_clip, clip])
def create_video(dependencies, targets):
    backing_track_path = output_dir_path / 'accompaniment.wav'
    with open(sync_map_path(output_dir_path), encoding='utf-8') as sync_json_file, \
            open(silences_path(output_dir_path), encoding='utf-8') as silence_json_file:
        lyric_clips = list(
            _generate_lyric_clips(
                json.load(sync_json_file),
                json.load(silence_json_file)
            )
        )
    backing_track_clip = AudioFileClip(str(backing_track_path))
    background_clip = ColorClip(
        size=(1024, 768),
        color=[0, 0, 0],
        duration=backing_track_clip.duration
    )
    karaoke = (
        CompositeVideoClip([background_clip] + lyric_clips)
        .set_duration(backing_track_clip.duration)
        .set_audio(backing_track_clip)
    )
    karaoke.write_videofile(
        str(targets[0]),
        fps=10,
        # Workaround for missing audio
        # https://github.com/Zulko/moviepy/issues/820
        codec='libx264',
        audio_codec='aac',
        temp_audiofile='temp-audio.m4a',
        remove_temp=True
    )
def load_clip(index):
    image = self.sequence[index]
    text = titles[index]
    if text.startswith("W:"):
        text = text[2:]
        show_full_height = True
    else:
        show_full_height = False

    if height is None and width is None:
        clip = ImageClip(image, duration=image_duration)
    else:
        if zoom_images:
            clip = ImageClip(image, duration=image_duration) \
                .fx(image_effect, screensize=(width, height),
                    duration=20, show_full_height=show_full_height)
        elif show_full_height:
            clip = ImageClip(image, duration=image_duration) \
                .fx(resize, height=height) \
                .set_position(('center', 'center'))
            clip = CompositeVideoClip([clip], size=(width, height))
        else:
            clip = ImageClip(image, duration=image_duration) \
                .fx(resize, height=height, width=width)

    # Adds the text label etc. on the clip
    clip = make_clip(clip, text, height, width, font, font_color, fontsize)
    return clip
def test_subtitles():
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    generator = lambda txt: TextClip(txt, font=FONT,
                                     size=(800, 600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data

    close_all_clips(locals())
def make_weekly_movie(cam: Cam, executor):
    root = Path(conf.root_dir) / 'data' / cam.name
    path = root / 'regular' / 'imgs'
    start = pendulum.yesterday()
    logger.info(f'Running make weekly movie for ww{start.week_of_year}')
    week_ago = start.subtract(weeks=1).date()
    sequence = []
    morning = pendulum.Time(6)
    evening = pendulum.Time(18)
    for day in sorted(list(path.iterdir()),
                      key=lambda x: pendulum.from_format(x.name, 'DD_MM_YYYY')):
        if pendulum.from_format(day.name, 'DD_MM_YYYY').date() > week_ago:
            for img in sorted(day.iterdir()):
                t_img = img.name.split('.')[0]
                t_img = pendulum.from_format(t_img, 'DD_MM_YYYY_HH-mm-ss').time()
                if morning < t_img < evening:
                    sequence.append(str(img))
    sequence = check_sequence_for_gray_images(sequence, executor)
    txt_clip = make_txt_movie(sequence, 100, executor)
    logger.info(f'Composing clip for weekly movie ww{start.week_of_year}')
    image_clip = ImageSequenceClip(sequence, fps=100)
    clip = CompositeVideoClip(
        [image_clip, txt_clip.set_position(('right', 'top'))],
        use_bgclip=True)
    movie_path = root / 'regular' / 'weekly' / f'ww{start.week_of_year}.mp4'
    movie_path.parent.mkdir(parents=True, exist_ok=True)
    clip.write_videofile(str(movie_path), audio=False)
    logger.info(f'Finished with clip for weekly movie ww{start.week_of_year}')
    return Movie(clip.h, clip.w, movie_path, sequence[seq_middle(sequence)])
def clips_array_maybe_none(array, rows_widths=None, cols_widths=None,
                           bg_color=None):
    """Like CompositeVideoClip.clips_array, but supports empty (None) slots
    in the array.

    Args:
        rows_widths: heights of the different rows in pixels. If None,
            they are set automatically.
        cols_widths: widths of the different columns in pixels. If None,
            they are set automatically.
        bg_color: fill color for the masked and unfilled regions. Set to
            None for these regions to be transparent (will be slower).
    """
    array = np.array(array)
    sizes_array = np.array(
        [[c.size if c else (0, 0) for c in line] for line in array])

    # Find row heights and column widths automatically if not provided
    if rows_widths is None:
        rows_widths = sizes_array[:, :, 1].max(axis=1)
    if cols_widths is None:
        cols_widths = sizes_array[:, :, 0].max(axis=0)
    rows_widths[rows_widths == 0] = rows_widths.max()
    cols_widths[cols_widths == 0] = cols_widths.max()

    xx = np.cumsum([0] + list(cols_widths))
    yy = np.cumsum([0] + list(rows_widths))
    for j, (x, cw) in list(enumerate(zip(xx[:-1], cols_widths))):
        for i, (y, rw) in list(enumerate(zip(yy[:-1], rows_widths))):
            clip = array[i, j]
            if clip is None:
                continue
            w, h = clip.size
            if (w < cw) or (h < rw):
                clip = (CompositeVideoClip([clip.set_pos('center')],
                                           size=(cw, rw),
                                           bg_color=bg_color)
                        .set_duration(clip.duration))
            array[i, j] = clip.set_pos((x, y))

    return CompositeVideoClip(
        [x for x in array.flatten() if x is not None],
        size=(xx[-1], yy[-1]),
        bg_color=bg_color)
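# A minimal sketch of a 2x2 grid with one empty slot, using
# clips_array_maybe_none() above (MoviePy 1.x ColorClip/set_pos API).
from moviepy.editor import ColorClip

def example_grid():
    red = ColorClip((160, 120), color=(255, 0, 0), duration=1)
    green = ColorClip((160, 120), color=(0, 255, 0), duration=1)
    blue = ColorClip((160, 120), color=(0, 0, 255), duration=1)
    grid = clips_array_maybe_none([[red, green], [blue, None]],
                                  bg_color=(0, 0, 0))
    grid.write_videofile("grid.mp4", fps=24)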
def make_loopable(clip, overlap_duration):
    """Makes the clip fade in progressively at its own end, this way it can
    be looped indefinitely.

    ``overlap_duration`` is the duration in seconds of the fade-in.
    """
    clip2 = clip.fx(crossfadein, overlap_duration).with_start(
        clip.duration - overlap_duration)
    return CompositeVideoClip([clip, clip2]).subclip(
        overlap_duration, clip.duration)
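# Usage sketch for make_loopable() above; "clip.mp4" is hypothetical, and
# the fx/with_start mix assumes the same transitional MoviePy API as the
# snippet itself (the import path may differ by version).
from moviepy import VideoFileClip

def example_loopable():
    clip = VideoFileClip("clip.mp4")
    looping = make_loopable(clip, overlap_duration=1.0)
    looping.write_videofile("loopable.mp4")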
def freeze_region(clip, t=0, region=None, outside_region=None, mask=None):
    """Freezes one region of the clip while the rest remains animated.

    You can choose one of three methods by providing either `region`,
    `outside_region`, or `mask`.

    Parameters
    ----------
    t
        Time at which to take the snapshot for the frozen region.

    region
        A tuple (x1, y1, x2, y2) defining the region of the screen (in
        pixels) which will be frozen. You can provide outside_region or
        mask instead.

    outside_region
        A tuple (x1, y1, x2, y2) defining the region of the screen (in
        pixels) which will be the only non-frozen region.

    mask
        If not None, will overlay a frozen version of the clip on the
        current clip, with the provided mask. In other words, the "visible"
        pixels in the mask indicate the frozen region in the final picture.
    """
    if region is not None:
        x1, y1, x2, y2 = region
        freeze = (clip.fx(crop, *region)
                  .set_position((x1, y1))
                  .to_ImageClip(t=t)
                  .set_duration(clip.duration))
        return CompositeVideoClip([clip, freeze])

    elif outside_region is not None:
        x1, y1, x2, y2 = outside_region
        animated_region = (clip.fx(crop, *outside_region)
                           .set_position((x1, y1)))
        freeze = clip.to_ImageClip(t=t).set_duration(clip.duration)
        return CompositeVideoClip([freeze, animated_region])

    elif mask is not None:
        freeze = (clip.to_ImageClip(t=t)
                  .set_duration(clip.duration)
                  .set_mask(mask))
        return CompositeVideoClip([clip, freeze])
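# Usage sketch for freeze_region() above: keep a 100x100 top-left corner
# frozen at its t=1 s frame while the rest keeps playing. "movie.mp4" is a
# hypothetical file and `crop` is assumed imported as in the snippet
# (moviepy.video.fx.crop in MoviePy 1.x).
from moviepy.editor import VideoFileClip

def example_freeze_region():
    clip = VideoFileClip("movie.mp4")
    frozen = freeze_region(clip, t=1, region=(0, 0, 100, 100))
    frozen.write_videofile("frozen_corner.mp4")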
def make_loopable(clip, cross):
    """Makes the clip fade in progressively at its own end, this way it can
    be looped indefinitely.

    ``cross`` is the duration in seconds of the fade-in.
    """
    d = clip.duration
    clip2 = clip.fx(transfx.crossfadein, cross).set_start(d - cross)
    return CompositeVideoClip([clip, clip2]).subclip(cross, d)
def test_PR_610():
    """Test that the max fps of the video clips is used for the composite
    video clip."""
    clip1 = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
    clip2 = ColorClip((640, 480), color=(0, 255, 0)).with_duration(1)
    clip1.fps = 24
    clip2.fps = 25
    composite = CompositeVideoClip([clip1, clip2])
    assert composite.fps == 25
def addlogo(file_dir, img="", time=20, X=30, Y=30):
    clip = VideoFileClip(file_dir)
    img_clip = ImageClip(img)
    # Position and duration of the logo overlay
    img_clip = img_clip.set_pos((X, Y)).set_duration(time)
    clip = CompositeVideoClip([clip, img_clip])
    filen = os.path.basename(file_dir)
    clip.write_videofile(os.path.join(work_path, filen))
    clip.close()
def generate_intro():
    logger.info('Generating intro...')
    color = (255, 255, 255)
    size = (1280, 720)
    clip = ColorClip(size, color, duration=3)
    logo = ImageClip(config.LOGO_PATH).set_duration(clip.duration) \
        .resize(width=400, height=200) \
        .set_pos(('center', 'center'))
    return CompositeVideoClip([clip, logo])
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0, 255, 1])  # for green
    some_background_clip = ColorClip((800, 600), color=(255, 255, 255))
    final_clip = CompositeVideoClip([some_background_clip, masked_clip],
                                    use_bgclip=True)
    final_clip.duration = 5
    final_clip.write_videofile("/tmp/afterimage.mp4", fps=30)
def add_pics(self, clip):
    logger.info('Adding pics...')
    pic_clip = [clip]
    for i in range(self.pic_num):
        x_pos, y_pos, x_size, y_size = self.generate_coordinates(clip)
        pic_path = config.PIC_PATH + str(i) + '.jpg'
        pic = ImageClip(pic_path).set_duration(clip.duration) \
            .resize((x_size, y_size)) \
            .set_pos((x_pos, y_pos)).add_mask() \
            .rotate(random.randint(-180, 180))
        pic_clip.append(pic)
    return CompositeVideoClip(pic_clip)
def do_composite(clips, height, width):
    """Create a composition of clips.

    clips  - list of clips to composite, ordered by layer
    height - height of the final composition
    width  - width of the final composition
    """
    result = CompositeVideoClip(clips, size=(width, height))
    return result
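# A trivial usage sketch for do_composite() above, layering a centered
# square over a background (MoviePy 1.x ColorClip/set_position API).
from moviepy.editor import ColorClip

def example_do_composite():
    bg = ColorClip((640, 360), color=(0, 0, 0), duration=2)
    fg = ColorClip((100, 100), color=(255, 255, 255), duration=2) \
        .set_position("center")
    movie = do_composite([bg, fg], height=360, width=640)
    movie.write_videofile("composited.mp4", fps=24)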
def add_screens(self, clip):
    logger.info('Adding screens...')
    screen_num = random.randint(3, 7)
    screen_clip = [clip]
    for i in range(screen_num):
        x_pos, y_pos, x_size, y_size = self.generate_coordinates(clip)
        screen = clip.volumex(0).resize((x_size, y_size)) \
            .set_pos((x_pos, y_pos)).add_mask() \
            .rotate(random.randint(-180, 180))
        screen = self.modify_clip(screen)
        screen_clip.append(screen)
    return CompositeVideoClip(screen_clip)