def __init__(self, subtitles, make_textclip=None, encoding=None):

    VideoClip.__init__(self, has_constant_size=False)

    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles, encoding=encoding)

    # subtitles = [(map(cvsecs, tt),txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(
            txt,
            font="Georgia-Bold",
            fontsize=24,
            color="white",
            stroke_color="black",
            stroke_width=0.5,
        )

    self.make_textclip = make_textclip
    self.start = 0
    self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
    self.end = self.duration

    def add_textclip_if_none(t):
        """Will generate a textclip if it hasn't been generated yet.
        If there is no subtitle to show at t, return False.
        """
        sub = [
            ((ta, tb), txt)
            for ((ta, tb), txt) in self.textclips.keys()
            if (ta <= t < tb)
        ]
        if not sub:
            sub = [
                ((ta, tb), txt)
                for ((ta, tb), txt) in self.subtitles
                if (ta <= t < tb)
            ]
            if not sub:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def make_frame(t):
        sub = add_textclip_if_none(t)
        return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]])

    def make_mask_frame(t):
        sub = add_textclip_if_none(t)
        return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]])

    self.make_frame = make_frame
    hasmask = bool(self.make_textclip("T").mask)
    self.mask = VideoClip(make_mask_frame, ismask=True) if hasmask else None
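# Hedged usage sketch (not from the original source): how a SubtitlesClip built by
# the constructor above is typically composited over a video, assuming the
# MoviePy 1.x API. The file names and import paths are placeholders/assumptions.
from moviepy.editor import VideoFileClip, CompositeVideoClip
from moviepy.video.tools.subtitles import SubtitlesClip

subs = SubtitlesClip("subs.srt", encoding="utf-8")   # parsed into ((t_start, t_end), text) pairs
video = VideoFileClip("video.mp4")
final = CompositeVideoClip([video, subs.set_position(("center", "bottom"))])
final.write_videofile("subtitled.mp4", fps=video.fps)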
def __init__(self, filename, has_mask=False, audio=True,
             audio_buffersize=200000, audio_fps=44100,
             audio_nbytes=2, verbose=False):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size

    if has_mask:
        self.get_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
        mask_gf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, get_frame=mask_gf)
                     .set_duration(self.duration))
        self.mask.fps = self.fps
    else:
        self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def __init__(self, filename, ismask=False, has_mask=False, audio=True,
             audio_buffersize=200000, audio_fps=44100, audio_nbytes=2,
             verbose=False):

    VideoClip.__init__(self, ismask)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def __init__(self, filename, ismask=False, has_mask=False, audio=True,
             audio_buffersize=200000, audio_fps=44100, audio_nbytes=2,
             verbose=False):

    VideoClip.__init__(self, ismask)

    # We store the construction parameters in case we need to make
    # a copy (a 'co-reader').
    self.parameters = {'filename': filename, 'ismask': ismask,
                       'has_mask': has_mask, 'audio': audio,
                       'audio_buffersize': audio_buffersize}

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio:
        try:
            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
        except:
            if verbose:
                print "No audio found in %s" % filename
            pass
def __init__(self, foldername, fps, withmask=True, ismask=False):

    VideoClip.__init__(self, ismask=ismask)

    self.directory = foldername
    self.fps = fps
    self.imagefiles = sorted(os.listdir(foldername))
    self.duration = 1.0 * len(self.imagefiles) / self.fps
    self.end = self.duration
    self.lastpos = None
    self.lastimage = None

    def get_frame(t):
        # index of the image to show at time t
        pos = int(self.fps * t)
        if pos != self.lastpos:
            self.lastimage = ffmpeg_read_image(self.imagefiles[pos],
                                               withmask=withmask)
            self.lastpos = pos
        return self.lastimage

    self.get_frame = get_frame
    self.size = get_frame(0).shape[:2][::-1]
def __init__(
    self,
    filename,
    has_mask=False,
    audio=True,
    audio_buffersize=200000,
    target_resolution=None,
    resize_algorithm="bicubic",
    audio_fps=44100,
    audio_nbytes=2,
    fps_source="tbr",
):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(
        filename,
        pix_fmt=pix_fmt,
        target_resolution=target_resolution,
        resize_algo=resize_algorithm,
        fps_source=fps_source,
    )

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.rotation = self.reader.rotation
    self.filename = filename

    if has_mask:
        self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]

        def mask_mf(t):
            return self.reader.get_frame(t)[:, :, 3] / 255.0

        self.mask = VideoClip(
            ismask=True, make_frame=mask_mf).set_duration(self.duration)
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos["audio_found"]:
        self.audio = AudioFileClip(
            filename,
            buffersize=audio_buffersize,
            fps=audio_fps,
            nbytes=audio_nbytes,
        )
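# Hedged usage sketch (assumes the MoviePy 1.x API; "movie.mp4" is a placeholder):
# loading a file with the constructor above and pulling a single frame.
from moviepy.editor import VideoFileClip

clip = VideoFileClip("movie.mp4", target_resolution=(720, None), fps_source="tbr")
print(clip.duration, clip.fps, clip.size)   # attributes copied from the FFmpeg reader
frame = clip.get_frame(1.5)                 # RGB numpy array of the frame at t = 1.5 s
clip.close()                                # releases the video reader (and audio reader, if any)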
def __init__(self, glob_store, freq, fft_clip, ismask=False):

    def make_frame(t):
        freq_amplitude = fft_clip.freq_amplitude(freq, t)
        image_data = glob_store.image_from_normal(freq_amplitude)
        return image_data

    VideoClip.__init__(self, make_frame=make_frame, ismask=ismask,
                       duration=fft_clip.duration)
def __init__(self, clip, gpx_file=None, time_offset=0, interval=0,
             speedup_factor=1, clip_start_time=None, config=None,
             calculate_stats=False):

    self.stats = Counter()
    self.calculate_stats = calculate_stats
    self.clip = clip
    duration = clip.duration

    # Finds breaks in image sequences (a break is when we have GPS
    # information but no images at that time).
    # FIXME: find breaks with GPS and images, not just durations
    if isinstance(clip, ImageSequenceClip):
        self.have_any_breaks = any(
            (duration > (config.effect_length * 2 + 2)
             for duration in self.clip.durations))
        self.gpx_data = GPXData(sequence=self.clip.sequence,
                                gpx_file=gpx_file,
                                time_offset=time_offset)
        self.durations = self.clip.durations
        self.images_starts = self.clip.images_starts
        self.find_image_index = lambda t: max([
            i for i in range(len(self.clip.sequence))
            if self.clip.images_starts[i] <= t
        ])
    else:
        if speedup_factor != 1:
            self.clip.old_make_frame = self.clip.make_frame
            self.clip.make_frame = lambda t: \
                self.clip.old_make_frame(t * self.speedup_factor)
            duration = duration / speedup_factor
        self.have_any_breaks = False
        self.gpx_data = GPXData(gpx_file=gpx_file,
                                gpx_start_time=clip_start_time,
                                time_offset=time_offset)
        self.find_image_index = lambda t: None

    VideoClip.__init__(self, ismask=clip.ismask, duration=duration)

    self.size = clip.size
    # TODO: check that this exists in both kinds of clips
    # self.fps = clip.fps
    self.chart_data = {}
    self.speedup_factor = speedup_factor
    self.gpx_file = gpx_file

    if config is not None:
        self.config = config
        for key, key_config in config.config_items(need_config=True):
            print(key, "needs config")
            key_config.init(vars(self))
def __init__(self, subtitles, make_textclip=None):

    VideoClip.__init__(self, has_constant_size=False)

    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles)

    subtitles = [(map(cvsecs, tt), txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                             fontsize=24, color='white',
                                             stroke_color='black',
                                             stroke_width=0.5)

    self.make_textclip = make_textclip
    self.inicia = 0
    self.duracion = max([tb for ((ta, tb), txt) in self.subtitles])
    self.fin = self.duracion

    def add_textclip_if_none(t):
        """Will generate a textclip if it hasn't been generated yet.
        If there is no subtitle to show at t, return False.
        """
        sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
               if (ta <= t < tb)]
        if sub == []:
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                   if (ta <= t < tb)]
            if sub == []:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def make_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].get_frame(t) if sub
                else np.array([[[0, 0, 0]]]))

    def make_mask_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].mask.get_frame(t) if sub
                else np.array([[0]]))

    self.make_frame = make_frame
    hasmask = (self.make_textclip('T').mask is not None)
    self.mask = (VideoClip(make_mask_frame, ismask=True) if hasmask else None)
def __init__(self, clips, size=None, bg_color=None, transparent=False,
             ismask=False):

    if size is None:
        size = clips[0].size

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.transparent = transparent
    self.bg_color = bg_color
    self.bg = ColorClip(size, col=self.bg_color).get_frame(0)

    # compute duration
    ends = [c.end for c in self.clips]
    if not any([(e is None) for e in ends]):
        self.duration = max(ends)
        self.end = max(ends)

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio != None]
    if len(audioclips) > 0:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask
    if transparent:
        maskclips = [c.mask.set_pos(c.pos) for c in self.clips
                     if c.mask is not None]
        if maskclips != []:
            self.mask = CompositeVideoClip(maskclips, self.size,
                                           transparent=False, ismask=True)

    def make_frame(t):
        """ The clips playing at time `t` are blitted over one another. """
        f = self.bg
        for c in self.playing_clips(t):
            f = c.blit_on(f, t)
        return f

    self.make_frame = make_frame
def __init__(self, subtitles, make_textclip=None):

    VideoClip.__init__(self, has_constant_size=False)

    if isinstance(subtitles, basestring):
        subtitles = file_to_subtitles(subtitles)

    subtitles = [(map(cvsecs, tt), txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                             fontsize=24, color='white',
                                             stroke_color='black',
                                             stroke_width=0.5)

    self.make_textclip = make_textclip
    self.start = 0
    self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
    self.end = self.duration

    def add_textclip_if_none(t):
        """Will generate a textclip if it hasn't been generated yet.
        If there is no subtitle to show at t, return False.
        """
        sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
               if (ta <= t < tb)]
        if sub == []:
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                   if (ta <= t < tb)]
            if sub == []:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def make_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].get_frame(t) if sub
                else np.array([[[0, 0, 0]]]))

    def make_mask_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].mask.get_frame(t) if sub
                else np.array([[0]]))

    self.make_frame = make_frame
    hasmask = (self.make_textclip('T').mask is not None)
    self.mask = (VideoClip(make_mask_frame, ismask=True) if hasmask else None)
def __init__(self, filename, has_mask=False, audio=True,
             audio_buffersize=200000, target_resolution=None,
             resize_algorithm='bicubic', audio_fps=44100, audio_nbytes=2,
             verbose=False, fps_source='tbr'):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = None  # need this just in case FFMPEG has issues (__del__ complains)
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                     target_resolution=target_resolution,
                                     resize_algo=resize_algorithm,
                                     fps_source=fps_source)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.rotation = self.reader.rotation
    self.filename = self.reader.filename

    if has_mask:
        self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
        mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                     .set_duration(self.duration))
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def __init__(self, subtitles, make_textclip=None):

    VideoClip.__init__(self)

    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles)

    subtitles = [(map(cvsecs, tt), txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                             fontsize=24, color='white',
                                             stroke_color='black',
                                             stroke_width=0.5)

    self.make_textclip = make_textclip
    self.start = 0
    self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
    self.end = self.duration

    def add_textclip_if_none(t):
        sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
               if (ta <= t < tb)]
        if sub == []:
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                   if (ta <= t < tb)]
            if sub == []:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def make_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].get_frame(t) if sub
                else np.array([[[0, 0, 0]]]))

    def make_mask_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].mask.get_frame(t) if sub
                else np.array([[0]]))

    self.make_frame = make_frame
    self.mask = VideoClip(make_mask_frame, ismask=True)
def __init__(self, clips, size=None, bg_color=None, transparent=False,
             ismask=False):

    if size is None:
        size = clips[0].size

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.transparent = transparent
    self.bg_color = bg_color
    self.bg = ColorClip(size, col=self.bg_color).get_frame(0)

    # compute duration
    ends = [c.end for c in self.clips]
    if not any([(e is None) for e in ends]):
        self.duration = max(ends)
        self.end = max(ends)

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio != None]
    if len(audioclips) > 0:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask
    if transparent:
        maskclips = [c.mask.set_pos(c.pos) for c in self.clips
                     if c.mask is not None]
        if maskclips != []:
            self.mask = CompositeVideoClip(maskclips, self.size,
                                           transparent=False, ismask=True)

    def gf(t):
        """ The clips playing at time `t` are blitted over one another. """
        f = self.bg
        for c in self.playing_clips(t):
            f = c.blit_on(f, t)
        return f

    self.get_frame = gf
def __init__(self, subtitles, make_textclip=None):

    VideoClip.__init__(self)

    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles)

    subtitles = [(map(cvsecs, tt), txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                             fontsize=24, color='white',
                                             stroke_color='black',
                                             stroke_width=0.5)

    self.make_textclip = make_textclip
    self.start = 0
    self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
    self.end = self.duration

    def add_textclip_if_none(t):
        sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
               if (ta <= t < tb)]
        if sub == []:
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                   if (ta <= t < tb)]
            if sub == []:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def get_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].get_frame(t) if sub
                else np.array([[[0, 0, 0]]]))

    def mask_get_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].mask.get_frame(t) if sub
                else np.array([[0]]))

    self.get_frame = get_frame
    self.mask = VideoClip(ismask=True, get_frame=mask_get_frame)
def __init__(self, filename, has_mask=False, audio=True,
             audio_buffersize=200000, target_resolution=None,
             resize_algorithm='bicubic', audio_fps=44100, audio_nbytes=2,
             verbose=False, fps_source='tbr'):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = None  # need this just in case FFMPEG has issues (__del__ complains)
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                     target_resolution=target_resolution,
                                     resize_algo=resize_algorithm,
                                     fps_source=fps_source)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.rotation = self.reader.rotation
    self.filename = self.reader.filename

    if has_mask:
        self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
        mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                     .set_duration(self.duration))
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def __init__(self, filename, ismask=False, has_mask=False, audio=True,
             audio_buffersize=200000, audio_fps=44100, audio_nbytes=2,
             verbose=False):

    VideoClip.__init__(self, ismask)

    # We store the construction parameters in case we need to make
    # a copy (a 'co-reader').
    self.parameters = {
        'filename': filename,
        'ismask': ismask,
        'has_mask': has_mask,
        'audio': audio,
        'audio_buffersize': audio_buffersize
    }

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio:
        try:
            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
        except:
            if verbose:
                print "No audio found in %s" % filename
            pass
def __init__(self, foldername, fps, transparent=True, ismask=False):

    VideoClip.__init__(self, ismask=ismask)

    self.directory = foldername
    self.fps = fps
    allfiles = os.listdir(foldername)
    self.pics = sorted(["%s/%s" % (foldername, f) for f in allfiles
                        if not f.endswith(('.txt', '.wav'))])

    audio = [f for f in allfiles if f.endswith('.wav')]

    if len(audio) > 0:
        self.audio = AudioFileClip(audio[0])
        self.audiofile = audio[0]

    self.size = imread(self.pics[0]).shape[:2][::-1]

    if imread(self.pics[0]).shape[2] == 4:  # transparent png

        if ismask:
            def get_frame(t):
                return 1.0 * imread(self.pics[int(self.fps * t)])[:, :, 3] / 255
        else:
            def get_frame(t):
                # keep only the three RGB channels
                return imread(self.pics[int(self.fps * t)])[:, :, :3]

            if transparent:
                self.mask = DirectoryClip(foldername, fps, ismask=True)

    else:
        def get_frame(t):
            return imread(self.pics[int(self.fps * t)])

    self.get_frame = get_frame
    self.duration = 1.0 * len(self.pics) / self.fps
def __init__(self, filename, ismask=False, has_mask=False, audio=True,
             audio_buffersize=200000, audio_fps=44100, audio_nbytes=2,
             verbose=False):

    VideoClip.__init__(self, ismask)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                     print_infos=verbose)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def __init__(self, filename, has_mask=False, audio=True,
             audio_buffersize=200000, audio_fps=44100, audio_nbytes=2,
             verbose=False):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
    self.reader = reader

    # Make some of the reader's attributes accessible from the clip
    self.duracion = self.reader.duracion
    self.fin = self.reader.duracion
    self.fps = self.reader.fps
    self.tamano = self.reader.tamano

    if has_mask:
        self.make_frame = lambda t: reader.get_frame(t)[:, :, :3]
        mask_mf = lambda t: reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                     .set_duracion(self.duracion))
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def __init__(
    self,
    clips,
    size=None,
    bg_color=None,
    use_bgclip=False,
    ismask=False,
    allow_no_bg=False,
):
    if size is None:
        size = clips[0].size

    if use_bgclip and (clips[0].mask is None):
        transparent = False
    else:
        transparent = bg_color is None

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    fpss = [
        c.fps for c in clips
        if hasattr(c, "fps") and c.fps is not None
    ]
    if len(fpss) == 0:
        self.fps = None
    else:
        self.fps = max(fpss)

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.bg_color = bg_color
    self.created_bg = False

    if use_bgclip:
        self.bg = clips[0]
        self.clips = clips[1:]
    elif allow_no_bg:
        # In some cases if we are rendering images,
        # we allow no bottom BG
        self.clips = clips
        self.bg = None
    else:
        self.bg = ColorClip(size, color=self.bg_color)
        self.created_bg = True

    # compute duration
    ends = [c.end for c in self.clips]
    if not any([(e is None) for e in ends]):
        self.duration = max(ends)
        self.end = max(ends)

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if len(audioclips) > 0:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask if necessary
    if transparent:
        maskclips = [
            (c.mask if (c.mask is not None) else c.add_mask().mask).set_position(
                c.pos).set_end(c.end).set_start(c.start, change_end=False)
            for c in self.clips
        ]
        self.mask = CompositeVideoClip(maskclips, self.size,
                                       ismask=True, bg_color=0.0)

    def make_frame(t):
        """ The clips playing at time `t` are blitted over one another. """
        f = None if self.bg is None else self.bg.get_frame(t)
        for c in self.playing_clips(t):
            if f is None:
                f = c.get_frame(t)
            else:
                f = c.blit_on(f, t)
        return f

    self.make_frame = make_frame
def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
             ismask=False):

    if size is None:
        size = clips[0].size

    if use_bgclip and (clips[0].mask is None):
        transparent = False
    else:
        transparent = (bg_color is None)

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    fps_list = list(set([c.fps for c in clips if hasattr(c, 'fps')]))
    if len(fps_list) == 1:
        self.fps = fps_list[0]

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.bg_color = bg_color

    if use_bgclip:
        self.bg = clips[0]
        self.clips = clips[1:]
    else:
        self.clips = clips
        self.bg = ColorClip(size, col=self.bg_color)

    # compute duration
    ends = [c.end for c in self.clips]
    if not any([(e is None) for e in ends]):
        self.duration = max(ends)
        self.end = max(ends)

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if len(audioclips) > 0:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask if necessary
    if transparent:
        maskclips = [
            (c.mask if (c.mask is not None) else c.add_mask().mask).set_pos(c.pos)
            for c in self.clips
        ]
        self.mask = CompositeVideoClip(maskclips, self.size,
                                       ismask=True, bg_color=0.0)

    def make_frame(t):
        """ The clips playing at time `t` are blitted over one another. """
        f = self.bg.get_frame(t)
        for c in self.playing_clips(t):
            f = c.blit_on(f, t)
        return f

    self.make_frame = make_frame
def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
             ismask=False):

    if size is None:
        size = clips[0].size

    if use_bgclip and (clips[0].mask is None):
        transparent = False
    else:
        transparent = (bg_color is None)

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    fps_list = list(set([c.fps for c in clips if hasattr(c, 'fps')]))
    if len(fps_list) == 1:
        self.fps = fps_list[0]

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.bg_color = bg_color

    if use_bgclip:
        self.bg = clips[0]
        self.clips = clips[1:]
    else:
        self.clips = clips
        self.bg = ColorClip(size, col=self.bg_color)

    # compute duration
    ends = [c.end for c in self.clips]
    if not any([(e is None) for e in ends]):
        self.duration = max(ends)
        self.end = max(ends)

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if len(audioclips) > 0:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask if necessary
    if transparent:
        maskclips = [(c.mask if (c.mask is not None) else c.add_mask().mask)
                     .set_pos(c.pos).set_end(c.end)
                     .set_start(c.start, change_end=False)
                     for c in self.clips]
        self.mask = CompositeVideoClip(maskclips, self.size,
                                       ismask=True, bg_color=0.0)

    def make_frame(t):
        """ The clips playing at time `t` are blitted over one another. """
        f = self.bg.get_frame(t)
        for c in self.playing_clips(t):
            f = c.blit_on(f, t)
        return f

    self.make_frame = make_frame
def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
             ismask=False):

    if size is None:
        size = clips[0].size

    if use_bgclip and (clips[0].mask is None):
        transparent = False
    else:
        transparent = bg_color is None

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    fpss = [c.fps for c in clips if getattr(c, "fps", None)]
    self.fps = max(fpss) if fpss else None

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.bg_color = bg_color

    if use_bgclip:
        self.bg = clips[0]
        self.clips = clips[1:]
        self.created_bg = False
    else:
        self.clips = clips
        self.bg = ColorClip(size, color=self.bg_color, ismask=ismask)
        self.created_bg = True

    # compute duration
    ends = [c.end for c in self.clips]
    if None not in ends:
        duration = max(ends)
        self.duration = duration
        self.end = duration

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if audioclips:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask if necessary
    if transparent:
        maskclips = [
            (c.mask if (c.mask is not None) else c.add_mask().mask)
            .set_position(c.pos)
            .set_end(c.end)
            .set_start(c.start, change_end=False)
            for c in self.clips
        ]
        self.mask = CompositeVideoClip(
            maskclips, self.size, ismask=True, bg_color=0.0
        )

    def make_frame(t):
        """ The clips playing at time `t` are blitted over one another. """
        f = self.bg.get_frame(t)
        for c in self.playing_clips(t):
            f = c.blit_on(f, t)
        return f

    self.make_frame = make_frame
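# Hedged usage sketch (MoviePy 1.x API assumed; file names are placeholders):
# compositing a text overlay on a base clip with the constructor above. With
# bg_color=None the composite also builds a combined mask from the clips' masks;
# use_bgclip=True would instead treat the first clip as the opaque background.
from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip

base = VideoFileClip("background.mp4")
title = (TextClip("Hello", fontsize=48, color="white")
         .set_duration(base.duration)
         .set_position(("center", "top")))
final = CompositeVideoClip([base, title])
final.write_videofile("titled.mp4", fps=24)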
def __init__(self, sequence, fps=None, durations=None, with_mask=True,
             ismask=False, load_images=False):

    # CODE WRITTEN AS IT CAME, MAY BE IMPROVED IN THE FUTURE

    if (fps is None) and (durations is None):
        raise ValueError("Please provide either 'fps' or 'durations'.")

    VideoClip.__init__(self, ismask=ismask)

    # Parse the data
    fromfiles = True

    if isinstance(sequence, list) or isinstance(sequence, np.ndarray):
        if isinstance(sequence[0], str):
            if load_images:
                sequence = [imread(f) for f in sequence]
                fromfiles = False
            else:
                fromfiles = True
        else:
            # sequence is already a list of numpy arrays
            fromfiles = False
    else:
        # sequence is a folder name, make it a list of files:
        fromfiles = True
        sequence = sorted([os.path.join(sequence, f)
                           for f in os.listdir(sequence)])

    # check that all the images are of the same size
    if isinstance(sequence[0], str):
        size = imread(sequence[0]).shape
    else:
        size = sequence[0].shape

    for image in sequence:
        image1 = image
        if isinstance(image, str):
            image1 = imread(image)
        if size != image1.shape:
            raise Exception("Moviepy: ImageSequenceClip requires all images "
                            "to be the same size")

    self.fps = fps
    if fps is not None:
        durations = [1.0 / fps for image in sequence]
        self.images_starts = [1.0 * i / fps - np.finfo(np.float32).eps
                              for i in range(len(sequence))]
    else:
        self.images_starts = [0] + list(np.cumsum(durations))
    self.durations = durations
    self.duration = sum(durations)
    self.end = self.duration
    self.sequence = sequence

    def find_image_index(t):
        return max([i for i in range(len(self.sequence))
                    if self.images_starts[i] <= t])

    if fromfiles:

        self.lastindex = None
        self.lastimage = None

        def make_frame(t):
            index = find_image_index(t)
            if index != self.lastindex:
                self.lastimage = imread(self.sequence[index])[:, :, :3]
                self.lastindex = index
            return self.lastimage

        if with_mask and (imread(self.sequence[0]).shape[2] == 4):

            self.mask = VideoClip(ismask=True)
            self.mask.lastindex = None
            self.mask.lastimage = None

            def mask_make_frame(t):
                index = find_image_index(t)
                if index != self.mask.lastindex:
                    frame = imread(self.sequence[index])[:, :, 3]
                    self.mask.lastimage = frame.astype(float) / 255
                    self.mask.lastindex = index
                return self.mask.lastimage

            self.mask.make_frame = mask_make_frame
            self.mask.size = mask_make_frame(0).shape[:2][::-1]

    else:

        def make_frame(t):
            index = find_image_index(t)
            return self.sequence[index][:, :, :3]

        if with_mask and (self.sequence[0].shape[2] == 4):

            self.mask = VideoClip(ismask=True)

            def mask_make_frame(t):
                index = find_image_index(t)
                return 1.0 * self.sequence[index][:, :, 3] / 255

            self.mask.make_frame = mask_make_frame
            self.mask.size = mask_make_frame(0).shape[:2][::-1]

    self.make_frame = make_frame
    self.size = make_frame(0).shape[:2][::-1]
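# Hedged usage sketch (MoviePy 1.x API assumed; paths and timings are placeholders):
# the constructor above accepts either a constant fps or one explicit duration per image.
from moviepy.editor import ImageSequenceClip

clip_fps = ImageSequenceClip(["img0.png", "img1.png", "img2.png"], fps=2)
clip_dur = ImageSequenceClip(["img0.png", "img1.png", "img2.png"],
                             durations=[0.5, 1.0, 2.0])
clip_dur.write_videofile("slides.mp4", fps=24)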
def __init__(
    self, clips, size=None, bg_color=None, use_bgclip=False, is_mask=False
):
    if size is None:
        size = clips[0].size

    if use_bgclip and (clips[0].mask is None):
        transparent = False
    else:
        transparent = bg_color is None

    if bg_color is None:
        bg_color = 0.0 if is_mask else (0, 0, 0)

    fpss = [clip.fps for clip in clips if getattr(clip, "fps", None)]
    self.fps = max(fpss) if fpss else None

    VideoClip.__init__(self)

    self.size = size
    self.is_mask = is_mask
    self.clips = clips
    self.bg_color = bg_color

    if use_bgclip:
        self.bg = clips[0]
        self.clips = clips[1:]
        self.created_bg = False
    else:
        self.clips = clips
        self.bg = ColorClip(size, color=self.bg_color, is_mask=is_mask)
        self.created_bg = True

    # order self.clips by layer
    self.clips = sorted(self.clips, key=lambda clip: clip.layer)

    # compute duration
    ends = [clip.end for clip in self.clips]
    if None not in ends:
        duration = max(ends)
        self.duration = duration
        self.end = duration

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if audioclips:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask if necessary
    if transparent:
        maskclips = [
            (clip.mask if (clip.mask is not None) else clip.add_mask().mask)
            .with_position(clip.pos)
            .with_end(clip.end)
            .with_start(clip.start, change_end=False)
            .with_layer(clip.layer)
            for clip in self.clips
        ]
        self.mask = CompositeVideoClip(
            maskclips, self.size, is_mask=True, bg_color=0.0
        )
def __init__(self, clips, size=None, bg_color=None, use_bgclip=True,
             ismask=False):

    if size is None:
        size = clips[0].size

    if use_bgclip and (clips[0].mask is None):
        transparent = False
    else:
        transparent = (bg_color is None)

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    fpss = [c.fps for c in clips if getattr(c, 'fps', None)]
    self.fps = max(fpss) if fpss else None

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.bg_color = bg_color

    if use_bgclip:
        self.bg = clips[0]
        self.clips = clips[1:]
        self.created_bg = False
    else:
        self.clips = clips
        self.bg = ColorClip(size, color=self.bg_color)
        self.created_bg = True

    # compute duration
    ends = [c.end for c in self.clips]
    if None not in ends:
        duration = max(ends)
        self.duration = duration
        self.end = duration

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if audioclips:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask if necessary
    if transparent:
        maskclips = [
            (c.mask if (c.mask is not None) else c.add_mask().mask).set_position(
                c.pos).set_end(c.end).set_start(c.start, change_end=False)
            for c in self.clips
        ]
        self.mask = CompositeVideoClip(maskclips, self.size,
                                       ismask=True, bg_color=0.0)

    def make_frame(t):
        full_w, full_h = self.bg.size
        f = self.bg.get_frame(t)
        bg_im = Image.fromarray(f)

        for c in self.playing_clips(t):
            img, pos, mask, ismask = c.new_blit_on(t, f)

            x, y = pos
            w, h = c.size
            out_x = x < -w or x == full_w
            out_y = y < -h or y == full_h
            if out_x and out_y:
                continue

            pos = (int(round(min(max(-w, x), full_w))),
                   int(round(min(max(-h, y), full_h))))

            paste_im = Image.fromarray(img)

            if mask is not None:
                mask_im = Image.fromarray(255 * mask).convert('L')
                bg_im.paste(paste_im, pos, mask_im)
            else:
                bg_im.paste(paste_im, pos)

        result_frame = np.array(bg_im)
        return result_frame.astype('uint8') if (not ismask) else result_frame

    self.make_frame = make_frame
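# Minimal sketch of the Pillow-based blit used in the make_frame above (not from
# the original source): paste a foreground array onto a background at (x, y)
# through an 8-bit alpha mask, then convert back to a numpy frame. The array
# shapes and coordinates are made-up placeholders.
import numpy as np
from PIL import Image

bg = Image.fromarray(np.zeros((120, 160, 3), dtype=np.uint8))         # black 160x120 background
fg = Image.fromarray(np.full((40, 60, 3), 255, dtype=np.uint8))       # white 60x40 patch
alpha = Image.fromarray(np.full((40, 60), 200, dtype=np.uint8)).convert("L")
bg.paste(fg, (50, 40), alpha)                                         # alpha-blended paste
frame = np.array(bg)                                                  # back to the frame format clips expect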
def __init__(self, sequence, titles, height=None, width=None,
             image_duration=4, transition_duration=1, fontsize=30,
             font="M+-1p-medium", font_color="white", zoom_images=False,
             test=False):
    """Makes a slideshow VideoClip from a sequence of images with their
    captions.

    Parameters
    ----------
    sequence
      List of paths to images.
    titles : list
      List of image captions.
    height
      Height of the resulting VideoClip.
    width
      Width of the resulting VideoClip.
    image_duration
      How long one image is visible.
    transition_duration
      How long the fade transition between images lasts.
    fontsize
      Font point size.
    font
      Name of the font to use. See ``TextClip.list('font')`` for the list
      of fonts you can use on your computer.
    font_color
      Color of the text. See ``TextClip.list('color')`` for a list of
      acceptable names.
    test : bool
      If True, shows each image with its caption in a preview.
    """
    VideoClip.__init__(self, ismask=False)

    image_duration += transition_duration * 2
    # FIXME: duration is wrong if there is a panorama among the images
    tt = np.cumsum([0] + list(np.repeat(image_duration, len(sequence))))
    self.images_starts = np.maximum(
        0, tt + (-1 * transition_duration) * np.arange(len(tt)))
    self.duration = self.images_starts[-1]
    self.end = self.images_starts[-1]
    self.sequence = sequence

    def find_image_index(t):
        return max([
            i for i in range(len(self.sequence))
            if self.images_starts[i] <= t
        ])

    self.lastindex = None
    self.lastimage = None
    self.previndex = None
    self.previmage = None

    def load_clip(index):
        image = self.sequence[index]
        text = titles[index]
        if text.startswith("W:"):
            text = text[2:]
            show_full_height = True
        else:
            show_full_height = False
        if height is None and width is None:
            clip = ImageClip(image, duration=image_duration)
        else:
            if zoom_images:
                clip = ImageClip(image, duration=image_duration) \
                    .fx(image_effect, screensize=(width, height),
                        duration=20, show_full_height=show_full_height)
            elif show_full_height:
                clip = ImageClip(image, duration=image_duration) \
                    .fx(resize, height=height).set_position('center', 'center')
                clip = CompositeVideoClip([clip], size=(width, height))
            else:
                clip = ImageClip(image, duration=image_duration) \
                    .fx(resize, height=height, width=width)
        # Adds the text label etc. on the clip
        clip = make_clip(clip, text, height, width, font, font_color,
                         fontsize)
        return clip

    def make_frame(t):
        index = find_image_index(t)
        fade = False
        clip_time = t - self.images_starts[index]

        # At the start of the clip we need to fade from the previous image
        if clip_time < transition_duration:
            fade = True
            if self.lastindex == index - 1 and self.previndex != index - 1:
                self.previmage = self.lastimage
                self.previndex = self.lastindex
            # When we are on the last image we delete the previous one from
            # memory, so instead of 2 images in memory we keep only one.
            # This can only happen on the last image.
            elif self.previndex and index == len(self.sequence) - 1:
                del self.previmage
                self.previmage = None
                self.previndex = None

        # If we need to load a new image we load it, add the mask and add
        # the fade if needed
        if index != self.lastindex:
            clip = load_clip(index)
            if clip.mask:
                # TODO: is the mask actually needed outside fading?
                clip.mask.duration = clip.duration
                newclip = clip.copy()
                newclip.mask = clip.mask.fx(fadein, transition_duration)
            else:
                newclip = clip
            self.lastimage = newclip
            self.lastindex = index

        # Fading between two images
        if fade and self.previmage:
            image = self.lastimage.blit_on(
                self.previmage.get_frame(
                    t - self.images_starts[self.previndex]),
                t - self.images_starts[self.lastindex])
        else:
            # Fade at the start, where we don't have a previous image.
            # TODO: is this actually needed?
            # if fade:
            #     self.set_mask(self.lastimage.mask)
            # else:
            #     self.mask = None
            self.mask = None
            # Normal image without fading
            image = self.lastimage.get_frame(t - self.images_starts[index])

        # Deletes the image from memory if we are closer to the end of the
        # clip than the last frame at 25 FPS.
        # If we are wrong we will just load the image again.
        if index == len(self.sequence) - 1 and self.duration - t < 2 / 25:
            del self.lastimage
            self.lastimage = None
            self.lastindex = None

        return image

    self.make_frame = make_frame

    if width is not None and height is not None:
        self.size = (width, height)
    else:
        self.size = make_frame(0).shape[:2][::-1]
def __init__(
    self,
    sequence,
    fps=None,
    durations=None,
    with_mask=True,
    is_mask=False,
    load_images=False,
):
    # CODE WRITTEN AS IT CAME, MAY BE IMPROVED IN THE FUTURE
    if (fps is None) and (durations is None):
        raise ValueError("Please provide either 'fps' or 'durations'.")

    VideoClip.__init__(self, is_mask=is_mask)

    # Parse the data
    fromfiles = True
    if isinstance(sequence, list):
        if isinstance(sequence[0], str):
            if load_images:
                sequence = [imread(file) for file in sequence]
                fromfiles = False
            else:
                fromfiles = True
        else:
            # sequence is already a list of numpy arrays
            fromfiles = False
    else:
        # sequence is a folder name, make it a list of files:
        fromfiles = True
        sequence = sorted([
            os.path.join(sequence, file) for file in os.listdir(sequence)
        ])

    # check that all the images are of the same size and whether they are grayscale
    grayscale = False
    if isinstance(sequence[0], str):
        size = imread(sequence[0]).shape
    else:
        size = sequence[0].shape

    for image in sequence:
        image1 = image
        if isinstance(image, str):
            image1 = imread(image)
        if size != image1.shape:
            raise Exception(
                "Moviepy: ImageSequenceClip requires all images to be the same size"
            )
    if len(size) == 2 or size[2] == 1:
        grayscale = True

    self.fps = fps
    if fps is not None:
        durations = [1.0 / fps for image in sequence]
        self.images_starts = [
            1.0 * i / fps - np.finfo(np.float32).eps
            for i in range(len(sequence))
        ]
    else:
        self.images_starts = [0] + list(np.cumsum(durations))
    self.durations = durations
    self.duration = sum(durations)
    self.end = self.duration
    self.sequence = sequence

    def find_image_index(t):
        return max([
            i for i in range(len(self.sequence))
            if self.images_starts[i] <= t
        ])

    def read_image(name, grayscale):
        """Wrapper for optional conversion from grayscale into rgb by
        duplicating the single channel into 3 channels."""
        image = imread(name)
        if grayscale:
            image = np.stack((image, ) * 3, -1)
        return image

    if fromfiles:
        self.last_index = None
        self.last_image = None

        def make_frame(t):
            index = find_image_index(t)
            if index != self.last_index:
                # using the wrapper function to resolve possible grayscale issues
                self.last_image = read_image(self.sequence[index],
                                             grayscale)[:, :, :3]
                self.last_index = index
            return self.last_image

        if with_mask and (read_image(self.sequence[0], grayscale).shape[2] == 4):
            self.mask = VideoClip(is_mask=True)
            self.mask.last_index = None
            self.mask.last_image = None

            def mask_make_frame(t):
                index = find_image_index(t)
                if index != self.mask.last_index:
                    frame = imread(self.sequence[index])[:, :, 3]
                    self.mask.last_image = frame.astype(float) / 255
                    self.mask.last_index = index
                return self.mask.last_image

            self.mask.make_frame = mask_make_frame
            self.mask.size = mask_make_frame(0).shape[:2][::-1]
    else:

        def make_frame(t):
            index = find_image_index(t)
            return self.sequence[index][:, :, :3]

        if with_mask and (self.sequence[0].shape[2] == 4):
            self.mask = VideoClip(is_mask=True)

            def mask_make_frame(t):
                index = find_image_index(t)
                return 1.0 * self.sequence[index][:, :, 3] / 255

            self.mask.make_frame = mask_make_frame
            self.mask.size = mask_make_frame(0).shape[:2][::-1]

    self.make_frame = make_frame
    self.size = make_frame(0).shape[:2][::-1]