Example #1
def test_matplotlib():
    # For now, Python 3.5 installs a version of matplotlib that complains
    # about the $DISPLAY variable, so let's just skip the test in that case.
    if PYTHON_VERSION in ('2.7', '3.3') or (PYTHON_VERSION == '3.5' and TRAVIS):
        return

    import matplotlib.pyplot as plt
    import numpy as np
    from moviepy.video.io.bindings import mplfig_to_npimage
    from moviepy.video.VideoClip import VideoClip

    x = np.linspace(-2, 2, 200)

    duration = 2

    fig, ax = plt.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x**2) + np.sin(x + 2*np.pi/duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(os.path.join(TMP_DIR, 'matplotlib.gif'), fps=20)
Example #2
def make_video(audio, filename, progan, n_bins=60, random_state=0, imgs_per_batch=20):
    y, sr = librosa.load(audio)
    song_length = len(y) / sr
    z_audio = get_z_from_audio(y, z_length=progan.z_length, n_bins=n_bins, random_state=random_state)
    fps = z_audio.shape[0] / song_length
    res = progan.get_cur_res()
    shape = (res, res * 16 // 9, 3)

    imgs = np.zeros(shape=[imgs_per_batch, *shape], dtype=np.float32)

    def make_frame(t):
        nonlocal imgs  # imgs is defined in the enclosing make_video() scope
        cur_frame_idx = int(t * fps)

        if cur_frame_idx >= len(z_audio):
            return np.zeros(shape=shape, dtype=np.uint8)

        if cur_frame_idx % imgs_per_batch == 0:
            imgs = progan.generate(z_audio[cur_frame_idx:cur_frame_idx + imgs_per_batch])
            imgs = imgs[:, :, :res * 8 // 9, :]
            imgs_rev = np.flip(imgs, 2)
            imgs = np.concatenate((imgs, imgs_rev), 2)

        return imgs[cur_frame_idx % imgs_per_batch]

    video_clip = VideoClip(make_frame=make_frame, duration=song_length)
    audio_clip = AudioFileClip(audio)
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(filename, fps=fps)
Example #3
    def __init__(self, subtitles, make_textclip=None, encoding=None):

        VideoClip.__init__(self, has_constant_size=False)

        if isinstance(subtitles, str):
            subtitles = file_to_subtitles(subtitles, encoding=encoding)

        # subtitles = [(map(cvsecs, tt),txt) for tt, txt in subtitles]
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:
            make_textclip = lambda txt: TextClip(
                txt,
                font="Georgia-Bold",
                fontsize=24,
                color="white",
                stroke_color="black",
                stroke_width=0.5,
            )

        self.make_textclip = make_textclip
        self.start = 0
        self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
        self.end = self.duration

        def add_textclip_if_none(t):
            """ Will generate a textclip if it hasn't been generated asked
            to generate it yet. If there is no subtitle to show at t, return
            false. """
            sub = [
                ((ta, tb), txt)
                for ((ta, tb), txt) in self.textclips.keys()
                if (ta <= t < tb)
            ]
            if not sub:
                sub = [
                    ((ta, tb), txt)
                    for ((ta, tb), txt) in self.subtitles
                    if (ta <= t < tb)
                ]
                if not sub:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]])

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]])

        self.make_frame = make_frame
        hasmask = bool(self.make_textclip("T").mask)
        self.mask = VideoClip(make_mask_frame, ismask=True) if hasmask else None
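For context, here is a minimal usage sketch of this subtitle clip via moviepy's public SubtitlesClip class (moviepy 1.x API). The file names subs.srt and video.mp4 are placeholders, and the generator simply mirrors the default Georgia-Bold style used above; TextClip additionally requires ImageMagick to be installed.

# Hypothetical usage sketch; file names are placeholders.
from moviepy.editor import VideoFileClip, CompositeVideoClip, TextClip
from moviepy.video.tools.subtitles import SubtitlesClip

generator = lambda txt: TextClip(txt, font="Georgia-Bold", fontsize=24, color="white")
subs = SubtitlesClip("subs.srt", generator)   # parses the .srt into ((start, end), text) pairs
video = VideoFileClip("video.mp4")
final = CompositeVideoClip([video, subs.set_position(("center", "bottom"))])
final.write_videofile("subtitled.mp4", fps=video.fps)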
Example #4
 def __init__(self, filename, ismask=False, has_mask=False,
              audio=True, audio_buffersize = 200000,
              audio_fps=44100, audio_nbytes=2, verbose=False):
     
     VideoClip.__init__(self, ismask)
     
     # We store the construction parameters in case we need to make
     # a copy (a 'co-reader').
     
     self.parameters = {'filename':filename, 'ismask':ismask,
                        'has_mask':has_mask, 'audio':audio,
                        'audio_buffersize':audio_buffersize}
     
     # Make a reader
     pix_fmt= "rgba" if has_mask else "rgb24"
     self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
     
     # Make some of the reader's attributes accessible from the clip
     self.duration = self.reader.duration
     self.fps = self.reader.fps
     self.size = self.reader.size
     self.get_frame = lambda t: self.reader.get_frame(t)
     
     # Make a reader for the audio, if any.
     if audio:
         try:
             self.audio = AudioFileClip(filename, buffersize= audio_buffersize,
                 fps = audio_fps, nbytes = audio_nbytes)
         except:
             if verbose:
                 print "No audio found in %s"%filename
             pass
Example #5
    def __init__(self, foldername, fps, withmask=True, ismask=False):

        VideoClip.__init__(self, ismask=ismask)

        self.directory = foldername
        self.fps = fps
        self.imagefiles = sorted(os.listdir(foldername))

        self.duration = 1.0 * len(self.imagefiles) / self.fps
        self.end = self.duration

        self.lastpos = None
        self.lastimage = None

        def get_frame(t):

            pos = int(self.fps * t)
            if pos != self.lastpos:
                self.lastimage = ffmpeg_read_image(os.path.join(self.directory, self.imagefiles[pos]),
                                                   withmask=withmask)
                self.lastpos = pos

            return self.lastimage

        self.get_frame = get_frame
        self.size = get_frame(0).shape[:2][::-1]
Example #6
    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize = 200000,
                 audio_fps=44100, audio_nbytes=2, verbose=False):
        
        VideoClip.__init__(self)
        
        # Make a reader
        pix_fmt= "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
        
        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration
        
        self.fps = self.reader.fps
        self.size = self.reader.size

        if has_mask:

          self.get_frame = lambda t: self.reader.get_frame(t)[:,:,:3]
          mask_gf =  lambda t: self.reader.get_frame(t)[:,:,3]/255.0
          self.mask = (VideoClip(ismask = True, get_frame = mask_gf)
                       .set_duration(self.duration))
          self.mask.fps = self.fps

        else:

          self.get_frame = lambda t: self.reader.get_frame(t)
        
        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:
            self.audio = AudioFileClip(filename,
                                       buffersize= audio_buffersize,
                                       fps = audio_fps,
                                       nbytes = audio_nbytes)
Example #7
    def __init__(self,
                 filename,
                 ismask=False,
                 has_mask=False,
                 audio=True,
                 audio_buffersize=200000,
                 audio_fps=44100,
                 audio_nbytes=2,
                 verbose=False):

        VideoClip.__init__(self, ismask)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.get_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:
            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
Example #8
    def __init__(
        self,
        filename,
        has_mask=False,
        audio=True,
        audio_buffersize=200000,
        target_resolution=None,
        resize_algorithm="bicubic",
        audio_fps=44100,
        audio_nbytes=2,
        fps_source="tbr",
    ):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(
            filename,
            pix_fmt=pix_fmt,
            target_resolution=target_resolution,
            resize_algo=resize_algorithm,
            fps_source=fps_source,
        )

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]

            def mask_mf(t):
                return self.reader.get_frame(t)[:, :, 3] / 255.0

            self.mask = VideoClip(
                ismask=True, make_frame=mask_mf).set_duration(self.duration)
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos["audio_found"]:

            self.audio = AudioFileClip(
                filename,
                buffersize=audio_buffersize,
                fps=audio_fps,
                nbytes=audio_nbytes,
            )
Example #9
    def __init__(self, glob_store, freq, fft_clip, ismask=False):

        def make_frame(t):
            freq_amplitude = fft_clip.freq_amplitude(freq, t)
            image_data = glob_store.image_from_normal(freq_amplitude)
            return image_data

        VideoClip.__init__(self, make_frame=make_frame, ismask=ismask, duration=fft_clip.duration)
Example #10
    def __init__(self,
                 clip,
                 gpx_file=None,
                 time_offset=0,
                 interval=0,
                 speedup_factor=1,
                 clip_start_time=None,
                 config=None,
                 calculate_stats=False):

        self.stats = Counter()
        self.calculate_stats = calculate_stats
        self.clip = clip
        duration = clip.duration
        # Finds breaks in image sequences (a break is when we have GPS information
        # but no images at that time).
        # FIXME: find breaks with GPS and images, not just durations
        if isinstance(clip, ImageSequenceClip):
            self.have_any_breaks = any((duration >
                                        (config.effect_length * 2 + 2)
                                        for duration in self.clip.durations))
            self.gpx_data = GPXData(sequence=self.clip.sequence,
                                    gpx_file=gpx_file,
                                    time_offset=time_offset)
            self.durations = self.clip.durations
            self.images_starts = self.clip.images_starts
            self.find_image_index = lambda t: max([
                i for i in range(len(self.clip.sequence))
                if self.clip.images_starts[i] <= t
            ])
        else:
            if speedup_factor != 1:
                self.clip.old_make_frame = self.clip.make_frame
                self.clip.make_frame = lambda t: \
                    self.clip.old_make_frame(t*self.speedup_factor)
                duration = duration / speedup_factor
            self.have_any_breaks = False
            self.gpx_data = GPXData(gpx_file=gpx_file,
                                    gpx_start_time=clip_start_time,
                                    time_offset=time_offset)
            self.find_image_index = lambda t: None
        VideoClip.__init__(self, ismask=clip.ismask, duration=duration)

        self.size = clip.size

        # TODO: check whether this attribute exists in both kinds of clips
        #self.fps = clip.fps
        self.chart_data = {}
        self.speedup_factor = speedup_factor
        self.gpx_file = gpx_file

        if config is not None:
            self.config = config

            for key, key_config in config.config_items(need_config=True):
                print(key, "needs config")
                key_config.init(vars(self))
Example #11
    def __init__(self, subtitles, make_textclip=None):

        VideoClip.__init__(self, has_constant_size=False)

        if isinstance(subtitles, str):
            subtitles = file_to_subtitles(subtitles)

        subtitles = [(map(cvsecs, tt), txt) for tt, txt in subtitles]
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:

            make_textclip = lambda txt: TextClip(txt,
                                                 font='Georgia-Bold',
                                                 fontsize=24,
                                                 color='white',
                                                 stroke_color='black',
                                                 stroke_width=0.5)

        self.make_textclip = make_textclip
        self.inicia = 0
        self.duracion = max([tb for ((ta, tb), txt) in self.subtitles])
        self.fin = self.duracion

        def add_textclip_if_none(t):
            """ Will generate a textclip if it hasn't been generated asked
            to generate it yet. If there is no subtitle to show at t, return
            false. """
            sub = [((ta, tb), txt)
                   for ((ta, tb), txt) in self.textclips.keys()
                   if (ta <= t < tb)]
            if sub == []:
                sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                       if (ta <= t < tb)]
                if sub == []:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].get_frame(t)
                    if sub else np.array([[[0, 0, 0]]]))

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].mask.get_frame(t)
                    if sub else np.array([[0]]))

        self.make_frame = make_frame
        hasmask = (self.make_textclip('T').mask is not None)
        self.mask = (VideoClip(make_mask_frame, ismask=True)
                     if hasmask else None)
Example #12
    def __init__(self,
                 clips,
                 size=None,
                 bg_color=None,
                 transparent=False,
                 ismask=False):

        if size is None:
            size = clips[0].size

        if bg_color is None:
            bg_color = 0.0 if ismask else (0, 0, 0)

        VideoClip.__init__(self)

        self.size = size
        self.ismask = ismask
        self.clips = clips
        self.transparent = transparent
        self.bg_color = bg_color
        self.bg = ColorClip(size, col=self.bg_color).get_frame(0)

        # compute duration
        ends = [c.end for c in self.clips]
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio != None]
        if len(audioclips) > 0:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask
        if transparent:
            maskclips = [
                c.mask.set_pos(c.pos) for c in self.clips if c.mask is not None
            ]
            if maskclips != []:
                self.mask = CompositeVideoClip(maskclips,
                                               self.size,
                                               transparent=False,
                                               ismask=True)

        def make_frame(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg
            for c in self.playing_clips(t):
                f = c.blit_on(f, t)
            return f

        self.make_frame = make_frame
Example #13
    def episode_to_gif(self, episode=None, path='', fps=30):
        frames = self.episode_video_frames(episode)

        for ep in frames:
            fig, ax = plt.subplots()
            animation = VideoClip(partial(self._make_frame,
                                          frames=frames[ep],
                                          axes=ax,
                                          fig=fig,
                                          title=f'Episode {ep}'),
                                  duration=frames[ep].shape[0])
            animation.write_gif(path + f'episode_{ep}.gif', fps=fps)
Example #14
    def __init__(self, subtitles, make_textclip=None):
        
        VideoClip.__init__(self, has_constant_size=False)

        if isinstance( subtitles, basestring):
            subtitles = file_to_subtitles(subtitles)

        subtitles = [(map(cvsecs, tt),txt) for tt, txt in subtitles]
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:

            make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                        fontsize=24, color='white',
                                        stroke_color='black', stroke_width=0.5)

        self.make_textclip = make_textclip
        self.start=0
        self.duration = max([tb for ((ta,tb), txt) in self.subtitles])
        self.end=self.duration
        
        def add_textclip_if_none(t):
            """ Will generate a textclip if it hasn't been generated asked
            to generate it yet. If there is no subtitle to show at t, return
            false. """
            sub =[((ta,tb),txt) for ((ta,tb),txt) in self.textclips.keys()
                   if (ta<=t<tb)]
            if sub == []:
                sub = [((ta,tb),txt) for ((ta,tb),txt) in self.subtitles if
                       (ta<=t<tb)]
                if sub == []:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].get_frame(t) if sub
                    else np.array([[[0,0,0]]]))

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].mask.get_frame(t) if sub
                    else np.array([[0]]))
        
        self.make_frame = make_frame
        hasmask = (self.make_textclip('T').mask is not None)
        self.mask = (VideoClip(make_mask_frame, ismask=True) if hasmask else None)
Example #15
    def __init__(self,
                 filename,
                 has_mask=False,
                 audio=True,
                 audio_buffersize=200000,
                 target_resolution=None,
                 resize_algorithm='bicubic',
                 audio_fps=44100,
                 audio_nbytes=2,
                 verbose=False,
                 fps_source='tbr'):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = None  # need this just in case FFMPEG has issues (__del__ complains)
        self.reader = FFMPEG_VideoReader(filename,
                                         pix_fmt=pix_fmt,
                                         target_resolution=target_resolution,
                                         resize_algo=resize_algorithm,
                                         fps_source=fps_source)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = self.reader.filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
            mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
            self.mask = (VideoClip(
                ismask=True, make_frame=mask_mf).set_duration(self.duration))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
Example #16
    def __init__(self, subtitles, make_textclip=None):

        VideoClip.__init__(self)

        if isinstance(subtitles, str):
            subtitles = file_to_subtitles(subtitles)

        subtitles = [(map(cvsecs, tt), txt) for tt, txt in subtitles]
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:

            make_textclip = lambda txt: TextClip(txt,
                                                 font='Georgia-Bold',
                                                 fontsize=24,
                                                 color='white',
                                                 stroke_color='black',
                                                 stroke_width=0.5)

        self.make_textclip = make_textclip
        self.start = 0
        self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
        self.end = self.duration

        def add_textclip_if_none(t):
            sub = [((ta, tb), txt)
                   for ((ta, tb), txt) in self.textclips.keys()
                   if (ta <= t < tb)]
            if sub == []:
                sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                       if (ta <= t < tb)]
                if sub == []:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])
            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].get_frame(t)
                    if sub else np.array([[[0, 0, 0]]]))

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].mask.get_frame(t)
                    if sub else np.array([[0]]))

        self.make_frame = make_frame
        self.mask = VideoClip(make_mask_frame, ismask=True)
Example #17
    def iplot_episode(self, episode, fps=30):
        if episode is None:
            raise ValueError('The episode cannot be None for jupyter display')
        x = self.episode_video_frames(episode)[episode]
        fig, ax = plt.subplots()

        self.current_animation = VideoClip(partial(self._make_frame,
                                                   frames=x,
                                                   axes=ax,
                                                   fig=fig,
                                                   title=f'Episode {episode}'),
                                           duration=x.shape[0])
        self.current_animation.ipython_display(fps=fps,
                                               loop=True,
                                               autoplay=True)
Example #18
def video_frame(fct_frame, **kwargs):
    """
    Creates a video from drawing or images.
    *fct_frame* can either be a function which draws a picture at time *t*
    or a list of picture names or a folder.

    @param      fct_frame       function like ``def make_frame(t: float) -> numpy.ndarray``,
                                or list of images or folder name
    @param      kwargs          additional arguments for function
                                `make_frame <https://zulko.github.io/moviepy/getting_started/videoclips.html#videoclip>`_
    @return                     :epkg:`VideoClip`
    """
    if isinstance(fct_frame, str):
        if not os.path.exists(fct_frame):
            raise FileNotFoundError(
                "Unable to find folder '{0}'".format(fct_frame))
        imgs = os.listdir(fct_frame)
        exts = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff'}
        imgs = [
            os.path.join(fct_frame, _) for _ in imgs
            if os.path.splitext(_)[-1].lower() in exts
        ]
        return video_frame(imgs, **kwargs)
    elif isinstance(fct_frame, list):
        for img in fct_frame:
            if not os.path.exists(img):
                raise FileNotFoundError(
                    "Unable to find image '{0}'".format(img))
        return ImageSequenceClip(fct_frame, **kwargs)
    else:
        return VideoClip(fct_frame, **kwargs)
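A short usage sketch of the helper above, with hypothetical inputs: passing a drawing function forwards the kwargs to VideoClip, while passing a folder of images forwards them to ImageSequenceClip.

# Hypothetical usage; the drawing function and folder name are placeholders.
import numpy as np

def draw(t):
    # a solid gray level that changes over time, as an H x W x 3 uint8 frame
    return np.full((120, 160, 3), int(255 * t) % 256, dtype=np.uint8)

clip_from_function = video_frame(draw, duration=2)        # kwargs go to VideoClip
clip_from_folder = video_frame("frames_folder", fps=24)   # kwargs go to ImageSequenceClip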
Example #19
    def make_demo_clip(self, image=None):
        def make_frame(t):
            if image is None:
                f = ColorClip((1333, 1000), [56, 14, 252]).make_frame(0)
            else:
                f = ImageClip(image).make_frame(0)
            for key, key_config in self.make_items():
                data = key_config.sample(f)
                #print ("data:", data)
                if data is None:
                    continue
                #print (key, key_config)
                if key == "map":
                    map_conf = key_config.config
                    created_clip = ColorClip(
                        (map_conf["map_w"], map_conf["map_h"]), [23, 8, 89])
                else:
                    created_clip = key_config.func(data)
                if created_clip is None:
                    continue
                c = key_config.position(created_clip, f.shape[1], f.shape[0])
                f = c.blit_on(f, 0)
            return f

        return VideoClip(make_frame, duration=1)
Example #20
    def __init__(self, subtitles, make_textclip=None):
        
        VideoClip.__init__(self)

        if isinstance( subtitles, str):
            subtitles = file_to_subtitles(subtitles)

        subtitles = [(map(cvsecs, tt),txt) for tt, txt in subtitles]
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:

            make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                        fontsize=24, color='white',
                                        stroke_color='black', stroke_width=0.5)

        self.make_textclip = make_textclip
        self.start=0
        self.duration = max([tb for ((ta,tb), txt) in self.subtitles])
        self.end=self.duration
        
        def add_textclip_if_none(t):
            sub =[((ta,tb),txt) for ((ta,tb),txt) in self.textclips.keys()
                   if (ta<=t<tb)]
            if sub == []:
                sub = [((ta,tb),txt) for ((ta,tb),txt) in self.subtitles if
                       (ta<=t<tb)]
                if sub == []:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])
            return sub

        def get_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].get_frame(t) if sub
                    else np.array([[[0,0,0]]]))

        def mask_get_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].mask.get_frame(t) if sub
                    else np.array([[0]]))
        
        self.get_frame = get_frame
        self.mask = VideoClip(ismask=True, get_frame=mask_get_frame)
Example #21
    def __init__(self, clips, size=None, bg_color=None, transparent=False,
                 ismask=False):
                     
        if size is None:
            size = clips[0].size
        
        if bg_color is None:
            bg_color = 0.0 if ismask else (0, 0, 0)
        
        VideoClip.__init__(self)
        
        self.size = size
        self.ismask = ismask
        self.clips = clips
        self.transparent = transparent
        self.bg_color = bg_color
        self.bg = ColorClip(size, col=self.bg_color).get_frame(0)
        
        # compute duration
        ends = [c.end for c in self.clips]
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio != None]
        if len(audioclips) > 0:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask
        if transparent:
            maskclips = [c.mask.set_pos(c.pos) for c in self.clips
                         if c.mask is not None]
            if maskclips != []:
                self.mask = CompositeVideoClip(maskclips,self.size,
                                        transparent=False, ismask=True)

        def gf(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg
            for c in self.playing_clips(t):
                    f = c.blit_on(f, t)
            return f

        self.get_frame = gf
Example #22
    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize = 200000,
                 target_resolution=None, resize_algorithm='bicubic',
                 audio_fps=44100, audio_nbytes=2, verbose=False,
                 fps_source='tbr'):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt= "rgba" if has_mask else "rgb24"
        self.reader = None # need this just in case FFMPEG has issues (__del__ complains)
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                         target_resolution=target_resolution,
                                         resize_algo=resize_algorithm,
                                         fps_source=fps_source)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = self.reader.filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:,:,:3]
            mask_mf =  lambda t: self.reader.get_frame(t)[:,:,3]/255.0
            self.mask = (VideoClip(ismask = True, make_frame = mask_mf)
                       .set_duration(self.duration))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize= audio_buffersize,
                                       fps = audio_fps,
                                       nbytes = audio_nbytes)
Example #23
def test_matplotlib_simple_example():
    import matplotlib.pyplot as plt

    plt.switch_backend("agg")

    x = np.linspace(-2, 2, 200)
    duration = 0.5

    fig, ax = plt.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x**2) + np.sin(x + 2 * np.pi / duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(os.path.join(TMP_DIR, "matplotlib.gif"), fps=20)
Example #24
def make_video(audio, filename, progan, n_bins=84, random_state=0):
    y, sr = librosa.load(audio)
    song_length = len(y) / sr
    z_audio = get_z_from_audio(y, z_length=progan.z_length, n_bins=n_bins, random_state=random_state)
    fps = z_audio.shape[0] / song_length
    shape = progan.generate(z_audio[0]).shape
    def make_frame(t):
        cur_frame_idx = int(t * fps)
        if cur_frame_idx < len(z_audio):
            img = progan.generate(z_audio[cur_frame_idx])
        else:
            img = np.zeros(shape=shape, dtype=np.uint8)
        return img

    video_clip = VideoClip(make_frame=make_frame, duration=song_length)
    audio_clip = AudioFileClip(audio)
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(filename, fps=fps)
Example #25
    def __init__(self,
                 filename,
                 ismask=False,
                 has_mask=False,
                 audio=True,
                 audio_buffersize=200000,
                 audio_fps=44100,
                 audio_nbytes=2,
                 verbose=False):

        VideoClip.__init__(self, ismask)

        # We store the construction parameters in case we need to make
        # a copy (a 'co-reader').

        self.parameters = {
            'filename': filename,
            'ismask': ismask,
            'has_mask': has_mask,
            'audio': audio,
            'audio_buffersize': audio_buffersize
        }

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.fps = self.reader.fps
        self.size = self.reader.size
        self.get_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio:
            try:
                self.audio = AudioFileClip(filename,
                                           buffersize=audio_buffersize,
                                           fps=audio_fps,
                                           nbytes=audio_nbytes)
            except:
                if verbose:
                    print "No audio found in %s" % filename
                pass
Example #26
def test_issue_368():
    import matplotlib.pyplot as plt
    import numpy as np
    from sklearn import svm
    from sklearn.datasets import make_moons

    from moviepy.video.io.bindings import mplfig_to_npimage

    plt.switch_backend("agg")

    X, Y = make_moons(50, noise=0.1, random_state=2)  # semi-random data

    fig, ax = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))
    fig.subplots_adjust(left=0, right=1, bottom=0)
    xx, yy = np.meshgrid(np.linspace(-2, 3, 500), np.linspace(-1, 2, 500))

    def make_frame(t):
        ax.clear()
        ax.axis("off")
        ax.set_title("SVC classification", fontsize=16)

        classifier = svm.SVC(gamma=2, C=1)
        # the varying weights make the points appear one after the other
        weights = np.minimum(1, np.maximum(0, t**2 + 10 - np.arange(50)))
        classifier.fit(X, Y, sample_weight=weights)
        Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        ax.contourf(
            xx,
            yy,
            Z,
            cmap=plt.cm.bone,
            alpha=0.8,
            vmin=-2.5,
            vmax=2.5,
            levels=np.linspace(-2, 2, 20),
        )
        ax.scatter(X[:, 0], X[:, 1], c=Y, s=50 * weights, cmap=plt.cm.bone)

        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=0.2)
    animation.write_gif(os.path.join(TMP_DIR, "svm.gif"), fps=20)
Example #27
def test_preview_methods():
    stdout = io.StringIO()
    with redirect_stdout(stdout):
        try:
            preview_module = importlib.import_module(
                "moviepy.video.io.preview")
            assert preview_module.preview.__hash__(
            ) != VideoClip.preview.__hash__()
        except ImportError:
            editor_module = importlib.import_module("moviepy.editor")
            with pytest.raises(ImportError) as exc:
                VideoClip.preview(True)
            assert str(exc.value) == "clip.preview requires Pygame installed"

            with pytest.raises(ImportError) as exc:
                VideoClip.show(True)
            assert str(exc.value) == "clip.show requires Pygame installed"

            with pytest.raises(ImportError) as exc:
                AudioClip.preview(True)
            assert str(exc.value) == "clip.preview requires Pygame installed"
        else:
            editor_module = importlib.import_module("moviepy.editor")
            assert (editor_module.VideoClip.preview.__hash__() ==
                    preview_module.preview.__hash__())
        finally:
            if "moviepy.editor" in sys.modules:
                del sys.modules["moviepy.editor"]

        try:
            importlib.import_module("matplotlib.pyplot")
        except ImportError:
            editor_module = importlib.import_module("moviepy.editor")
            with pytest.raises(ImportError) as exc:
                editor_module.sliders()

            assert str(exc.value) == "sliders requires matplotlib installed"

            del sys.modules["moviepy.editor"]
        else:
            del sys.modules["matplotlib.pyplot"]

    del sys.modules["moviepy"]
Example #28
def test_matplotlib():
    # For now, Python 3.5 installs a version of matplotlib that complains
    # about the $DISPLAY variable, so let's just ignore it for now.

    x = np.linspace(-2, 2, 200)

    duration = 2

    matplotlib.use("Agg")
    fig, ax = matplotlib.pyplot.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x ** 2) + np.sin(x + 2 * np.pi / duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(os.path.join(TMP_DIR, "matplotlib.gif"), fps=20)
Example #29
def iter_frame(clip: VideoClip, processes: int, kwargs: dict):
    captures = [FaceCapture() for i in range(processes)]
    previous_frame = None
    for index, frame in enumerate(tqdm(clip.iter_frames(**kwargs))):
        if previous_frame is not None:
            clone_value = (previous_frame == frame).astype(float).mean()
            if clone_value > 0.99:
                continue
        previous_frame = frame
        capture = captures[index % processes]
        yield index, frame, capture
Example #30
def test_videoclip_copy(copy_func):
    """It must be possible to do a mixed copy of VideoClip using ``clip.copy()``,
    ``copy.copy(clip)`` and ``copy.deepcopy(clip)``.
    """
    clip = VideoClip()
    other_clip = VideoClip()

    for attr in clip.__dict__:
        # mask and audio are shallow copies that should be initialized
        if attr in ("mask", "audio"):
            if attr == "mask":
                nested_object = BitmapClip([["R"]], duration=0.01)
            else:
                nested_object = AudioClip(
                    lambda t: [np.sin(880 * 2 * np.pi * t)],
                    duration=0.01,
                    fps=44100)
            setattr(clip, attr, nested_object)
        else:
            setattr(clip, attr, "foo")

    copied_clip = copy_func(clip)

    # VideoClip attributes are copied
    for attr in copied_clip.__dict__:
        value = getattr(copied_clip, attr)
        assert value == getattr(clip, attr)

        # other instances are not edited
        assert value != getattr(other_clip, attr)

        # shallow copies of mask and audio
        if attr in ("mask", "audio"):
            for nested_attr in value.__dict__:
                assert getattr(value,
                               nested_attr) == getattr(getattr(clip, attr),
                                                       nested_attr)

    # nested objects of instances copies are not edited
    assert other_clip.mask is None
    assert other_clip.audio is None
Example #31
    def __init__(self, foldername, fps, transparent=True, ismask=False):

        VideoClip.__init__(self, ismask=ismask)

        self.directory = foldername
        self.fps = fps
        allfiles = os.listdir(foldername)
        self.pics = sorted(["%s/%s" % (foldername, f) for f in allfiles
                            if not f.endswith(('.txt','.wav'))])
        
        audio = ["%s/%s" % (foldername, f) for f in allfiles if f.endswith('.wav')]

        if len(audio) > 0:
            self.audio = AudioFileClip(audio[0])
            self.audiofile = audio[0]

        self.size = imread(self.pics[0]).shape[:2][::-1]

        if imread(self.pics[0]).shape[2] == 4:  # transparent png

            if ismask:
                def get_frame(t):
                    return 1.0 * imread(self.pics[int(self.fps * t)])[:, :, 3] / 255
            else:
                def get_frame(t):
                    return imread(self.pics[int(self.fps * t)])[:, :, :3]

            if transparent:
                self.mask = DirectoryClip(foldername, fps, ismask=True)

        else:

            def get_frame(t):
                return imread(self.pics[int(self.fps * t)])

        self.get_frame = get_frame
        self.duration = 1.0 * len(self.pics) / self.fps
Example #32
    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize = 200000,
                 audio_fps=44100, audio_nbytes=2, verbose=False):
        
        VideoClip.__init__(self)
        
        # Make a reader
        pix_fmt= "rgba" if has_mask else "rgb24"
        reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
        self.reader = reader
        # Make some of the reader's attributes accessible from the clip
        self.duracion = self.reader.duracion
        self.fin = self.reader.duracion
        
        self.fps = self.reader.fps
        self.tamano = self.reader.tamano

        if has_mask:

            self.make_frame = lambda t: reader.get_frame(t)[:,:,:3]
            mask_mf =  lambda t: reader.get_frame(t)[:,:,3]/255.0
            self.mask = (VideoClip(ismask = True, make_frame = mask_mf)
                       .set_duracion(self.duracion))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: reader.get_frame(t)
        
        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize= audio_buffersize,
                                       fps = audio_fps,
                                       nbytes = audio_nbytes)
Example #33
 def __init__(self, filename, ismask=False, has_mask=False,
              audio=True, audio_buffersize = 200000,
              audio_fps=44100, audio_nbytes=2, verbose=False):
     
     VideoClip.__init__(self, ismask)
     
     # Make a reader
     pix_fmt= "rgba" if has_mask else "rgb24"
     self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,print_infos=verbose)
     
     # Make some of the reader's attributes accessible from the clip
     self.duration = self.reader.duration
     self.end = self.reader.duration
     
     self.fps = self.reader.fps
     self.size = self.reader.size
     self.get_frame = lambda t: self.reader.get_frame(t)
     
     # Make a reader for the audio, if any.
     if audio:
         self.audio = AudioFileClip(filename,
                                    buffersize= audio_buffersize,
                                    fps = audio_fps,
                                    nbytes = audio_nbytes)
Example #34
    def get_gif(self, default_fps=15, frame_skip=None):
        if frame_skip is not None: self.frames = self.frames[::frame_skip]
        try:
            from moviepy.video.VideoClip import VideoClip
            from moviepy.video.io.VideoFileClip import VideoFileClip
            from moviepy.video.io.html_tools import ipython_display

            fig, ax = plt.subplots()
            clip = VideoClip(partial(self._make_frame,
                                     frames=self.frames,
                                     axes=ax,
                                     fig=fig,
                                     fps=default_fps,
                                     matplot_to_np_fn=mplfig_to_npimage,
                                     title=f'Episode {self.episode}'),
                             duration=(self.frames.shape[0] / default_fps) - 1)
            plt.close(fig)
            return clip

        except ImportError:
            raise ImportError(
                'Package: `moviepy` is not installed. You can install it via: `pip install moviepy`'
            )
Example #35
def getFramesArray(video: VideoClip,
                   fps: int = 15) -> Tuple[np.ndarray, np.ndarray]:
    """
        get all frame arrays by fps
        And, if can't use frame or audio data, filter at try/except AttributeError
        
        argument
            - video : VideoClip class
            - fps : fps getting frames
            
        return
            - _frame_list : all frame array in video
            - _sound_list : all sound array in video
    """
    _frame_list = []
    _sound_list = []
    _i = 0

    while _i / fps < video.duration:
        try:
            _frame = video.get_frame(_i / fps)
            _sound = audio_util.getSubAudio(video.audio,
                                            start=_i / fps,
                                            end=(_i + 1) / fps)

            _sound_list.append(_sound)
            _frame_list.append(_frame)
        except AttributeError:
            print("[Warning] Error get frame and sound")
            pass
        _i += 1


#     _sound_len_min = min([len(_sound) for _sound in _sound_list])
#     _sound_list = [_sound[:_sound_len_min] for _sound in _sound_list]

    return np.array(_frame_list), np.array(_sound_list)
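A minimal call sketch for the helper above, assuming a placeholder video.mp4 and that the project's own audio_util module is importable:

# Hypothetical usage; video.mp4 is a placeholder file.
from moviepy.editor import VideoFileClip

frames, sounds = getFramesArray(VideoFileClip("video.mp4"), fps=15)
print(frames.shape, sounds.shape)  # (n_frames, H, W, 3) and the per-frame sound arrays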
Example #36
def concatenate_videoclips(clips, method="chain", transition=None,
                           bg_color=None, ismask=False, padding = 0):
    """ Concatenates several video clips
    
    Returns a video clip made by concatenating several video clips.
    (Concatenated means that they will be played one after another).
    
    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the successive clips, without any correction if they are
      not of the same size or anything. If none of the clips have masks, the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same
      resolution, the final resolution will be such that no clip has
       to be resized.
       As a consequence the final clip has the height of the highest
       clip and the width of the widest clip of the list. All the
       clips with smaller dimensions will appear centered. The border
       will be transparent if mask=True, else it will be of the
       color specified by ``bg_color``.

    If all clips with a fps attribute have the same fps, it becomes the fps of
    the result.

    Parameters
    -----------

    clips
      A list of video clips which must all have their ``duration``
      attributes set.

    method
      "chain" or "compose": see above.

    transition
      A clip that will be played between each two clips of the list.
    
    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip
    
    padding
      Only for method='compose'. Duration between two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is cool for clips that fade
      in on one another). A non-null padding automatically sets the method to
      `compose`.
           
    """

    if transition is not None:
        l = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, l) + [clips[-1]]
        transition = None

    
    tt = np.cumsum([0] + [c.duration for c in clips])

    sizes = [v.size for v in clips]


    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    tt = np.maximum(0, tt + padding*np.arange(len(tt)))
    
    if method == "chain":
        def make_frame(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clips[i].get_frame(t - tt[i])
        
        result = VideoClip(ismask = ismask, make_frame = make_frame)
        if any([c.mask is not None for c in clips]):
            masks = [c.mask if (c.mask is not None) else
                     ColorClip([1,1], col=1, ismask=True, duration=c.duration)
                 #ColorClip(c.size, col=1, ismask=True).set_duration(c.duration)
                     for c in clips]
            result.mask = concatenate_videoclips(masks, method="chain", ismask=True)
            result.clips = clips


    elif method == "compose":
        result = CompositeVideoClip( [c.set_start(t).set_pos('center')
                                for (c, t) in zip(clips, tt)],
               size = (w, h), bg_color=bg_color, ismask=ismask)

    result.tt = tt
    
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1] , tt[-1]
    
    audio_t = [(c.audio,t) for c,t in zip(clips,tt) if c.audio is not None]
    if len(audio_t)>0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                for a,t in audio_t])

    fps_list = list(set([c.fps for c in clips if hasattr(c,'fps')]))
    if len(fps_list)==1:
        result.fps= fps_list[0]

    return result
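For reference, a minimal sketch of how the function above is typically called (file names are placeholders); method="compose" is the safer choice when the clips differ in size:

# Sketch only; clip1.mp4 and clip2.mp4 are placeholder files.
from moviepy.editor import VideoFileClip, concatenate_videoclips

clip1 = VideoFileClip("clip1.mp4")
clip2 = VideoFileClip("clip2.mp4")

# "chain" simply plays the clips back to back; "compose" centers clips of different sizes.
final = concatenate_videoclips([clip1, clip2], method="compose", padding=-1)
final.write_videofile("joined.mp4", fps=24)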
Example #37
def concatenate(clipslist, method="chain", transition=None, bg_color=(0, 0, 0),
                transparent=False, ismask=False, padding = 0):
    """ Concatenates several video clips
    
    Returns a video clip made by concatenating several video clips.
    (Concatenated means that they will be played one after another).
    
    There are two methods: method="chain" will produce a clip that simply outputs
    the frames of the successive clips, without any correction if they are
    not of the same size or anything.

    With method="compose", if the clips do not have the same
    resolution, the final resolution will be such that no clip has
    to be resized.
    As a consequence the final clip has the height of the highest
    clip and the width of the widest clip of the list. All the
    clips with smaller dimensions will appear centered. The border
    will be transparent if mask=True, else it will be of the
    color specified by ``bg_color``.

    Returns a VideoClip instance if all clips have the same size and
    there is no transition, else a composite clip.
    
    Parameters
    -----------

    clipslist
      A list of video clips which must all have their ``duration``
      attributes set.

    method
      "chain" or "compose": see above.

    transition
      A clip that will be played between each two clips of the list.  
    
    bg_color
      Color of the background, if any.

    transparent
      If True, the resulting clip's mask will be the concatenation of
      the masks of the clips in the list. If the clips do not have the
      same resolution, the border around the smaller clips will be
      transparent.

    padding
      Duration between two consecutive clips. If negative, a clip will
      play at the same time as the clip it follows. A non-null
      padding automatically sets the method to `compose`.
           
    """

    if transition != None:
        l = [[v, transition] for v in clipslist[:-1]]
        clipslist = reduce(lambda x, y: x + y, l) + [clipslist[-1]]
        transition = None

    
    tt = np.cumsum([0] + [c.duration for c in clipslist])

    sizes = [v.size for v in clipslist]


    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    tt = np.maximum(0, tt + padding*np.arange(len(tt)))
    
    if method == "chain":
        def gf(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clipslist[i].get_frame(t - tt[i])
        result = VideoClip(ismask = ismask, get_frame = gf)
        if transparent:
            clips_w_masks = [(c.add_mask() if c.mask is None else c) for c in clipslist]
            masks = [c.mask for c in clips_w_masks]
            result.mask = concatenate(masks, method="chain", ismask=True)


    elif method == "compose":
        result = CompositeVideoClip( [c.set_start(t).set_pos('center')
                                for (c, t) in zip(clipslist, tt)],
               size = (w, h), bg_color=bg_color, ismask=ismask,
               transparent=transparent )

    result.tt = tt
    result.clipslist = clipslist
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1] , tt[-1]
    
    audio_t = [(c.audio,t) for c,t in zip(clipslist,tt) if c.audio!=None]
    if len(audio_t)>0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                for a,t in audio_t])
    return result
Example #38
def subclip_creator(clip, start, end):
    return clip.subclip(start, end)
Example #39
    def __init__(self, clips, size=None, bg_color=None, use_bgclip=False, ismask=False):

        if size is None:
            size = clips[0].size

        if use_bgclip and (clips[0].mask is None):
            transparent = False
        else:
            transparent = bg_color is None

        if bg_color is None:
            bg_color = 0.0 if ismask else (0, 0, 0)

        fpss = [c.fps for c in clips if getattr(c, "fps", None)]
        self.fps = max(fpss) if fpss else None

        VideoClip.__init__(self)

        self.size = size
        self.ismask = ismask
        self.clips = clips
        self.bg_color = bg_color

        if use_bgclip:
            self.bg = clips[0]
            self.clips = clips[1:]
            self.created_bg = False
        else:
            self.clips = clips
            self.bg = ColorClip(size, color=self.bg_color, ismask=ismask)
            self.created_bg = True

        # compute duration
        ends = [c.end for c in self.clips]
        if None not in ends:
            duration = max(ends)
            self.duration = duration
            self.end = duration

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if audioclips:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask if necessary
        if transparent:
            maskclips = [
                (c.mask if (c.mask is not None) else c.add_mask().mask)
                .set_position(c.pos)
                .set_end(c.end)
                .set_start(c.start, change_end=False)
                for c in self.clips
            ]

            self.mask = CompositeVideoClip(
                maskclips, self.size, ismask=True, bg_color=0.0
            )

        def make_frame(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg.get_frame(t)
            for c in self.playing_clips(t):
                f = c.blit_on(f, t)
            return f

        self.make_frame = make_frame
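A minimal compositing sketch using the constructor shown above (placeholder file names): clips are blitted over the background in list order, so a full-size first clip effectively acts as the background.

# Sketch only; background.mp4 and logo.png are placeholder files.
from moviepy.editor import VideoFileClip, ImageClip, CompositeVideoClip

background = VideoFileClip("background.mp4")
logo = (ImageClip("logo.png")
        .set_duration(background.duration)
        .set_position(("right", "top")))

final = CompositeVideoClip([background, logo])  # size defaults to background.size
final.write_videofile("with_logo.mp4")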
Example #40
    def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
                 ismask=False):

        if size is None:
            size = clips[0].size

        
        if use_bgclip and (clips[0].mask is None):
            transparent = False
        else:
            transparent = (bg_color is None)
        
        if bg_color is None:
            bg_color = 0.0 if ismask else (0, 0, 0)

        
        fps_list = list(set([c.fps for c in clips if hasattr(c,'fps')]))
        if len(fps_list)==1:
            self.fps= fps_list[0]

        VideoClip.__init__(self)
        
        self.size = size
        self.ismask = ismask
        self.clips = clips
        self.bg_color = bg_color

        if use_bgclip:
            self.bg = clips[0]
            self.clips = clips[1:]
        else:
            self.clips = clips
            self.bg = ColorClip(size, col=self.bg_color)

        
        
        # compute duration
        ends = [c.end for c in self.clips]
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if len(audioclips) > 0:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask if necessary
        if transparent:
            maskclips = [(c.mask if (c.mask is not None) else
                          c.add_mask().mask).set_pos(c.pos).set_end(c.end).set_start(c.start, change_end=False)
                          for c in self.clips]

            self.mask = CompositeVideoClip(maskclips,self.size, ismask=True,
                                               bg_color=0.0)

        def make_frame(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg.get_frame(t)
            for c in self.playing_clips(t):
                f = c.blit_on(f, t)
            return f

        self.make_frame = make_frame
Example #41
0
def concatenate_videoclips(clips,
                           method="chain",
                           transition=None,
                           bg_color=None,
                           ismask=False,
                           padding=0):
    """ Concatenates several video clips
    
    Returns a video clip made by concatenating several video clips.
    (Concatenated means that they will be played one after another).
    
    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the succesive clips, without any correction if they are
      not of the same size of anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same
      resolution, the final resolution will be such that no clip has
       to be resized.
       As a consequence the final clip has the height of the highest
       clip and the width of the widest clip of the list. All the
       clips with smaller dimensions will appear centered. The border
       will be transparent if mask=True, else it will be of the
       color specified by ``bg_color``.

    If all clips with an ``fps`` attribute have the same fps, it becomes the
    fps of the result.

    Parameters
    -----------

    clips
      A list of video clips which must all have their ``duration``
      attributes set.

    method
      "chain" or "compose": see above.

    transition
      A clip that will be played between each two clips of the list.
    
    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip.

    padding
      Only for method='compose'. Duration between two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is useful for clips that
      fade into one another). A non-null padding automatically sets the
      method to `compose`.
           
    """

    if transition is not None:
        l = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, l) + [clips[-1]]
        transition = None

    tt = np.cumsum([0] + [c.duration for c in clips])

    sizes = [v.size for v in clips]

    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    tt = np.maximum(0, tt + padding * np.arange(len(tt)))

    if method == "chain":

        def make_frame(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clips[i].get_frame(t - tt[i])

        result = VideoClip(ismask=ismask, make_frame=make_frame)
        if any([c.mask is not None for c in clips]):
            masks = [
                c.mask if (c.mask is not None) else ColorClip(
                    [1, 1], col=1, ismask=True, duration=c.duration)
                #ColorClip(c.size, col=1, ismask=True).set_duration(c.duration)
                for c in clips
            ]
            result.mask = concatenate_videoclips(masks,
                                                 method="chain",
                                                 ismask=True)
            result.clips = clips

    elif method == "compose":
        result = CompositeVideoClip(
            [c.set_start(t).set_pos('center') for (c, t) in zip(clips, tt)],
            size=(w, h),
            bg_color=bg_color,
            ismask=ismask)

    result.tt = tt

    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    audio_t = [(c.audio, t) for c, t in zip(clips, tt) if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t) for a, t in audio_t])

    fps_list = list(set([c.fps for c in clips if hasattr(c, 'fps')]))
    if len(fps_list) == 1:
        result.fps = fps_list[0]

    return result
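
A minimal usage sketch for the function above; the filenames and the explicit fps are placeholders, and the moviepy.editor import path is an assumption about the MoviePy version this variant comes from:

# hypothetical usage: join two clips of different sizes without resizing them
from moviepy.editor import VideoFileClip, concatenate_videoclips
clip1 = VideoFileClip("part1.mp4")   # placeholder file
clip2 = VideoFileClip("part2.mp4")   # placeholder file
final = concatenate_videoclips([clip1, clip2], method="compose")
final.write_videofile("joined.mp4", fps=24)  # explicit fps in case the sources differ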
Example #42
0
def concatenate_videoclips(
    clips, method="chain", transition=None, bg_color=None, is_mask=False, padding=0
):
    """Concatenates several video clips

    Returns a video clip made by concatenating several video clips.
    (Concatenated means that they will be played one after another).

    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the succesive clips, without any correction if they are
      not of the same size of anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same
      resolution, the final resolution will be such that no clip has
       to be resized.
       As a consequence the final clip has the height of the highest
       clip and the width of the widest clip of the list. All the
       clips with smaller dimensions will appear centered. The border
       will be transparent if mask=True, else it will be of the
       color specified by ``bg_color``.

    The fps of the result clip is the highest fps among the input clips that
    have one (None if no clip has an fps).

    Parameters
    -----------
    clips
      A list of video clips which must all have their ``duration``
      attributes set.
    method
      "chain" or "compose": see above.
    transition
      A clip that will be played between each two clips of the list.

    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip.

    padding
      Only for method='compose'. Duration between two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is useful for clips that
      fade into one another). A non-null padding automatically sets the
      method to `compose`.

    """

    if transition is not None:
        clip_transition_pairs = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, clip_transition_pairs) + [clips[-1]]
        transition = None

    timings = np.cumsum([0] + [clip.duration for clip in clips])

    sizes = [clip.size for clip in clips]

    w = max(size[0] for size in sizes)
    h = max(size[1] for size in sizes)

    timings = np.maximum(0, timings + padding * np.arange(len(timings)))
    timings[-1] -= padding  # Last element is the duration of the whole clip

    if method == "chain":

        def make_frame(t):
            i = max([i for i, e in enumerate(timings) if e <= t])
            return clips[i].get_frame(t - timings[i])

        def get_mask(clip):
            mask = clip.mask or ColorClip([1, 1], color=1, is_mask=True)
            if mask.duration is None:
                mask.duration = clip.duration
            return mask

        result = VideoClip(is_mask=is_mask, make_frame=make_frame)
        if any([clip.mask is not None for clip in clips]):
            masks = [get_mask(clip) for clip in clips]
            result.mask = concatenate_videoclips(masks, method="chain", is_mask=True)
            result.clips = clips
    elif method == "compose":
        result = CompositeVideoClip(
            [
                clip.with_start(t).with_position("center")
                for (clip, t) in zip(clips, timings)
            ],
            size=(w, h),
            bg_color=bg_color,
            is_mask=is_mask,
        )
    else:
        raise Exception(
            "Moviepy Error: The 'method' argument of "
            "concatenate_videoclips must be 'chain' or 'compose'"
        )

    result.timings = timings

    result.start_times = timings[:-1]
    result.start, result.duration, result.end = 0, timings[-1], timings[-1]

    audio_t = [
        (clip.audio, t) for clip, t in zip(clips, timings) if clip.audio is not None
    ]
    if audio_t:
        result.audio = CompositeAudioClip([a.with_start(t) for a, t in audio_t])

    fpss = [clip.fps for clip in clips if getattr(clip, "fps", None) is not None]
    result.fps = max(fpss) if fpss else None
    return result
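
A minimal sketch of the padding behaviour described above, against the newer with_*-style variant; the filenames, the half-second overlap, and the top-level import path are assumptions:

# hypothetical usage: overlap consecutive clips by half a second (negative padding)
from moviepy import VideoFileClip, concatenate_videoclips  # import path assumed for this API version
clips = [VideoFileClip(name) for name in ("a.mp4", "b.mp4", "c.mp4")]  # placeholder files
final = concatenate_videoclips(clips, method="compose", padding=-0.5)
final.write_videofile("sequence.mp4")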
Example #43
0
def concatenate(clipslist, method='chain', transition=None,
                bg_color=(0, 0, 0), transparent=False, ismask=False, crossover=0):
    """ Concatenates several video clips
    
    Returns a video clip made by concatenating several video clips.
    (Concatenated means that they will be played one after another).
    if the clips do not have the same resolution, the final
    resolution will be such that no clip has to be resized. As
    a consequence the final clip has the height of the highest
    clip and the width of the widest clip of the list. All the
    clips with smaller dimensions will appear centered. The border
    will be transparent if mask=True, else it will be of the
    color specified by ``bg_color``.
    
    Returns a VideoClip instance if all clips have the same size and
    there is no transition, else a composite clip.
    
    Parameters
    -----------

    clipslist
      A list of video clips which must all have their ``duration``
      attributes set.
    
    transition
      A clip that will be played between each two clips of the list.  
    
    bg_color
      Color of the background, if any.

    transparent
      If True, the resulting clip's mask will be the concatenation of
      the masks of the clips in the list. If the clips do not have the
      same resolution, the border around the smaller clips will be
      transparent.
    
                       
    """
    
    if transition is not None:
        l = [[v, transition] for v in clipslist[:-1]]
        clipslist = reduce(lambda x, y: x + y, l) + [clipslist[-1]]
        transition = None
    
    tt = np.cumsum([0] + [c.duration for c in clipslist])
    sizes = [v.size for v in clipslist]
    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])
    
    if method == 'chain':
        result = VideoClip(ismask = ismask)
        result.size = (w,h)

        def gf(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clipslist[i].get_frame(t - tt[i])
        
        result.get_frame = gf
        if (len(set(map(tuple,sizes)))>1) and (bg_color is not None):
            # If not all clips have the same size, flatten the result
            # on some color
            result = result.fx( on_color, (w,h), bg_color, 'center')
        
    elif method == 'compose':
        tt = np.maximum(0, tt - crossover*np.arange(len(tt)))
        result = CompositeVideoClip([c.set_start(t).set_pos('center')
                                     for (c, t) in zip(clipslist, tt)],
                                    size=(w, h), bg_color=bg_color,
                                    ismask=ismask, transparent=transparent)
    
    result.tt = tt
    result.clipslist = clipslist
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1] , tt[-1]
    
    # Compute the mask if any
    
    if transparent and (not ismask):
        # add a mask to the clips which have none
        clips_withmask = [(c if (c.mask is not None) else c.add_mask())
                          for c in clipslist]
        result.mask = concatenate([c.mask for c in clips_withmask],
                                  bg_color=0, ismask=True, transparent=False)
                    
                    
    # Compute the audio, if any.
    
    audio_t = [(c.audio, t) for c, t in zip(clipslist, tt) if c.audio is not None]
    if audio_t:
        result.audio = CompositeAudioClip([a.set_start(t)
                                           for a, t in audio_t])
    return result
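
A minimal sketch for the legacy helper above, using the crossover parameter it exposes; c1 and c2 are assumed to be already-loaded clips with their durations set, and the half-second overlap is an illustrative value:

# hypothetical usage: play two clips back to back with a 0.5 s overlap
final = concatenate([c1, c2], method='compose', crossover=0.5)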