Example #1
def make_video(audio, filename, progan, n_bins=60, random_state=0, imgs_per_batch=20):
    y, sr = librosa.load(audio)
    song_length = len(y) / sr
    z_audio = get_z_from_audio(y, z_length=progan.z_length, n_bins=n_bins, random_state=random_state)
    fps = z_audio.shape[0] / song_length
    res = progan.get_cur_res()
    shape = (res, res * 16 // 9, 3)

    imgs = np.zeros(shape=[imgs_per_batch, *shape], dtype=np.float32)

    def make_frame(t):
        nonlocal imgs  # rebind the batch buffer defined in make_video; 'global' would miss it
        cur_frame_idx = int(t * fps)

        if cur_frame_idx >= len(z_audio):
            return np.zeros(shape=shape, dtype=np.uint8)

        if cur_frame_idx % imgs_per_batch == 0:
            imgs = progan.generate(z_audio[cur_frame_idx:cur_frame_idx + imgs_per_batch])
            imgs = imgs[:, :, :res * 8 // 9, :]
            imgs_rev = np.flip(imgs, 2)
            imgs = np.concatenate((imgs, imgs_rev), 2)

        return imgs[cur_frame_idx % imgs_per_batch]

    video_clip = VideoClip(make_frame=make_frame, duration=song_length)
    audio_clip = AudioFileClip(audio)
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(filename, fps=fps)
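
Example #1 depends on a trained `progan` model and a `get_z_from_audio` helper that are not shown. The sketch below reproduces just the batch-and-cache pattern from `make_frame` in a self-contained way, with a dummy generator standing in for `progan.generate` (the gray-level mapping is an assumption for illustration only):

import numpy as np
from moviepy.editor import VideoClip

def make_dummy_video(filename, duration=2.0, fps=24, res=64, batch=20):
    n_frames = int(duration * fps)
    z = np.random.RandomState(0).randn(n_frames, 8)  # stand-in for z_audio
    cache = {"start": -1, "imgs": None}

    def generate(batch_z):
        # Stand-in for progan.generate(): one gray level per latent vector.
        levels = (np.tanh(batch_z[:, 0]) * 127 + 128).astype(np.uint8)
        return np.stack([np.full((res, res, 3), lv, np.uint8) for lv in levels])

    def make_frame(t):
        idx = min(int(t * fps), n_frames - 1)
        start = (idx // batch) * batch
        if start != cache["start"]:  # regenerate images once per batch
            cache["imgs"] = generate(z[start:start + batch])
            cache["start"] = start
        return cache["imgs"][idx - start]

    VideoClip(make_frame, duration=duration).write_videofile(filename, fps=fps)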
Example #2
    def make_demo_clip(self, image=None):
        def make_frame(t):
            if image is None:
                f = ColorClip((1333, 1000), [56, 14, 252]).make_frame(0)
            else:
                f = ImageClip(image).make_frame(0)
            for key, key_config in self.make_items():
                data = key_config.sample(f)
                #print ("data:", data)
                if data is None:
                    continue
                #print (key, key_config)
                if key == "map":
                    map_conf = key_config.config
                    created_clip = ColorClip(
                        (map_conf["map_w"], map_conf["map_h"]), [23, 8, 89])
                else:
                    created_clip = key_config.func(data)
                if created_clip is None:
                    continue
                c = key_config.position(created_clip, f.shape[1], f.shape[0])
                f = c.blit_on(f, 0)
            return f

        return VideoClip(make_frame, duration=1)
Example #3
    def __init__(self, subtitles, make_textclip=None, encoding=None):

        VideoClip.__init__(self, has_constant_size=False)

        if isinstance(subtitles, str):
            subtitles = file_to_subtitles(subtitles, encoding=encoding)

        # subtitles = [(map(cvsecs, tt),txt) for tt, txt in subtitles]
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:
            make_textclip = lambda txt: TextClip(
                txt,
                font="Georgia-Bold",
                fontsize=24,
                color="white",
                stroke_color="black",
                stroke_width=0.5,
            )

        self.make_textclip = make_textclip
        self.start = 0
        self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
        self.end = self.duration

        def add_textclip_if_none(t):
            """ Will generate a textclip if it hasn't been generated asked
            to generate it yet. If there is no subtitle to show at t, return
            false. """
            sub = [
                ((ta, tb), txt)
                for ((ta, tb), txt) in self.textclips.keys()
                if (ta <= t < tb)
            ]
            if not sub:
                sub = [
                    ((ta, tb), txt)
                    for ((ta, tb), txt) in self.subtitles
                    if (ta <= t < tb)
                ]
                if not sub:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]])

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]])

        self.make_frame = make_frame
        hasmask = bool(self.make_textclip("T").mask)
        self.mask = VideoClip(make_mask_frame, ismask=True) if hasmask else None
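
A minimal usage sketch for this SubtitlesClip constructor, assuming moviepy 1.x with ImageMagick available for TextClip; "input.mp4" and the font are placeholders:

from moviepy.editor import TextClip, VideoFileClip, CompositeVideoClip
from moviepy.video.tools.subtitles import SubtitlesClip

subs = [((0.0, 2.0), "Hello"), ((2.0, 4.0), "World")]
generator = lambda txt: TextClip(txt, font="Georgia-Bold", fontsize=24,
                                 color="white")
subtitles = SubtitlesClip(subs, make_textclip=generator)

video = VideoFileClip("input.mp4")
result = CompositeVideoClip([video,
                             subtitles.set_position(("center", "bottom"))])
result.write_videofile("subtitled.mp4", fps=video.fps)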
Example #4
def video_frame(fct_frame, **kwargs):
    """
    Creates a video from drawings or images.
    *fct_frame* can either be a function which draws a picture at time *t*,
    a list of picture names, or a folder.

    @param      fct_frame       function like ``def make_frame(t: float) -> numpy.ndarray``,
                                or list of images or folder name
    @param      kwargs          additional arguments for function
                                `make_frame <https://zulko.github.io/moviepy/getting_started/videoclips.html#videoclip>`_
    @return                     :epkg:`VideoClip`
    """
    if isinstance(fct_frame, str):
        if not os.path.exists(fct_frame):
            raise FileNotFoundError(
                "Unable to find folder '{0}'".format(fct_frame))
        imgs = os.listdir(fct_frame)
        exts = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff'}
        imgs = [
            os.path.join(fct_frame, _) for _ in imgs
            if os.path.splitext(_)[-1].lower() in exts
        ]
        return video_frame(imgs, **kwargs)
    elif isinstance(fct_frame, list):
        for img in fct_frame:
            if not os.path.exists(img):
                raise FileNotFoundError(
                    "Unable to find image '{0}'".format(img))
        return ImageSequenceClip(fct_frame, **kwargs)
    else:
        return VideoClip(fct_frame, **kwargs)
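
A short usage sketch for video_frame with a drawing callback; the gradient frame is illustrative, and keyword arguments such as duration are forwarded to VideoClip:

import numpy as np

def my_frame(t):
    # Brightness ramps from black to white over two seconds.
    level = int(255 * t / 2.0)
    return np.full((120, 160, 3), level, dtype=np.uint8)

clip = video_frame(my_frame, duration=2.0)
clip.write_videofile("ramp.mp4", fps=24)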
Example #5
def test_matplotlib():
    # For now, Python 3.5 installs a version of matplotlib that complains
    # about the $DISPLAY variable, so let's just skip it for now.
    if PYTHON_VERSION in ('2.7', '3.3') or (PYTHON_VERSION == '3.5' and TRAVIS):
        return

    import matplotlib.pyplot as plt
    import numpy as np
    from moviepy.video.io.bindings import mplfig_to_npimage
    from moviepy.video.VideoClip import VideoClip

    x = np.linspace(-2, 2, 200)

    duration = 2

    fig, ax = plt.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x**2) + np.sin(x + 2*np.pi/duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(os.path.join(TMP_DIR, 'matplotlib.gif'), fps=20)
Example #6
    def __init__(
        self,
        filename,
        has_mask=False,
        audio=True,
        audio_buffersize=200000,
        target_resolution=None,
        resize_algorithm="bicubic",
        audio_fps=44100,
        audio_nbytes=2,
        fps_source="tbr",
    ):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(
            filename,
            pix_fmt=pix_fmt,
            target_resolution=target_resolution,
            resize_algo=resize_algorithm,
            fps_source=fps_source,
        )

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]

            def mask_mf(t):
                return self.reader.get_frame(t)[:, :, 3] / 255.0

            self.mask = VideoClip(
                ismask=True, make_frame=mask_mf).set_duration(self.duration)
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos["audio_found"]:

            self.audio = AudioFileClip(
                filename,
                buffersize=audio_buffersize,
                fps=audio_fps,
                nbytes=audio_nbytes,
            )
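
Typical construction through this initializer, assuming moviepy 1.x; "input.mp4" is a placeholder. target_resolution is (height, width), with None leaving one dimension free to preserve the aspect ratio:

from moviepy.editor import VideoFileClip

clip = VideoFileClip("input.mp4",
                     target_resolution=(720, None),  # decode at 720p height
                     audio_fps=44100)
print(clip.duration, clip.fps, clip.size)
frame = clip.get_frame(1.0)  # RGB numpy array at t = 1s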
Example #7
    def __init__(self, subtitles, make_textclip=None):

        VideoClip.__init__(self, has_constant_size=False)

        if isinstance(subtitles, str):
            subtitles = file_to_subtitles(subtitles)

        subtitles = [(tuple(map(cvsecs, tt)), txt) for tt, txt in subtitles]  # tuple() so times survive repeated iteration and hashing
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:

            make_textclip = lambda txt: TextClip(txt,
                                                 font='Georgia-Bold',
                                                 fontsize=24,
                                                 color='white',
                                                 stroke_color='black',
                                                 stroke_width=0.5)

        self.make_textclip = make_textclip
        self.inicia = 0
        self.duracion = max([tb for ((ta, tb), txt) in self.subtitles])
        self.fin = self.duracion

        def add_textclip_if_none(t):
            """ Will generate a textclip if it hasn't been generated asked
            to generate it yet. If there is no subtitle to show at t, return
            false. """
            sub = [((ta, tb), txt)
                   for ((ta, tb), txt) in self.textclips.keys()
                   if (ta <= t < tb)]
            if not sub:
                sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                       if (ta <= t < tb)]
                if not sub:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].get_frame(t)
                    if sub else np.array([[[0, 0, 0]]]))

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].mask.get_frame(t)
                    if sub else np.array([[0]]))

        self.make_frame = make_frame
        hasmask = (self.make_textclip('T').mask is not None)
        self.mask = (VideoClip(make_mask_frame, ismask=True)
                     if hasmask else None)
Example #8
def test_videoclip_copy(copy_func):
    """It must be possible to do a mixed copy of VideoClip using ``clip.copy()``,
    ``copy.copy(clip)`` and ``copy.deepcopy(clip)``.
    """
    clip = VideoClip()
    other_clip = VideoClip()

    for attr in clip.__dict__:
        # mask and audio are shallow copies that should be initialized
        if attr in ("mask", "audio"):
            if attr == "mask":
                nested_object = BitmapClip([["R"]], duration=0.01)
            else:
                nested_object = AudioClip(
                    lambda t: [np.sin(880 * 2 * np.pi * t)],
                    duration=0.01,
                    fps=44100)
            setattr(clip, attr, nested_object)
        else:
            setattr(clip, attr, "foo")

    copied_clip = copy_func(clip)

    # VideoClip attributes are copied
    for attr in copied_clip.__dict__:
        value = getattr(copied_clip, attr)
        assert value == getattr(clip, attr)

        # other instances are not edited
        assert value != getattr(other_clip, attr)

        # shallow copies of mask and audio
        if attr in ("mask", "audio"):
            for nested_attr in value.__dict__:
                assert getattr(value,
                               nested_attr) == getattr(getattr(clip, attr),
                                                       nested_attr)

    # nested objects of instances copies are not edited
    assert other_clip.mask is None
    assert other_clip.audio is None
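
The copy_func argument is presumably injected by a pytest parametrization along these lines (a sketch, not the project's actual test configuration):

import copy

import pytest

@pytest.mark.parametrize(
    "copy_func",
    (copy.copy, copy.deepcopy, lambda clip: clip.copy()),
    ids=("copy.copy", "copy.deepcopy", "clip.copy"),
)
def test_videoclip_copy(copy_func):
    ...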
Example #9
    def episode_to_gif(self, episode=None, path='', fps=30):
        frames = self.episode_video_frames(episode)

        for ep in frames:
            fig, ax = plt.subplots()
            animation = VideoClip(partial(self._make_frame,
                                          frames=frames[ep],
                                          axes=ax,
                                          fig=fig,
                                          title=f'Episode {ep}'),
                                  duration=frames[ep].shape[0])
            animation.write_gif(path + f'episode_{ep}.gif', fps=fps)
Example #10
    def __init__(self,
                 filename,
                 has_mask=False,
                 audio=True,
                 audio_buffersize=200000,
                 target_resolution=None,
                 resize_algorithm='bicubic',
                 audio_fps=44100,
                 audio_nbytes=2,
                 verbose=False,
                 fps_source='tbr'):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = None  # need this just in case FFMPEG has issues (__del__ complains)
        self.reader = FFMPEG_VideoReader(filename,
                                         pix_fmt=pix_fmt,
                                         target_resolution=target_resolution,
                                         resize_algo=resize_algorithm,
                                         fps_source=fps_source)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = self.reader.filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
            mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
            self.mask = (VideoClip(
                ismask=True, make_frame=mask_mf).set_duration(self.duration))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
Example #11
    def __init__(self, subtitles, make_textclip=None):

        VideoClip.__init__(self)

        if isinstance(subtitles, str):
            subtitles = file_to_subtitles(subtitles)

        subtitles = [(tuple(map(cvsecs, tt)), txt) for tt, txt in subtitles]  # tuple() so times survive repeated iteration and hashing
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:

            make_textclip = lambda txt: TextClip(txt,
                                                 font='Georgia-Bold',
                                                 fontsize=24,
                                                 color='white',
                                                 stroke_color='black',
                                                 stroke_width=0.5)

        self.make_textclip = make_textclip
        self.start = 0
        self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
        self.end = self.duration

        def add_textclip_if_none(t):
            sub = [((ta, tb), txt)
                   for ((ta, tb), txt) in self.textclips.keys()
                   if (ta <= t < tb)]
            if not sub:
                sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                       if (ta <= t < tb)]
                if not sub:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])
            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].get_frame(t)
                    if sub else np.array([[[0, 0, 0]]]))

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].mask.get_frame(t)
                    if sub else np.array([[0]]))

        self.make_frame = make_frame
        self.mask = VideoClip(make_mask_frame, ismask=True)
Example #12
    def iplot_episode(self, episode, fps=30):
        if episode is None:
            raise ValueError('The episode cannot be None for jupyter display')
        x = self.episode_video_frames(episode)[episode]
        fig, ax = plt.subplots()

        self.current_animation = VideoClip(partial(self._make_frame,
                                                   frames=x,
                                                   axes=ax,
                                                   fig=fig,
                                                   title=f'Episode {episode}'),
                                           duration=x.shape[0])
        self.current_animation.ipython_display(fps=fps,
                                               loop=True,
                                               autoplay=True)
Example #13
def test_matplotlib_simple_example():
    import matplotlib.pyplot as plt

    plt.switch_backend("agg")

    x = np.linspace(-2, 2, 200)
    duration = 0.5

    fig, ax = plt.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x**2) + np.sin(x + 2 * np.pi / duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(os.path.join(TMP_DIR, "matplotlib.gif"), fps=20)
Example #14
def make_video(audio, filename, progan, n_bins=84, random_state=0):
    y, sr = librosa.load(audio)
    song_length = len(y) / sr
    z_audio = get_z_from_audio(y, z_length=progan.z_length, n_bins=n_bins, random_state=random_state)
    fps = z_audio.shape[0] / song_length
    shape = progan.generate(z_audio[0]).shape
    def make_frame(t):
        cur_frame_idx = int(t * fps)
        if cur_frame_idx < len(z_audio):
            img = progan.generate(z_audio[cur_frame_idx])
        else:
            img = np.zeros(shape=shape, dtype=np.uint8)
        return img

    video_clip = VideoClip(make_frame=make_frame, duration=song_length)
    audio_clip = AudioFileClip(audio)
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(filename, fps=fps)
Example #15
def test_matplotlib():
    # for now, python 3.5 installs a version of matplotlib that complains
    # about $DISPLAY variable, so lets just ignore for now.

    x = np.linspace(-2, 2, 200)

    duration = 2

    matplotlib.use("Agg")
    fig, ax = matplotlib.pyplot.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x ** 2) + np.sin(x + 2 * np.pi / duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(os.path.join(TMP_DIR, "matplotlib.gif"), fps=20)
Example #16
def test_issue_368():
    import matplotlib.pyplot as plt
    import numpy as np
    from sklearn import svm
    from sklearn.datasets import make_moons

    from moviepy.video.io.bindings import mplfig_to_npimage

    plt.switch_backend("agg")

    X, Y = make_moons(50, noise=0.1, random_state=2)  # semi-random data

    fig, ax = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))
    fig.subplots_adjust(left=0, right=1, bottom=0)
    xx, yy = np.meshgrid(np.linspace(-2, 3, 500), np.linspace(-1, 2, 500))

    def make_frame(t):
        ax.clear()
        ax.axis("off")
        ax.set_title("SVC classification", fontsize=16)

        classifier = svm.SVC(gamma=2, C=1)
        # the varying weights make the points appear one after the other
        weights = np.minimum(1, np.maximum(0, t**2 + 10 - np.arange(50)))
        classifier.fit(X, Y, sample_weight=weights)
        Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        ax.contourf(
            xx,
            yy,
            Z,
            cmap=plt.cm.bone,
            alpha=0.8,
            vmin=-2.5,
            vmax=2.5,
            levels=np.linspace(-2, 2, 20),
        )
        ax.scatter(X[:, 0], X[:, 1], c=Y, s=50 * weights, cmap=plt.cm.bone)

        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=0.2)
    animation.write_gif(os.path.join(TMP_DIR, "svm.gif"), fps=20)
Example #17
    def __init__(self,
                 filename,
                 has_mask=False,
                 audio=True,
                 audio_buffersize=200000,
                 audio_fps=44100,
                 audio_nbytes=2,
                 verbose=False):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
        self.reader = reader
        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size

        self.filename = self.reader.filename

        if has_mask:

            self.make_frame = lambda t: reader.get_frame(t)[:, :, :3]
            mask_mf = lambda t: reader.get_frame(t)[:, :, 3] / 255.0
            self.mask = (VideoClip(
                ismask=True, make_frame=mask_mf).set_duration(self.duration))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
Example #18
    def get_gif(self, default_fps=15, frame_skip=None):
        if frame_skip is not None:
            self.frames = self.frames[::frame_skip]
        try:
            from moviepy.video.VideoClip import VideoClip
            from moviepy.video.io.VideoFileClip import VideoFileClip
            from moviepy.video.io.html_tools import ipython_display

            fig, ax = plt.subplots()
            clip = VideoClip(partial(self._make_frame,
                                     frames=self.frames,
                                     axes=ax,
                                     fig=fig,
                                     fps=default_fps,
                                     matplot_to_np_fn=mplfig_to_npimage,
                                     title=f'Episode {self.episode}'),
                             duration=(self.frames.shape[0] / default_fps) - 1)
            plt.close(fig)
            return clip

        except ImportError:
            raise ImportError(
                'Package: `moviepy` is not installed. You can install it via: `pip install moviepy`'
            )
Example #19
def concatenate_videoclips(
    clips, method="chain", transition=None, bg_color=None, is_mask=False, padding=0
):
    """Concatenates several video clips

    Returns a video clip made by clip by concatenating several video clips.
    (Concatenated means that they will be played one after another).

    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the succesive clips, without any correction if they are
      not of the same size of anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same
      resolution, the final resolution will be such that no clip has
       to be resized.
       As a consequence the final clip has the height of the highest
       clip and the width of the widest clip of the list. All the
       clips with smaller dimensions will appear centered. The border
       will be transparent if mask=True, else it will be of the
       color specified by ``bg_color``.

    The clip with the highest FPS will be the FPS of the result clip.

    Parameters
    -----------
    clips
      A list of video clips which must all have their ``duration``
      attributes set.
    method
      "chain" or "compose": see above.
    transition
      A clip that will be played between each two clips of the list.

    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip

    padding
      Only for method='compose'. Duration during two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is cool for clips who fade
      in on one another). A non-null padding automatically sets the method to
      `compose`.

    """

    if transition is not None:
        clip_transition_pairs = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, clip_transition_pairs) + [clips[-1]]
        transition = None

    timings = np.cumsum([0] + [clip.duration for clip in clips])

    sizes = [clip.size for clip in clips]

    w = max(size[0] for size in sizes)
    h = max(size[1] for size in sizes)

    timings = np.maximum(0, timings + padding * np.arange(len(timings)))
    timings[-1] -= padding  # Last element is the duration of the whole

    if method == "chain":

        def make_frame(t):
            i = max([i for i, e in enumerate(timings) if e <= t])
            return clips[i].get_frame(t - timings[i])

        def get_mask(clip):
            mask = clip.mask or ColorClip([1, 1], color=1, is_mask=True)
            if mask.duration is None:
                mask.duration = clip.duration
            return mask

        result = VideoClip(is_mask=is_mask, make_frame=make_frame)
        if any([clip.mask is not None for clip in clips]):
            masks = [get_mask(clip) for clip in clips]
            result.mask = concatenate_videoclips(masks, method="chain", is_mask=True)
            result.clips = clips
    elif method == "compose":
        result = CompositeVideoClip(
            [
                clip.with_start(t).with_position("center")
                for (clip, t) in zip(clips, timings)
            ],
            size=(w, h),
            bg_color=bg_color,
            is_mask=is_mask,
        )
    else:
        raise Exception(
            "Moviepy Error: The 'method' argument of "
            "concatenate_videoclips must be 'chain' or 'compose'"
        )

    result.timings = timings

    result.start_times = timings[:-1]
    result.start, result.duration, result.end = 0, timings[-1], timings[-1]

    audio_t = [
        (clip.audio, t) for clip, t in zip(clips, timings) if clip.audio is not None
    ]
    if audio_t:
        result.audio = CompositeAudioClip([a.with_start(t) for a, t in audio_t])

    fpss = [clip.fps for clip in clips if getattr(clip, "fps", None) is not None]
    result.fps = max(fpss) if fpss else None
    return result
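
A minimal usage sketch for this version of concatenate_videoclips, assuming the moviepy 2.x style API that the example itself uses (with_duration, with_start); the colors and file name are placeholders:

from moviepy import ColorClip, concatenate_videoclips

red = ColorClip((320, 240), color=(255, 0, 0)).with_duration(1)
blue = ColorClip((480, 360), color=(0, 0, 255)).with_duration(1)

# "chain" assumes equal sizes; "compose" centers clips on a common canvas.
final = concatenate_videoclips([red, blue], method="compose", padding=-0.25)
final.write_videofile("joined.mp4", fps=24)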
Example #20
    def __init__(
        self,
        sequence,
        fps=None,
        durations=None,
        with_mask=True,
        is_mask=False,
        load_images=False,
    ):

        # CODE WRITTEN AS IT CAME, MAY BE IMPROVED IN THE FUTURE

        if (fps is None) and (durations is None):
            raise ValueError("Please provide either 'fps' or 'durations'.")
        VideoClip.__init__(self, is_mask=is_mask)

        # Parse the data

        fromfiles = True

        if isinstance(sequence, list):
            if isinstance(sequence[0], str):
                if load_images:
                    sequence = [imread(file) for file in sequence]
                    fromfiles = False
                else:
                    fromfiles = True
            else:
                # sequence is already a list of numpy arrays
                fromfiles = False
        else:
            # sequence is a folder name, make it a list of files:
            fromfiles = True
            sequence = sorted([
                os.path.join(sequence, file) for file in os.listdir(sequence)
            ])

        # check that all the images are of the same size and check if they are grayscale
        grayscale = False

        if isinstance(sequence[0], str):
            size = imread(sequence[0]).shape
        else:
            size = sequence[0].shape

        for image in sequence:
            image1 = image
            if isinstance(image, str):
                image1 = imread(image)
            if size != image1.shape:
                raise Exception(
                    "Moviepy: ImageSequenceClip requires all images to be the same size"
                )

        if len(size) == 2 or size[2] == 1:
            grayscale = True

        self.fps = fps
        if fps is not None:
            durations = [1.0 / fps for image in sequence]
            self.images_starts = [
                1.0 * i / fps - np.finfo(np.float32).eps
                for i in range(len(sequence))
            ]
        else:
            self.images_starts = [0] + list(np.cumsum(durations))
        self.durations = durations
        self.duration = sum(durations)
        self.end = self.duration
        self.sequence = sequence

        def find_image_index(t):
            return max([
                i for i in range(len(self.sequence))
                if self.images_starts[i] <= t
            ])

        def read_image(name, grayscale):
            """
            Wrapper for optional conversion from grayscale into rgb
            by duplicating single channel into 3 channels.
            """
            image = imread(name)
            if grayscale:
                image = np.stack((image, ) * 3, -1)
            return image

        if fromfiles:

            self.last_index = None
            self.last_image = None

            def make_frame(t):

                index = find_image_index(t)

                if index != self.last_index:
                    # using wrapper function to resolve possible grayscale issues
                    self.last_image = read_image(self.sequence[index],
                                                 grayscale)[:, :, :3]
                    self.last_index = index

                return self.last_image

            if with_mask and (read_image(self.sequence[0], grayscale).shape[2]
                              == 4):

                self.mask = VideoClip(is_mask=True)
                self.mask.last_index = None
                self.mask.last_image = None

                def mask_make_frame(t):

                    index = find_image_index(t)
                    if index != self.mask.last_index:
                        frame = imread(self.sequence[index])[:, :, 3]
                        self.mask.last_image = frame.astype(float) / 255
                        self.mask.last_index = index

                    return self.mask.last_image

                self.mask.make_frame = mask_make_frame
                self.mask.size = mask_make_frame(0).shape[:2][::-1]

        else:

            def make_frame(t):

                index = find_image_index(t)
                return self.sequence[index][:, :, :3]

            if with_mask and (self.sequence[0].shape[2] == 4):
                self.mask = VideoClip(is_mask=True)

                def mask_make_frame(t):
                    index = find_image_index(t)
                    return 1.0 * self.sequence[index][:, :, 3] / 255

                self.mask.make_frame = mask_make_frame
                self.mask.size = mask_make_frame(0).shape[:2][::-1]

        self.make_frame = make_frame
        self.size = make_frame(0).shape[:2][::-1]
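
Typical construction through this initializer; the folder and file names are placeholders, and at least one of fps or durations must be given:

from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

# From a folder of same-sized images, shown at 12 frames per second:
clip = ImageSequenceClip("frames/", fps=12)

# Or from an explicit list with per-image durations in seconds:
# clip = ImageSequenceClip(["a.png", "b.png"], durations=[0.5, 1.5])

clip.write_videofile("sequence.mp4", fps=24)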
Example #21
def concatenate_videoclips(clips,
                           method="chain",
                           transition=None,
                           bg_color=None,
                           ismask=False,
                           padding=0):
    """ Concatenates several video clips
    
    Returns a video clip made by clip by concatenating several video clips.
    (Concatenated means that they will be played one after another).
    
    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the succesive clips, without any correction if they are
      not of the same size of anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same
      resolution, the final resolution will be such that no clip has
       to be resized.
       As a consequence the final clip has the height of the highest
       clip and the width of the widest clip of the list. All the
       clips with smaller dimensions will appear centered. The border
       will be transparent if mask=True, else it will be of the
       color specified by ``bg_color``.

    If all clips with a fps attribute have the same fps, it becomes the fps of
    the result.

    Parameters
    -----------

    clips
      A list of video clips which must all have their ``duration``
      attributes set.

    method
      "chain" or "compose": see above.

    transition
      A clip that will be played between each two clips of the list.
    
    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip
    
    padding
      Only for method='compose'. Duration during two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is cool for clips who fade
      in on one another). A non-null padding automatically sets the method to
      `compose`.
           
    """

    if transition is not None:
        clip_transition_pairs = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, clip_transition_pairs) + [clips[-1]]
        transition = None

    tt = np.cumsum([0] + [c.duration for c in clips])

    sizes = [v.size for v in clips]

    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    tt = np.maximum(0, tt + padding * np.arange(len(tt)))

    if method == "chain":

        def make_frame(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clips[i].get_frame(t - tt[i])

        result = VideoClip(ismask=ismask, make_frame=make_frame)
        if any([c.mask is not None for c in clips]):
            masks = [
                c.mask if (c.mask is not None) else ColorClip(
                    [1, 1], col=1, ismask=True, duration=c.duration)
                #ColorClip(c.size, col=1, ismask=True).set_duration(c.duration)
                for c in clips
            ]
            result.mask = concatenate_videoclips(masks,
                                                 method="chain",
                                                 ismask=True)
            result.clips = clips

    elif method == "compose":
        result = CompositeVideoClip(
            [c.set_start(t).set_pos('center') for (c, t) in zip(clips, tt)],
            size=(w, h),
            bg_color=bg_color,
                                    ismask=ismask)
    else:
        raise ValueError("Moviepy Error: The 'method' argument of "
                         "concatenate_videoclips must be 'chain' or 'compose'")

    result.tt = tt

    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    audio_t = [(c.audio, t) for c, t in zip(clips, tt) if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t) for a, t in audio_t])

    fps_list = list(set([c.fps for c in clips if hasattr(c, 'fps')]))
    if len(fps_list) == 1:
        result.fps = fps_list[0]

    return result
Example #22
def concatenate(clipslist,
                method="chain",
                transition=None,
                bg_color=(0, 0, 0),
                transparent=False,
                ismask=False,
                padding=0):
    """ Concatenates several video clips
    
    Returns a video clip made by clip by concatenating several video clips.
    (Concatenated means that they will be played one after another).
    
    There are two methods: method="chain" will produce a clip that simply outputs
    the frames of the succesive clips, without any correction if they are
    not of the same size of anything.

    With method="compose", if the clips do not have the same
    resolution, the final resolution will be such that no clip has
    to be resized.
    As a consequence the final clip has the height of the highest
    clip and the width of the widest clip of the list. All the
    clips with smaller dimensions will appear centered. The border
    will be transparent if mask=True, else it will be of the
    color specified by ``bg_color``.

    Returns a VideoClip instance if all clips have the same size and
    there is no transition, else a composite clip.
    
    Parameters
    -----------

    clipslist
      A list of video clips which must all have their ``duration``
      attributes set.

    method
      "chain" or "compose": see above.

    transition
      A clip that will be played between each two clips of the list.  
    
    bg_color
      Color of the background, if any.

    transparent
      If True, the resulting clip's mask will be the concatenation of
      the masks of the clips in the list. If the clips do not have the
      same resolution, the border around the smaller clips will be
      transparent.

    padding
      Duration during two consecutive clips. If negative, a clip will
      play at the same time as the clip it follows. A non-null
      padding automatically sets the method to `compose`.
           
    """

    if transition is not None:
        l = [[v, transition] for v in clipslist[:-1]]
        clipslist = reduce(lambda x, y: x + y, l) + [clipslist[-1]]
        transition = None

    tt = np.cumsum([0] + [c.duration for c in clipslist])

    sizes = [v.size for v in clipslist]

    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    tt = np.maximum(0, tt + padding * np.arange(len(tt)))

    if method == "chain":

        def make_frame(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clipslist[i].get_frame(t - tt[i])

        result = VideoClip(ismask=ismask, make_frame=make_frame)
        if transparent:
            clips_w_masks = [(c.add_mask() if c.mask is None else c)
                             for c in clipslist]
            masks = [c.mask for c in clips_w_masks]
            result.mask = concatenate(masks, method="chain", ismask=True)

    elif method == "compose":
        result = CompositeVideoClip([
            c.set_start(t).set_pos('center') for (c, t) in zip(clipslist, tt)
        ],
                                    size=(w, h),
                                    bg_color=bg_color,
                                    ismask=ismask,
                                    transparent=transparent)

    result.tt = tt
    result.clipslist = clipslist
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    audio_t = [(c.audio, t) for c, t in zip(clipslist, tt) if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t) for a, t in audio_t])
    return result
Example #23
def concatenate(clipslist, method='chain', transition=None,
                bg_color=(0, 0, 0), transparent=False, ismask=False, crossover=0):
    """ Concatenates several video clips
    
    Returns a video clip made by clip by concatenating several video clips.
    (Concatenated means that they will be played one after another).
    if the clips do not have the same resolution, the final
    resolution will be such that no clip has to be resized. As
    a consequence the final clip has the height of the highest
    clip and the width of the widest clip of the list. All the
    clips with smaller dimensions will appear centered. The border
    will be transparent if mask=True, else it will be of the
    color specified by ``bg_color``.
    
    Returns a VideoClip instance if all clips have the same size and
    there is no transition, else a composite clip.
    
    Parameters
    -----------

    clipslist
      A list of video clips which must all have their ``duration``
      attributes set.
    
    transition
      A clip that will be played between each two clips of the list.  
    
    bg_color
      Color of the background, if any.

    transparent
      If True, the resulting clip's mask will be the concatenation of
      the masks of the clips in the list. If the clips do not have the
      same resolution, the border around the smaller clips will be
      transparent.
    
                       
    """
    
    if transition is not None:
        l = [[v, transition] for v in clipslist[:-1]]
        clipslist = reduce(lambda x, y: x + y, l) + [clipslist[-1]]
        transition = None
    
    tt = np.cumsum([0] + [c.duration for c in clipslist])
    sizes = [v.size for v in clipslist]
    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])
    
    if method == 'chain':
        result = VideoClip(ismask=ismask)
        result.size = (w, h)

        def gf(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clipslist[i].get_frame(t - tt[i])
        
        result.get_frame = gf
        if (len(set(map(tuple, sizes))) > 1) and (bg_color is not None):
            # If not all clips have the same size, flatten the result
            # on some color
            result = result.fx(on_color, (w, h), bg_color, 'center')
        
    elif method == 'compose':
        tt = np.maximum(0, tt - crossover*np.arange(len(tt)))
        result = concatenate( [c.set_start(t).set_pos('center')
                                    for (c, t) in zip(clipslist, tt)],
                   size = (w, h), bg_color=bg_color, ismask=ismask,
                   transparent=transparent)
    
    result.tt = tt
    result.clipslist = clipslist
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]
    
    # Compute the mask if any
    
    if transparent and (not ismask):
        # add a mask to the clips which have none
        clips_withmask = [(c if (c.mask is not None) else c.add_mask())
                          for c in clipslist]
        result.mask = concatenate([c.mask for c in clips_withmask],
                                  bg_color=0, ismask=True, transparent=False)
                    
                    
    # Compute the audio, if any.
    
    audio_t = [(c.audio, t) for c, t in zip(clipslist, tt) if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                           for a, t in audio_t])
    return result
Example #24
def animateImages(imglist, datetimelist, mask, settings, logger, duration, fps,
                  resolution, fformat):
    if len(imglist) == 0:
        return False
    mask, pgs, th = mask

    (duration, fps, resolution) = map(float, (duration, fps, resolution[:-1]))
    resolution = int(resolution)

    if fps == 0 and duration != 0:
        fps = len(datetimelist) / duration
        if fps < 1:
            fps = 1.0

    if fps != 0 and duration == 0:
        duration = len(datetimelist) / fps

    if fps == 0 and duration == 0:
        logger.set("Either duration or frames per second should not be zero.")
        return False

    logger.set('Generating animation...')
    logger.set('Number of images: ' + str(len(imglist)))
    logger.set('Animation duration: ' +
               str(datetime.timedelta(seconds=duration)))
    logger.set('Frames per second: ' + str(fps))
    logger.set('Resolution ' + str(resolution) + 'p')
    logger.set('Format: ' + str(fformat))

    (sdate, edate) = (datetimelist[0], datetimelist[-1])
    dateratio = (edate - sdate).total_seconds() / float(duration)
    animfname = str(uuid4()) + '.' + fformat.lower()
    while os.path.isfile(os.path.join(TmpDir, animfname)):
        animfname = str(uuid4()) + '.' + fformat.lower()
    animfname = os.path.join(TmpDir, animfname)
    datetimelist = np.array(datetimelist)
    toy_frame_ratio = 100
    year_total_secs = abs(
        datetime.datetime(1971, 1, 1, 0, 0, 0) -
        datetime.datetime(1970, 1, 1, 0, 0, 0)).total_seconds()

    def make_frame(t):
        try:
            frame_for_time_t = mahotas.imread(imglist[np.argmin(
                np.abs(datetimelist - sdate -
                       datetime.timedelta(seconds=dateratio * t)))])
        except Exception:
            # Fall back to a blank frame if the image cannot be read.
            frame_for_time_t = mahotas.imread(imglist[0]) * 0
        if len(frame_for_time_t.shape) != 3:
            frame_for_time_t = mahotas.imread(imglist[0]) * 0
        img_date = datetimelist[np.argmin(
            np.abs(datetimelist - sdate -
                   datetime.timedelta(seconds=dateratio * t)))]
        vid_date = sdate + datetime.timedelta(seconds=dateratio * t)
        img_toy = abs(
            datetime.datetime(img_date.year, 1, 1, 0, 0, 0) -
            img_date).total_seconds() / float(year_total_secs)
        vid_toy = abs(
            datetime.datetime(vid_date.year, 1, 1, 0, 0, 0) -
            vid_date).total_seconds() / float(year_total_secs)
        toyframe = np.zeros(
            (int(round(frame_for_time_t.shape[0] / toy_frame_ratio)),
             frame_for_time_t.shape[1]),
            dtype='uint8')
        img_toy = [
            int(round(toyframe.shape[1] * img_toy)) -
            int(round(frame_for_time_t.shape[0] / toy_frame_ratio)),
            int(round(toyframe.shape[1] * img_toy)) +
            int(round(frame_for_time_t.shape[0] / toy_frame_ratio))
        ]
        vid_toy = [
            int(round(toyframe.shape[1] * vid_toy)) -
            int(round(frame_for_time_t.shape[0] / toy_frame_ratio)),
            int(round(toyframe.shape[1] * vid_toy)) +
            int(round(frame_for_time_t.shape[0] / toy_frame_ratio))
        ]
        if img_toy[0] < 0:
            img_toy[0] = 0
        if img_toy[1] > toyframe.shape[1]:
            img_toy[1] = toyframe.shape[1]
        if vid_toy[0] < 0:
            vid_toy[0] = 0
        if vid_toy[1] > toyframe.shape[1]:
            vid_toy[1] = toyframe.shape[1]
        toyframe[:int(round(toyframe.shape[0] / 2)),
                 img_toy[0]:img_toy[1]] = 127
        toyframe[int(round(toyframe.shape[0] / 2)):,
                 vid_toy[0]:vid_toy[1]] = 255
        toyframe = np.dstack((toyframe, toyframe, toyframe))
        frame_for_time_t = np.vstack((frame_for_time_t, toyframe))
        logger.set('Frame time: |progress:4|queue:' + str(t + 1 / fps) +
                   '|total:' + str(round(int(duration))))
        return frame_for_time_t  # (Height x Width x 3) Numpy array

    animation = VideoClip(make_frame, duration=duration)
    if resolution != 0:
        animation = animation.resize(height=resolution)
    logger.set("Writing animation...")
    if fformat == "MP4":
        animation.write_videofile(animfname, fps=fps)
    if fformat == "GIF":
        animation.write_gif(animfname, fps=fps)

    output = ["filename", animfname]
    output = [["Time series animation", output]]
    return output
Example #25
def animateImagesFromResults(imglist, datetimelist, mask, settings, logger,
                             temporalmode, temporalrange, temporalthreshold,
                             replaceimages, varstoplot, barwidth, barlocation,
                             duration, fps, resolution, fformat, resdata):
    if len(imglist) == 0:
        return False
    if mask is not None:
        mask, pgs, th = mask
    (duration, fps, resolution,
     barwidth) = map(float, (duration, fps, resolution[:-1], barwidth))
    barwidth = barwidth / 100.0
    resolution = int(resolution)
    temporalthreshold = datetime.timedelta(hours=float(temporalthreshold))
    logger.set('Generating animation...')

    res_captions = []
    res_data = []
    for i, v in enumerate(resdata):
        if i % 2 == 0:
            res_captions.append(v)
        else:
            res_data.append(v)
    resdata = None

    # if temporalmode == 'Date interval':
    if True:
        sdate = min([
            datetime.datetime.strptime(temporalrange[0], '%d.%m.%Y'),
            datetime.datetime.strptime(temporalrange[1], '%d.%m.%Y')
        ])
        edate = max([
            datetime.datetime.strptime(temporalrange[0], '%d.%m.%Y'),
            datetime.datetime.strptime(temporalrange[1], '%d.%m.%Y')
        ])
        logger.set('Number of images: ' + str(
            np.sum((np.array(datetimelist) <= edate) *
                   (np.array(datetimelist) >= sdate))))
        if fps == 0:
            fps = np.sum((np.array(datetimelist) <= edate) *
                         (np.array(datetimelist) >= sdate)) / duration
            if fps < 1:
                fps = 1.0
    else:  #range in data
        sdate = min(res_data[res_captions.index('Time')])
        edate = max(res_data[res_captions.index('Time')])
        logger.set('Number of images: ' + str(len(imglist)))
        if fps == 0:
            fps = len(datetimelist) / duration
            if fps < 1:
                fps = 1.0

    logger.set('Animation duration: ' +
               str(datetime.timedelta(seconds=duration)))
    logger.set('Frames per second: ' + str(fps))
    logger.set('Number of frames: ' + str(fps * duration))
    logger.set('Resolution ' + str(resolution) + 'p')
    logger.set('Format: ' + str(fformat))

    dateratio = (edate - sdate).total_seconds() / float(duration)
    animfname = str(uuid4()) + '.' + fformat.lower()
    while os.path.isfile(os.path.join(TmpDir, animfname)):
        animfname = str(uuid4()) + '.' + fformat.lower()
    animfname = os.path.join(TmpDir, animfname)
    datetimelist = np.array(datetimelist)
    range_total_secs = abs(edate - sdate).total_seconds()

    for i, v in enumerate(varstoplot):
        if v[1] != 'Time':
            if v[4] == '':
                varstoplot[i][4] = np.nanmin(res_data[res_captions.index(
                    v[1])])
            else:
                varstoplot[i][4] = float(v[4])
            if v[5] == '':
                varstoplot[i][5] = np.nanmax(res_data[res_captions.index(
                    v[1])])
            else:
                varstoplot[i][5] = float(v[5])

    def make_frame(t):
        res_date = res_data[res_captions.index('Time')][np.argmin(
            np.abs(res_data[res_captions.index('Time')] - sdate -
                   datetime.timedelta(seconds=dateratio * t)))]
        if abs(res_date - sdate -
               datetime.timedelta(seconds=dateratio * t)) > temporalthreshold:
            img_file = False
        else:
            if res_date in datetimelist:
                img_date = res_date
                img_file = imglist[datetimelist.tolist().index(img_date)]
                try:
                    img = mahotas.imread(img_file)
                except Exception:
                    img_file = False
        if res_date not in datetimelist or img_file is False:  #'Closest image','Blank (Black)','Blank (White)','Monochromatic Noise'
            if replaceimages == 'Closest image':  #xxcheck later again
                img_date = datetimelist[np.argmin(
                    np.abs(datetimelist - res_date))]
                img_file = imglist[np.argmin(np.abs(datetimelist - res_date))]
                img = mahotas.imread(img_file)
            else:
                img_date = res_date
                if replaceimages == 'Blank (Black)':
                    img = mahotas.imread(imglist[0]) * 0
                if replaceimages == 'Blank (White)':
                    img = mahotas.imread(imglist[0]) * 0 + 255
                if replaceimages == 'Monochromatic Noise':
                    img = (
                        np.random.rand(*mahotas.imread(imglist[0]).shape[:2]) *
                        255).astype('uint8')
                    img = np.dstack((img, img, img))

        vid_date = sdate + datetime.timedelta(seconds=dateratio * t)
        res_toy = abs(
            datetime.datetime(res_date.year, 1, 1, 0, 0, 0) -
            res_date).total_seconds() / float(
                abs(
                    datetime.datetime(res_date.year, 12, 31, 23, 59, 59) -
                    datetime.datetime(res_date.year, 1, 1, 0, 0, 0)).
                total_seconds())
        if img_file is False:
            res_toy = 0.0
        vid_toy = datetime.timedelta(
            seconds=dateratio * t).total_seconds() / float(range_total_secs)
        if barlocation == 'Right' or barlocation == 'Left':
            barshape = (img.shape[0], int(round(img.shape[1] * barwidth)))
            for v in varstoplot:
                if bool(int(v[0])):
                    barframe = np.zeros(barshape, dtype='uint8')
                    if v[1] == 'Time':
                        barvalue = vid_toy
                        barvalue = int(round(barshape[0] * barvalue))
                        barvalue2 = res_toy
                        barvalue2 = int(round(barshape[0] * barvalue2))
                        barframe[-barvalue:, :int(round(barshape[1] /
                                                        2.0))] = 1
                        barframe[-barvalue2:,
                                 int(round(barshape[1] / 2.0)):] = 1
                        barframe = np.dstack(
                            ((barframe == 0) * int(v[2][1:3], 16) +
                             (barframe == 1) * int(v[3][1:3], 16),
                             (barframe == 0) * int(v[2][3:5], 16) +
                             (barframe == 1) * int(v[3][3:5], 16),
                             (barframe == 0) * int(v[2][5:7], 16) +
                             (barframe == 1) * int(v[3][5:7], 16)))
                        img = np.hstack((img, barframe))
                    else:
                        if img_file is False:
                            barframe = (np.random.rand(*barframe.shape[:2]) *
                                        255).astype('uint8')
                            barframe = np.dstack(
                                (barframe, barframe, barframe))
                        else:
                            barvalue = res_data[res_captions.index(
                                v[1])][res_data[res_captions.index(
                                    'Time')].tolist().index(res_date)]
                            barvalue = abs(
                                (barvalue /
                                 float(abs(float(v[5]) - float(v[4])))))
                            barvalue = int(round(barshape[0] * barvalue))
                            if np.isnan(barvalue):
                                barvalue = 0
                            barframe[-barvalue:, :] = 1
                            barframe = barframe.transpose(1,
                                                          0)[::-1].transpose(
                                                              1, 0)
                            barframe = np.dstack(
                                ((barframe == 0) * int(v[2][1:3], 16) +
                                 (barframe == 1) * int(v[3][1:3], 16),
                                 (barframe == 0) * int(v[2][3:5], 16) +
                                 (barframe == 1) * int(v[3][3:5], 16),
                                 (barframe == 0) * int(v[2][5:7], 16) +
                                 (barframe == 1) * int(v[3][5:7], 16)))
                        img = np.hstack((img, barframe))
        else:
            barshape = (int(round(img.shape[0] * barwidth)), img.shape[1])
            for v in varstoplot:
                if bool(int(v[0])):
                    barframe = np.zeros(barshape, dtype='uint8')
                    if v[1] == 'Time':
                        barvalue = vid_toy
                        barvalue = int(round(barshape[1] * barvalue))
                        barvalue2 = res_toy
                        barvalue2 = int(round(barshape[1] * barvalue2))
                        barframe[:int(round(barshape[0] / 2.0)), :barvalue] = 1
                        barframe[int(round(barshape[0] /
                                           2.0)):, :barvalue2] = 1
                        barframe = np.dstack(
                            ((barframe == 0) * int(v[2][1:3], 16) +
                             (barframe == 1) * int(v[3][1:3], 16),
                             (barframe == 0) * int(v[2][3:5], 16) +
                             (barframe == 1) * int(v[3][3:5], 16),
                             (barframe == 0) * int(v[2][5:7], 16) +
                             (barframe == 1) * int(v[3][5:7], 16)))
                        img = np.vstack((img, barframe))
                    else:
                        if not img_file:
                            # No input file: fill the bar with random noise.
                            barframe = (np.random.rand(*barframe.shape[:2]) *
                                        255).astype('uint8')
                            barframe = np.dstack(
                                (barframe, barframe, barframe))
                        else:
                            barvalue = res_data[res_captions.index(
                                v[1])][res_data[res_captions.index(
                                    'Time')].tolist().index(res_date)]
                            barvalue = barvalue / float(
                                abs(float(v[5]) - float(v[4])))
                            # NaN must be handled before the int() cast,
                            # which raises ValueError on NaN input.
                            if np.isnan(barvalue):
                                barvalue = 0
                            barvalue = int(round(barshape[1] * barvalue))
                            barframe[:, :barvalue] = 1
                            barframe = np.dstack(
                                ((barframe == 0) * int(v[2][1:3], 16) +
                                 (barframe == 1) * int(v[3][1:3], 16),
                                 (barframe == 0) * int(v[2][3:5], 16) +
                                 (barframe == 1) * int(v[3][3:5], 16),
                                 (barframe == 0) * int(v[2][5:7], 16) +
                                 (barframe == 1) * int(v[3][5:7], 16)))
                        img = np.vstack((img, barframe))
        logger.set('Frame time: |progress:4|queue:' + str(t + 1 / fps) +
                   '|total:' + str(int(round(duration))))
        return img  # (Height x Width x 3) Numpy array

    animation = VideoClip(make_frame, duration=duration)
    if resolution != 0:
        animation = animation.resize(height=resolution)
    logger.set("Writing animation...")
    if fformat == "MP4":
        animation.write_videofile(animfname, fps=fps)
    elif fformat == "GIF":
        animation.write_gif(animfname, fps=fps)

    output = ["filename", animfname]
    output = [["Time series animation", output]]
    return output
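
A minimal sketch of the frame-callback pattern the animation above is built on: moviepy's VideoClip calls make_frame(t) with a time in seconds and expects an (H, W, 3) uint8 array back. The hex_to_rgb helper is hypothetical, shown only to factor out the '#RRGGBB' decoding that the example inlines with np.dstack.

import numpy as np
from moviepy.editor import VideoClip

def hex_to_rgb(color):
    # Hypothetical helper: '#RRGGBB' -> (r, g, b) integers.
    return tuple(int(color[k:k + 2], 16) for k in (1, 3, 5))

def make_frame(t):
    # Draw a progress bar that fills over the clip's 5-second duration.
    frame = np.zeros((90, 160, 3), dtype='uint8')
    frame[:, :int(round(160 * t / 5.0))] = hex_to_rgb('#3080ff')
    return frame

clip = VideoClip(make_frame, duration=5)
clip.write_videofile('bar.mp4', fps=24)  # or clip.write_gif('bar.gif', fps=24)
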
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--dense-dir',
        required=True,
        type=Path,
        help='Directory containing dense segmentation .ppm files.')
    parser.add_argument('--images-dir', required=True, type=Path)
    parser.add_argument('--output-dir', required=True, type=Path)
    parser.add_argument('--output-fps', default=30, type=int)
    parser.add_argument('--output-images', action='store_true')
    parser.add_argument(
        '--background-id',
        required=True,
        help=('ID of background track in predictions. Can be an integer or '
              '"infer", in which case the background id is assumed to be the '
              'id of the track with the most pixels.'))

    args = parser.parse_args()

    assert args.dense_dir.exists()
    assert args.images_dir.exists()
    args.output_dir.mkdir(exist_ok=True, parents=True)

    setup_logging(args.output_dir / (Path(__file__).name + '.log'))
    logging.info('File path: %s', Path(__file__))
    logging.info('Args:\n%s', vars(args))

    colors = colormap()
    if args.background_id != 'infer':
        background_prediction_id = int(args.background_id)
    else:
        background_prediction_id = None

    dense_segmentations = natsorted(
        args.dense_dir.glob('*_dense.ppm'), alg=ns.PATH)
    images = natsorted(
        [x for x in args.images_dir.iterdir() if is_image_file(x.name)],
        alg=ns.PATH)
    assert len(images) == len(dense_segmentations)

    segmentation_frames = np.stack([
        np.array(Image.open(segmentation_ppm))
        for segmentation_ppm in dense_segmentations
    ])
    if segmentation_frames.ndim == 4 and segmentation_frames.shape[-1] == 1:
        segmentation_frames = segmentation_frames[:, :, :, 0]
    elif segmentation_frames.ndim == 4 and segmentation_frames.shape[-1] == 3:
        # Pack the RGB channels into a single 24-bit integer id per pixel.
        segmentation_frames = segmentation_frames.astype(np.int32)
        segmentation_frames = (segmentation_frames[:, :, :, 2] +
                               256 * segmentation_frames[:, :, :, 1] +
                               (256**2) * segmentation_frames[:, :, :, 0])
    assert segmentation_frames.ndim == 3

    all_ids, id_counts = np.unique(segmentation_frames, return_counts=True)
    id_counts = dict(zip(all_ids, id_counts))
    sorted_ids = sorted(
        id_counts.keys(), key=lambda i: id_counts[i], reverse=True)
    if background_prediction_id is None:  # Infer background id
        background_prediction_id = int(sorted_ids[0])
        logging.info('Inferred background prediction id as %s',
                     background_prediction_id)
        sorted_ids = sorted_ids[1:]
    else:
        sorted_ids = [
            x for x in sorted_ids if x != background_prediction_id
        ]
    # Map id to size index
    id_rankings = {
        region_id: index
        for index, region_id in enumerate(sorted_ids)
    }

    def visualize_frame(t):
        frame = int(t * args.output_fps)
        frame_mask = segmentation_frames[frame]
        image_path = images[frame]
        ids = sorted(np.unique(frame_mask))
        masks = [frame_mask == object_id for object_id in ids]

        # Sort masks by area
        ids_and_masks = sorted(zip(ids, masks), key=lambda x: x[1].sum())
        vis_image = cv2.imread(str(image_path))
        for mask_id, mask in ids_and_masks:
            if isinstance(mask_id, float):
                assert mask_id.is_integer()
                mask_id = int(mask_id)
            if mask_id == background_prediction_id:
                continue
            color = colors[int(id_rankings[mask_id]) % len(colors)]
            vis_image = vis_mask(
                vis_image,
                mask.astype(np.uint8),
                color,
                alpha=0.5,
                border_alpha=0.5,
                border_color=[255, 255, 255],
                border_thick=1)
        vis_image = vis_image[:, :, ::-1]  # BGR -> RGB
        if args.output_images:
            output_frame = args.output_dir / image_path.name
            output_frame.parent.mkdir(exist_ok=True, parents=True)
            Image.fromarray(vis_image).save(output_frame)
        return vis_image

    num_frames = segmentation_frames.shape[0]
    output_video = args.output_dir / 'video.mp4'
    output_video.parent.mkdir(exist_ok=True, parents=True)
    from moviepy.video.VideoClip import VideoClip
    clip = VideoClip(make_frame=visualize_frame)
    # Subtract a small epsilon; otherwise, moviepy can sometimes request
    # a frame at index num_frames.
    duration = num_frames / args.output_fps - 1e-10
    clip = clip.set_duration(duration).set_memoize(True)
    clip.write_videofile(
        str(output_video), fps=args.output_fps, verbose=False)
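
The epsilon subtracted from the duration above keeps moviepy from sampling t == duration, which would map to frame index num_frames, one past the last valid frame. Clamping inside the callback is an alternative guard; this is a sketch of the arithmetic, not part of the original script:

def clamp_frame_index(t, fps, num_frames):
    # t == num_frames / fps would otherwise yield index num_frames.
    return min(int(t * fps), num_frames - 1)

assert clamp_frame_index(100 / 30, fps=30, num_frames=100) == 99
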
Example #27
0
def test_without_audio(stereo_wave):
    audio_clip = AudioClip(stereo_wave(), duration=1, fps=22050)
    clip = VideoClip(duration=1).with_fps(1).with_audio(audio_clip)

    assert clip.audio is audio_clip
    assert clip.without_audio().audio is None
Example #28
0
def test_n_frames(duration, fps, expected_n_frames):
    clip = VideoClip(duration=duration).with_fps(fps)
    assert clip.n_frames == expected_n_frames
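
test_n_frames above takes (duration, fps, expected_n_frames) as arguments, which suggests a pytest parametrization was stripped during extraction. A hedged reconstruction, with assumed parameter cases:

import pytest
from moviepy.video.VideoClip import VideoClip

@pytest.mark.parametrize(
    ("duration", "fps", "expected_n_frames"),
    [(1, 60, 60), (2, 30, 60), (0.5, 24, 12)],  # assumed cases, not the originals
)
def test_n_frames(duration, fps, expected_n_frames):
    clip = VideoClip(duration=duration).with_fps(fps)
    assert clip.n_frames == expected_n_frames
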
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input-dir', required=True, type=Path)
    parser.add_argument('--output-dir', required=True, type=Path)
    parser.add_argument('--images-dir', required=True, type=Path)
    parser.add_argument('--np-extension', default='.npy')
    parser.add_argument('--output-fps', default=30, type=int)
    parser.add_argument('--output-images', action='store_true')
    parser.add_argument(
        '--background-id',
        required=True,
        help=('ID of background track in predictions. Can be an integer or '
              '"infer", in which case the background id is assumed to be the '
              'id of the track with the most pixels.'))

    args = parser.parse_args()

    colors = colormap()
    if args.background_id != 'infer':
        background_prediction_id = int(args.background_id)
    else:
        background_prediction_id = None

    for mask_path in tqdm(list(args.input_dir.rglob('*' + args.np_extension))):
        relative_dir = mask_path.relative_to(args.input_dir).with_suffix('')
        images_subdir = args.images_dir / relative_dir
        assert images_subdir.exists(), ('Could not find directory %s' %
                                        images_subdir)
        images = natsorted(
            [x for x in images_subdir.iterdir() if is_image_file(x.name)],
            alg=ns.PATH)

        all_frames_mask = np.load(mask_path)
        if args.np_extension == '.npz':
            # Segmentation saved with savez_compressed; ensure there is only
            # one item in the dict and retrieve it.
            keys = list(all_frames_mask.keys())
            assert len(keys) == 1, (
                'Numpy file (%s) contained dict with multiple items, not sure '
                'which one to load.' % mask_path)
            all_frames_mask = all_frames_mask[keys[0]]
        all_ids, id_counts = np.unique(all_frames_mask, return_counts=True)
        id_counts = dict(zip(all_ids, id_counts))
        sorted_ids = sorted(id_counts.keys(),
                            key=lambda i: id_counts[i],
                            reverse=True)
        if background_prediction_id is None:  # Infer background id
            current_bg = int(sorted_ids[0])
            print('Inferred background prediction id as %s for %s' %
                  (current_bg, relative_dir))
            sorted_ids = sorted_ids[1:]
        else:
            current_bg = background_prediction_id
            sorted_ids = [x for x in sorted_ids if x != current_bg]

        # Map id to size index
        id_rankings = {
            region_id: index
            for index, region_id in enumerate(sorted_ids)
        }

        def visualize_frame(t):
            frame = int(t * args.output_fps)
            frame_mask = all_frames_mask[frame]
            image_path = images[frame]
            ids = sorted(np.unique(frame_mask))
            masks = [frame_mask == object_id for object_id in ids]

            # Sort masks by area
            ids_and_masks = sorted(zip(ids, masks), key=lambda x: x[1].sum())
            vis_image = cv2.imread(str(image_path))
            for mask_id, mask in ids_and_masks:
                if isinstance(mask_id, float):
                    assert mask_id.is_integer()
                    mask_id = int(mask_id)
                if mask_id == current_bg:
                    continue
                color = colors[int(id_rankings[mask_id]) % len(colors)]
                vis_image = vis_mask(vis_image,
                                     mask.astype(np.uint8),
                                     color,
                                     alpha=0.5,
                                     border_alpha=0.5,
                                     border_color=[255, 255, 255],
                                     border_thick=2)
            vis_image = vis_image[:, :, ::-1]  # BGR -> RGB
            if args.output_images:
                output_frame = args.output_dir / image_path.relative_to(
                    args.images_dir)
                output_frame.parent.mkdir(exist_ok=True, parents=True)
                Image.fromarray(vis_image).save(output_frame)
            return vis_image

        num_frames = all_frames_mask.shape[0]
        output_video = (args.output_dir / relative_dir).with_suffix('.mp4')
        output_video.parent.mkdir(exist_ok=True, parents=True)
        from moviepy.video.VideoClip import VideoClip
        clip = VideoClip(make_frame=visualize_frame)
        # Subtract a small epsilon; otherwise, moviepy can sometimes request
        # a frame at index num_frames.
        duration = num_frames / args.output_fps - 1e-10
        clip = clip.set_duration(duration).set_memoize(True)
        clip.write_videofile(str(output_video),
                             fps=args.output_fps,
                             verbose=False)
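
The '.npz' branch above assumes the archive holds exactly one array, e.g. one written with np.savez_compressed under numpy's default key. A minimal round-trip sketch (the file name is assumed):

import numpy as np

masks = np.zeros((10, 480, 854), dtype=np.int32)
np.savez_compressed('masks.npz', masks)  # stored under the default key 'arr_0'

archive = np.load('masks.npz')
keys = list(archive.keys())  # NpzFile.keys() is a view, so wrap it in list()
assert len(keys) == 1, 'expected a single array in the archive'
restored = archive[keys[0]]
assert restored.shape == masks.shape
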
Example #30
0
def visualize_and_eval(video_name, face_detector, ahegao_classifier=None, output_file=None):
    enable_ahegao_classification = ahegao_classifier is not None

    cv2.ocl.setUseOpenCL(False)
    cap = cv2.VideoCapture(osp.join(VIDEOS_DIR, video_name))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    max_faces_probs = 3
    if output_file is None:
        plt.ion()
    fig = plt.figure(figsize=(15, 8))
    ax0 = plt.subplot2grid((2, 2), (0, 1))  # number of faces detected
    ax1 = plt.subplot2grid((2, 2), (1, 1))  # showing emotions distribution or faces probs
    ax2 = plt.subplot2grid((2, 2), (0, 0), rowspan=2)  # showing image
    axarr = [ax0, ax1, ax2]
    plt.tight_layout()
    axarr[0].set_title('num faces detected')
    face_line, = axarr[0].plot([], [], 'r-')
    face_probs_lines = []
    if enable_ahegao_classification:
        axarr[1].stackplot([], [])
    else:
        for i in range(max_faces_probs):
            face_probs_line, = axarr[1].plot([], [], 'r-')
            face_probs_lines.append(face_probs_line)
        axarr[1].set_ylim(-0.05, 1.05)
        axarr[1].yaxis.grid(True)
    im = axarr[2].imshow(np.zeros((height, width)))
    axarr[2].grid(False)
    axarr[2].axis('off')

    i = 0
    face_data_x = []
    face_data_y = []
    emotion_data_x = []
    emotion_data_y = np.empty(0)
    face_probs_x = []
    face_probs_y = np.empty((0, max_faces_probs))
    j = 0

    if output_file is None:
        def update_face_probs(face_probs_x, face_probs_y):
            for k, face_probs_line in enumerate(face_probs_lines):
                face_probs_line.set_xdata(face_probs_x)
                face_probs_line.set_ydata(face_probs_y[:, k])

        def update_face_line(face_data_x, face_data_y):
            face_line.set_xdata(face_data_x)
            face_line.set_ydata(face_data_y)
            # update x and ylim to show all points:
            axarr[0].set_xlim(min(face_data_x) - 0.5, max(face_data_x) + 0.5)
            axarr[0].set_ylim(min(face_data_y) - 0.5, max(face_data_y) + 0.5)

        should_stop = False
        while not should_stop:
            should_stop, emotion_data_x, emotion_data_y, face_probs_x, face_probs_y, i, j = process_frame(
                ahegao_classifier, axarr, cap, emotion_data_x, emotion_data_y, enable_ahegao_classification,
                face_data_x, face_data_y, face_detector, face_probs_x, face_probs_y, i, im, j, max_faces_probs,
                update_face_probs, update_face_line)
            plt.draw()
            plt.pause(0.0001)
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = frame_count / fps

        widgets = [progressbar.Percentage(), ' ', progressbar.Counter(), ' ', progressbar.Bar(), ' ',
                   progressbar.FileTransferSpeed()]
        pbar = progressbar.ProgressBar(widgets=widgets, max_value=frame_count).start()

        def update_face_probs(face_probs_x, face_probs_y):
            axarr[1].clear()
            for k, face_probs_line in enumerate(face_probs_lines):
                axarr[1].plot(face_probs_x, face_probs_y[:, k], 'r-')
            axarr[1].set_ylim(-0.05, 1.05)
            axarr[1].yaxis.grid(True)

        def update_face_line(face_data_x, face_data_y):
            axarr[0].clear()
            axarr[0].set_title('num faces detected')
            axarr[0].plot(face_data_x, face_data_y, 'r-')
            axarr[0].set_xlim(min(face_data_x) - 0.5, max(face_data_x) + 0.5)
            axarr[0].set_ylim(min(face_data_y) - 0.5, max(face_data_y) + 0.5)

        def make_frame(t):
            # `t` is ignored: frames are consumed sequentially from `cap`,
            # so moviepy must call this exactly once per output frame.
            nonlocal emotion_data_x, emotion_data_y, face_probs_x, face_probs_y, i, j
            pbar.update(i)
            _, emotion_data_x, emotion_data_y, face_probs_x, face_probs_y, i, j = process_frame(
                ahegao_classifier, axarr, cap, emotion_data_x, emotion_data_y, enable_ahegao_classification,
                face_data_x, face_data_y, face_detector, face_probs_x, face_probs_y, i, im, j, max_faces_probs,
                update_face_probs, update_face_line)
            return mplfig_to_npimage(fig)

        orig_audio = AudioFileClip(osp.join(VIDEOS_DIR, video_name))
        animation = VideoClip(make_frame, duration=duration)
        # set_audio returns a copy, so the result must be reassigned.
        animation = animation.set_audio(orig_audio)
        animation.write_videofile(output_file, fps=fps)
        # Frames are rendered during write_videofile, so finish the bar here.
        pbar.finish()

    cap.release()
    cv2.destroyAllWindows()
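
One detail worth noting in this last example: moviepy's v1 set_* methods are outplace, returning a modified copy instead of mutating the clip, which is why set_audio's result has to be reassigned above. A short sketch ('song.mp3' is an assumed input file):

from moviepy.editor import AudioFileClip, ColorClip

clip = ColorClip((64, 64), color=(0, 0, 0), duration=1)
audio = AudioFileClip('song.mp3')  # assumed input file

with_sound = clip.set_audio(audio)  # returns a new clip
assert clip.audio is None and with_sound.audio is audio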