def save_video(self):
        """
        Generate video out of self.state_history and save it. This variable
        needs to be updated during the simulation.
        """
        import matplotlib.pyplot as plt
        from moviepy.editor import VideoClip
        from moviepy.video.io.bindings import mplfig_to_npimage

        history_of_states = self.state_history
        #duration_in_seconds = len(history_of_states) / 4
        duration_in_seconds = len(history_of_states)
        fig, ax = plt.subplots()
        frames_per_second = len(history_of_states) / duration_in_seconds

        def make_frame(t):
            ax.clear()
            ax.grid(False)

            ax.imshow(history_of_states[int(t * frames_per_second)],
                      cmap="gist_ncar")
            ax.tick_params(axis='both', which='both', bottom=False,
                           top=False, left=False, right=False,
                           labelleft=False, labelbottom=False)
            return mplfig_to_npimage(fig)

        animation = VideoClip(make_frame, duration=duration_in_seconds)
        animation.write_videofile(self.env.video_filename,
                                  fps=frames_per_second)
Example #2
def video():
    snd = AudioFileClip("space.mp3")
    clip = VideoClip(c.animation, duration=snd.duration / 30.)

    clip = clip.set_audio(snd).set_duration(snd.duration / 30.)
    clip.write_videofile('cam.mp4', fps=24)
Example #3
def create_NN_comparison(clip,
                         h5,
                         nnh5,
                         inds2plot=np.arange(100),
                         pad=400,
                         outfname=None):
    """
    Give movipy.videofile, dlc tracked h5 and RNN h5, plot indices in inds2plot and save as outfname.
    """
    tlag = h5.shape[0] - nnh5.shape[0]
    h5 = h5[tlag:]
    center = medfilt2d(h5[:, 8, :2], [7, 1])
    h5 = egoh5(h5)
    connections = [[0, 2, 5, 10, 12], [0, 1, 4, 7, 9, 12],
                   [0, 3, 6, 8, 11, 12], [1, 2, 3], [4, 5, 6], [7, 8],
                   [9, 10, 11]]
    fig, ax = plt.subplots(figsize=(10, 10))

    def make_frame(i):
        t = inds2plot[int(i * clip.fps)]
        ax.clear()
        ax.imshow(clip.get_frame((t + tlag) / clip.fps))
        for l in connections:
            ax.plot(h5[t, l, 0] + center[t, 0],
                    h5[t, l, 1] + center[t, 1],
                    color='royalblue',
                    alpha=0.5,
                    lw=3)
            ax.plot(nnh5[t, l, 0] + center[t, 0],
                    nnh5[t, l, 1] + center[t, 1],
                    color='orangered',
                    alpha=0.5,
                    lw=3)

        ax.set_xlim([center[t, 0] - pad, center[t, 0] + pad])
        ax.set_ylim([center[t, 1] - pad, center[t, 1] + pad])
        ax.axis('off')
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=inds2plot.shape[0] / clip.fps)
    #     animation.write_gif('matplotlib.gif', fps=clip.fps)
    if outfname is None:
        outfname = 'videos/vid_' + ''.join(
            [np.random.choice(list(string.ascii_letters))
             for _ in range(20)]) + '.mp4'
        animation.write_videofile(outfname,
                                  fps=clip.fps,
                                  audio=False,
                                  threads=12)
        print('Video saved as ', outfname)
    else:
        if outfname[-4:] != '.mp4':
            outfname += '.mp4'
            outfname = 'videos/' + outfname
        animation.write_videofile(outfname,
                                  fps=clip.fps,
                                  audio=False,
                                  threads=12)
        print('Video saved as ', outfname)
    def save_animation(self, name):
        self._index = 0
        duration = self.num_data // self.fps
        anim = VideoClip(make_frame=self.make_frame, duration=duration)
        # anim.write_gif(os.path.join(self.DIR, name), fps=self.fps)
        anim.write_videofile(os.path.join(self.DIR, name),
                             fps=self.fps,
                             audio=False)
        print(duration)
Example #5
def main():
    parser = argparse.ArgumentParser(
        prog='AniMaker', description='Short animation maker by lzy2002 site:lzy2002.com')
    parser.add_argument('-i', '--inputs', help='directory containing the input files', required=True)
    parser.add_argument('-o', '--output', help='output file', default='movie.mp4')
    parser.add_argument('-f', '--fps', help='output frame rate (fps)', default=24)
    parser.add_argument('-rf', '--recordfps', help='recording frame rate (fps)', default=60)
    parser.add_argument('-t', '--time', help='duration in seconds', default=5)
    args = parser.parse_args()
    animation = VideoClip(make_frame(args.inputs, float(args.recordfps),
                                     int(args.fps), float(args.time)),
                          duration=float(args.time))
    animation.write_videofile(args.output, fps=int(args.fps))
Example #6
    def saveMovie(self, zoomfactor=1.0, savename='None', appendix='', update=False, fps=25):
        """
        save tif file as mp4 using moviepy library. The duration of movie file
        is determined as 3 times of realtime video.

        Input:
        zoomfactor = 0.5 (default), save image using resampling
        savename = default format [tif file name]_z[zoomefactor].mp4

        Return:
        VidoeClip object in moviepy library
        """

        from moviepy.editor import VideoClip
        import cv2

        if (savename == 'None'):
            savename = '{}_z{:.1f}_{}.mp4'.format(self._meta['fname'][:-4], zoomfactor, appendix)
            if not update:
                if os.path.exists(savename):
                    if self._debug: print('... movie file already exists: %s' % savename)
                    return False

        if self._single:
            if self._debug: print('... not movie file')
            return False

        cmap = plt.get_cmap(self._meta['cmapname'])

        def make_frame(t):
            #self._curframe = int(t * (self._frameN - 1) / (self._duration * 3.0))
            self._curframe = int(t * fps)
            img0 = self.getframe(frame=self._curframe, dtypes='uint8')
            if zoomfactor != 1.0:
                img0 = cv2.resize(img0, None, fx=zoomfactor, fy=zoomfactor, interpolation=cv2.INTER_CUBIC)
            img = np.delete(cmap(img0), 3, 2)
            #return img
            return (img * 255.0).astype('uint8')

        #animation = VideoClip(make_frame, duration=self._duration * 3.0)
        animation = VideoClip(make_frame, duration=float(self._meta.N()/fps))

        animation.write_videofile(savename, fps=fps, codec='libx264', \
                threads=8, audio=False, preset='medium', verbose=False)
        self._animation = animation

        print("""To play movie file in jupyter notebook:
            from IPython.display import Video
            Video("{}")""".format(savename))
Example #7
def trajectory_video(trajectory,
                     filename,
                     xlim=(-10, 10),
                     ylim=(-10, 10),
                     callback=None,
                     axisOff=True):
    """Create video of the track and saves it in working directory

    Parameters
    ---------
    filename : string
        name of the file to be saved into
    xlim : touple(2) of float
        xlimits of the fideo
    ylim : touple(2) of float
        ylimits of the video
    callback:
        what to
    axisOff : bool

    Returns
    --------
    saved mp4 video at fiven filepath
    """
    WIDTH = 900
    HEIGHT = 600
    DPI = 150
    FPS = 25
    DURATION = np.max(trajectory.time) - np.min(trajectory.time)

    fig, axis = plt.subplots(figsize=(1.0 * WIDTH / DPI, 1.0 * HEIGHT / DPI),
                             dpi=DPI)

    def make_frame(t):
        axis.clear()
        (tx, ty) = trajectory.position_for_time(t + np.min(trajectory.time))
        axis.plot(tx, ty, "ko")
        axis.set_xlim(xlim)
        axis.set_ylim(ylim)
        axis.set_title("Time {:.2f} s".format(t))
        if callback is not None:
            callback(axis)
        if axisOff:
            plt.axis('off')
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=DURATION)
    #animation.write_gif(filename, fps=FPS)
    animation.write_videofile(filename, fps=FPS)
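
A minimal usage sketch (not from the original source): trajectory_video only needs an object exposing `time` and `position_for_time(t)`, so the hypothetical SimpleTrajectory class below stands in for the real trajectory type.

import numpy as np

class SimpleTrajectory:
    """Hypothetical stand-in: sampled coordinates with linear interpolation."""
    def __init__(self, time, x, y):
        self.time = np.asarray(time)
        self._x = np.asarray(x)
        self._y = np.asarray(y)

    def position_for_time(self, t):
        # interpolate the sampled positions at time t
        return (np.interp(t, self.time, self._x),
                np.interp(t, self.time, self._y))

ts = np.linspace(0, 5, 200)
traj = SimpleTrajectory(ts, 5 * np.cos(ts), 5 * np.sin(ts))
trajectory_video(traj, "circle.mp4")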
Example #8
def animate_wave_function_collapse(fn, seconds_per_state=0.5):
  size = (32, 32)
  tn = wfc.seq_target_name(fn, 1, "overlapping", size)
  ts, result = dep.create(tn)
  full_duration = len(result) * seconds_per_state
  def makeframe(t):
    state_index = int(t / seconds_per_state)
    return result[min(state_index, len(result)-1)]

  anim = VideoClip(makeframe, duration=full_duration)
  nfn = os.path.splitext(fn)[0] + ".collapse.mp4"
  anim.write_videofile(nfn, fps=FRAMERATE)
  nfn = os.path.splitext(fn)[0] + ".collapsed.png"
  imsave(nfn, result[-1])
Example #9
def make_skeleton_animation(frames, index, fps, movies_dir):
    DEFAULT_FPS = 30.0

    def make_frame(t):
        verticies = frames[int(t * DEFAULT_FPS)]
        edges = zip(range(len(CONNECT)), CONNECT)
        arr = render_offscreen(verticies, edges, WIDTH, HEIGHT)

        return arr

    duration = frames.shape[0] / DEFAULT_FPS
    animation = VideoClip(make_frame, duration=duration)
    file_path = os.path.join(movies_dir, "sequence_{}.mp4".format(index))
    animation.write_videofile(file_path, fps=fps)
Example #10
def save_movie(make_frame, duration, filename, fps=20):
    """Writes an animation to disk"""
    anim = VideoClip(make_frame, duration=duration)

    if filename.endswith('.gif'):
        anim.write_gif(filename, fps=fps)

    elif filename.endswith('.mp4'):
        anim.write_videofile(filename, fps=fps)

    else:
        raise ValueError(
            f'Invalid file type for {filename}. Must be .gif or .mp4')

    return anim
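
A usage sketch for the helper above (assumes moviepy is installed and VideoClip is imported in the same module): any make_frame(t) returning an RGB uint8 array works.

import numpy as np

def ramp_frame(t):
    # a 64x64 frame whose brightness ramps up over the 2-second clip
    value = int(255 * t / 2.0)
    return np.full((64, 64, 3), value, dtype="uint8")

save_movie(ramp_frame, duration=2, filename="ramp.mp4", fps=20)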
Example #11
    def ani_gen(self, path, title="", cmap='binary', duration=3e2):
        bset.clear()
        fig, ax = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))

        def make_frame(t):
            ax.clear()
            ax.axis('off')
            ax.set_title(title, fontsize=16)
            self.step()
            self.plot(ax, cmap)
            return (mplfig_to_npimage(fig))

        ani = VideoClip(make_frame, duration=duration)
        ani.write_videofile(path, fps=50)
        return
Example #12
class Animation(object):
    def __init__(self, fps=24):
        self.fps = fps
        self.reset()

    def reset(self):
        self.frames = []
        self._iter = -1

    def add_frame(self, image):
        """
        image should be a (height, width, 3) np.ndarray
        """
        self.frames.append(np.copy(image))

    def anim_fn(self, fn, data):
        """
        fn: a function that returns a plot
        data: an iterable
        """
        for i in range(len(data)):
            p = fn(data[:i])
            self.add_frame(p.numpy())

    def rotate_3d(self, plot, duration=8):
        nframes = duration * self.fps
        change_angle = 360.0 / nframes
        azim = plot.canvas.azim
        for i in range(nframes):
            plot.set_camera(azim=azim + i * change_angle)
            self.frames.append(plot.numpy())

    def _make(self, t):
        self._iter += 1
        # Weird bug where idx might be = len(self.frames)
        idx = min(self._iter, len(self.frames) - 1)
        return self.frames[idx]

    def save(self, path):
        from moviepy.editor import VideoClip
        fname, ext = os.path.splitext(path)
        duration = (len(self.frames) - 1) / float(self.fps)
        self.animation = VideoClip(self._make, duration=duration)
        if 'gif' in ext:
            self.animation.write_gif(path, fps=self.fps)
        else:
            self.animation.write_videofile(path, fps=self.fps)
        self._iter = -1
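
A usage sketch for the Animation class above (hypothetical frame data; assumes the module-level os and numpy imports the class relies on): append (height, width, 3) uint8 arrays with add_frame, then save.

import numpy as np

anim = Animation(fps=24)
for step in range(48):
    frame = np.zeros((120, 160, 3), dtype="uint8")
    frame[:, : (step + 1) * 3] = 255  # a white bar wiping across the frame
    anim.add_frame(frame)
anim.save("wipe.mp4")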
Example #13
def create_animation(name, files, output_path, worker_mem):
    '''
    Creates a single animation video file from a list of tiff files.
    '''
    sys.stdout = open(os.devnull, "w")
    sys.stderr = open(os.devnull, "w")

    # Add the last file twice to avoid it getting dropped from the animation.
    files.extend(files[-1:])

    frame_generator = FrameGenerator(name, files, output_path, worker_mem)
    duration = len(files)
    animation = VideoClip(frame_generator.make_frame, duration=duration)
    animation.write_videofile(os.path.join(output_path, "{}.avi".format(name)),
                              fps=1,
                              codec="png")
    return name
Example #14
def run_animation(args, target, outfile):
    """Creates an animated crash based on a still image input"""
    stepsize = args.animate
    img = util.read_img(target)
    bounds = foreground.get_fg_bounds(img.shape[1], args.max_depth)
    max_depth = bounds.max_depth
    crash_params = crash.CrashParams(
        max_depth, args.threshold, args.bg_value, args.rgb_select)
    depths = range(max_depth, -stepsize, -stepsize)
    depths = [d for d in depths if d > 0]
    depths.append(0)
    n_frames = len(depths)
    n_procs = max(args.in_parallel, 1)

    fps = args.fps
    duration = len(depths) / fps
    img = util.read_img(target)
    options = _options(args.reveal_foreground, args.reveal_background,
                       args.crash, args.reveal_quadrants)
    source_img = util.read_img(target)
    fg, bounds = foreground.find_foreground(source_img, crash_params)

    def make_frame(time):
        frame_no = int(round(time * fps))
        if frame_no >= n_frames:
            frame_no = n_frames - 1
        depth = depths[-frame_no]
        img = source_img.copy()
        if depth:
            params = crash.CrashParams(
                depth, args.threshold, args.bg_value, args.rgb_select)
            new_fg, new_bounds = foreground.trim_foreground(img, fg, params)
            new_img = _process_img(img, new_fg, new_bounds, options)
        else:
            new_img = source_img
        return new_img

    animation = VideoClip(make_frame, duration=duration)
    clip = animation.to_ImageClip(t=duration)
    clip.duration = 0.1
    clip.write_videofile(outfile, fps=fps, audio=False)
    animation.write_videofile("__temp_crash.mp4", fps=fps, audio=False,
                              preset=args.compression,
                              threads=args.in_parallel)
    os.rename("__temp_crash.mp4", outfile)
Example #15
    def write_video_clip_to_file(self,
                                 video_clip: VideoClip,
                                 output_path: Opt[str] = None,
                                 *,
                                 audio: Union[str, bool] = True,
                                 verbose: bool = False,
                                 progress_bar: bool = True,
                                 **kwargs):
        """
        Writes a video clip to file in the specified directory

        Parameters
        ----------
        video_clip
            the VideoClip to write to file

        output_path
            path of the output video file
        audio
            Audio for the video clip. Can be True to enable, False to disable, or an external audio file.

        verbose
            Whether output to stdout should include extra information during writing

        progress_bar
            Whether to output progress information to stdout

        kwargs
            Other keyword arguments to pass to moviepy's write_videofile
        """
        # Prepend crf to ffmpeg_params
        ffmpeg_params = ['-crf', str(self.crf)] + self.ffmpeg_params
        audio_bitrate = str(self.audio_bitrate) + 'k'

        video_clip.write_videofile(output_path,
                                   audio=audio,
                                   preset=self.preset,
                                   codec=self.codec,
                                   audio_codec=self.audio_codec,
                                   audio_bitrate=audio_bitrate,
                                   ffmpeg_params=ffmpeg_params,
                                   **kwargs,
                                   verbose=verbose,
                                   progress_bar=progress_bar)

        return output_path
Example #16
    def save_video(self,
                   video: VideoClip,
                   video_filename: str,
                   output_appendix: str = "_output") -> None:
        """Save the resulting video.

        :param video: the processed VideoClip
        :param video_filename: original file name
        :param output_appendix: appendix to add to file
        :return: None
        """
        output_filename = append_to_file_name(video_filename, output_appendix)
        video = video.set_audio(self.audio)  # set_audio returns a new clip
        video.write_videofile(
            output_filename,
            codec="libx264",
            audio_codec="aac",
            temp_audiofile=output_filename + ".tmp",
            remove_temp=True,
            fps=self.video.fps,
        )
def render(render_frame, frames, fps, size, filename):
    """ Create a movie using the given render_frame function """
    
    # Wrapper so that the render function gets passed a surface to draw to,
    # and a frame number.
    def make_frame(t):
        surface = pygame.Surface(size)
        render_frame(surface, int(t*fps))
        # Flip the surface around its x/y axis (main diagonal), to account for
        # display issues with the movie rendering.
        surface = pygame.transform.rotate(surface, -90)
        surface = pygame.transform.flip(surface, True, False)
        return pygame.surfarray.pixels3d(surface)
    
    # Create the animation...
    animation = VideoClip(make_frame, duration=frames/fps)

    # Write to the movie file...
    animation.write_videofile(filename, fps=fps)
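
A usage sketch for render() above with a hypothetical draw function: the callback receives a pygame.Surface and a frame number.

import pygame

def draw_circle(surface, frame_no):
    surface.fill((0, 0, 0))
    # a red circle sliding one pixel to the right per frame
    pygame.draw.circle(surface, (255, 0, 0), (frame_no % 320, 120), 20)

render(draw_circle, frames=120, fps=24, size=(320, 240), filename="circle.mp4")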

    
Example #18
    def show_animation(
            self,
            a_function_which_returns_an_image_according_to_a_time_variable,
            duration=3,
            fps=24,
            saving_path=None):
        """
        the function looks like `func(t)`.
        t is a float variable in seconds, for example, 1.2 = 1 second + 0.2 second
        """
        def wrap_function(t):
            array = a_function_which_returns_an_image_according_to_a_time_variable(
                t)
            assert isinstance(
                array,
                np.ndarray), "this function should return an numpy array"
            if array.shape[2] == 4:
                array = array[:, :, 0:3]

            return array

        animation = VideoClip(wrap_function, duration=duration)
        if self._notebook:
            result = animation.ipython_display(fps=fps,
                                               loop=True,
                                               autoplay=True)
            self._IPython.display.display(result)
        else:
            animation.preview(fps=fps)

        if saving_path is not None:
            if len(saving_path.split(".")) > 0:
                extension = saving_path.split(".")[-1]
                if extension.lower() == "gif":
                    animation.write_gif(saving_path, fps=fps)
                elif extension.lower() == "mp4":
                    animation.write_videofile(saving_path, fps=fps)
                else:
                    print("you can only save gif or mp4!")
                    exit()
Example #19
def run_moving_crash(args, target, outfile):
    """Runs a moving crash based on moving (gif/mp4) inputs"""
    video = VideoFileClip(target)
    img = video.get_frame(t=0)  # first frame of the video
    bounds = foreground.get_fg_bounds(img.shape[1], args.max_depth)
    max_depth = bounds.max_depth
    crash_params = crash.CrashParams(
        max_depth, args.threshold, args.bg_value, args.rgb_select)
    options = _options(args.reveal_foreground, args.reveal_background,
                       args.crash, args.reveal_quadrants, args.bg_value)
    frames = video.iter_frames(fps=video.fps)

    def make_frame(_):
        frame = next(frames)
        fg, bounds = foreground.find_foreground(frame, crash_params)
        return _process_img(frame, fg, bounds, options)

    output_video = VideoClip(
        make_frame, duration=video.duration-(4/video.fps))  # trim last 4 frms
    output_video.write_videofile(
        outfile, preset=args.compression, fps=video.fps,
        threads=args.in_parallel)
Example #20
def save_fitness_histogram_movie():
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage
    from algorithm.parameters import params

    def make_frame(t):
        """ returns an image of the frame at time t """
        # ... create the frame with any library
        fitness = fitness_list[int(t)]
        __sum_fit = sum(fitness)
        __mean_fit = float(__sum_fit) / float(len(fitness))
        from scipy.stats import tstd, iqr, variation, entropy
        __sd_fit = tstd(fitness)
        __iqr = iqr(fitness)
        __v = variation(fitness)
        __e = entropy(fitness)

        fig = plt.figure()
        plt.hist(fitness)  # ,bins=int(params['POPULATION_SIZE']*0.1))
        plt.title("Moving Point - Population Fitness Histogram - Generation " +
                  str(int(t)))
        plt.axis([0, 20000, 0, params['POPULATION_SIZE']])
        plt.ylabel('#Individuals')
        plt.xlabel('Fitness')
        plt.grid(True)
        __hist_text = "$\mu=" + "{0:.2f}".format(
            __mean_fit) + ",\ \sigma=" + "{0:.2f}".format(
                __sd_fit) + ",\ entropy=" + "{0:.2f}".format(
                    __e) + ",\ iqr=" + "{0:.2f}".format(__iqr) + "$"
        plt.text(1000, params['POPULATION_SIZE'] * .9, __hist_text)
        return mplfig_to_npimage(fig)  # (Height x Width x 3) Numpy array

    filename = params['FILE_PATH'] + str(
        params['TIME_STAMP']) + '/fitnessdistribution'
    fps = 1
    duration = params['GENERATIONS'] + 1
    animation = VideoClip(make_frame, duration=duration)
    #animation.resize(width=1280,height=720)
    animation.write_videofile(filename + ".mp4", fps=fps)  # export as video
Example #21
    def save_video(self):
        if self.movie_filename is not None:
            history_of_states = self.state_history
            duration_in_seconds = len(history_of_states) / 4
            fig, ax = plot.subplots()
            frames_per_second = len(history_of_states) / duration_in_seconds

            def make_frame(t):
                ax.clear()
                ax.grid(False)
                ax.imshow(history_of_states[int(t * frames_per_second)])
                ax.tick_params(axis='both',
                               which='both',
                               bottom=False,
                               top=False,
                               left=False,
                               right=False,
                               labelleft=False,
                               labelbottom=False)
                return mplfig_to_npimage(fig)

            animation = VideoClip(make_frame, duration=duration_in_seconds)
            animation.write_videofile(self.movie_filename,
                                      fps=frames_per_second)
Example #22
def make_frame(t):
    index = int(FPS * t)
    canvas = np.zeros(CANVAS_SIZE + (3, ), dtype="uint8")
    canvas.fill(255)

    for i in range(len(video_dirs)):
        im = imread(video_files[i][index])

        x, y = i // 2, i % 2  # hardcoded for 4 videos
        p = [
            x * (IMSIZE[0] + PAD_SIZE + FONT_HEIGHT),
            y * (IMSIZE[1] + PAD_SIZE)
        ]

        p[0] += FONT_HEIGHT
        p[1] += IMSIZE[1] // 2 - 100
        canvas = cv.putText(canvas, title[i], (p[1], p[0]),
                            cv.FONT_HERSHEY_SIMPLEX, 1, [0, 0, 0], 2,
                            cv.LINE_AA)
        p[1] -= IMSIZE[1] // 2 - 100
        p[0] += PAD_SIZE

        #stx, sty = x * (IMSIZE[0] + PAD_SIZE), y * (IMSIZE[1] + PAD_SIZE)
        canvas[p[0]:p[0] + IMSIZE[0], p[1]:p[1] + IMSIZE[1]] = im

    return canvas


animation = VideoClip(make_frame, duration=TOTAL_TIME)
animation.write_videofile(args.output, fps=FPS)
class _Scatter(object):
    """relative frame length is the length of each single frame measured as a fraction of
    the whole experiment duration. """
    def __init__(self,
                 visualizer=None,
                 dimension=3,
                 relative_frame_length=0.01,
                 rotate=False,
                 speedup_factor=1.0,
                 smoothness_factor=0.5,
                 export_format='mp4'):

        self.visualizer = visualizer
        self.dimension = dimension

        self.relative_frame_length = relative_frame_length  # should be within (0.0, 1.0]

        self.duration = self.visualizer.spikes[-1][0]  # in ms
        self.angle = -90
        self.rotate = rotate
        self.speedup = speedup_factor

        self.scatter_plot = None

        self.window_size = self.duration * relative_frame_length  # some quantity in ms
        self.window_step = self.window_size * smoothness_factor  # some quantity in ms
        self.window_start = 0
        self.window_end = self.window_start + self.window_size

        self.frame_count = int(
            (self.duration - self.window_size) / self.window_step) + 1
        self.fps = int(
            (self.frame_count / (self.duration / 1000)) * self.speedup)

        if self.fps == 0:
            self.fps = 1

        # initialise figure and axes and setup other details
        self.cbar_not_added = True
        self._setup_plot()

        self.file_format = export_format
        # print("duration", int((self.duration/1000+1)/speedup_factor))
        from moviepy.editor import VideoClip
        self.anim = VideoClip(self._update_frame,
                              duration=int(
                                  (self.duration / 1000 + 1) / speedup_factor))

    def _change_angle(self):
        self.angle = (self.angle + 0.5) % 360

    def _setup_plot(self):
        self.fig = plt.figure()
        if self.dimension == 3:
            self.ax = self.fig.add_subplot(111, projection='3d')
            self.ax.set_xlim3d(0, self.visualizer.network_dimensions['dim_x'])
            self.ax.set_xlabel('x')
            self.ax.set_ylim3d(self.visualizer.network_dimensions['max_d'],
                               self.visualizer.network_dimensions['min_d'])
            self.ax.set_ylabel('depth')
            self.ax.set_zlim3d(0, self.visualizer.network_dimensions['dim_y'])
            self.ax.set_zlabel('y')

        else:
            self.ax = self.fig.add_subplot(111)
            self.ax.set_xlim(0, self.visualizer.network_dimensions['dim_x'])
            self.ax.set_xlabel('x')
            self.ax.set_ylim(0, self.visualizer.network_dimensions['dim_y'])
            self.ax.set_ylabel('y')

        self.ax.set_title("Network's subjective sense of reality", fontsize=16)
        self.ax.set_autoscale_on(False)

    def _update_frame(self, time):
        from moviepy.video.io.bindings import mplfig_to_npimage
        if self.dimension == 3:
            if self.rotate:
                self._change_angle()
            self.ax.view_init(30, self.angle)

        # clear the plot from the old data
        if self.scatter_plot is not None:
            self.scatter_plot.remove()

        # reimplement in a more clever way so that movie generation is in total O(n) not O(nm)
        current_frame = []
        for s in self.visualizer.spikes:
            if s[0] > self.window_end:
                self.window_start += self.window_step
                self.window_end = self.window_start + self.window_size
                break
            if s[0] > self.window_start:
                current_frame.append((s[1], s[2], s[3]))
        current_frame = np.asarray(current_frame)

        if current_frame.size > 0:

            # be careful of the coordinate-axes orientation. up/down should be x!
            if self.dimension == 3:
                self.scatter_plot = self.ax.scatter(
                    current_frame[:, 0],
                    current_frame[:, 2],
                    current_frame[:, 1],
                    s=25,
                    marker='s',
                    lw=0,
                    c=current_frame[:, 2],
                    cmap=plt.cm.get_cmap(
                        "brg",
                        self.visualizer.network_dimensions['max_d'] + 1),
                    vmin=self.visualizer.network_dimensions['min_d'],
                    vmax=self.visualizer.network_dimensions['max_d'])
                if self.cbar_not_added:
                    cbar = self.fig.colorbar(self.scatter_plot)
                    cbar.set_label('Perceived disparity in pixel units',
                                   rotation=270)
                    cbar.ax.get_yaxis().labelpad = 15
                    self.cbar_not_added = False
            else:
                self.scatter_plot = self.ax.scatter(
                    current_frame[:, 0],
                    current_frame[:, 1],
                    s=25,
                    marker='s',
                    lw=0,
                    c=current_frame[:, 2],
                    cmap=plt.cm.get_cmap(
                        "brg",
                        self.visualizer.network_dimensions['max_d'] + 1),
                    vmin=self.visualizer.network_dimensions['min_d'],
                    vmax=self.visualizer.network_dimensions['max_d'])
                # find some better solution than this flag hack
                if self.cbar_not_added:
                    cbar = self.fig.colorbar(self.scatter_plot)
                    cbar.set_label('Perceived disparity in pixel units',
                                   rotation=270)
                    cbar.ax.get_yaxis().labelpad = 15
                    self.cbar_not_added = False
        else:
            self.scatter_plot = self.ax.scatter([], [])

        return mplfig_to_npimage(self.fig)

    def show(self):
        plt.show()  # or better use plt.draw()? not sure, but I think this might freeze when updating...

    def save(self):
        dim = "3D" if self.dimension == 3 else "2D"

        if not os.path.exists("./animations"):
            os.makedirs("./animations")
        i = 0
        while os.path.exists("./animations/{0}_{2}_{1}.gif"
                                     .format(self.visualizer.experiment_name, i, dim)) or \
                os.path.exists("./animations/{0}_{2}_{1}.mp4"
                                       .format(self.visualizer.experiment_name, i, dim)):
            i += 1
        if self.file_format == 'gif':
            self.anim.write_gif(filename="./animations/{0}_{2}_{1}.gif".format(
                self.visualizer.experiment_name, i, dim),
                                fps=self.fps,
                                verbose=self.visualizer.verbose)
        elif self.file_format == 'mp4':
            print("INFO: Generating movie with duration of {0}s at {1}fps.".
                  format(int((self.duration / 1000 + 1) / self.speedup),
                         self.fps))
            self.anim.write_videofile(
                filename="./animations/{0}_{2}_{1}.mp4".format(
                    self.visualizer.experiment_name, i, dim),
                fps=self.fps,
                codec='mpeg4',
                bitrate='2000k',
                audio=False,
                verbose=self.visualizer.verbose)
        else:
            print("ERROR: The export format is not supported.")
def make_frame(t):
    global line
    print("time: {time}, line: {line}".format(time=t, line=line))

    # fill the three-dimensional array with zeros
    frame = numpy.zeros((HEIGHT, WIDTH, 3))

    # draw a single horizontal line
    if line < HEIGHT:
        frame[line].fill(255)
        line += 1
    return frame


# create the video clip
animation = VideoClip(make_frame, duration=10)

# export the video to MPEG-4 format
animation.write_videofile("line.mp4", fps=24)

# reset the counter
line = 0

# export the video to Ogg Video format
animation.write_videofile("line.ogv", fps=24)

# reset the counter
line = 0

# export the video to GIF format
animation.write_gif("line.gif", fps=24)
Example #25
myclip = VideoFileClip('file3763.mov')
iter_frames = myclip.iter_frames()
mean=(0.485, 0.456, 0.406)
std=(0.229, 0.224, 0.225)
def make_frame(t):
    orig_img = next(iter_frames)

    x, orig_img = load_test_from_numpy(orig_img, ctx=ctx)
    
    ids, scores, bboxes, drivable_maps = net(x.as_in_context(ctx))
    
#     ids, scores, bboxes = [xx[0].asnumpy() for xx in [ids, scores, bboxes]]
    ids = ids[0].asnumpy()
    scores = scores[0].asnumpy()
    bboxes = bboxes[0].asnumpy()
    drivable_maps = drivable_maps[0]
#     print(ids.context, scores.context, bboxes.context, drivable_maps.context)
    mask = drivable_maps.transpose((1,2,0)).as_in_context(mx.cpu())
    mask = re_size(mask)
    
    orig_img = plot_drivable_map(orig_img, mask) 
    res_img = draw_bbox(orig_img, bboxes, scores, ids, thresh=0.5, class_names=CLASSES,colors=colors)
    
    
    return res_img

animation = VideoClip(make_frame, duration=duration)
animation.write_videofile(name + '.mp4', fps=30)

                M_0=np.zeros((2,2*nlags+affine)),
                K_0=10*np.eye(2*nlags+affine),
                affine=affine)
            for state in range(Nmax)],
        )

model.add_data(data)

###############
#  inference  #
###############

from moviepy.video.io.bindings import mplfig_to_npimage
from moviepy.editor import VideoClip

fig = model.make_figure()
plt.set_cmap('terrain')
plot_slice = slice(0,300)

model.plot(fig=fig,draw=False,plot_slice=plot_slice)

def make_frame_mpl(t):
    model.resample_model()
    model.plot(fig=fig,update=True,draw=False,plot_slice=plot_slice)
    plt.tight_layout()
    return mplfig_to_npimage(fig)

animation = VideoClip(make_frame_mpl, duration=10)
animation.write_videofile('gibbs.mp4',fps=30)

Example #27
def export_moviepy(sequence, filename, rate=30, bitrate=None, width=None,
                   height=None, codec='mpeg4', pixel_format='yuv420p',
                   autoscale=None, quality=None, verbose=True,
                   options=None, rate_range=(16, 32)):
    """Export a sequence of images as a standard video file using MoviePy.

    Parameters
    ----------
    sequence : any iterator or array of array-like images
        The images should have two dimensions plus an
        optional third dimensions representing color.
    filename : string
        name of output file
    rate : integer, optional
        frame rate of output file, 30 by default
        NB: The output frame rate will be limited between `rate_range`
    bitrate : integer or string, optional
        Preferably use the parameter `quality` for controlling the bitrate.
    width : integer, optional
        By default, set the width of the images.
    height : integer, optional
        By default, set the  height of the images. If width is specified
        and height is not, the height is autoscaled to maintain the aspect
        ratio.
    codec : string
        a valid video encoding, 'mpeg4' by default. Must be supported by the
        container format. Examples are {'mpeg4', 'wmv2', 'libx264', 'rawvideo'}
        Check https://www.ffmpeg.org/ffmpeg-codecs.html#Video-Encoders.
    pixel_format: string, optional
        Pixel format, 'yuv420p' by default.
        Another possibility is 'bgr24' in combination with the 'rawvideo' codec.
    quality: number or string, optional
        For 'mpeg4' codec: sets qscale:v. 1 = high quality, 5 = default.
        For 'libx264' codec: sets crf. 0 = lossless, 23 = default.
        For 'wmv2' codec: sets fraction of lossless bitrate, 0.01 = default
    autoscale : boolean, optional
        Linearly rescale the brightness to use the full gamut of black to
        white values. False by default for uint8 readers, True otherwise.
    verbose : boolean, optional
        Determines whether MoviePy will print progress. True by default.
    options : dictionary, optional
        Dictionary of parameters that will be passed to ffmpeg. Avoid using
        {'qscale:v', 'crf', 'pixel_format'}.
    rate_range : tuple of two numbers
        As extreme frame rates have playback issues on many players, by default
        the frame rate is limited between 16 and 32. When the desired frame rate
        is too low, frames will be multiplied an integer number of times. When
        the desired frame rate is too high, frames will be skipped at constant
        intervals.

    See Also
    --------
    http://zulko.github.io/moviepy/ref/VideoClip/VideoClip.html#moviepy.video.VideoClip.VideoClip.write_videofile
    """
    if VideoClip is None:
        raise ImportError('The MoviePy exporter requires moviepy to work.')

    if options is None:
        options = dict()
    ffmpeg_params = []
    for key in options:
        ffmpeg_params.extend(['-{}'.format(key), str(options[key])])

    if rate <= 0:
        raise ValueError
    export_rate = _normalize_framerate(rate, *rate_range)

    clip = VideoClip(CachedFrameGenerator(sequence, rate, autoscale,
                                          to_bgr=(pixel_format == 'bgr24')))
    clip.duration = len(sequence) / rate
    if not (height is None and width is None):
        clip = clip.resize(height=height, width=width)

    if codec == 'wmv2' and bitrate is None and quality is None:
        quality = 0.01

    if quality is not None:
        if codec == 'libx264':
            ffmpeg_params.extend(['-crf', str(quality)])
        elif codec == 'mpeg4':
            ffmpeg_params.extend(['-qscale:v', str(quality)])
        elif codec == 'wmv2':
            if bitrate is not None:
                warnings.warn("(wmv) quality is ignored when bitrate is set.")
            else:
                bitrate = quality * _estimate_bitrate(clip.size, export_rate)
        else:
            raise NotImplementedError

    if pixel_format is not None:
        ffmpeg_params.extend(['-pixel_format', str(pixel_format)])
    if bitrate is not None:
        bitrate = str(bitrate)

    clip.write_videofile(filename, export_rate, codec, bitrate, audio=False,
                         verbose=verbose, ffmpeg_params=ffmpeg_params)
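
A usage sketch (not from the original source; CachedFrameGenerator and _normalize_framerate come from the surrounding module): export a short synthetic image sequence, where `quality` maps to qscale:v for the default mpeg4 codec.

import numpy as np

frames = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8)
          for _ in range(60)]
export_moviepy(frames, "noise.mp4", rate=30, quality=5)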
            #print probe.name, "at", probe.position
            if probe.name in displayed_probes:
                list_lines[line_index].set_data(probe.time_buffer, probe.value_buffer)
            line_index+=1
        xmin, xmax = ax1.get_xlim()
        if o.t > xmax:
            ax1.set_xlim(xmin, 2*xmax)
            ax1.figure.canvas.draw()
    NF_mp.set_data(range(0, width), NF.V)
    NF_firing.set_data(range(0, width), NF.GainFunction(NF.V))
    if RECORD_VIDEO:
        return mplfig_to_npimage(fig) 
    else:
        return list_lines
   
from time import time
t0 = time() 
print(list_obj)

## call the animator.  blit=True means only re-draw the parts that have changed.
if not RECORD_VIDEO:
    anim = animation.FuncAnimation(fig, animate, frames=simulation_time,  interval=5 , blit=True, init_func= init, repeat=False)
else:
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage
    anim = VideoClip(animate, duration = simulation_time/60)
    anim.write_videofile("bartest_"+str(end_bar-start_bar)+".mp4", fps = 60)

t1 = time()
print((t1 - t0))
plt.show()
Example #29
# Make super short video (so the analysis is quick!)
vname='brief'
newvideo=os.path.join(cfg['project_path'],'videos',vname+'.mp4')
try: #you need ffmpeg command line interface
    subprocess.call(['ffmpeg','-i',video[0],'-ss','00:00:00','-to','00:00:00.4','-c','copy',newvideo])
except:
    #for windows:
    from moviepy.editor import VideoFileClip,VideoClip
    clip = VideoFileClip(video[0])
    clip.reader.initialize()
    def make_frame(t):
        return clip.get_frame(1)

    newclip = VideoClip(make_frame, duration=1)
    newclip.write_videofile(newvideo,fps=30)

deeplabcut.analyze_videos(path_config_file,[newvideo],save_as_csv=True, destfolder=dfolder)

print("CREATE VIDEO")
deeplabcut.create_labeled_video(path_config_file,[newvideo], destfolder=dfolder)

print("Making plots")
deeplabcut.plot_trajectories(path_config_file,[newvideo], destfolder=dfolder)


print("EXTRACT OUTLIERS")
deeplabcut.extract_outlier_frames(path_config_file,[newvideo],outlieralgorithm='jump',epsilon=0,automatic=True, destfolder=dfolder)


file=os.path.join(cfg['project_path'],'labeled-data',vname,"machinelabels-iter"+ str(cfg['iteration']) + '.h5')
            if probe.name in displayed_probes:
                list_lines[line_index].set_data(probe.time_buffer, probe.value_buffer)
            line_index+=1
        xmin, xmax = ax1.get_xlim()
        if o.t > xmax:
            ax1.set_xlim(xmin, 2*xmax)
            ax1.figure.canvas.draw()
    NF_mp.set_data(range(0, width), NF.V)
    NF_firing.set_data(range(0, width), NF.GainFunction(NF.V))
    if RECORD_VIDEO:
        return mplfig_to_npimage(fig)
    else:
        return list_lines

from time import time
t0 = time()
print(list_obj)

## call the animator.  blit=True means only re-draw the parts that have changed.
if not RECORD_VIDEO:
    anim = animation.FuncAnimation(fig, animate, frames=simulation_time,  interval=5 , blit=True, init_func= init, repeat=False)
else:
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage
    anim = VideoClip(animate, duration = simulation_time/60)
    anim.write_videofile("test2.mp4", fps = 60)

t1 = time()
print((t1 - t0))
plt.show()
Example #31
import numpy as np
from moviepy.editor import VideoClip

def make_frame(t):
    """ returns an image of the frame at time t """
    # ... create the frame with any library
    return np.zeros((300, 400, 3)) # (Height x Width x 3) Numpy array

animation = VideoClip(make_frame, duration=3) # 3-second clip

# For the export, many options/formats/optimizations are supported
animation.write_videofile("my_animation.mp4", fps=24) # export as video
animation.write_gif("my_animation.gif", fps=24) # export as GIF (slow)
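
A variant sketch of the same minimal pattern, with a frame that actually depends on t (a horizontal gradient that brightens over the 3 seconds):

import numpy as np
from moviepy.editor import VideoClip

def make_gradient_frame(t):
    ramp = np.tile(np.linspace(0, 255, 400), (300, 1))  # (300, 400) gradient
    scaled = (ramp * (t / 3.0)).astype("uint8")
    return np.dstack([scaled, scaled, scaled])  # (Height x Width x 3)

VideoClip(make_gradient_frame, duration=3).write_videofile("gradient.mp4", fps=24)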
        idx = [i for i in range(len(time_sum)-1) if t<=time_sum[i]][0]
#        print "======", jpegs[idx/2], "======", idx
        
        delta_fade = time_sum[idx]-time_sum[idx-1]
        fade_to = (t-time_sum[idx-1])/delta_fade # fraction
        fade_from = 1-fade_to # fraction

        frame_for_time_t_BGR_frame0 = fade_from * cv2.imread(jpegs[idx // 2], cv2.IMREAD_COLOR)
        frame_for_time_t_BGR_frame1 = fade_to * cv2.imread(jpegs[idx // 2 + 1], cv2.IMREAD_COLOR)
        
        # BLENDED FRAME
        frame_for_time_t_BGR_frame01 = frame_for_time_t_BGR_frame0 + frame_for_time_t_BGR_frame1
        frame_for_time_t_BGR_frame01 = frame_for_time_t_BGR_frame01.astype('uint8') # convert from float to uint8
        
        frame_for_time_t = cv2.cvtColor(frame_for_time_t_BGR_frame01, cv2.COLOR_BGR2RGB) # BGR-RGB COLOR

        
    return frame_for_time_t
    



clip = VideoClip(make_frame, duration=time_sum[-1])#.set_audio(audio) # x-second clip

if audio_on:
    audio = AudioFileClip("audioclip.mp3")
    audio = audio.set_duration(time_sum[-1])
    clip = clip.set_audio(audio)

clip.write_videofile("my_animation_%sfps_dummy.mp4" %fps, fps=fps) # export as video
#clip.write_gif("my_animation.gif", fps=24) # export as GIF
def main(argv):
    animation = VideoClip(make_frame, duration=60)  # duration is in seconds
    animation.write_videofile("my_animation.mp4", fps=6)  # export as video
    animation.write_gif("my_animation.gif", fps=6)  # export as GIF (slow)
Example #34
def export_moviepy(sequence,
                   filename,
                   rate=30,
                   bitrate=None,
                   width=None,
                   height=None,
                   codec='mpeg4',
                   pixel_format='yuv420p',
                   autoscale=None,
                   quality=None,
                   verbose=True,
                   options=None,
                   rate_range=(16, 32)):
    """Export a sequence of images as a standard video file using MoviePy.

    Parameters
    ----------
    sequence : any iterator or array of array-like images
        The images should have two dimensions plus an
        optional third dimensions representing color.
    filename : string
        name of output file
    rate : integer, optional
        frame rate of output file, 30 by default
        NB: The output frame rate will be limited between `rate_range`
    bitrate : integer or string, optional
        Preferably use the parameter `quality` for controlling the bitrate.
    width : integer, optional
        By default, set the width of the images.
    height : integer, optional
        By default, set the  height of the images. If width is specified
        and height is not, the height is autoscaled to maintain the aspect
        ratio.
    codec : string
        a valid video encoding, 'mpeg4' by default. Must be supported by the
        container format. Examples are {'mpeg4', 'wmv2', 'libx264', 'rawvideo'}
        Check https://www.ffmpeg.org/ffmpeg-codecs.html#Video-Encoders.
    pixel_format: string, optional
        Pixel format, 'yuv420p' by default.
        Another possibility is 'bgr24' in combination with the 'rawvideo' codec.
    quality: number or string, optional
        For 'mpeg4' codec: sets qscale:v. 1 = high quality, 5 = default.
        For 'libx264' codec: sets crf. 0 = lossless, 23 = default.
        For 'wmv2' codec: sets fraction of lossless bitrate, 0.01 = default
    autoscale : boolean, optional
        Linearly rescale the brightness to use the full gamut of black to
        white values. False by default for uint8 readers, True otherwise.
    verbose : boolean, optional
        Determines whether MoviePy will print progress. True by default.
    options : dictionary, optional
        Dictionary of parameters that will be passed to ffmpeg. Avoid using
        {'qscale:v', 'crf', 'pixel_format'}.
    rate_range : tuple of two numbers
        As extreme frame rates have playback issues on many players, by default
        the frame rate is limited between 16 and 32. When the desired frame rate
        is too low, frames will be multiplied an integer number of times. When
        the desired frame rate is too high, frames will be skipped at constant
        intervals.

    See Also
    --------
    http://zulko.github.io/moviepy/ref/VideoClip/VideoClip.html#moviepy.video.VideoClip.VideoClip.write_videofile
    """
    if VideoClip is None:
        raise ImportError('The MoviePy exporter requires moviepy to work.')

    if options is None:
        options = dict()
    ffmpeg_params = []
    for key in options:
        ffmpeg_params.extend(['-{}'.format(key), str(options[key])])

    if rate <= 0:
        raise ValueError
    export_rate = _normalize_framerate(rate, *rate_range)

    clip = VideoClip(
        CachedFrameGenerator(sequence,
                             rate,
                             autoscale,
                             to_bgr=(pixel_format == 'bgr24')))
    clip.duration = len(sequence) / rate
    if not (height is None and width is None):
        clip = clip.resize(height=height, width=width)

    if codec == 'wmv2' and bitrate is None and quality is None:
        quality = 0.01

    if quality is not None:
        if codec == 'libx264':
            ffmpeg_params.extend(['-crf', str(quality)])
        elif codec == 'mpeg4':
            ffmpeg_params.extend(['-qscale:v', str(quality)])
        elif codec == 'wmv2':
            if bitrate is not None:
                warnings.warn("(wmv) quality is ignored when bitrate is set.")
            else:
                bitrate = quality * _estimate_bitrate(clip.size, export_rate)
        else:
            raise NotImplementedError

    if pixel_format is not None:
        ffmpeg_params.extend(['-pixel_format', str(pixel_format)])
    if bitrate is not None:
        bitrate = str(bitrate)

    clip.write_videofile(filename,
                         export_rate,
                         codec,
                         bitrate,
                         audio=False,
                         verbose=verbose,
                         ffmpeg_params=ffmpeg_params)
Example #35
def draw_animation():
    animation = VideoClip(make_frame=make_frame, duration=duration)
    animation.write_videofile("matplotlib.mp4", fps=20)
class Animate(object):
    """
    Moviepy related functions (based on matplotlib routines below)
    """
    def moviepy_update(self, i):
        self.update(i)
        if SAVE_VEC_FRAMES:
            if i * FPS % VEC_FRAME_STEP == 0:
                self.save_vec_frame(i)
        return mplfig_to_npimage(self.fig) if USE_MOVIEPY else None

    def moviepy_animate(self):
        self.prepare()
        if USE_MOVIEPY:
            self.ani = VideoClip(
                self.moviepy_update,
                duration=self.bf.particles.get_number_of_frames() /
                FPS)  # FIXME: duration in seconds
        else:
            for i in range(self.bf.particles.get_number_of_frames()):
                print(i)
                self.moviepy_update(i)

    def moviepy_save_to_file(self, filename):
        if USE_MOVIEPY:
            self.ani.write_videofile(filename, fps=FPS)

    def save_vec_frame(self, i):
        self.fig.savefig("%s-frame-ts%d.pdf" % (self.output_filename, i * 30))

    """
    Matplotlib related functions
    """

    def update(self, i):
        """
        Update dynamic particle properties.
        """
        if DRAW_PARTICLES:
            particles = next(self.particle_data)
            if DRAW_TIME:
                # FIXME: possibly convert to simulation time, i.e. *saving_timestep
                #self.ax.set_title(r'$t_s$=%3.2f' % (i*FPS))
                #self.ax.set_title(r'$t_s$=%03d' % (i*FPS))
                self.ax.set_title(r'$t_s$=%d' % (i * FPS))
                #self.ax.set_title(r'$t_s$=%d' % (i*FPS), fontsize=80)

            if not USE_BOUNDARY_REDRAW:
                for (ep, es) in zip(
                        self.ells, particles
                ):  # FIXME: self.ells = [] after add_artist(e) for e in ells
                    ep.center = es['position']
                    if not isinstance(
                            ep, matplotlib.patches.Circle
                    ):  # FIXME: this is still not really elegant
                        ep.angle = degrees(es['angle'])
                    if DRAW_PARTICLE_IDS:
                        self.labels[es['id']].set_position(xy=es['position'])

            ### VERSION WITH REPETITIONS AT THE BOUNDARIES
            else:
                [box_width, box_height] = self.__box_size
                translations = [
                    np.array([dx, dy]) for dx in [box_width, -box_width, 0]
                    for dy in [box_height, -box_height, 0]
                ]
                for (ep, es) in zip(
                        self.ells2, particles
                ):  # FIXME: self.ells = [] after add_artist(e) for e in ells
                    ep.set_offsets(es['position'] + translations)
                    if (ep._heights != ep._widths).all():
                        ep._angles = np.repeat((es['angle']), 9)

                    if DRAW_PARTICLE_IDS:
                        self.labels[es['id']].set_position(xy=es['position'])

            ### END OF THE 2nd VERSION

    def prepare(self):
        """
        Initialization of the particle patches.
        """
        self.plot_cellspace()
        #particles = next(self.particle_data)
        particles = next(self.particle_static_data)

        if not DRAW_FIG_AXES:
            self.ax.axis('off')

        if DRAW_PARTICLES:
            if not USE_BOUNDARY_REDRAW:
                #self.ells = [Ellipse(xy=e['position'], width=2*e['major'], height=2*e['minor'], angle=degrees(e['angle'])) for e in particles]
                #self.ells = [getattr(matplotlib.patches, e['type'])(xy=e['position'], width=2*e['major'], height=2*e['minor'], angle=degrees(e['angle'])) for e in particles]
                self.ells = []
                self.labels = {}
                for e in particles:  # FIXME: this is still not really elegant
                    if e['type'] == "Ellipse":
                        self.ells.append(
                            matplotlib.patches.Ellipse(xy=e['position'],
                                                       width=2 * e['major'],
                                                       height=2 * e['minor'],
                                                       angle=degrees(
                                                           e['angle'])))
                    elif e['type'] == "Circle":
                        self.ells.append(
                            matplotlib.patches.Circle(xy=e['position'],
                                                      radius=e['radius']))
                    if DRAW_PARTICLE_IDS:
                        self.labels[e['id']] = self.ax.text(e['position'][0],
                                                            e['position'][1],
                                                            ("%d" % e['id']),
                                                            fontsize=12,
                                                            color='white')

                for e in self.ells:
                    self.ax.add_artist(e)

                    k = e.height / e.width

                    # Proposal colour scheme (R/G)
                    """(r,g,b)=(0,0,1)
                    if k>0.5:
                        (g,b) = (0.54902, 0)
                    """
                    # R/B
                    (r, g, b) = (0, 1, 0)
                    if k > 0.5:
                        (r, b) = 1, 0

                    e.set_facecolor([r, g, b
                                     ])  # FIXME: encode angle or particle type

            else:
                ### VERSION WITH REPETITIONS AT THE BOUNDARIES
                self.ells2 = []
                [box_width, box_height] = self.__box_size
                translations = [
                    np.array([dx, dy]) for dx in [box_width, -box_width, 0]
                    for dy in [box_height, -box_height, 0]
                ]
                for e in particles:
                    if e['type'] == "Ellipse":
                        # FIXME: this should be done in boundary module!
                        widths = np.repeat(2 * e['major'], 9)
                        heights = np.repeat(2 * e['minor'], 9)
                        angles = np.repeat(degrees(e['angle']), 9)
                        (r, g, b) = (0, 0, 1)
                        if e['minor'] / e['major'] > 0.5:
                            (r, g, b) = (
                                1, 0, 0
                            )  # probably not too useful, because pinned particles have same color
#                             (g,b)=(0.54902,0)
                    elif e['type'] == "Circle":
                        widths = np.repeat(2 * e['radius'], 9)
                        heights = np.repeat(2 * e['radius'], 9)
                        angles = np.repeat(0, 9)
                        (r, g, b) = (1, 0, 0)
                    if e['pinned'] == True:
                        (r, g, b) = (1, 0, 0)
                    if DRAW_PARTICLE_IDS:
                        self.labels[e['id']] = self.ax.text(e['position'][0],
                                                            e['position'][1],
                                                            ("%d" % e['id']),
                                                            fontsize=12,
                                                            color='white')

                    XY = e['position'] + translations
                    ec = EllipseCollection(widths,
                                           heights,
                                           angles,
                                           units='x',
                                           offsets=XY,
                                           transOffset=self.ax.transData)
                    ec.set_facecolor([r, g, b])
                    self.ells2.append(ec)
                    self.ax.add_collection(ec)

    def plot_cellspace(self):
        """
        Plots the Delaunay triangulation of the cellspace.
        """
        system = arb()
        self.cellspace = DelaunayCellspace(system=system,
                                           grid_points=self.grid_data)

        if DRAW_CELLS:
            self.ax.triplot(self.cellspace._grid_points[:, 0],
                            self.cellspace._grid_points[:, 1],
                            self.cellspace.triang.simplices.copy())
        self.__grid_to_box_corners()
        (xmin, xmax, ymin, ymax) = self.__box_corners
        self.ax.set_xlim(xmin, xmax)
        self.ax.set_ylim(ymin, ymax)

        if DRAW_CELL_NEIGHBOURS:
            ### load boundaries as well to show appropriate neighbours
            system.cellspace = self.cellspace
            from bedsim.boundary import PeriodicBox
            self.boundary = PeriodicBox(system=system)
            system.boundary = self.boundary
            ###
            np.random.seed(3)
            for cell in self.cellspace.cells:
                #print("cs = ", cell._simplex)
                #if (cell._simplex == np.array([15, 21, 16])).all():
                #if (cell._simplex == np.array([15, 11, 10])).all():
                #if (cell._simplex == np.array([21, 17, 16])).all():
                if (cell._simplex == np.array([5, 1, 0])).all():
                    cell_points = self.cellspace._grid_points[cell._simplex]
                    mpoint = np.sum(cell_points, axis=0) / len(cell_points)

                    for neigh in cell.neighbours:
                        neigh_cell_points = self.cellspace._grid_points[
                            neigh._simplex]
                        neigh_mpoint = np.sum(neigh_cell_points,
                                              axis=0) / len(neigh_cell_points)
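                        # Nudge the arrow endpoints with a small random offset,
                        # presumably so that arrows between the same pair of
                        # cell midpoints remain visually distinguishable.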

                        self.ax.arrow(mpoint[0],
                                      mpoint[1],
                                      neigh_mpoint[0] - mpoint[0] +
                                      np.random.normal(0, 0.1),
                                      neigh_mpoint[1] - mpoint[1] +
                                      np.random.normal(0, 0.1),
                                      head_width=0.05,
                                      head_length=0.1,
                                      fc='r',
                                      ec='r')
                #self.ax.text(mpoint[0], mpoint[1], "I'm a cell")

    """ # OBSOLETE:
    def animate(self):
        self.ani = animation.FuncAnimation(self.fig, self.update, frames=self.bf.particles.get_number_of_frames(), interval=10, blit=True, init_func=self.prepare) # FIXME: frames

    def save_to_file(self, filename):
        self.ani.save(filename, fps=25, extra_args=['-vcodec', 'libx264'])
    """

    def load_from_file(self, filename):
        self.bf = BedfileReader(filename)
        # FIXME: at the moment everything is still treated as dynamic; this will change soon!
        self.particle_data = self.bf.particles.load_dynamics()
        self.particle_static_data = self.bf.particles.load_statics()
        self.grid_data = self.bf.system.grid

    def __init__(self, input_filename, output_filename):
        self.ells = []
        self.load_from_file(input_filename)
        self.fig, self.ax = plt.subplots(figsize=FIGSIZE)
        self.ax.set_aspect(1)
        self.ax.set_ylim(0, 10)  # FIXME
        self.ax.set_xlim(0, 10)  # FIXME
        self.labels = {}

        self.__box_size = np.array([10, 10])
        self.__box_center = np.array([5, 5])
        self.__box_corners = (0, 10, 0, 10)

        # matplotlib
        #         self.animate()
        #self.save_to_file(output_filename)
        self.output_filename = output_filename

        # moviepy
        self.moviepy_animate()
        self.moviepy_save_to_file(output_filename)

    def __grid_to_box_corners(self):  ## FIXME: generalize this
        """
        Calculate the corners of the simulation box from the cell grid.
        """
        [x, y] = self.cellspace._grid_points.transpose()
        [xmin, xmax, ymin,
         ymax] = [np.amin(x), np.amax(x),
                  np.amin(y), np.amax(y)]
        [width, height] = [xmax - xmin, ymax - ymin]
        self.__box_size = np.array([width, height])
        self.__box_center = np.array([(xmin + xmax) / 2, (ymin + ymax) / 2])
        self.__box_corners = (xmin, xmax, ymin, ymax)
        self.extent = np.fabs(np.array([xmax, ymax]) - np.array([xmin, ymin]))
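
# --- Standalone sketch (not part of the class above) of the periodic ghost-copy
# --- rendering used in the else-branch of update(): each particle is drawn nine
# --- times, shifted by every combination of {-box, 0, +box} in x and y, so
# --- particles crossing a boundary appear on both sides. The names below
# --- (draw_with_ghost_copies, demo_particles) are illustrative only.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import EllipseCollection

def draw_with_ghost_copies(ax, particles, box_size):
    """Draw each particle plus its 8 periodically translated copies."""
    box_w, box_h = box_size
    translations = np.array([[dx, dy]
                             for dx in (-box_w, 0.0, box_w)
                             for dy in (-box_h, 0.0, box_h)])
    for p in particles:
        offsets = p['position'] + translations  # 9 copies of this particle
        ec = EllipseCollection(np.repeat(2 * p['major'], 9),
                               np.repeat(2 * p['minor'], 9),
                               np.repeat(np.degrees(p['angle']), 9),
                               units='x',
                               offsets=offsets,
                               transOffset=ax.transData)
        ec.set_facecolor('red' if p.get('pinned') else 'blue')
        ax.add_collection(ec)

# Minimal usage with made-up data:
demo_fig, demo_ax = plt.subplots()
demo_ax.set_aspect(1)
demo_ax.set_xlim(0, 10)
demo_ax.set_ylim(0, 10)
demo_particles = [{'position': np.array([0.5, 5.0]), 'major': 0.6,
                   'minor': 0.3, 'angle': 0.7, 'pinned': False}]
draw_with_ghost_copies(demo_ax, demo_particles, (10.0, 10.0))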
	"""
	T01,Joint1 = PlanarTransformationMatrix(Angle1,[0,0],np.identity(4))
	T12,Joint2 = PlanarTransformationMatrix(Angle2,[0,-Length1],T01)
	T23,Endpoint = PlanarTransformationMatrix(0,[0,-Length2],T12)
	
	Radius = 0.5
	plt.figure()
	ax = plt.gca()
	plt.plot([0],[0],'ko')
	plt.plot([Joint1[0], Joint2[0], Endpoint[0]],[Joint1[1], Joint2[1], Endpoint[1]],"0.75",lw=3)
	plot_link(ax, Joint1, Radius, Length1, Angle1, Joint2,"0.55")
	plot_link(ax, Joint2, Radius, Length2, Angle1+Angle2, Endpoint, "0.55")
	quick_2D_plot_tool(ax,'x','y','Drawing Rotating Links')
	return(ax)
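
# --- PlanarTransformationMatrix (like plot_link and quick_2D_plot_tool) is not
# --- included in this snippet. The sketch below is one plausible implementation,
# --- inferred only from how it is called above: (angle, local [x, y] offset,
# --- parent 4x4 transform) -> (new 4x4 transform, joint position in world
# --- coordinates). Treat it as an assumption, not the original helper.
import numpy as np

def PlanarTransformationMatrix(angle, offset, parent_T):
    """Compose a planar rotation about z and an in-plane offset onto parent_T."""
    c, s = np.cos(angle), np.sin(angle)
    local = np.array([[c, -s, 0.0, offset[0]],
                      [s,  c, 0.0, offset[1]],
                      [0.0, 0.0, 1.0, 0.0],
                      [0.0, 0.0, 0.0, 1.0]])
    T = parent_T @ local
    return T, T[:2, 3]  # new transform and the joint's (x, y) in world coordinates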

ax1 = plot_2_link_planar_model(np.pi/6, np.pi/6, 5, 5)
ax2 = plot_2_link_planar_model(np.pi/2, np.pi/2, 4, 3)
plt.show()  # plt.imshow() expects an image array, not a pair of axes

from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage

def make_frame(t):
    # Map the clip time t (in seconds) to an index into the angle arrays.
    i = int(t / 3 * (len(Angle1) - 1))
    ax = plot_2_link_planar_model(Angle1[i], Angle2[i], 5, 5)
    frame = mplfig_to_npimage(ax.figure)  # (Height x Width x 3) numpy array
    plt.close(ax.figure)  # avoid accumulating one open figure per frame
    return frame

Angle1 = np.arange(0, np.pi, 0.001)
Angle2 = np.arange(0, np.pi/2, 0.0005)
animation = VideoClip(make_frame, duration=3)  # 3-second clip

# For the export, many options/formats/optimizations are supported
animation.write_videofile("my_animation.mp4", fps=24) # export as video
animation.write_gif("my_animation.gif", fps=24) # export as GIF (slow)
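
# A few more of the export options mentioned above, restricted to parameters
# that also appear in the other examples in this file (fps, codec, bitrate,
# audio, program); file names and values here are purely illustrative.
animation.write_videofile("my_animation_small.mp4",
                          fps=24,
                          codec='libx264',   # H.264 video
                          bitrate='500k',    # target video bitrate
                          audio=False)       # no audio track
animation.write_gif("my_animation_small.gif", fps=12, program='ffmpeg')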
Beispiel #38
0
def render_gif(pov_file, args):
    prog_args = []

    clock_args = {'fps': None,
                  'initial_clock': 0,
                  'final_clock': None,
                  'initial_frame': 0,
                  'final_frame': None,
                  }

    for arg in args:
        if '=' in arg:
            key, value = arg.split('=', 1)
            key = key.strip().lower()
            if key in clock_args:
                clock_args[key] = float(value)
            else:
                prog_args.append(arg)
        else:
            prog_args.append(arg)


    FPS = clock_args['fps']
    initial_clock = clock_args['initial_clock']
    final_clock = clock_args['final_clock']
    initial_frame = clock_args['initial_frame']
    final_frame = clock_args['final_frame']
    time_scaling = 1.0

    if (FPS is not None and
        final_clock is not None and
        final_frame is not None):
        would_be_FPS = (final_frame-initial_frame) / (final_clock-initial_clock)
        time_scaling = FPS / would_be_FPS

    if FPS is None:
        if (final_clock is not None and
            final_frame is not None):
            # frames per second = frames elapsed / clock time elapsed
            # (a compact standalone version of this derivation follows this function)
            FPS = (final_frame-initial_frame) / (final_clock-initial_clock)
        else:
            raise ValueError('FPS must be set at top of file')

    if final_clock is None:
        if (FPS is not None and
            final_frame is not None):
            final_clock = initial_clock + (final_frame-initial_frame)/FPS
        else:
            raise ValueError('Final_Clock must be set at top of file')

    if final_frame is None:
        if (FPS is not None and
            final_clock is not None):
            final_frame = initial_frame + (final_clock-initial_clock)*FPS
        else:
            raise ValueError('Final_Frame must be set at top of file')


    make_frame = frame_gen(pov_file, prog_args, initial_clock, time_scaling)
    clip = VideoClip(make_frame, duration=(final_clock-initial_clock)/time_scaling)
    # output_gif = pov_file.replace('.pov','.gif')
    # clip.write_gif(output_gif, fps=FPS,
    #                program='ffmpeg')
    output_mp4 = pov_file.replace('.pov','.mp4')
    clip.write_videofile(output_mp4, fps=FPS,
                         codec='libx264', bitrate='500k',
                         audio=False)
    print('')
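
# --- Compact standalone version of the fps/clock/frame bookkeeping used in
# --- render_gif() above: any one of fps, final_clock and final_frame can be
# --- derived from the other two. This helper is illustrative and not part of
# --- the original script.
def resolve_timing(fps=None, initial_clock=0.0, final_clock=None,
                   initial_frame=0, final_frame=None):
    """Fill in whichever of fps / final_clock / final_frame is missing."""
    if fps is None:
        if final_clock is None or final_frame is None:
            raise ValueError('need final_clock and final_frame to derive fps')
        fps = (final_frame - initial_frame) / (final_clock - initial_clock)
    if final_clock is None:
        if final_frame is None:
            raise ValueError('need final_frame (or final_clock) to be set')
        final_clock = initial_clock + (final_frame - initial_frame) / fps
    if final_frame is None:
        final_frame = initial_frame + (final_clock - initial_clock) * fps
    return fps, final_clock, final_frame

# e.g. 25 fps over 100 frames corresponds to 4 seconds of clock time:
# resolve_timing(fps=25, final_frame=100) == (25, 4.0, 100)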

def make_frame(t):
    """Callback invoked to render each video frame."""
    # Sweep the moving pole from -2 to +2 over the clip duration.
    offset = 4 * t / DURATION - 2
    print(offset)

    # Complex function with one fixed and one moving pole, shown via domain colouring.
    w = 1 / (z + 2j)**2 + 1 / (z - offset)**2
    img = colorize(w)

    fig = plt.figure(figsize=(20, 20))
    subplot = fig.add_subplot(111)
    subplot.imshow(img)

    # convert the figure into a "frame" (numpy image)
    return mplfig_to_npimage(fig)
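
# --- The colorize() helper and the complex grid z are not included in this
# --- snippet. The sketch below is a common domain-colouring approach (an
# --- assumption, not the original code): hue from the argument of w, value
# --- from its magnitude.
import numpy as np
from matplotlib.colors import hsv_to_rgb

def colorize(w):
    """Map a complex array to an RGB image via simple domain colouring."""
    hue = (np.angle(w) + np.pi) / (2 * np.pi)        # phase -> [0, 1)
    value = 1.0 - 1.0 / (1.0 + np.abs(w) ** 0.3)     # magnitude -> [0, 1)
    saturation = np.ones_like(hue)
    return hsv_to_rgb(np.dstack((hue, saturation, value)))

# z would typically be a complex meshgrid over the region of interest, e.g.:
# x = np.linspace(-3, 3, 800); y = np.linspace(-3, 3, 800)
# z = x[np.newaxis, :] + 1j * y[:, np.newaxis]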


# create the video clip
animation = VideoClip(make_frame, duration=DURATION)

# export the video in Ogg video format
animation.write_videofile('complex.ogv',
                          fps=FPS,
                          progress_bar=False,
                          bitrate="800000")
Beispiel #40
0
def make_frame(t):
    """Render a single frame of the Mandelbrot zoom."""
    global scale
    print("time: {t}, scale: {s}".format(t=t, s=scale))

    # allocate an empty (HEIGHT x WIDTH x 3) RGB frame
    frame = numpy.zeros((HEIGHT, WIDTH, 3))

    calc_mandelbrot(WIDTH, HEIGHT, MAXITER, palette_mandmap.palette, x0, y0,
                    scale, frame)
    scale *= scale_factor  # zoom in a little further for the next frame

    return frame
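
# --- calc_mandelbrot() and palette_mandmap are not shown in this snippet. The
# --- sketch below is an illustrative, vectorised stand-in with the same call
# --- signature: it fills `frame` in place, colouring each pixel by its escape
# --- iteration through `palette` (assumed to be a sequence of 0-255 RGB rows),
# --- over a window of half-width `scale` centred on (x0, y0).
import numpy

def calc_mandelbrot(width, height, maxiter, palette, x0, y0, scale, frame):
    """Fill `frame` (height x width x 3) with a Mandelbrot rendering."""
    re = numpy.linspace(x0 - scale, x0 + scale, width)
    im = numpy.linspace(y0 - scale, y0 + scale, height)
    c = re[numpy.newaxis, :] + 1j * im[:, numpy.newaxis]

    z = numpy.zeros_like(c)
    escape = numpy.zeros(c.shape, dtype=int)
    alive = numpy.ones(c.shape, dtype=bool)
    for i in range(maxiter):
        z[alive] = z[alive] ** 2 + c[alive]
        escaped = alive & (numpy.abs(z) > 2.0)
        escape[escaped] = i
        alive &= ~escaped

    colours = numpy.asarray(palette, dtype=float)
    frame[:] = colours[escape % len(colours)]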


# create the video clip
animation = VideoClip(make_frame, duration=15)

# export the video as an Ogg video file
# animation.write_videofile("mandelbrot_zoom.ogv", fps=20, progress_bar=False, bitrate="2000000")
animation.write_videofile("mandelbrot_zoom.ogv",
                          fps=20,
                          progress_bar=False,
                          bitrate="300000")

# export the video in MPEG-4 format
# animation.write_videofile("mandelbrot_zoom.mp4", fps=20, progress_bar=False, bitrate="700000")

# export the video as an animated GIF
# animation.write_gif("colors.gif", fps=25)
        self.program.bind(gloo.VertexBuffer(data))
        self.program['u_model'] = self.model
        self.program['u_view'] = self.view
        self.program['u_size'] = 5 / self.translate
 
        gloo.set_state('translucent', depth_test=False)
        self.program['u_clock'] = 0.0
 
    def on_resize(self, event):
        width, height = event.size
        gloo.set_viewport(0, 0, width, height)
        self.projection = perspective(45.0, width / float(height), 1.0, 1000.0)
        self.program['u_projection'] = self.projection
 
    def animation(self, t):
        """ Added for animation with MoviePy """
        self.program['u_clock'] = 2*t
        gloo.clear('black')
        self.program.draw('points')
        return _screenshot((0, 0, self.size[0], self.size[1]))[:, :, :3]
 
 
 
if __name__ == '__main__':
 
    from moviepy.editor import VideoClip
    canvas = Canvas()
    canvas.show()
    clip = VideoClip(canvas.animation, duration=np.pi).resize(0.3)
    clip.write_videofile('atom3.mp4', fps=20)
    #clip.write_gif('atom3.gif', fps=20, opt='OptimizePlus')