def video():
    if 1:
        snd = AudioFileClip("space.mp3")
        clip = VideoClip(c.animation, duration=snd.duration / 30.)
        clip = clip.set_audio(snd).set_duration(snd.duration / 30.)
        clip.write_videofile('cam.mp4', fps=24)
def test_matplotlib():
    #for now, python 3.5 installs a version of matplotlib that complains
    #about $DISPLAY variable, so lets just ignore for now.
    if sys.version_info < (3, 4):
        return
    if sys.version_info.major == 3 and sys.version_info.minor == 5:
        return

    import matplotlib.pyplot as plt
    import numpy as np
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage

    x = np.linspace(-2, 2, 200)
    duration = 2

    fig, ax = plt.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x**2) + np.sin(x + 2 * np.pi / duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif('/tmp/matplotlib.gif', fps=20)
def render_video(width, height, duration, filename, bg_color=(0, 0, 0, 1)):
    """Render a video."""
    import math
    import numpy as np
    from moviepy.editor import VideoClip

    def make_frame(frame_num):
        surface = build_frame(
            width,
            height,
            height * 0.4,
            num_iters=11,
            num_points=3,
            r_mult=0.5,
            c_mult=0.5,
            d_angle=frame_num * 25 * 2 * math.pi / 300,
            bg_color=bg_color,
        )
        buf = surface.get_data()
        frame_uint32 = np.ndarray(shape=(height, width), dtype=np.uint32, buffer=buf)
        frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)
        frame[:, :, 0] = ((frame_uint32 >> 16) & 0xff).astype(np.uint8)
        frame[:, :, 1] = ((frame_uint32 >> 8) & 0xff).astype(np.uint8)
        frame[:, :, 2] = (frame_uint32 & 0xff).astype(np.uint8)
        return frame

    # render video
    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(filename, fps=25)
def test_matplotlib():
    if PYTHON_VERSION in ('2.7', '3.3'):
        return
    #for now, python 3.5 installs a version of matplotlib that complains
    #about $DISPLAY variable, so lets just ignore for now.
    if PYTHON_VERSION == '3.5' and TRAVIS:
        return

    import matplotlib.pyplot as plt
    import numpy as np
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage

    x = np.linspace(-2, 2, 200)
    duration = 2

    fig, ax = plt.subplots()

    def make_frame(t):
        ax.clear()
        ax.plot(x, np.sinc(x**2) + np.sin(x + 2 * np.pi / duration * t), lw=3)
        ax.set_ylim(-1.5, 2.5)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    animation.write_gif(os.path.join(TMP_DIR, 'matplotlib.gif'), fps=20)
def render_gif(pov_file, args):
    prog_args = []
    FPS = None
    initial_clock = 0
    final_clock = None
    for arg in args:
        if '=' in arg:
            key, value = arg.split('=')
            key = key.strip().lower()
            if key == 'initial_clock':
                initial_clock = float(value)
            elif key == 'final_clock':
                final_clock = float(value)
            elif key == 'fps':
                FPS = float(value)
            else:
                prog_args.append(arg)
        else:
            prog_args.append(arg)

    if final_clock is None:
        raise ValueError('Final_Clock must be set at top of file')
    if FPS is None:
        raise ValueError('FPS must be set at top of file')

    make_frame = frame_gen(pov_file, prog_args, initial_clock)
    clip = VideoClip(make_frame, duration=final_clock - initial_clock)
    output_gif = pov_file.replace('.pov', '.gif')
    clip.write_gif(output_gif, fps=FPS, program='ffmpeg')
def save_video(self):
    """
    Generate video out of self.state_history and save it. This variable
    needs to be updated during the simulation.
    """
    import matplotlib.pyplot as plt
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage

    history_of_states = self.state_history
    #duration_in_seconds = len(history_of_states) / 4
    duration_in_seconds = len(history_of_states)
    fig, ax = plt.subplots()
    frames_per_second = len(history_of_states) / duration_in_seconds

    def make_frame(t):
        ax.clear()
        ax.grid(False)
        ax.imshow(history_of_states[int(t * frames_per_second)], cmap="gist_ncar")
        ax.tick_params(axis='both', which='both',
                       bottom=False, top=False, left=False, right=False,
                       labelleft=False, labelbottom=False)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration_in_seconds)
    animation.write_videofile(self.env.video_filename, fps=frames_per_second)
def demo(filename, tracking, output, t_start=0., t_end=None, shift=0.,
         labels=None, landmark=None, height=200):

    # parse label file
    if labels is not None:
        with open(labels, 'r') as f:
            labels = {}
            for line in f:
                identifier, label = line.strip().split()
                identifier = int(identifier)
                labels[identifier] = label

    video = Video(filename)

    import os
    os.environ['IMAGEIO_FFMPEG_EXE'] = 'ffmpeg'
    # from moviepy.video.io.VideoFileClip import VideoFileClip
    from moviepy.editor import VideoClip, AudioFileClip

    make_frame = get_make_frame(video, tracking, landmark=landmark,
                                labels=labels, height=height, shift=shift)
    video_clip = VideoClip(make_frame, duration=video.duration)
    audio_clip = AudioFileClip(filename)
    clip = video_clip.set_audio(audio_clip)

    if t_end is None:
        t_end = video.duration

    clip.subclip(t_start, t_end).write_videofile(output, fps=video.frame_rate)
def create_videoclip(frames, duration, frame_rate, audio_in=None):
    """
    Create a VideoClip object
    :param frames: an iterator returning numpy frame objects
    :param duration: Duration of clip in seconds
    :param frame_rate: frame rate of the input frames, in frames per second
    :param audio_in: file name of audio file, or None
    :return:
    """
    def make_frame(t):
        nonlocal current_frame
        nonlocal current_frame_index
        required_frame_index = int(t * frame_rate)
        if required_frame_index > current_frame_index:
            current_frame = next(frames)
            current_frame_index += 1
        rgb_frame = np.empty(
            (current_frame.shape[0], current_frame.shape[1], 3), dtype=np.uint8)
        rgb_frame[:, :] = current_frame[:, :, 0:3]
        return rgb_frame

    current_frame = next(frames)
    current_frame_index = 0
    video_clip = VideoClip(make_frame, duration=duration)
    if audio_in:
        print("Adding audio clip", audio_in)
        audio_clip = AudioFileClip(audio_in).subclip(0, duration)
        video_clip = video_clip.set_audio(audio_clip)
    return video_clip
def make_gif(self, filename):
    def make_frame(t):
        fig = self.iterate(t)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=self.T_max)
    animation.write_gif(filename, fps=20)
def __init__(self, qs, size, fps, duration):
    VideoClip.__init__(self, duration=duration)
    self.frames = {frame.number: frame for frame in qs}
    self.fps = fps
    self.size = [int(value) for value in size.split("x")]
    self.size_str = size
    print("has {} frames".format(qs.count()))
def save_animation(self, name):
    self._index = 0
    duration = self.num_data // self.fps
    anim = VideoClip(make_frame=self.make_frame, duration=duration)
    # anim.write_gif(os.path.join(self.DIR, name), fps=self.fps)
    anim.write_videofile(os.path.join(self.DIR, name), fps=self.fps, audio=False)
    print(duration)
def make_gif(self, file_name):
    def make_frame(t):
        fig = self.iterate(t)
        img = mplfig_to_npimage(fig)
        plt.close(fig)
        return img

    animation = VideoClip(make_frame, duration=self.duration)
    animation.write_gif(file_name, fps=self.fps)
def save(self, path):
    fname, ext = os.path.splitext(path)
    duration = (len(self.frames) - 1) / float(self.fps)
    self.animation = VideoClip(self._make, duration=duration)
    if 'gif' in ext:
        self.animation.write_gif(path, fps=self.fps)
    else:
        self.animation.write_videofile(path, fps=self.fps)
    self._iter = -1
def plot_voxels(gridLabels, suncg_labels, vox_min, vox_unit, save_path=None, animate=False):
    nbr_classes = len(suncg_labels)

    canvas = scene.SceneCanvas(keys='interactive', bgcolor='w', size=(1920, 1080))
    view = canvas.central_widget.add_view()
    azimuth = 30
    view.camera = scene.TurntableCamera(up='y', distance=4, fov=70,
                                        azimuth=azimuth, elevation=30.)

    # Sample colormap and adjust alpha
    colormap = get_colormap('cubehelix')
    cm_sampled = []
    for i, (iclass, sample_f) in enumerate(
            zip(suncg_labels, np.linspace(0, 1, nbr_classes))):
        if iclass.lower() in ('free', 'ceiling'):
            alpha = 0
        elif iclass.lower() in ('floor', 'wall', 'window'):
            alpha = 0.6
        else:
            alpha = 1.0
        cm_sampled.append(Color(color=colormap[sample_f].rgb, alpha=alpha))
    my_cm = AlphaAwareCM(cm_sampled)

    volume = scene.visuals.Volume(gridLabels, relative_step_size=0.1, method='mip',
                                  parent=view.scene, cmap=my_cm,
                                  clim=[0, nbr_classes - 1], emulate_texture=False)
    volume.transform = scene.transforms.MatrixTransform()
    volume.transform.scale(3 * [vox_unit])
    volume.transform.translate(3 * [-vox_unit * gridLabels.shape[0] / 2.0])

    if save_path is None:
        return

    def make_frame(t):
        view.camera.set_state({'azimuth': azimuth + t * 90})
        return canvas.render()

    if animate:
        animation = VideoClip(make_frame, duration=3)
        animation.write_gif('voxel.gif', fps=8, opt='OptimizePlus')
    else:
        img = canvas.render()
        cv2.imwrite('voxel.png', img[::-1])
def __init__(self, qs, size, fps, duration):
    VideoClip.__init__(self, duration=duration)
    self.frames = {
        frame.number: frame
        for frame in qs
    }
    self.fps = fps
    self.size = [int(value) for value in size.split('x')]
    self.size_str = size
    print('has {} frames'.format(qs.count()))
def moviepy_animate(self):
    self.prepare()
    if USE_MOVIEPY:
        self.ani = VideoClip(
            self.moviepy_update,
            duration=self.bf.particles.get_number_of_frames() / FPS)  # FIXME: duration in seconds
    else:
        for i in range(self.bf.particles.get_number_of_frames()):
            print(i)
            self.moviepy_update(i)
def foldin(cliplist, length):
    a = 0
    duration = min(cliplist, key=lambda x: x.duration).duration
    print(duration)
    result = VideoClip(duration=0)
    result.size = (0, 0)
    while result.duration < duration:
        for i in cliplist:
            if (a < i.duration and a + length < i.duration):
                seg = i.subclip(a, a + length)
                result = concatenate_videoclips([result, seg])
    return result
def main():
    parser = argparse.ArgumentParser(
        prog='AniMaker',
        description='Animated short film maker by lzy2002 site:lzy2002.com')
    parser.add_argument('-i', '--inputs', help='directory containing the input files', required=True)
    parser.add_argument('-o', '--output', help='output file', default='movie.mp4')
    parser.add_argument('-f', '--fps', help='frame rate (fps)', default=24)
    parser.add_argument('-rf', '--recordfps', help='recording frame rate (fps)', default=60)
    parser.add_argument('-t', '--time', help='duration in seconds', default=5)
    args = parser.parse_args()

    animation = VideoClip(make_frame(args.inputs, float(args.recordfps),
                                     int(args.fps), float(args.time)),
                          duration=float(args.time))
    animation.write_videofile(args.output, fps=int(args.fps))
def saveMovie(self, zoomfactor=1.0, savename='None', appendix='', update=False, fps=25):
    """ Save tif file as mp4 using the moviepy library. The duration of the
    movie file is three times that of the real-time video.

    Input:
        zoomfactor = 0.5 (default), save image using resampling
        savename = default format [tif file name]_z[zoomfactor].mp4
    Return:
        VideoClip object from the moviepy library
    """
    if ('moviepy' not in dir()):
        from moviepy.editor import VideoClip
    if ('cv2' not in dir()):
        import cv2

    if (savename == 'None'):
        savename = '{}_z{:.1f}_{}.mp4'.format(self._meta['fname'][:-4], zoomfactor, appendix)

    if not update:
        if os.path.exists(savename):
            if self._debug:
                print('... movie file already exists: %s' % savename)
            return False

    if self._single:
        if self._debug:
            print('... not movie file')
        return False

    cmap = plt.get_cmap(self._meta['cmapname'])

    def make_frame(t):
        #self._curframe = int(t * (self._frameN - 1) / (self._duration * 3.0))
        self._curframe = int(t * fps)
        img0 = self.getframe(frame=self._curframe, dtypes='uint8')
        if zoomfactor != 1.0:
            img0 = cv2.resize(img0, None, fx=zoomfactor, fy=zoomfactor,
                              interpolation=cv2.INTER_CUBIC)
        img = np.delete(cmap(img0), 3, 2)
        #return img
        return (img * 255.0).astype('uint8')

    #animation = VideoClip(make_frame, duration=self._duration * 3.0)
    animation = VideoClip(make_frame, duration=float(self._meta.N() / fps))
    animation.write_videofile(savename, fps=fps, codec='libx264',
                              threads=8, audio=False, preset='medium', verbose=False)
    self._animation = animation

    print("""To play movie file in jupyter notebook:
    from IPython.display import Video
    Video("{}")""".format(savename))
def trajectory_video(trajectory, filename, xlim=(-10, 10), ylim=(-10, 10),
                     callback=None, axisOff=True):
    """Create a video of the track and save it in the working directory

    Parameters
    ----------
    filename : string
        name of the file to be saved into
    xlim : tuple(2) of float
        x limits of the video
    ylim : tuple(2) of float
        y limits of the video
    callback : callable, optional
        called with the axis after each frame is drawn
    axisOff : bool
        if True, hide the axes

    Returns
    -------
    saved mp4 video at given filepath
    """
    WIDTH = 900
    HEIGHT = 600
    DPI = 150
    FPS = 25
    DURATION = np.max(trajectory.time) - np.min(trajectory.time)

    fig, axis = plt.subplots(figsize=(1.0 * WIDTH / DPI, 1.0 * HEIGHT / DPI), dpi=DPI)

    def make_frame(t):
        axis.clear()
        (tx, ty) = trajectory.position_for_time(t + np.min(trajectory.time))
        axis.plot(tx, ty, "ko")
        axis.set_xlim(xlim)
        axis.set_ylim(ylim)
        axis.set_title("Time {:.2f} s".format(t))
        if not callback is None:
            callback(axis)
        if axisOff:
            plt.axis('off')
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=DURATION)
    #animation.write_gif(filename, fps=FPS)
    animation.write_videofile(filename, fps=FPS)
    pass
def make_skeleton_animation(frames, index, fps, movies_dir):
    DEFAULT_FPS = 30.0

    def make_frame(t):
        verticies = frames[int(t * DEFAULT_FPS)]
        edges = zip(range(len(CONNECT)), CONNECT)
        arr = render_offscreen(verticies, edges, WIDTH, HEIGHT)
        return arr

    duration = frames.shape[0] / DEFAULT_FPS
    animation = VideoClip(make_frame, duration=duration)
    file_path = os.path.join(movies_dir, "sequence_{}.mp4".format(index))
    animation.write_videofile(file_path, fps=fps)
def gen_mv(self):
    # generates the music video combining gif and audio
    def make_frame(t):
        ind = int(t // self.period)
        frame = self.screens[ind]
        return frame[:, :, None] * np.ones((1, 1, 3), np.uint8)

    total_duration = len(self.screens) * self.period  # in seconds
    animation = VideoClip(make_frame, duration=total_duration)
    # pdb.set_trace()
    audio = AudioFileClip(self.mfile)
    animation.set_audio(audio).write_videofile("test.mp4", fps=20)
def animate_wave_function_collapse(fn, seconds_per_state=0.5):
    size = (32, 32)
    tn = wfc.seq_target_name(fn, 1, "overlapping", size)
    ts, result = dep.create(tn)
    full_duration = len(result) * seconds_per_state

    def makeframe(t):
        state_index = int(t / seconds_per_state)
        return result[min(state_index, len(result) - 1)]

    anim = VideoClip(makeframe, duration=full_duration)
    nfn = os.path.splitext(fn)[0] + ".collapse.mp4"
    anim.write_videofile(nfn, fps=FRAMERATE)
    nfn = os.path.splitext(fn)[0] + ".collapsed.png"
    imsave(nfn, result[-1])
def implay(array):
    try:
        if array.dtype != np.uint8:
            array = (array * 255).astype(np.uint8)

        def make_frame(t):
            return array[..., round(t * 30.0)]

        clip = VideoClip(make_frame, duration=array.shape[-1] / 30.0)
        clip.preview(fps=30)
        #_ = raw_input("quit...")
    finally:
        import pygame.display
        pygame.display.quit()
def ani_gen(self, path, title="", cmap='binary', duration=3e2):
    bset.clear()
    fig, ax = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))

    def make_frame(t):
        ax.clear()
        ax.axis('off')
        ax.set_title(title, fontsize=16)
        self.step()
        self.plot(ax, cmap)
        return (mplfig_to_npimage(fig))

    ani = VideoClip(make_frame, duration=duration)
    ani.write_videofile(path, fps=50)
    return
class Animation(object):
    def __init__(self, fps=24):
        self.fps = fps
        self.reset()

    def reset(self):
        self.frames = []
        self._iter = -1

    def add_frame(self, image):
        """ image should be a (height, width, 3) np.ndarray """
        self.frames.append(np.copy(image))

    def anim_fn(self, fn, data):
        """ fn: a function that returns a plot
            data: an iterable """
        for i in range(len(data)):
            p = fn(data[:i])
            self.add_frame(p.numpy())

    def rotate_3d(self, plot, duration=8):
        nframes = duration * self.fps
        change_angle = 360.0 / nframes
        azim = plot.canvas.azim
        for i in range(nframes):
            plot.set_camera(azim=azim + i * change_angle)
            self.frames.append(plot.numpy())

    def _make(self, t):
        self._iter += 1
        # Weird bug where idx might be = len(self.frames)
        idx = min(self._iter, len(self.frames) - 1)
        return self.frames[idx]

    def save(self, path):
        from moviepy.editor import VideoClip
        fname, ext = os.path.splitext(path)
        duration = (len(self.frames) - 1) / float(self.fps)
        self.animation = VideoClip(self._make, duration=duration)
        if 'gif' in ext:
            self.animation.write_gif(path, fps=self.fps)
        else:
            self.animation.write_videofile(path, fps=self.fps)
        self._iter = -1
def clip_title(fig, title='title', fontsize=40):
    ''' Make a videoclip with a centered white title '''
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage

    duration = 1
    ax = plt.gca()
    left, width = .25, .5
    bottom, height = .25, .5
    right = left + width
    top = bottom + height

    def make_frame(t):
        ax = plt.gca()
        ax = fig.add_axes([0, 0, 1, 1])
        ax.text(0.5 * (left + right), 0.5 * (bottom + top), title,
                horizontalalignment='center', verticalalignment='center',
                fontsize=fontsize, color='white', transform=ax.transAxes)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=duration)
    return animation
def __init__(self, make_rgba_frame, duration=None):
    self.rgba_buffer = None
    self.last_t = None

    def save_last_rgba_frame(t):
        # only create a new frame if the time is different from that of the last frame
        if t != self.last_t:
            self.rgba_buffer = make_rgba_frame(t)
            if not isinstance(self.rgba_buffer, np.ndarray):
                raise Exception(f'The rgba buffer is not a numpy array but of type "{type(self.rgba_buffer)}".')
            if self.rgba_buffer.dtype != np.uint8:
                raise Exception(f'The rgba buffer needs to be an 8-bit uint array, not "{self.rgba_buffer.dtype}".')
            # update the time stamp of the last created frame
            self.last_t = t

    # frame function for image data
    def make_frame(t):
        save_last_rgba_frame(t)
        return self.rgba_buffer[..., :3]

    # frame function for mask data
    def make_mask_frame(t):
        save_last_rgba_frame(t)
        return self.rgba_buffer[..., 3] / 255

    super(RGBAVideoClip, self).__init__(make_frame, duration=duration)
    self.mask = VideoClip(make_mask_frame, ismask=True, duration=duration)
def cubes_to_animation(cubes, clim=None, figsize=(10, 11), title=None,
                       fontsize=24, fps=16, **kwargs):
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage

    if len(cubes.shape) < 3:
        cubes = cubes.reshape([1, *cubes.shape])
    if clim is None:
        clim = (np.min(cubes[0]), np.max(cubes[0]))

    fig = plt.figure(figsize=figsize)
    nframe = cubes.shape[1]

    def make_frame(t):
        ind = int(round(t * fps))
        plt.cla()
        plt.imshow(cubes[0, ind, :, :], interpolation='none', clim=clim, **kwargs)
        # plt.axis('off')
        titlestr = 'Frame no. {}'.format(ind)
        if title:
            titlestr = title + ' - ' + titlestr
        plt.title(titlestr, fontsize=fontsize)
        return mplfig_to_npimage(fig)

    animation = VideoClip(make_frame, duration=nframe / fps)
    plt.clf()
    return animation
def create_animation(name, files, output_path, worker_mem):
    '''
    Create a single animation file from a list of tiff files.
    '''
    sys.stdout = open(os.devnull, "w")
    sys.stderr = open(os.devnull, "w")

    # Add the last file twice to avoid it getting dropped from the animation.
    files.extend(files[-1:])

    frame_generator = FrameGenerator(name, files, output_path, worker_mem)
    duration = len(files)
    animation = VideoClip(frame_generator.make_frame, duration=duration)
    animation.write_videofile(os.path.join(output_path, "{}.avi".format(name)),
                              fps=1, codec="png")
    return name
def ipython_display(self, *args, **kwargs):
    """
    Fixes inheritance naming issue with moviepy's ipython_display
    """
    seg_copy = self.copy()
    # Class should also always be set to VideoClip for expected video display
    seg_copy.__class__ = VideoClip().__class__
    return seg_copy.ipython_display(*args, **kwargs)
def run_animation(args, target, outfile):
    """Creates an animated crash based on a still image input"""
    stepsize = args.animate
    img = util.read_img(target)
    bounds = foreground.get_fg_bounds(img.shape[1], args.max_depth)
    max_depth = bounds.max_depth
    crash_params = crash.CrashParams(
        max_depth, args.threshold, args.bg_value, args.rgb_select)
    depths = range(max_depth, -stepsize, -stepsize)
    depths = [d for d in depths if d > 0]
    depths.append(0)
    n_frames = len(depths)
    n_procs = max(args.in_parallel, 1)
    fps = args.fps
    duration = len(depths) / fps
    img = util.read_img(target)
    options = _options(args.reveal_foreground, args.reveal_background,
                       args.crash, args.reveal_quadrants)
    source_img = util.read_img(target)
    fg, bounds = foreground.find_foreground(source_img, crash_params)

    def make_frame(time):
        frame_no = int(round(time * fps))
        if frame_no >= n_frames:
            frame_no = n_frames - 1
        depth = depths[-frame_no]
        img = source_img.copy()
        if depth:
            params = crash.CrashParams(
                depth, args.threshold, args.bg_value, args.rgb_select)
            new_fg, new_bounds = foreground.trim_foreground(img, fg, params)
            new_img = _process_img(img, new_fg, new_bounds, options)
        else:
            new_img = source_img
        return new_img

    animation = VideoClip(make_frame, duration=duration)
    clip = animation.to_ImageClip(t=duration)
    clip.duration = 0.1
    clip.write_videofile(outfile, fps=fps, audio=False)
    animation.write_videofile("__temp_crash.mp4", fps=fps, audio=False,
                              preset=args.compression,
                              threads=args.in_parallel)
    os.rename("__temp_crash.mp4", outfile)
def render(render_frame, frames, fps, size, filename):
    """ Create a movie using the given render_frame function """
    # Wrapper so that the render function gets passed a surface to draw to,
    # and a frame number.
    def make_frame(t):
        surface = pygame.Surface(size)
        render_frame(surface, int(t * fps))
        # Flip the surface around its x/y axis (main diagonal), to account
        # for display issues with the movie rendering.
        surface = pygame.transform.rotate(surface, -90)
        surface = pygame.transform.flip(surface, True, False)
        return pygame.surfarray.pixels3d(surface)

    # Create the animation...
    animation = VideoClip(make_frame, duration=frames / fps)

    # Write to the movie file...
    animation.write_videofile(filename, fps=fps)
def run_moving_crash(args, target, outfile):
    """Runs a moving crash based on moving (gif/mp4) inputs"""
    video = VideoFileClip(target)
    img = video.get_frame(t=0)  # first frame of the video
    bounds = foreground.get_fg_bounds(img.shape[1], args.max_depth)
    max_depth = bounds.max_depth
    crash_params = crash.CrashParams(
        max_depth, args.threshold, args.bg_value, args.rgb_select)
    options = _options(args.reveal_foreground, args.reveal_background,
                       args.crash, args.reveal_quadrants, args.bg_value)
    frames = video.iter_frames(fps=video.fps)

    def make_frame(_):
        frame = next(frames)
        fg, bounds = foreground.find_foreground(frame, crash_params)
        return _process_img(frame, fg, bounds, options)

    output_video = VideoClip(
        make_frame, duration=video.duration - (4 / video.fps))  # trim the last 4 frames
    output_video.write_videofile(
        outfile, preset=args.compression, fps=video.fps,
        threads=args.in_parallel)
        self.program.bind(gloo.VertexBuffer(data))
        self.program['u_model'] = self.model
        self.program['u_view'] = self.view
        self.program['u_size'] = 5 / self.translate
        gloo.set_state('translucent', depth_test=False)
        self.program['u_clock'] = 0.0

    def on_resize(self, event):
        width, height = event.size
        gloo.set_viewport(0, 0, width, height)
        self.projection = perspective(45.0, width / float(height), 1.0, 1000.0)
        self.program['u_projection'] = self.projection

    def animation(self, t):
        """ Added for animation with MoviePy """
        self.program['u_clock'] = 2 * t
        gloo.clear('black')
        self.program.draw('points')
        return _screenshot((0, 0, self.size[0], self.size[1]))[:, :, :3]


if __name__ == '__main__':
    from moviepy.editor import VideoClip
    canvas = Canvas()
    canvas.show()
    clip = VideoClip(canvas.animation, duration=np.pi).resize(0.3)
    clip.write_videofile('atom3.mp4', fps=20)
    #clip.write_gif('atom3.gif', fps=20, opt='OptimizePlus')
    idx = [i for i in range(len(time_sum)-1) if t <= time_sum[i]][0]
    # print "======", jpegs[idx/2], "======", idx
    delta_fade = time_sum[idx] - time_sum[idx-1]
    fade_to = (t - time_sum[idx-1]) / delta_fade  # fraction
    fade_from = 1 - fade_to                       # fraction
    frame_for_time_t_BGR_frame0 = fade_from * cv2.imread(jpegs[idx/2], cv2.CV_LOAD_IMAGE_COLOR)
    frame_for_time_t_BGR_frame1 = fade_to * cv2.imread(jpegs[idx/2+1], cv2.CV_LOAD_IMAGE_COLOR)

    # BLENDED FRAME
    frame_for_time_t_BGR_frame01 = frame_for_time_t_BGR_frame0 + frame_for_time_t_BGR_frame1
    frame_for_time_t_BGR_frame01 = frame_for_time_t_BGR_frame01.astype('uint8')  # convert from float to uint8
    frame_for_time_t = cv2.cvtColor(frame_for_time_t_BGR_frame01, cv2.COLOR_BGR2RGB)  # BGR-RGB COLOR
    return frame_for_time_t

clip = VideoClip(make_frame, duration=time_sum[-1])  #.set_audio(audio)  # x-second clip

if audio_on:
    audio = AudioFileClip("audioclip.mp3")
    audio = audio.set_duration(time_sum[-1])
    clip = clip.set_audio(audio)

clip.write_videofile("my_animation_%sfps_dummy.mp4" % fps, fps=fps)  # export as video
#clip.write_gif("my_animation.gif", fps=24)  # export as GIF
def export_moviepy(sequence, filename, rate=30, bitrate=None, width=None,
                   height=None, codec='mpeg4', pixel_format='yuv420p',
                   autoscale=None, quality=None, verbose=True, options=None,
                   rate_range=(16, 32)):
    """Export a sequence of images as a standard video file using MoviePy.

    Parameters
    ----------
    sequence : any iterator or array of array-like images
        The images should have two dimensions plus an optional third
        dimensions representing color.
    filename : string
        name of output file
    rate : integer, optional
        frame rate of output file, 30 by default
        NB: The output frame rate will be limited between `rate_range`
    bitrate : integer or string, optional
        Preferably use the parameter `quality` for controlling the bitrate.
    width : integer, optional
        By default, set the width of the images.
    height : integer, optional
        By default, set the height of the images. If width is specified
        and height is not, the height is autoscaled to maintain the aspect
        ratio.
    codec : string
        a valid video encoding, 'mpeg4' by default. Must be supported by the
        container format. Examples are {'mpeg4', 'wmv2', 'libx264', 'rawvideo'}
        Check https://www.ffmpeg.org/ffmpeg-codecs.html#Video-Encoders.
    pixel_format : string, optional
        Pixel format, 'yuv420p' by default. Another possibility is 'bgr24'
        in combination with the 'rawvideo' codec.
    quality : number or string, optional
        For 'mpeg4' codec: sets qscale:v. 1 = high quality, 5 = default.
        For 'libx264' codec: sets crf. 0 = lossless, 23 = default.
        For 'wmv2' codec: sets fraction of lossless bitrate, 0.01 = default
    autoscale : boolean, optional
        Linearly rescale the brightness to use the full gamut of black to
        white values. False by default for uint8 readers, True otherwise.
    verbose : boolean, optional
        Determines whether MoviePy will print progress. True by default.
    options : dictionary, optional
        Dictionary of parameters that will be passed to ffmpeg. Avoid using
        {'qscale:v', 'crf', 'pixel_format'}.
    rate_range : tuple of two numbers
        As extreme frame rates have playback issues on many players, by
        default the frame rate is limited between 16 and 32. When the desired
        frame rate is too low, frames will be multiplied an integer number of
        times. When the desired frame rate is too high, frames will be
        skipped at constant intervals.
    See Also
    --------
    http://zulko.github.io/moviepy/ref/VideoClip/VideoClip.html#moviepy.video.VideoClip.VideoClip.write_videofile
    """
    if VideoClip is None:
        raise ImportError('The MoviePy exporter requires moviepy to work.')

    if options is None:
        options = dict()
    ffmpeg_params = []
    for key in options:
        ffmpeg_params.extend(['-{}'.format(key), str(options[key])])

    if rate <= 0:
        raise ValueError
    export_rate = _normalize_framerate(rate, *rate_range)

    clip = VideoClip(CachedFrameGenerator(sequence, rate, autoscale,
                                          to_bgr=(pixel_format == 'bgr24')))
    clip.duration = len(sequence) / rate
    if not (height is None and width is None):
        clip = clip.resize(height=height, width=width)

    if codec == 'wmv2' and bitrate is None and quality is None:
        quality = 0.01

    if quality is not None:
        if codec == 'libx264':
            ffmpeg_params.extend(['-crf', str(quality)])
        elif codec == 'mpeg4':
            ffmpeg_params.extend(['-qscale:v', str(quality)])
        elif codec == 'wmv2':
            if bitrate is not None:
                warnings.warn("(wmv) quality is ignored when bitrate is set.")
            else:
                bitrate = quality * _estimate_bitrate(clip.size, export_rate)
        else:
            raise NotImplementedError

    if pixel_format is not None:
        ffmpeg_params.extend(['-pixel_format', str(pixel_format)])
    if bitrate is not None:
        bitrate = str(bitrate)

    clip.write_videofile(filename, export_rate, codec, bitrate, audio=False,
                         verbose=verbose, ffmpeg_params=ffmpeg_params)
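A minimal usage sketch for the exporter above, assuming the module's own helpers (CachedFrameGenerator, _normalize_framerate, _estimate_bitrate) are available alongside it and that a list of uint8 arrays is an acceptable `sequence`; the frame data and output filename here are made up for illustration.

import numpy as np

# 60 synthetic 320x240 RGB frames (hypothetical input data)
frames = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8)
          for _ in range(60)]

# write an H.264 file at 24 fps using the codec's default-quality CRF of 23
export_moviepy(frames, 'noise_demo.mp4', rate=24, codec='libx264', quality=23)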
    #print probe.name, "at", probe.position
    if probe.name in displayed_probes:
        list_lines[line_index].set_data(probe.time_buffer, probe.value_buffer)
        line_index += 1

    xmin, xmax = ax1.get_xlim()
    if o.t > xmax:
        ax1.set_xlim(xmin, 2*xmax)
        ax1.figure.canvas.draw()

    NF_mp.set_data(xrange(0, width), NF.V)
    NF_firing.set_data(xrange(0, width), NF.GainFunction(NF.V))

    if RECORD_VIDEO:
        return mplfig_to_npimage(fig)
    else:
        return list_lines

from time import time
t0 = time()
print list_obj

## call the animator.  blit=True means only re-draw the parts that have changed.
if not RECORD_VIDEO:
    anim = animation.FuncAnimation(fig, animate, frames=simulation_time,
                                   interval=5, blit=True, init_func=init,
                                   repeat=False)
else:
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage
    anim = VideoClip(animate, duration=simulation_time/60)
    anim.write_videofile("bartest_" + str(end_bar-start_bar) + ".mp4", fps=60)

t1 = time()
print((t1 - t0))

plt.show()
            M_0=np.zeros((2, 2*nlags+affine)),
            K_0=10*np.eye(2*nlags+affine),
            affine=affine)
        for state in range(Nmax)],
    )

model.add_data(data)

###############
#  inference  #
###############

from moviepy.video.io.bindings import mplfig_to_npimage
from moviepy.editor import VideoClip

fig = model.make_figure()
plt.set_cmap('terrain')
plot_slice = slice(0, 300)

model.plot(fig=fig, draw=False, plot_slice=plot_slice)

def make_frame_mpl(t):
    model.resample_model()
    model.plot(fig=fig, update=True, draw=False, plot_slice=plot_slice)
    plt.tight_layout()
    return mplfig_to_npimage(fig)

animation = VideoClip(make_frame_mpl, duration=10)
animation.write_videofile('gibbs.mp4', fps=30)
def render_gif(pov_file, args):
    prog_args = []
    clock_args = {'fps': None,
                  'initial_clock': 0,
                  'final_clock': None,
                  'initial_frame': 0,
                  'final_frame': None,
                  }
    for arg in args:
        if '=' in arg:
            key, value = arg.split('=')
            key = key.strip().lower()
            if key in clock_args:
                clock_args[key] = float(value)
            else:
                prog_args.append(arg)
        else:
            prog_args.append(arg)

    FPS = clock_args['fps']
    initial_clock = clock_args['initial_clock']
    final_clock = clock_args['final_clock']
    initial_frame = clock_args['initial_frame']
    final_frame = clock_args['final_frame']

    time_scaling = 1.0
    if (FPS is not None and final_clock is not None and final_frame is not None):
        would_be_FPS = (final_frame - initial_frame) / (final_clock - initial_clock)
        time_scaling = FPS / would_be_FPS

    if FPS is None:
        if (final_clock is not None and final_frame is not None):
            FPS = (final_clock - initial_clock) / (final_frame - initial_frame)
        else:
            raise ValueError('FPS must be set at top of file')

    if final_clock is None:
        if (FPS is not None and final_frame is not None):
            final_clock = initial_clock + (final_frame - initial_frame) / FPS
        else:
            raise ValueError('Final_Clock must be set at top of file')

    if final_frame is None:
        if (FPS is not None and final_clock is not None):
            final_frame = initial_frame + (final_clock - initial_clock) * FPS
        else:
            raise ValueError('Final_Frame must be set at top of file')

    make_frame = frame_gen(pov_file, prog_args, initial_clock, time_scaling)
    clip = VideoClip(make_frame, duration=(final_clock - initial_clock) / time_scaling)
    # output_gif = pov_file.replace('.pov', '.gif')
    # clip.write_gif(output_gif, fps=FPS,
    #                program='ffmpeg')
    output_mp4 = pov_file.replace('.pov', '.mp4')
    clip.write_videofile(output_mp4, fps=FPS,
                         codec='libx264', bitrate='500k',
                         audio=False)
    print('')
def main(argv):
    animation = VideoClip(make_frame, duration=60)  # duration is in seconds
    animation.write_videofile("my_animation.mp4", fps=6)  # export as video
    animation.write_gif("my_animation.gif", fps=6)  # export as GIF (slow)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm
from sklearn.datasets import make_moons
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage

X, Y = make_moons(50, noise=0.1, random_state=2)  # semi-random data

fig, ax = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))
fig.subplots_adjust(left=0, right=1, bottom=0)
xx, yy = np.meshgrid(np.linspace(-2, 3, 500), np.linspace(-1, 2, 500))

def make_frame(t):
    ax.clear()
    ax.axis('off')
    ax.set_title("SVC classification", fontsize=16)

    classifier = svm.SVC(gamma=2, C=1)
    # the varying weights make the points appear one after the other
    weights = np.minimum(1, np.maximum(0, t**2 + 10 - np.arange(50)))
    classifier.fit(X, Y, sample_weight=weights)
    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    ax.contourf(xx, yy, Z, cmap=plt.cm.bone, alpha=0.8,
                vmin=-2.5, vmax=2.5, levels=np.linspace(-2, 2, 20))
    ax.scatter(X[:, 0], X[:, 1], c=Y, s=50*weights, cmap=plt.cm.bone)

    return mplfig_to_npimage(fig)

animation = VideoClip(make_frame, duration=7)
animation.write_gif("svm.gif", fps=15)
    if probe.name in displayed_probes:
        list_lines[line_index].set_data(probe.time_buffer, probe.value_buffer)
        line_index += 1

    xmin, xmax = ax1.get_xlim()
    if o.t > xmax:
        ax1.set_xlim(xmin, 2*xmax)
        ax1.figure.canvas.draw()

    NF_mp.set_data(xrange(0, width), NF.V)
    NF_firing.set_data(xrange(0, width), NF.GainFunction(NF.V))

    if RECORD_VIDEO:
        return mplfig_to_npimage(fig)
    else:
        return list_lines

from time import time
t0 = time()
print list_obj

## call the animator.  blit=True means only re-draw the parts that have changed.
if not RECORD_VIDEO:
    anim = animation.FuncAnimation(fig, animate, frames=simulation_time,
                                   interval=5, blit=True, init_func=init,
                                   repeat=False)
else:
    from moviepy.editor import VideoClip
    from moviepy.video.io.bindings import mplfig_to_npimage
    anim = VideoClip(animate, duration=simulation_time/60)
    anim.write_videofile("test2.mp4", fps=60)

t1 = time()
print((t1 - t0))

plt.show()
""" T01,Joint1 = PlanarTransformationMatrix(Angle1,[0,0],np.identity(4)) T12,Joint2 = PlanarTransformationMatrix(Angle2,[0,-Length1],T01) T23,Endpoint = PlanarTransformationMatrix(0,[0,-Length2],T12) Radius = 0.5 plt.figure() ax = plt.gca() plt.plot([0],[0],'ko') plt.plot([Joint1[0], Joint2[0], Endpoint[0]],[Joint1[1], Joint2[1], Endpoint[1]],"0.75",lw=3) plot_link(ax, Joint1, Radius, Length1, Angle1, Joint2,"0.55") plot_link(ax, Joint2, Radius, Length2, Angle1+Angle2, Endpoint, "0.55") quick_2D_plot_tool(ax,'x','y','Drawing Rotating Links') return(ax) ax1 = plot_2_link_planar_model(np.pi/6,np.pi/6,5,5) ax2 = plot_2_link_planar_model(np.pi/2,np.pi/2,4,3) plt.imshow((ax1,ax2)) from moviepy.editor import VideoClip def make_frame(t): frame_for_time_t = plot_2_link_planar_model(Angle1[t],Angle2[t],5,5) return frame_for_time_t # (Height x Width x 3) Numpy array Angle1 = np.arange(0,np.pi,0.001) Angle2 = np.arange(0,np.pi/2,0.0005) animation = VideoClip(make_frame, duration=3) # 3-second clip # For the export, many options/formats/optimizations are supported animation.write_videofile("my_animation.mp4", fps=24) # export as video animation.write_gif("my_animation.gif", fps=24) # export as GIF (slow)
            hue_left = hue[rs, cs-1]
            hue_up = hue[rs-1, cs]
            this_hue = chunk[0, 0]
            new_hue = (-random.randrange(30, 50) * (hue_up / 360)
                       - 10*random.randrange(1, 10) * (hue_left / 360))
            new_hue = (15*this_hue + 2*new_hue) / 17
            chunk[:] = new_hue
        np.mod(hue, 360, out=hue)
        yield nphusl.to_rgb(hsl)


if __name__ == "__main__":
    filename = sys.argv[1]
    img = imread.imread(filename)
    transforms = reveal_blue, reveal_blue_rgb, hue_watermelon,\
                 reveal_light, reveal_light_rgb, highlight_saturation
    for t in transforms:
        out, name = t(img)
        imread.imwrite(name + ".jpg", out.astype(np.uint8))
    n_frames = 300
    fps = 50
    duration = n_frames / fps
    #frames = microwave(img)
    #animation = VideoClip(lambda _: next(frames), duration=duration)
    #animation.write_gif("video2.gif", fps=fps, opt="OptimizePlus")
    frames = melonize(img, n_frames)
    animation = VideoClip(lambda _: next(frames), duration=duration)
    animation.write_gif("melonized.gif", fps=fps, opt="OptimizePlus")