Example #1
def make_clips_from_activations(m,
                                _frames,
                                obs,
                                activations_tensor,
                                session,
                                X_t,
                                fps=60):
    clip_dict = {}
    activations = gather_activations(m,
                                     obs,
                                     activations_tensor=activations_tensor,
                                     session=session,
                                     X_t=X_t,
                                     batch_size=1)

    for layer in m.layers:
        layer_name = layer['name']
        print(layer_name)
        frames = activations_to_frames(m, activations[layer_name])
        clip = mpy.ImageSequenceClip([frame * 255 for frame in frames], fps=fps)
        clip_dict[layer_name] = clip

    # create observation movie
    n_obs = m.native_activation_representation(obs)
    frames = activations_to_frames(m, n_obs)
    clip = mpy.ImageSequenceClip([frame * 255 for frame in frames], fps=fps)
    clip_dict['observations'] = clip

    # create raw rollout movie
    clip = mpy.ImageSequenceClip(list(_frames), fps=fps)
    clip_dict['frames'] = clip

    return clip_dict
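When given arrays rather than filenames, ImageSequenceClip expects uint8 RGB frames in the 0-255 range, which is why the snippet above multiplies normalized frames by 255. A minimal, self-contained sketch of that conversion (frame shapes and the output path are illustrative):

import numpy as np
import moviepy.editor as mpy

# Hypothetical float frames in [0, 1]; cast to uint8 before building the clip.
frames = [np.random.rand(64, 64, 3) for _ in range(30)]
clip = mpy.ImageSequenceClip([(f * 255).astype(np.uint8) for f in frames],
                             fps=24)
clip.write_videofile('demo.mp4')  # illustrative output path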
Example #2
    def train(self):
        next_v = 1e6
        v = self.value_fun.get_values()
        itr = 0
        videos = []
        contours = []
        returns = []
        delay_cs = []
        fig = None

        while not self._stop_condition(itr, next_v, v) and itr < self.max_itr:
            log = itr % self.log_itr == 0
            render = (itr % self.render_itr == 0) and self.render
            if log:
                next_pi = self.get_next_policy()
                self.policy.update(next_pi)
                average_return, avg_delay_cost, video = rollout(
                    self.env, self.policy, render=render,
                    num_rollouts=self.num_rollouts,
                    max_path_length=self.max_path_length, iteration=itr)
                if render:
                    contour, fig = plot_contour(self.env, self.value_fun, fig=fig, iteration=itr)
                    contours += [contour] * len(video)
                    videos += video
                returns.append(average_return)
                delay_cs.append(avg_delay_cost)
                logger.logkv('Iteration', itr)
                logger.logkv('Average Returns', average_return)
                logger.logkv('Average Delayed Costs', avg_delay_cost)
                logger.dumpkvs()
            next_v = self.get_next_values()
            self.value_fun.update(next_v)
            itr += 1

        next_pi = self.get_next_policy()
        self.policy.update(next_pi)
        contour, fig = plot_contour(self.env, self.value_fun, save=True, fig=fig, iteration=itr)
        average_return, avg_delay_cost, video = rollout(
            self.env, self.policy, render=True,
            num_rollouts=self.num_rollouts,
            max_path_length=self.max_path_length, iteration=itr)
        self.env.close()
        plot_returns(returns)
        plot_returns(delay_cs, 'delayed_cost')
        videos += video
        if self.render:
            contours += [contour]
        logger.logkv('Iteration', itr)
        logger.logkv('Average Returns', average_return)
        logger.logkv('Average Delayed Costs', avg_delay_cost)
        logger.dumpkvs()  # flush the final metrics, mirroring the loop above

        fps = int(4/getattr(self.env, 'dt', 0.1))
        if contours and contours[0] is not None:
            clip = mpy.ImageSequenceClip(contours, fps=fps)
            clip.write_videofile('%s/contours_progress.mp4' % logger.get_dir())

        if videos:
            clip = mpy.ImageSequenceClip(videos, fps=fps)
            clip.write_videofile('%s/roll_outs.mp4' % logger.get_dir())

        plt.close()
Example #3
    def makeVisualSong(self):
        """Return a sequence of images and durations.
        """
        self.files = os.listdir(self.basedir)
        self.stairs = [i for i in self.files if ("stair" in i) and ("R" in i)]
        self.sectors = [i for i in self.files if "sector" in i]
        self.stairs.sort()
        self.sectors.sort()
        filenames = [self.basedir + i for i in self.sectors[:4]]
        self.iS0 = mpy.ImageSequenceClip(filenames,
                                         durations=[1.5, 2.5, .5, 1.5])
        alternating = [self.basedir + self.sectors[2],
                       self.basedir + self.sectors[3]] * 4
        self.iS1 = mpy.ImageSequenceClip(alternating, durations=[0.25] * 8)
        self.iS2 = mpy.ImageSequenceClip(
            [
                self.basedir + self.sectors[2], self.basedir + self.sectors[3],
                self.basedir + self.sectors[2], self.basedir + self.sectors[3],
                self.basedir + self.sectors[0]
            ],
            durations=[0.75, 0.25, 0.75, 0.25, 2.])  # falls to the leading tone

        self.iS3 = mpy.ImageSequenceClip(
            [
                self.basedir + "BLANK.png", self.basedir + self.sectors[0],
                self.basedir + self.sectors[1], self.basedir + self.sectors[1],
                self.basedir + self.sectors[1], self.basedir + self.sectors[0],
                self.basedir + self.sectors[0]
            ],
            durations=[1, 0.5, 2., .25, .25, 1.75, 0.25])  # [-1,8]

        self.iS4 = mpy.ImageSequenceClip(
            [
                self.basedir + self.sectors[2],  # 1
                self.basedir + self.sectors[3],  # .5
                self.basedir + self.sectors[5],  # .5
                self.basedir + self.sectors[2],  # .75
                self.basedir + self.sectors[0],  #.25
                self.basedir + self.sectors[2],  # 1
                self.basedir + self.sectors[0],  # 2 8
                self.basedir + self.sectors[3],  # 2 7
                self.basedir + self.sectors[0],  # 2 -1
                self.basedir + "BLANK.png",  # 2
            ],
            durations=[1, 0.5, 0.5, .75, .25, 1., 2., 2., 2.,
                       2.])  # [0,7,11,0]

        self.iS = mpy.concatenate_videoclips(
            (self.iS0, self.iS1, self.iS2, self.iS3, self.iS4))
Example #4
def __save_gif(file, video, fps=24, duration=None):
    if isinstance(video, list):
        sequence = video
    else:
        sequence = list(video)  # sigh...
    if duration is not None:
        durations = [duration / len(sequence)] * len(sequence)
        clip = mpy.ImageSequenceClip(sequence, durations=durations)
    else:
        clip = mpy.ImageSequenceClip(sequence, fps=fps)

    clip.write_gif(file, fps=fps, program='ffmpeg')
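The helper above switches between the two timing modes ImageSequenceClip supports: a fixed fps, or an explicit per-frame durations list. A small usage sketch of the durations mode (file names are hypothetical):

import moviepy.editor as mpy

# Show the first image for 2 s and the second for 0.5 s; an explicit fps is
# still needed to sample the clip when the gif is written out.
clip = mpy.ImageSequenceClip(['a.png', 'b.png'], durations=[2.0, 0.5])
clip.write_gif('timed.gif', fps=10)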
Example #5
def save_dd_record(dir, fname, traj):
    if not os.path.exists(dir):
        os.makedirs(dir)
    filename = os.path.join(dir, fname + '.hd5')
    images = list(traj['images'])
    clip = mpy.ImageSequenceClip(images, fps=5)
    clip.write_gif(os.path.join(dir, fname + '.gif'))
    side_images = list(traj['side_images'])
    clip2 = mpy.ImageSequenceClip(side_images, fps=5)
    clip2.write_gif(os.path.join(dir, fname + '_side.gif'))
    print('Writing', filename)
    dd.io.save(filename, traj)
Example #6
def make_turing_moive(camera_images, gazemaps, thresh, fps):
    if len(gazemaps.shape)==3:
        gazemaps = np.expand_dims(gazemaps, axis=-1)
    blurred_images = filters.gaussian_filter(camera_images, sigma=(0,5,5,0))
    
    camera_clip = mpy.ImageSequenceClip(list(camera_images), fps=fps)
    blurred_clip = mpy.ImageSequenceClip(list(blurred_images), fps=fps)
    
    masks = (gazemaps > thresh).astype(float) * 255  # np.float was removed from NumPy
    blurred_masks = filters.gaussian_filter(masks, sigma=(0,2,2,0))
    mask_clip = mpy.ImageSequenceClip(list(blurred_masks), fps=fps).to_mask()
    fovea_clip = camera_clip.set_mask(mask_clip)
    mix_clip = mpy.CompositeVideoClip([blurred_clip, fovea_clip])
    return mix_clip
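The key step above is turning a grayscale clip into an alpha mask: to_mask() rescales 0-255 pixel values into moviepy's 0-1 mask range, and set_mask() attaches the mask before compositing. A toy sketch of the same pattern (sizes and colors are arbitrary):

import numpy as np
import moviepy.editor as mpy

# A brightening ramp of grayscale frames used as a time-varying mask.
gray = [np.full((32, 32, 3), i * 16, dtype=np.uint8) for i in range(16)]
mask = mpy.ImageSequenceClip(gray, fps=8).to_mask()

fg = mpy.ColorClip((32, 32), color=(255, 0, 0), duration=2).set_mask(mask)
bg = mpy.ColorClip((32, 32), color=(0, 0, 255), duration=2)
comp = mpy.CompositeVideoClip([bg, fg])  # fg fades in as the mask brightens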
Example #7
def gen(args, path, num_given_colors, num_subtasks, num_trajs_per_task,
        specific_task, video):
    path = Path(path)
    if path.exists():
        print('data seems already exist.')
        return
    path.mkdir(parents=True)

    if video:
        (path / 'video' / 'given').mkdir(parents=True)
        (path / 'video' / 'meta').mkdir(parents=True)

    with open(str(path / 'args.txt'), 'w') as f:
        f.write(str(args))

    # Generate Meta-Training Set
    given_colors = RR.COLOR_SET[:num_given_colors]
    given = generate(given_colors, num_subtasks, num_trajs_per_task,
                     specific_task)
    saveCompressed(str(path / 'given.pkl'), trajs=given, colors=given_colors)

    if video:
        for task, trajs in given.items():
            for i, (states, actions, subtasks, completes,
                    images) in enumerate(trajs):
                clip = mpy.ImageSequenceClip(list(images), fps=60)
                clip.write_videofile(
                    str(path / 'video' / 'given' / ('%s_%05d.mp4' %
                                                    (task, i))),
                    verbose=False,
                    ffmpeg_params=['-y'],  # always override
                    progress_bar=False)

    # Generate Meta-Validation Set
    meta_colors = RR.COLOR_SET[num_given_colors:num_given_colors * 2]
    meta = generate(meta_colors, num_subtasks, num_trajs_per_task,
                    specific_task)
    saveCompressed(str(path / 'meta.pkl'), trajs=meta, colors=meta_colors)

    if video:
        for task, trajs in meta.items():
            for i, (states, actions, subtasks, completes,
                    images) in enumerate(trajs):
                clip = mpy.ImageSequenceClip(list(images), fps=60)
                clip.write_videofile(
                    str(path / 'video' / 'meta' / ('%s_%05d.mp4' % (task, i))),
                    verbose=False,
                    ffmpeg_params=['-y'],  # always override
                    progress_bar=False)
Example #8
    def add_video(self, vid_tensor, epoch, nrow=None, fps=5):
        vid_tensor = vid_tensor.cpu().numpy()

        vid_tensor = _prepare_video(vid_tensor, n_rows=nrow)

        if vid_tensor.dtype != np.uint8:
            vid_tensor = (vid_tensor * 255.0).astype(np.uint8)

        clip = mpy.ImageSequenceClip(list(vid_tensor), fps=fps)
        tmpdirname = tempfile.mkdtemp()
        list_files = os.path.join(tmpdirname, "frame_%04d.png")
        clip.write_images_sequence(list_files, verbose=False, logger=None)

        video_dir = os.path.join(self.logdir, "videos")
        if not os.path.exists(video_dir):
            os.makedirs(video_dir)
        filename = os.path.join(video_dir, "video_%.5i.mp4" % epoch)
        #tmpfile = os.path.join(tmpdirname, "video.mp4")

        subprocess.run([
            "ffmpeg", "-r",
            str(fps), "-f", "image2", "-s", "1920x1080", "-i", list_files,
            "-vcodec", "libx264", "-crf", "25", filename
        ],
                       stderr=subprocess.DEVNULL,
                       stdout=subprocess.DEVNULL)
        #subprocess.run(["ffmpeg", "-i", tmpfile, filename])

        shutil.rmtree(tmpdirname)
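The method above dumps frames to PNGs and shells out to ffmpeg. Since the frames are already assembled into a clip, a sketch of the shorter route is to let moviepy write the mp4 itself (note the original command also rescales to 1920x1080 via -s, which this drops):

# Sketch: skip the PNG dump and the ffmpeg subprocess entirely.
clip = mpy.ImageSequenceClip(list(vid_tensor), fps=fps)
clip.write_videofile(filename, codec='libx264', logger=None)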
Example #9
def rollout(env,
            agent,
            max_path_length=np.inf,
            animated=False,
            speedup=1,
            save_video=True,
            video_filename='sim_out.mp4',
            reset_args=None,
            policy_contexts=None):
    observations = []
    actions = []
    rewards = []
    agent_infos = []
    env_infos = []
    images = []
    o = env.reset(reset_args=reset_args, policy_contexts=policy_contexts)
    agent.reset()
    path_length = 0
    if animated:
        env.render()
    while path_length < max_path_length:
        a, agent_info = agent.get_action(o)
        next_o, r, d, env_info = env.step(a)
        observations.append(env.observation_space.flatten(o))
        rewards.append(r)
        actions.append(env.action_space.flatten(a))
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        if d:  # and not animated:  # TODO testing
            break
        o = next_o
        if animated:
            env.render()
            timestep = 0.05
            time.sleep(timestep / speedup)
            if save_video:
                from PIL import Image
                image = env.wrapped_env.wrapped_env.get_viewer().get_image()
                pil_image = Image.frombytes('RGB', (image[1], image[2]),
                                            image[0])
                images.append(np.flipud(np.array(pil_image)))

    if animated:
        if save_video and len(images) >= max_path_length:
            import moviepy.editor as mpy
            clip = mpy.ImageSequenceClip(images, fps=20 * speedup)
            if video_filename[-3:] == 'gif':
                clip.write_gif(video_filename, fps=20 * speedup)
            else:
                clip.write_videofile(video_filename, fps=20 * speedup)

    return dict(
        observations=tensor_utils.stack_tensor_list(observations),
        actions=tensor_utils.stack_tensor_list(actions),
        rewards=tensor_utils.stack_tensor_list(rewards),
        agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),
        env_infos=tensor_utils.stack_tensor_dict_list(env_infos),
    )
Example #10
    def movie_from_snapshots(self,
                             filename='evo_movie',
                             image_extension='png',
                             frames_per_second=4):
        '''Creates a movie from the population snapshot images.
        Assumes the images already exist in the self.image_path directory and
        all have the extension image_extension.'''

        # Get a list of the image filenames that have the extension specified by image_extension
        image_files = [
            self.image_path + '/' + f for f in listdir(self.image_path)
            if isfile(join(self.image_path, f))
            and f.split('.')[-1] == image_extension
        ]

        # Sort the files in ascending order based on iteration number
        self._sort_nicely(image_files)

        # Create the image sequence
        image_sequence = mpy.ImageSequenceClip(image_files,
                                               fps=frames_per_second)

        # Save the movie
        image_sequence.write_videofile(filename + '.mp4',
                                       fps=frames_per_second)

        return image_sequence
Example #11
def make_video(tensor, fps):
    try:
        import moviepy.editor as mpy
    except ImportError:
        print('add_video needs package moviepy')
        return
    import tempfile

    t, h, w, c = tensor.shape

    # encode sequence of images into gif string
    clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
    with tempfile.NamedTemporaryFile() as f:
        filename = f.name + '.gif'

    try:
        clip.write_gif(filename, verbose=False, progress_bar=False)
    except TypeError:
        clip.write_gif(filename, verbose=False)

    with open(filename, 'rb') as f:
        tensor_string = f.read()

    try:
        os.remove(filename)
    except OSError:
        pass

    return Summary.Image(height=h,
                         width=w,
                         colorspace=c,
                         encoded_image_string=tensor_string)
Example #12
    async def textgif(self, ctx, *, args):
        '''Turn TEXT to GIF'''
        # EC DO NOT COPY PASTE THIS INTO A BOT! I WILL KEEL YOU IF YOU DO!
        img = Image.new('RGB', (500, 45), "red")
        d = ImageDraw.Draw(img)
        c = 0
        length = len(args)
        font = ImageFont.truetype('Tabitha.ttf', 27)
        for m in range(length):
            x = 9
            d.text((x + c, 5), args[m], fill=(255, 255, 255), font=font)
            img.save('{}.png'.format(m))
            c += 12
        gif_name = 'content'
        fps = 10
        # Get all the pngs in the current directory and sort them by frame
        # number (a plain lexicographic sort would put 10.png before 2.png)
        file_list = glob.glob('*.png')
        file_list.sort(key=lambda p: int(p.split('.')[0]))
        clip = mpy.ImageSequenceClip(file_list, fps=fps)
        clip.write_gif('{}.gif'.format(gif_name), fps=fps)
        await ctx.send(file=discord.File('content.gif'))
        await ctx.message.delete()
        for f in glob.glob("*.png"):
            os.remove(f)
Example #13
    def runWithMemoryImage(self, frames, rendered_output=None):
        """Run prediction on image frames held in memory.

        frames: a list whose elements are PIL Image objects or subclasses.
        rendered_output: optional path for a video rendered with the
            prediction text overlaid.
        """
        # Make video prediction (volatile=True is gone from modern PyTorch;
        # torch.no_grad() gives the same no-autograd behavior).
        data = self.transform(frames)
        with torch.no_grad():
            input_var = data.view(-1, 3, data.size(1),
                                  data.size(2)).unsqueeze(0)
            logits = self.net(input_var)
        h_x = torch.mean(F.softmax(logits, 1), dim=0).data
        probs, idx = h_x.sort(0, True)

        # Output the prediction.
        res = dict()
        for i in range(0, 5):
            res[self.categories[idx[i]]] = "%.3f" % probs[i]
            print('{:.3f} -> {}'.format(probs[i], self.categories[idx[i]]))

        # Render output frames with prediction text.
        if rendered_output is not None:
            prediction = self.categories[idx[0]]
            rendered_frames = render_frames(frames, prediction)
            clip = mpy.ImageSequenceClip(rendered_frames, fps=4)
            clip.write_videofile(rendered_output)

        return res
Example #14
def save_gif(gif_fname, images, fps=4):
    import moviepy.editor as mpy
    head, tail = os.path.split(gif_fname)
    if head and not os.path.exists(head):
        os.makedirs(head)
    clip = mpy.ImageSequenceClip(list(images), fps=fps)
    clip.write_gif(gif_fname)
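A minimal usage sketch for save_gif, assuming images is a (T, H, W, 3) uint8 array (the shapes here are illustrative):

import numpy as np

frames = np.random.randint(0, 256, size=(16, 32, 32, 3), dtype=np.uint8)
save_gif('out/demo.gif', frames, fps=8)  # the out/ directory is created if missing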
Example #15
def infer_video(
        cfg, ckpt, video_path: str, score_threshold: float,
        dataset_type, output_path, video_images):
    assert pathlib.Path(video_path).is_file(),\
        f"Did not find video: {video_path}"
    with tempfile.TemporaryDirectory() as cache_dir:
        if video_images:
            cache_dir = video_images
        input_image_dir = pathlib.Path(cache_dir, "input_images")
        input_image_dir.mkdir(exist_ok=True)
        
        with mp.VideoFileClip(video_path) as video:
            original_fps = video.fps
            dump_frames(video, input_image_dir)

        output_image_dir = pathlib.Path(cache_dir, "video_images")
        output_image_dir.mkdir(exist_ok=True)
        
        run_demo(
            cfg, ckpt,
            score_threshold,
            pathlib.Path(input_image_dir),
            output_image_dir,
            dataset_type)

        impaths = list(output_image_dir.glob("*.png"))
        # Path.stem already excludes the extension; sort numerically on it
        impaths.sort(key=lambda impath: int(impath.stem))
        impaths = [str(impath) for impath in impaths]
        with mp.ImageSequenceClip(impaths, fps=original_fps) as video:
            video.write_videofile(output_path)
Example #16
    def run_trajectory(self):
        self.running = True
        self.start = self.recorder.get_endeffector_pos()
        print('actual end eep', self.start)
        self.move_netural(gripper_open=False)
        self.img_stack = []

        step = 0
        actions = []

        for i in range(self.EE_STEPS):
            current_eep = self.recorder.get_endeffector_pos()
            eep_diff_action, pred_final = self.query_action()
            current_eep[:2] = pred_final

            # current_eep[2] += np.sum(np.abs(current_eep[:2])) * 0.05
            self.move_to(current_eep[:3], i > 0)

        while step < self.ACTION_SEQUENCE_LENGTH:
            current_eep = self.recorder.get_endeffector_pos()

            eep_diff_action, pred_final = self.query_action()
            current_eep[:3] += 0.05 * eep_diff_action

            current_eep[2] = max(current_eep[2], self.Z_SAFETY_THRESH)

            self.move_to(current_eep[:3])

            step += 1
        self.set_weiss_griper(100.)
        print('end', self.recorder.get_endeffector_pos())
        clip = mpy.ImageSequenceClip(list(self.img_stack), fps=20)
        clip.write_gif('test_frames.gif')
        self.running = False
Example #17
    def log_paths_as_videos(self,
                            paths,
                            step,
                            max_videos_to_save=2,
                            fps=10,
                            video_title='video'):

        # reshape the rollouts
        videos = [p['image_obs'] for p in paths]

        # clamp to the number of available rollouts
        max_videos_to_save = min(max_videos_to_save, len(videos))
        # find the length of the longest rollout
        max_length = videos[0].shape[0]
        for i in range(max_videos_to_save):
            if videos[i].shape[0] > max_length:
                max_length = videos[i].shape[0]

        # pad rollouts to all be same length
        for i in range(max_videos_to_save):
            if videos[i].shape[0] < max_length:
                padding = np.tile([videos[i][-1]],
                                  (max_length - videos[i].shape[0], 1, 1, 1))
                videos[i] = np.concatenate([videos[i], padding], 0)

            clip = mpy.ImageSequenceClip(list(videos[i]), fps=fps)
            txt_clip = (mpy.TextClip(video_title, fontsize=30,
                                     color='white').set_position(
                                         'top', 'center').set_duration(10))

            video = mpy.CompositeVideoClip([clip, txt_clip])
            new_video_title = video_title + '{}_{}'.format(step, i) + '.mp4'
            filename = os.path.join(self._log_dir, new_video_title)
            video.write_videofile(filename, fps=fps)
Example #18
    def save_highres(self):

        if 'opencv_tracking' in self.agent_params:
            highres_imglist = self.add_cross_hairs(
                self.curr_traj.highres_imglist, self.curr_traj.desig_hpos_list)
        else:
            highres_imglist = self.curr_traj.highres_imglist

        if 'make_final_vid' in self.agent_params:
            writer = imageio.get_writer(self.image_folder +
                                        '/highres_traj{}.mp4'.format(self.itr),
                                        fps=10)
            # crosshairs were already added above when tracking is enabled
            print('shape highres:', highres_imglist[0].shape)
            for im in highres_imglist:
                writer.append_data(im)
            writer.close()

        if 'make_final_gif' in self.agent_params:
            im_list = [
                cv2.resize(im, (0, 0),
                           fx=0.5,
                           fy=0.5,
                           interpolation=cv2.INTER_AREA)
                for im in highres_imglist
            ]

            clip = mpy.ImageSequenceClip(im_list, fps=4)
            clip.write_gif(self.image_folder +
                           '/highres_traj{}.gif'.format(self.itr))
Example #19
def labeled_video_output(op_figs_list,
                         v_fps,
                         output_dir,
                         output_file_name="output"):
    try:
        print(print_info(), end=" ")
        print("Getting the operated frames sequence...")
        clip = mpe.ImageSequenceClip(op_figs_list, fps=v_fps)
        print(print_info(), end=" ")
        print("Operated frames sequence retrieved!")
        clip = clip.to_RGB()  # to_RGB() returns a new clip
        print(print_info(), end=" ")
        print("Clip converted to RGB!")
    except Exception:
        print(print_info("E"), end=" ")
        print(
            "Could not get the operated frames sequence. Please check again!")
        return False
    try:
        print(print_info(), end=" ")
        print("Compositing video file from operated frames sequence...")
        clip.write_videofile(os.path.join(output_dir,
                                          output_file_name + ".mp4"),
                             fps=v_fps,
                             audio=False)
        print(print_info(), end=" ")
        print("Video file composited at '{}'!".format(
            os.path.join(output_dir, output_file_name + ".mp4")))
    except Exception:
        print(print_info("E"), end=" ")
        print("Could not composite the video file. Please check again!")
        return False
    return True
Example #20
def make_gif(root, output, fps):
    file_list = glob.glob(root + '/*.png')

    file_list.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))

    clip = mpy.ImageSequenceClip(file_list, fps=fps)
    clip.write_gif(output, fps=fps)
Example #21
def encode_gif(im_seq, tag, fps):
    """
    Given a 4D numpy tensor of images, encodes as a gif.
    """
    with tempfile.NamedTemporaryFile() as f:
        fname = f.name + '.gif'
    clip = mpy.ImageSequenceClip(list(im_seq), fps=fps)
    clip.write_gif(fname, verbose=False, progress_bar=False)

    with open(fname, 'rb') as f:
        enc_gif = f.read()
    os.remove(fname)

    # create a tensorflow image summary protobuf:
    im_summ = tf.Summary.Image()
    im_summ.height = im_seq.shape[1]
    im_summ.width = im_seq.shape[2]
    im_summ.colorspace = 3
    im_summ.encoded_image_string = enc_gif

    # create a summary obj:
    summ = tf.Summary()
    summ.value.add(tag=tag, image=im_summ)
    summ_str = summ.SerializeToString()
    return summ_str
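The serialized string returned by encode_gif plugs straight into the TF1 event-writing API. A sketch of writing it to disk, assuming im_seq is a (T, H, W, 3) uint8 array (the log directory and tag are illustrative):

import tensorflow as tf  # TF1-era API, matching the snippet above

writer = tf.summary.FileWriter('/tmp/gif_logs')
writer.add_summary(encode_gif(im_seq, tag='rollout', fps=10), global_step=0)
writer.close()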
Example #22
def save_as_gif(filename,
                images,
                fps=10,
                fuzz=1,
                normalize=False,
                src_min=None,
                src_max=None):
    import moviepy.editor as mpy
    assert images.ndim == 4  # NHWC

    def normalize_tensor(tensor,
                         dst_min=0,
                         dst_max=1.0,
                         src_min=None,
                         src_max=None):
        if src_min is None:
            src_min = tensor.min()
        if src_max is None:
            src_max = tensor.max()
        alpha = (dst_max - dst_min) / (src_max - src_min + 1e-8)
        dst_tensor = alpha * (tensor - src_min) + dst_min
        return dst_tensor

    if normalize:
        images = normalize_tensor(images,
                                  dst_min=0,
                                  dst_max=255,
                                  src_min=src_min,
                                  src_max=src_max)
    images = images.astype(np.uint8)
    image_list = list(images)
    clip = mpy.ImageSequenceClip(image_list, fps=fps)
    clip.write_gif(filename, fps=fps, fuzz=fuzz)
    print('Save {} images as gif at {}'.format(len(image_list), filename))
Example #23
def make_GAN_movie():
    import os
    import moviepy.editor as me

    song = 'Pokémon Theme Song.mp3'
    song2 = 'Battle Music.mp3'
    song3 = 'A Rival Appears! [Pokémon Red & Blue].mp3'
    song4 = 'Pokemon Omega RubyAlpha Sapphire - Battle! Rival Music (HQ).mp3'

    folder = 'GAN_images/'
    save_folder = 'media/'

    files = os.listdir(folder)
    files = [x.split('.')[0] for x in files]
    files = [int(x) for x in files]
    files.sort()

    file_names = [folder + str(x) + '.png' for x in files]
    duration = len(file_names) / 24

    audio = me.AudioFileClip(save_folder + song4).set_duration(duration)

    clip = me.ImageSequenceClip(file_names, fps=24).set_duration(duration)

    final_clip = clip.set_audio(audio)

    final_clip.write_videofile(save_folder + "movie.mp4",
                               temp_audiofile="temp-audio.m4a",
                               remove_temp=True,
                               codec="libx264",
                               audio_codec="aac")
Example #24
def convert_tensor_to_gif_summary(summ):
    if isinstance(summ, bytes):
        summary_proto = tf.Summary()
        summary_proto.ParseFromString(summ)
        summ = summary_proto

    summary = tf.Summary()
    for value in summ.value:
        tag = value.tag
        images_arr = tf.make_ndarray(value.tensor)

        if len(images_arr.shape) == 5:
            # concatenate batch dimension horizontally
            images_arr = np.concatenate(list(images_arr), axis=-2)
        if len(images_arr.shape) != 4:
            raise ValueError('Tensors must be 4-D or 5-D for gif summary.')
        if images_arr.shape[-1] != 3:
            raise ValueError('Tensors must have 3 channels.')

        # encode sequence of images into gif string
        clip = mpy.ImageSequenceClip(list(images_arr), fps=4)
        with tempfile.NamedTemporaryFile() as f:
            filename = f.name + '.gif'
        clip.write_gif(filename, verbose=False)
        with open(filename, 'rb') as f:
            encoded_image_string = f.read()

        image = tf.Summary.Image()
        image.height = images_arr.shape[-3]
        image.width = images_arr.shape[-2]
        image.colorspace = 3  # code for 'RGB'
        image.encoded_image_string = encoded_image_string
        summary.value.add(tag=tag, image=image)
    return summary
Example #25
def build_gif(gif_name="output"):
    fps = 12
    # get all png files in directory
    file_list = glob.glob("./output/*.png")
    list.sort(file_list, key=lambda x: int(x.split(".")[1].split("/")[2]))
    clip = mpy.ImageSequenceClip(file_list, fps=fps)
    clip.write_gif("{}.gif".format(gif_name), fps=fps)
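The sort key above hard-codes the './output/NNN.png' layout (split on "." then "/"). A slightly more robust sketch sorts on the numeric basename regardless of directory depth:

import glob
import os

file_list = sorted(
    glob.glob('./output/*.png'),
    key=lambda p: int(os.path.splitext(os.path.basename(p))[0]))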
Example #26
    def run_trajectory(self):
        self.start = self.recorder.get_endeffector_pos()
        print('actual end eep', self.start)

        self.move_neutral()

        self.img_stack = []

        step = 0
        actions = []

        current_eep = self.recorder.get_endeffector_pos()
        eep_diff_action, predicted_eep = self.query_action(current_eep)
        predicted_eep[2] = max(predicted_eep[2], 0.25)
        current_eep[:3] = predicted_eep[:3]

        self.move_to(current_eep)

        while step < self.action_sequence_length:
            print(step)
            self.control_rate.sleep()
            current_eep = self.recorder.get_endeffector_pos()
            eep_diff_action, predicted_eep = self.query_action(current_eep)

            self.move_to(current_eep[:3] + eep_diff_action[:3])

            step += 1
        print('end', self.recorder.get_endeffector_pos())
        clip = mpy.ImageSequenceClip([cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in self.img_stack], fps=20)
        clip.write_gif('test_frames.gif')

        self.move_neutral()
Example #27
def create_gif(name):
    gif_name = 'web/img/' + name
    fps = 24
    file_list = glob.glob('*' + name + '.png')
    file_list.sort(key=lambda x: int(x.split(name + '.png')[0]))
    clip = mpy.ImageSequenceClip(file_list, fps=fps)
    clip.write_gif('{}.gif'.format(gif_name), fps=fps)
Example #28
def convert_array_to_gif_summary(images_arr, tag, fps):

    summary = tf.Summary()

    if len(images_arr.shape) == 5:
        # concatenate batch dimension horizontally
        images_arr = np.concatenate(list(images_arr), axis=-2)
    if len(images_arr.shape) != 4:
        raise ValueError('Tensors must be 4-D or 5-D for gif summary.')
    if images_arr.shape[-1] != 3:
        raise ValueError('Tensors must have 3 channels.')

    # encode sequence of images into gif string
    clip = mpy.ImageSequenceClip(list(images_arr), fps=fps)
    with tempfile.NamedTemporaryFile() as f:
        filename = f.name + '.gif'
    clip.write_gif(filename, verbose=False, program='ffmpeg')
    with open(filename, 'rb') as f:
        encoded_image_string = f.read()

    image = tf.Summary.Image()
    image.height = images_arr.shape[-3]
    image.width = images_arr.shape[-2]
    image.colorspace = 3  # code for 'RGB'
    image.encoded_image_string = encoded_image_string
    summary.value.add(tag=tag, image=image)
    return summary
Example #29
def make_movie(frames, songfile, outfile, duration=None, frameLength=512):
    aud = mpy.AudioFileClip(songfile, fps=44100)
    if duration:
        aud = aud.set_duration(duration)
    clip = mpy.ImageSequenceClip(frames, fps=22050 / frameLength)
    clip = clip.set_audio(aud)
    clip.write_videofile(outfile, audio_codec='aac')
Example #30
def _run(env, nec, video_f=None, maxlen=-1):
    obs, rs, done = [env.reset()], [], False
    for i in itertools.count():
        ac, _ = nec.policy(obs[-1])
        ob, r, done, _ = env.step(ac)

        obs.append(ob)
        rs.append(r)

        if done or (maxlen != -1 and i > maxlen):
            break

    if not done:
        raise RuntimeError(
            'the policy seems stuck; most likely it never hit the darn start button!'
        )

    if video_f is not None:
        frames = np.stack([np.array(ob)[:, :, 0] for ob in obs], axis=0)
        frames = [(f * 255.).astype(np.uint8)
                  for f in frames]  # convert to uint8
        frames = [np.tile(f[:, :, None], (1, 1, 3))
                  for f in frames]  # convert to 3-channel image

        import moviepy.editor as mpy
        clip = mpy.ImageSequenceClip(frames, fps=30)
        clip.write_videofile(video_f,
                             verbose=False,
                             ffmpeg_params=['-y'],
                             progress_bar=False)

    return len(obs), np.sum(rs)