Example No. 1
    def write_env(self, env, policy=None, step=None, fps=None, speed=1.0):
        """
        Writes a GIF of a single episode of `env`, played by greedily following `policy`, to TensorBoard.

        Important: given an `env` state/observation, `policy` must return a tensor of shape [n, a, 1], where n=batch_size and a=env.action_space.n.

        Args:
            env (Gym Env): OpenAI Gym environment.
            policy (function, optional): Policy to be followed. Defaults to random policy.
            step (int, optional): Current step/epoch number in the training loop. Defaults to `self.step`.
            fps (int, optional): Frames per second of the resulting GIF. Defaults to `env.metadata['video.frames_per_second']`.
            speed (float, optional): Speed multiplier for the rendered GIF (e.g. speed=2.0 doubles the playback speed). Defaults to 1.0.
        """

        state = env.reset()
        steps, rewards = [], []
        while True:
            steps.append(env.render('rgb_array'))
            if policy is not None:
                action = policy(np.expand_dims(state, axis=0))[0]
                action = np.argmax(action)
            else:
                action = env.action_space.sample()
            state, reward, done, _ = env.step(action)
            rewards.append(reward)
            if done:
                break

        if fps is None:
            try:
                fps = env.metadata['video.frames_per_second']
            except KeyError:
                fps = 24
        fps = int(fps * speed)

        # from https://github.com/tensorflow/tensorboard/issues/39#issuecomment-568917607
        thwc = env.render('rgb_array').shape
        im_summ = tf.compat.v1.Summary.Image()
        im_summ.height, im_summ.width = thwc[0], thwc[1]
        im_summ.colorspace = 3  # fix to 3 for RGB
        with tempfile.NamedTemporaryFile() as f:
            fname = f.name + '.gif'
        clip = ImageSequenceClip(steps, fps=fps)
        clip.write_gif(fname, verbose=False, logger=None)
        with open(fname, 'rb') as f:
            enc_gif = f.read()
        os.remove(fname)

        im_summ.encoded_image_string = enc_gif
        # create a serialized summary obj:
        gif = tf.compat.v1.Summary()
        env_name = env.unwrapped.spec.id
        gif.value.add(image=im_summ,
                      tag=f'{env_name}/ Rewards: {sum(rewards)}')

        if step is None: step = self.step
        with self.writer.as_default():
            tf.summary.experimental.write_raw_pb(gif.SerializeToString(),
                                                 step=step)
        env.close()
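The shape contract in the docstring above ([n, a, 1]) means a model that outputs plain per-action scores of shape [n, a] has to be wrapped before being passed as `policy`. A minimal sketch of such an adapter, assuming a hypothetical `model` callable and a logger object that owns `write_env`:

import numpy as np

def greedy_policy(model):
    """Hypothetical adapter: add the trailing axis write_env expects ([n, a, 1])."""
    def policy(batched_obs):
        scores = np.asarray(model(batched_obs))   # assumed shape [n, a]
        return scores[..., np.newaxis]            # shape [n, a, 1]
    return policy

# Illustrative call (logger is assumed to be the object defining write_env):
# logger.write_env(env, policy=greedy_policy(model), speed=2.0)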
Example No. 2
def make_gif(policy, env, step_count, state_filter, maxsteps=1000):
    envname = env.spec.id
    gif_name = '_'.join([envname, str(step_count)])
    state = env.reset()
    done = False
    steps = []
    rewards = []
    t = 0
    while (not done) and (t < maxsteps):
        s = env.render('rgb_array')
        steps.append(s)
        action = policy.get_action(state,
                                   state_filter=state_filter,
                                   deterministic=True)
        action = np.clip(action, env.action_space.low[0],
                         env.action_space.high[0])
        action = action.reshape(len(action), )
        state, reward, done, _ = env.step(action)
        rewards.append(reward)
        t += 1
    print('Final reward :', np.sum(rewards))
    clip = ImageSequenceClip(steps, fps=30)
    if not os.path.isdir('gifs'):
        os.makedirs('gifs')
    clip.write_gif('gifs/{}.gif'.format(gif_name), fps=30)
Example No. 3
def CreateGif(filename, array, fps=5, scale=1., fmt='gif'):
    ''' Create and save a gif or video from an array of images.
        Parameters:
            * filename (string): name of the saved video
            * array (list or string): list of image file names already in order; if a string, it is taken as the common prefix of the image names (before the iteration integer)
            * fps = 5 (integer): frames per second (the human eye blends at roughly 15)
            * scale = 1. (float): ratio by which to scale image height and width
            * fmt (string): file extension of the gif/video (e.g. 'gif', 'mp4' or 'avi')
        Return:
            * moviepy clip object
    '''
    if isinstance(array, str):
        arrsize = len(gb.glob(array + '*.png'))
        array = [array + str(i) + '.png' for i in range(arrsize)]
    from moviepy.editor import ImageSequenceClip
    filename += '.' + fmt
    clip = ImageSequenceClip(list(array), fps=fps).resize(scale)
    if (fmt == 'gif'):
        clip.write_gif(filename, fps=fps)
    elif (fmt == 'mp4'):
        clip.write_videofile(filename, fps=fps, codec='mpeg4')
    elif (fmt == 'avi'):
        clip.write_videofile(filename, fps=fps, codec='png')
    else:
        print('Error! Wrong File extension.')
        sys.exit()
    command = os.popen('du -sh %s' % filename)
    print(command.read())
    return clip
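Two hypothetical calls to the helper above, covering both accepted forms of `array` (an explicit, ordered list of image paths, or a filename prefix that is expanded to prefix0.png, prefix1.png, ...):

# Explicit list of frames, written as an mp4
CreateGif('orbit', ['orbit_a.png', 'orbit_b.png', 'orbit_c.png'], fps=2, fmt='mp4')

# Filename prefix: picks up frame0.png, frame1.png, ... from the working directory
CreateGif('animation', 'frame', fps=10, scale=0.5, fmt='gif')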
Example No. 4
 def export_action(self):
     #get the folder containing the images using a file dialog
     dlg = QFileDialog()
     dlg.setFileMode(QFileDialog.Directory)
     folderLocs = QStringList()  # create a QStringList to hold the location data
     if dlg.exec_():
         folderLocs = dlg.selectedFiles()
         for folderLoc in folderLocs:
             #Export the recorder images as video using ffmpeg
             print("Grabbing images from: ", str(folderLoc))
             folderName = str(folderLoc)[-8:]  # folder name: last 8 characters of the directory path
             try:
                 clip = ImageSequenceClip(str(folderLoc),
                                          fps=int(self.writeFPS))
                 print("Writing file...")
                 if self.writeMethod == 0:
                     clip.write_videofile(self.localDir + "/exported/" +
                                          folderName + "_" +
                                          str(self.writeName) + ".avi",
                                          codec=str(self.writeCodec))
                 elif self.writeMethod == 1:
                     clip.write_gif(self.localDir + "/exported/" +
                                    folderName + "_" + str(self.writeName) +
                                    ".gif",
                                    program=str(self.writeProgram))
             except Exception as e:
                 print("Writing exception check settings.\nException:" +
                       str(e))
                 errorBox("Writing exception check settings.\nException:" +
                          str(e))
Example No. 5
def make_gif_of_traj(filename, output_filename, raw=True, fps=5):
    file = dd.io.load(filename)
    traj_len = len(file)
    print(f'trajectory length is {traj_len}')
    traj_images = []
    a = attrdict.AttrDict({'mean': 0, 'scale': 1})
    for i in range(1, traj_len):
        data = file[i]
        act = obs_to_action(data, file[i - 1], a)
        sorted_act = sorted(act, key=np.abs)
        if np.abs(sorted_act[0]) == 0 and sorted_act[1] == 0:
            print('alert alert')
        print(act)
        fig, axs = plot_images(data, raw=raw)
        fig.canvas.draw()
        image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
        image = image.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
        traj_images.append(image)
    clip = ImageSequenceClip(traj_images, fps=fps)
    print(f'Writing out to {output_filename}.mp4')
    clip.write_videofile(f'{output_filename}.mp4', fps=fps)
    print(f'Writing out to {output_filename}.gif')
    clip.write_gif(f'{output_filename}.gif',
                   fps=fps,
                   program='imageio',
                   opt='wu')
Example No. 6
  def play(self, env, directory, mode):
    """ Returns the total reward for an episode of the game."""
    steps = []
    state = env.reset()
    done = False
    tot_reward = 0
    actions = [0] * self.actions
    while not done:
      if mode != "Train":
        s = env.render("rgb_array")
        steps.append(s)

      action = self.predict_action(state)
      actions[action] += 1
      state, reward, done, _ = env.step(action)
      tot_reward += reward
    self.cur_reward = tot_reward
    if mode != "Train" and tot_reward > self.max_reward:
      print("New high reward: ", tot_reward)
      clip = ImageSequenceClip(steps, fps=30)
      clip.write_gif("~/breakout.gif", fps=30)
      self.max_reward = tot_reward

    print("ACTIONS TAKEN", actions)
    return tot_reward
Example No. 7
    def reset(self):
        # clear all local memory
        self.seen_observations = []  # state of the environment
        self.seen_values = []  # corresponding estimated values (given by network)
        self.seen_policies = []  # policies predicted by the network
        self.seen_states = []  # state of the model
        self.seen_actions = []  # actions taken
        self.seen_rewards = []  # rewards given

        # reset n-step reward calculation
        self.n_step_reward = 0  # reward for n consecutive steps

        # reset environment
        self.observation = self.env.reset()
        self.observation = self.brain.preprocess(self.observation)
        self.seen_observations = [self.observation]

        # reset model
        self.state = self.brain.get_initial_state()
        self.seen_states = [self.state]

        self.total_reward = 0

        if self.vis:
            if len(self.frames) > 0:
                clip = ImageSequenceClip(self.frames, fps=self.vis_fps)
                if os.path.exists(params.VIDEO_OUT_DIR):
                    clip.write_gif(params.VIDEO_OUT_DIR + "/" +
                                   str(self.num_episodes) + ".gif")
                else:
                    print("video export directory not found")

            self.frames = []
Example No. 8
def main(args):
    model = ImageGPT.load_from_checkpoint(args.checkpoint).gpt.cuda()
    model.eval()

    context = torch.zeros(0, dtype=torch.long).cuda()

    frames = generate(model,
                      context,
                      28 * 28,
                      num_samples=args.rows * args.cols)

    pad_frames = []
    for frame in frames:
        pad = ((0, 0), (0, 28 * 28 - frame.shape[1]))
        pad_frames.append(np.pad(frame, pad_width=pad))

    pad_frames = np.stack(pad_frames)
    f, n, _ = pad_frames.shape
    pad_frames = pad_frames.reshape(f, args.rows, args.cols, 28, 28)
    pad_frames = pad_frames.swapaxes(2, 3).reshape(f, 28 * args.rows,
                                                   28 * args.cols)
    pad_frames = pad_frames[..., np.newaxis] * np.ones(3) * 17
    pad_frames = pad_frames.astype(np.uint8)

    clip = ImageSequenceClip(list(pad_frames)[::args.downsample],
                             fps=args.fps).resize(args.scale)
    clip.write_gif("out.gif", fps=args.fps)
Example No. 9
def gif(filename, array, fps=10, scale=1.0):
    """Creates a gif given a stack of images using moviepy
    Notes
    -----
    works with current Github version of moviepy (not the pip version)
    https://github.com/Zulko/moviepy/commit/d4c9c37bc88261d8ed8b5d9b7c317d13b2cdf62e
    Usage
    -----
    >>> X = randn(100, 64, 64)
    >>> gif('test.gif', X)
    Parameters
    ----------
    filename : string
        The filename of the gif to write to
    array : array_like
        A numpy array that contains a sequence of images
    fps : int
        frames per second (default: 10)
    scale : float
        how much to rescale each image by (default: 1.0)
    """

    # ensure that the file has the .gif extension
    fname, _ = os.path.splitext(filename)
    filename = fname + '.gif'

    # copy into the color dimension if the images are black and white
    if array.ndim == 3:
        array = array[..., np.newaxis] * np.ones(3)

    # make the moviepy clip
    clip = ImageSequenceClip(list(array), fps=fps).resize(scale)
    clip.write_gif(filename, fps=fps)
    clip.write_videofile(fname + ".mp4", fps=fps)
    return clip
Example No. 10
    def play(self, env, directory, mode):
        """ Returns the total reward for an episode of the game."""
        steps = []
        state = env.reset()
        done = False
        tot_reward = 0
        actions = [0] * self.actions
        while not done:
            if mode != "Train":
                s = env.render("rgb_array")
                steps.append(s)

            action = self.predict_action(state)
            actions[action] += 1
            state, reward, done, _ = env.step(action)
            tot_reward += reward
        self.cur_reward = tot_reward
        if mode != "Train" and tot_reward > self.max_reward:
            print("New high reward: ", tot_reward)
            clip = ImageSequenceClip(steps, fps=30)
            clip.write_gif("~/breakout.gif", fps=30)
            self.max_reward = tot_reward

        print("ACTIONS TAKEN", actions)
        return tot_reward
Example No. 11
def save_gif(stack, filename):
    fps = 8
    scale = 1.0
    assert len(stack.shape) == 3
    stack = stack[..., np.newaxis] * np.ones(3)  # RGB
    clip = ImageSequenceClip(list(stack), fps=fps).resize(scale)
    clip.write_gif(filename, fps=fps, verbose=False)
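A quick hypothetical call for `save_gif` above; it expects a 3-D grayscale stack (frames, height, width) with values in 0-255, which it broadcasts to RGB before writing:

import numpy as np

frames = (np.random.rand(40, 64, 64) * 255).astype(np.uint8)  # 40 random 64x64 frames
save_gif(frames, 'noise.gif')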
Example No. 12
def get_output(video_path,
               out_filename,
               label,
               fps=30,
               font_size=20,
               font_color='white',
               resize_algorithm='bicubic',
               use_frames=False):
    """Get demo output using ``moviepy``.

    This function will generate video file or gif file from raw video or
    frames, by using ``moviepy``. For more information of some parameters,
    you can refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path or the rawframes directory path.
            If ``use_frames`` is set to True, it should be rawframes directory
            path. Otherwise, it should be video file path.
        out_filename (str): Output filename for the generated file.
        label (str): Predicted label of the generated file.
        fps (int): Number of picture frames to read per second. Default: 30.
        font_size (int): Font size of the label. Default: 20.
        font_color (str): Font color of the label. Default: 'white'.
        resize_algorithm (str): The algorithm used for resizing.
            Default: 'bicubic'. For more information,
            see https://ffmpeg.org/ffmpeg-scaler.html.
        use_frames (bool): Whether to use rawframes as input. Default: False.
    """

    try:
        from moviepy.editor import (ImageSequenceClip, TextClip, VideoFileClip,
                                    CompositeVideoClip)
    except ImportError:
        raise ImportError('Please install moviepy to enable output file.')

    if use_frames:
        frame_list = sorted(
            [osp.join(video_path, x) for x in os.listdir(video_path)])
        video_clips = ImageSequenceClip(frame_list, fps=fps)
    else:
        video_clips = VideoFileClip(
            video_path, resize_algorithm=resize_algorithm)

    duration_video_clip = video_clips.duration
    text_clips = TextClip(label, fontsize=font_size, color=font_color)
    text_clips = (
        text_clips.set_position(
            ('right', 'bottom'),
            relative=True).set_duration(duration_video_clip))

    video_clips = CompositeVideoClip([video_clips, text_clips])

    out_type = osp.splitext(out_filename)[1][1:]
    if out_type == 'gif':
        video_clips.write_gif(out_filename)
    else:
        video_clips.write_videofile(out_filename, remove_temp=True)
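Two hypothetical invocations of `get_output`, one from a video file and one from a rawframes directory (file names are placeholders):

# Overlay the predicted label on a video and write an annotated gif
get_output('demo.mp4', 'demo_labelled.gif', label='playing guitar')

# Build the clip from extracted frames instead of a video file
get_output('rawframes/demo/', 'demo_labelled.mp4', label='playing guitar',
           fps=24, use_frames=True)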
Example No. 13
def _make_gif(agent, name, epsilon, n_seconds):
    fps = 15
    total_frames = n_seconds * fps
    frames = []
    for _ in range(total_frames):
        frames.append(agent.env.render(mode='rgb_array'))
        agent.step(epsilon)
        # the agent resets itself and the environment when done
    g = ImageSequenceClip(frames, fps=fps)
    g.write_gif(os.path.join('gifs', f'{name}.gif'), fps=fps)
Example No. 14
def make_gif(frames, out_fn):
    """
    :param frames: list of .png files representing frames in the animation
    :param out_fn: name of the output subfolder (must exist inside 'outputs/'); also used as the gif's base name
    :return: None; writes a .gif showing the simulated path as an animation
    """
    ofn = 'outputs/' + out_fn + '/'
    clip = ImageSequenceClip(frames, fps=4)
    clip.write_gif(ofn + out_fn + '.gif')
    clip.close()
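A hypothetical call for `make_gif` above, assuming the frames were already rendered into outputs/run1/:

frames = ['outputs/run1/frame_000.png',
          'outputs/run1/frame_001.png',
          'outputs/run1/frame_002.png']
make_gif(frames, 'run1')  # writes outputs/run1/run1.gif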
Example No. 15
def create_movie(file_path_list,
                 movie_name_list,
                 movie_path,
                 file_type='png',
                 fps=1,
                 movie_type='mp4',
                 codec='mpeg4'):
    """
    creates the movie.

    can support any type supported by ffmpeg
    some examples:
    movie type / codec
    .avi / rawvideo, png
    .mp4 / libx264, mpeg4
    avi/rawvideo supported by libreoffice
    mp4 supported by windows media player

    Parameters
    ----------
    file_path_list : list of str
        List of folders where to find the images for the movies
    movie_name_list : list of str
        List of movies to create_movie
    movie_path : str
        path where to store the movies
    file_type : str
        the individual images file type
    fps : int
        the frames per second
    movie_type : str
        the type of movie file
    codec : str
        the codec used for the movie

    Returns
    -------
    Nothing

    """
    for movie_name, file_path in zip(movie_name_list, file_path_list):
        file_list = sorted(glob.glob(file_path + '*.' + file_type))
        print(file_list)

        # Generate clip
        clip = ImageSequenceClip(file_list, fps=fps)
        # Write out clip
        if not os.path.isdir(movie_path):
            os.makedirs(movie_path)
        clip.write_videofile(movie_path + movie_name + '.' + movie_type,
                             codec=codec)
        clip.write_gif(movie_path + movie_name + '.gif')

        print('Created movie ' + movie_path + movie_name)
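A hypothetical call for `create_movie` above: one movie per folder of numbered pngs, written to movies/ as an mp4 plus a gif preview:

create_movie(file_path_list=['frames/run1/', 'frames/run2/'],
             movie_name_list=['run1', 'run2'],
             movie_path='movies/',
             file_type='png',
             fps=2)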
Example No. 16
 def _close(self):
     if len(self._snapshots) == 0:
         return
     filename = 'render_' + str(self._episode) + '.gif'
     clip = ImageSequenceClip(self._snapshots, fps=30)
     source_folder = "/tmp/"
     local_filename = source_folder+filename
     print("writing file to:"+local_filename)
     clip.write_gif(local_filename, fps=30, logger=None)
     print("uploading file")
     self._uploader.upload(source_folder, filename)
Example No. 17
def make_spread_gif():
    """
    Creates and saves gif from images generated by make_spread().
    """
    # read images
    images = [imread('../figs/spread/' + file)
              for file in sorted(os.listdir(path='../figs/spread/'))
              if file != '.gitkeep']

    # make clip and save
    clip = ImageSequenceClip(images, fps=5)
    clip.write_gif('../spread.gif')
Example No. 18
def generador(lista_estados, cf, cc):
    imagenes = []
    for md in lista_estados:
        mi = np.zeros([cf, cc, 3], dtype=np.uint8)
        for i in range(cf):
            for j in range(cc):
                mi[i, j] = [255, 128, 0] if md[i, j] else [0, 0, 255]
        imagenes.append(mi)
    clip = ImageSequenceClip(imagenes, fps=1)  # fps = 1 means one frame per second
    clip.write_gif('automata.gif')
    clip.close()
    return
Example No. 19
def to_gif(filename, array, fps=12, scale=1.0):
    fname, _ = os.path.splitext(filename)
    filename = fname + '.gif'

    # copy into the color dimension if the images are black and white
    if array.ndim == 3:
        array = array[..., np.newaxis] * np.ones(3)

    # make the moviepy clip
    clip = ImageSequenceClip(list(array), fps=fps)
    clip = clip.resize(scale)  # resize returns a new clip; keep the result
    clip.write_gif(filename, fps=fps)
    return clip
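Note that MoviePy clip transformations such as `resize` return a new clip rather than mutating in place, which is why the result is reassigned above. A hypothetical call, assuming `frames` is an (n, h, w) or (n, h, w, 3) numpy array:

import numpy as np

frames = (np.random.rand(24, 80, 80) * 255).astype(np.uint8)
to_gif('pulse', frames, fps=12, scale=0.5)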
Example No. 20
def main():
    parser = argparse.ArgumentParser(description='Create driving video.')
    parser.add_argument(
        'image_folder',
        type=str,
        default='',
        help=
        'Path to image folder. The video will be created from these images.')
    parser.add_argument('--fps',
                        type=int,
                        default=60,
                        help='FPS (Frames per second) setting for the video.')
    parser.add_argument('--ext',
                        type=str,
                        default='mp4',
                        help='Video format : mp4 or gif')
    parser.add_argument(
        '--cleanup',
        action='store_true',
        help='if present, then remove the images folder after video conversion'
    )
    args = parser.parse_args()

    # convert file folder into a list filtered for image file types
    image_list = sorted([
        os.path.join(args.image_folder, image_file)
        for image_file in os.listdir(args.image_folder)
    ])

    image_list = [
        image_file for image_file in image_list
        if os.path.splitext(image_file)[1][1:].lower() in IMAGE_EXT
    ]

    print("Creating video {}, FPS={}".format(args.image_folder, args.fps))

    if args.ext == 'mp4':
        clip = ImageSequenceClip(image_list, fps=args.fps)
        clip.write_videofile(args.image_folder + '.mp4')
    else:
        clip = ImageSequenceClip(image_list[::3],
                                 fps=args.fps)  # keep gif file small
        clip = clip.resize(width=320)
        clip.write_gif(args.image_folder + '.gif',
                       program='ffmpeg',
                       fps=clip.fps)

    if args.cleanup:
        for f in os.listdir(args.image_folder):
            if f.endswith('.jpg'):
                os.remove(f'{args.image_folder}/{f}')
Example No. 21
 def generate_gif(self, filename=defaults.filename):
     state = self.env.reset()
     frames = []
     for _ in range(200):
         action = self.act(state, False)
         frame = self.env.render("rgb_array")
         next_state, reward, done, _ = self.env.step(action)
         frames.append(frame)
         if done:
             break
         else:
             state = next_state
     gif = ImageSequenceClip(frames, fps=30)
     gif.write_gif("gifs/{}.gif".format(filename), program="imageio")
Example No. 22
def random_agent():
    all_states = []
    for i in range(1):

        env = gym.make("Tag-v0")
        env.set_config(my_config)
        for j in range(360):
            obs, rew, _, _ = env.step(3)
            all_states.append(obs)
            print(rew)

        clip = ImageSequenceClip(all_states, fps=60)
        clip.write_gif(action_name[i] + str(my_config["map"]) + ".gif", fps=60)
        all_states = []
Example No. 23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--cfg-path", type=str, default="conf/ant.yaml")
    parser.add_argument("--ckpt-path", type=str)
    parser.add_argument("--save-gif", action="store_true")
    args = parser.parse_args()

    with open(args.cfg_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    env = builder.build_env(config["env"])
    agent_ids = env.get_agent_ids()

    if args.save_gif:
        run_num = args.ckpt_path.split("/")[-3]
        save_dir = f"test_gif/{run_num}/"
        os.makedirs(save_dir)

    network = builder.build_network(config["network"])
    network.load_state_dict(torch.load(args.ckpt_path))
    for i in range(100):
        models = {}
        for agent_id in agent_ids:
            models[agent_id] = deepcopy(network)
            models[agent_id].eval()
            models[agent_id].reset()
        obs = env.reset()

        done = False
        episode_reward = 0
        ep_step = 0
        ep_render_lst = []
        while not done:
            actions = {}
            for k, model in models.items():
                s = obs[k]["state"][np.newaxis, ...]
                actions[k] = model(s)
            obs, r, done, _ = env.step(actions)
            rgb_array = env.render()
            if args.save_gif:
                ep_render_lst.append(rgb_array)
            episode_reward += r
            ep_step += 1
        print("reward: ", episode_reward, "ep_step: ", ep_step)
        if args.save_gif:
            clip = ImageSequenceClip(ep_render_lst, fps=30)
            clip.write_gif(save_dir + f"ep_{i}.gif", fps=30)
        del ep_render_lst
Example No. 24
def main(args):
    images_dir = args[1]
    imgs_count = int(args[2])
    fps = int(args[3])
    format = args[4]

    images = gen_filenames(images_dir, imgs_count)
    crop_images(images)

    clip = ImageSequenceClip(images, fps=fps)

    if format == "gif":
        clip.write_gif('giffer' + '.gif')
    else:
        clip.write_videofile('giffer_mov' + '.mp4')
Example No. 25
def make_canvas_gif():
    """
    Creates and saves gif from images generated by make_canvas().
    """
    # read images
    images = [imread('../figs/canvas/' + file)
              for file in sorted(os.listdir(path='../figs/canvas/'))
              if file != '.gitkeep']
    # durations of frames in log time
    durations = list(np.diff(np.log(4 + np.arange(len(images)))))

    # make clip and save as gif
    clip = ImageSequenceClip(images, durations=durations)
    clip.fps = 25
    clip.write_gif('../canvas.gif')
Example No. 26
def gen_gif(imgs_path, save_path, fps):
    '''  Save a sequence of consecutive images as an animated gif
    :param imgs_path: path to the images, e.g. "G:\\UCLA\\png"
    :param save_path: path of the output gif (including the file name .gif), e.g. "G:\\UCLA\\ucla.gif"
    :param fps: frames per second of the output gif
    :return: a gif file written at save_path
    '''

    image_list = []
    for im in sorted(os.listdir(imgs_path)):  # os.listdir order is arbitrary; sort to keep the frames in sequence
        image_list.append(os.path.join(imgs_path, im))

    clip = ImageSequenceClip(image_list, fps=fps)
    clip.write_gif(save_path, fps=fps)
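A hypothetical call for `gen_gif` above; the image file names in `imgs_path` are expected to sort into the intended frame order:

gen_gif('frames/', 'frames.gif', fps=10)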
Example No. 27
def create_gif(filename, array, fps=10, scale=1.0):
    """
    Source: https://gist.github.com/nirum/d4224ad3cd0d71bfef6eba8f3d6ffd59
    """
    # ensure that the file has the .gif extension
    fname, _ = os.path.splitext(filename)
    filename = fname + '.gif'

    # copy into the color dimension if the images are black and white
    if array.ndim == 3:
        array = array[..., np.newaxis] * np.ones(3)

    # make the moviepy clip
    clip = ImageSequenceClip(list(array), fps=fps).resize(scale)
    clip.write_gif(filename, fps=fps)
    return clip
Example No. 28
def gif(filename, array, fps=10, scale=1.0):
    # ensure that the file has the .gif extension
    fname, _ = os.path.splitext(filename)
    filename = fname + '.gif'
    filename_v = fname + '.mp4'

    # copy into the color dimension if the images are black and white
    if array.ndim == 3:
        array = array[..., np.newaxis] * np.ones(3)

    # make the moviepy clip
    clip = ImageSequenceClip(list(array), fps=fps).resize(scale)
    clip.write_gif(filename, fps=fps)
    clip.write_videofile(filename_v, fps=fps)

    return clip
Example No. 29
def visualize_all_activations_3d(model, destination_path):
    """
    Get all activations of all Conv3D layers for each filter
    Saving 3D activations in destination_path as .gif files in
    separate folders for each convolutional layer during the process
    :param model: keras sequential model with Conv3D layers
    :param destination_path: path where to save gifs
    :return: all activations from all layers
    """

    # getting indices of all convolutional layers
    conv_layer_indices = []
    number_of_layers = len(model.layers)
    for i in range(number_of_layers):
        if type(model.layers[i]) is keras.layers.convolutional.Conv3D:
            conv_layer_indices.append(i)

    # getting number of convolutional layers
    number_of_conv_layers = len(conv_layer_indices)

    # getting number of filters for each convolutional layer
    number_of_filters = []
    for i in conv_layer_indices:
        number_of_filters.append(get_num_filters(model.layers[i]))

    # getting activations for each convolutional layer
    all_activations = []
    for l in range(number_of_conv_layers):
        gif_folder = destination_path + "/" + model.layers[
            conv_layer_indices[l]].name + "/"
        # create a directory for each convolutional layer
        if not os.path.exists(gif_folder):
            os.makedirs(gif_folder)
            activations = []
            for f in number_of_filters[l]:
                # getting activation
                activation = visualize_activation(
                    model, layer_idx=conv_layer_indices[l], filter_indices=f)
                activations.append(activation)
                # saving activation
                clip = ImageSequenceClip(list(activation), fps=10).resize(1.0)
                gif_name = model.layers[conv_layer_indices[
                    l]].name + "_" + "activation" + str(f) + ".gif"
                clip.write_gif(gif_folder + gif_name, fps=10)
            all_activations.append(activations)
    return all_activations
Example No. 30
 async def save_gif(gif_frames, dest, fps=10):
     """生成 gif
     将输入的帧数据合并成视频并输出为 gif
     参数
     gif_frames: list<numpy.ndarray>
     为每一帧的数据
     dest: str
     为输出路径
     fps: int, float
     为输出 gif 每秒显示的帧数
     返回
     None
     但是会输出一个符合参数的 gif
     """
     clip = ImageSequenceClip(gif_frames, fps=fps)
     clip.write_gif(dest)  # uses imageio
     clip.close()
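Because `save_gif` above is a coroutine, it has to be awaited; a minimal hypothetical driver:

import asyncio
import numpy as np

frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(10)]  # placeholder black frames
asyncio.run(save_gif(frames, 'out.gif', fps=10))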
Example No. 31
    def save_vol_gif(self, filename, array, fps=10, scale=1.0):
        """Creates a gif given a stack of images using moviepy
        Notes
        -----
        works with current Github version of moviepy (not the pip version)
        https://github.com/Zulko/moviepy/commit/d4c9c37bc88261d8ed8b5d9b7c317d13b2cdf62e
        Usage
        -----
        >>> X = randn(100, 64, 64)
        >>> save_vol_gif('test.gif', X)
        Parameters
        ----------
        filename : string
            The filename of the gif to write to
        array : array_like
            A numpy array that contains a sequence of images
        fps : int
            frames per second (default: 10)
        scale : float
            how much to rescale each image by (default: 1.0)
        """

        # ensure that the file has the .gif extension
        fname, _ = os.path.splitext(filename)
        filename = fname + '.gif'

        # Normalize the volume into 0-255 range for .gif file
        volume_norm = np.zeros_like(array, dtype=np.uint8)
        for z in range(array.shape[0]):
            volume_norm[z] = cv2.normalize(array[z],
                                           dst=volume_norm[z],
                                           alpha=0,
                                           beta=255,
                                           norm_type=cv2.NORM_MINMAX)
        array = volume_norm  # continue with the normalized volume

        # copy into the color dimension if the images are black and white
        if array.ndim == 3:
            array = array[..., np.newaxis] * np.ones(3)

        # make the moviepy clip
        clip = ImageSequenceClip(list(array), fps=fps).resize(scale)
        clip.write_gif(filename, fps=fps)
        return clip
Example No. 32
def make_gif():
    files = []
    for (dirpath, dirnames, filenames) in walk("input"):
        files.extend(["input/" + x for x in filenames if ".jpg" in x])
    
    image_names = ["output/frame_{}.jpg".format(x) for x in range(len(files))]
    for i in range(len(files)):
        im = Image.open(files[i])
        im.thumbnail(pic_size, Image.ANTIALIAS)
        print(image_names[i])
        im.save(image_names[i], quality=100)

    newName = "result.gif"

    clip = ImageSequenceClip(image_names, fps=4)
    clip.write_gif(newName, fuzz=False)

    for f in image_names:
        remove(f)
    print ""
def run(outdir, train_mode):

  # Build network.
  initializer = tf.keras.initializers.VarianceScaling()
  X = tf.placeholder(tf.float32, shape=[None, n_inputs])
  hidden = tf.layers.dense(
      X, N_HIDDEN, activation=tf.nn.elu, kernel_initializer=initializer)
  logits = tf.layers.dense(hidden, n_outputs)
  outputs = tf.nn.sigmoid(logits)  # probability of action 0 (left)
  p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
  action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)

  # Optimizer, gradients.
  y = 1. - tf.to_float(action)
  cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
      labels=y, logits=logits)
  optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
  grads_and_vars = optimizer.compute_gradients(cross_entropy)
  gradients = [grad for grad, variable in grads_and_vars]
  gradient_placeholders = []
  grads_and_vars_feed = []
  for grad, variable in grads_and_vars:
    gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
    gradient_placeholders.append(gradient_placeholder)
    grads_and_vars_feed.append((gradient_placeholder, variable))
  training_op = optimizer.apply_gradients(grads_and_vars_feed)

  # For TensorBoard.
  episode_reward = tf.placeholder(dtype=tf.float32, shape=[])
  tf.summary.scalar('reward', episode_reward)

  init = tf.global_variables_initializer()
  saver = tf.train.Saver()

  if train_mode:
    hp_save_dir = hp_directory(outdir)
    with tf.Session() as sess:
      init.run()
      # For TensorBoard.
      print(hp_save_dir)
      train_writer = tf.summary.FileWriter(hp_save_dir, sess.graph)
      for iteration in range(n_iterations):
        all_rewards = []
        all_gradients = []
        for game in range(N_GAMES_PER_UPDATE):
          current_rewards = []
          current_gradients = []
          obs = env.reset()
          for step in range(n_max_steps):
            action_val, gradients_val = sess.run(
                [action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
            obs, reward, done, info = env.step(action_val[0][0])
            current_rewards.append(reward)
            current_gradients.append(gradients_val)
            if done:
              break
          all_rewards.append(current_rewards)
          all_gradients.append(current_gradients)
        avg_reward = np.mean(([np.sum(r) for r in all_rewards]))

        print('\rIteration: {}, Reward: {}'.format(iteration, avg_reward),
              end='')
        all_rewards = discount_and_normalize_rewards(
            all_rewards, discount_rate=DISCOUNT_RATE)
        feed_dict = {}
        for var_index, gradient_placeholder in enumerate(gradient_placeholders):
          mean_gradients = np.mean([
              reward * all_gradients[game_index][step][var_index]
              for game_index, rewards in enumerate(all_rewards)
              for step, reward in enumerate(rewards)
          ],
                                   axis=0)
          feed_dict[gradient_placeholder] = mean_gradients
        sess.run(training_op, feed_dict=feed_dict)
        if iteration % save_iterations == 0:
          print('Saving model to ', hp_save_dir)
          model_file = '{}/my_policy_net_pg.ckpt'.format(hp_save_dir)
          saver.save(sess, model_file)
          # Also save event files for TB.
          merge = tf.summary.merge_all()
          summary = sess.run(merge, feed_dict={episode_reward: avg_reward})
          train_writer.add_summary(summary, iteration)
      obs = env.reset()
      steps = []
      done = False
  else:  # Make a gif.
    from moviepy.editor import ImageSequenceClip
    model_file = '{}/my_policy_net_pg.ckpt'.format(outdir)
    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      saver.restore(sess, save_path=model_file)
      # Run model.
      obs = env.reset()
      done = False
      steps = []
      rewards = []
      while not done:
        s = env.render('rgb_array')
        steps.append(s)
        action_val = sess.run(action, feed_dict={X: obs.reshape(1, n_inputs)})
        obs, reward, done, info = env.step(action_val[0][0])
        rewards.append(reward)
      print('Final reward :', np.sum(rewards))
    clip = ImageSequenceClip(steps, fps=30)
    clip.write_gif('cartpole.gif', fps=30)
Example No. 34
        print(pol.shape)

        (timestamps, x_pos,
         y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
                                               y_pos, pol, window=1000)

        frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
                                               pol, num_frames, fs=3)
        print "Length of produced frames: ", len(frames)
        new_frames = []
        for frame in frames:
            tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
            new_frames.append(tmp_frame)

        clip = ImageSequenceClip(new_frames, fps=20)
        clip.write_gif(seq_save_path, fps=30)

        print "Sequence %s is saved at %s" % (img_name, seq_save_path)
elif option == "caltech256-ps":
    caltech_fn = "INI_Caltech256_10fps_20160424.hdf5"
    caltech_path = os.path.join(data_path, caltech_fn)
    caltech_db = h5py.File(caltech_path, mode="r")
    caltech_stats_path = os.path.join(stats_path, "caltech256_stats.pkl")
    caltech_save_path = os.path.join(data_path, "caltech256_ps.eps")
    img_num = 60

    with open(caltech_stats_path, mode="rb") as f:
        caltech_stats = pickle.load(f)

    caltech_list = caltech_stats["caltech256_list"]
Example No. 35
    video_save_path = join(data_path, "retina-simulation",
                           "horse-riding.gif")
    parvo_save_path = join(data_path, "retina-simulation",
                           "horse-riding-parvo.gif")
    magno_save_path = join(data_path, "retina-simulation",
                           "horse-riding-magno.gif")

    parvo_frames = []
    magno_frames = []
    origin_frames = []
    for frame in frames:
        retina.run(frame)

        origin_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        parvo_frame = retina.getParvo()
        parvo_frame = cv2.cvtColor(parvo_frame, cv2.COLOR_BGR2RGB)
        parvo_frames.append(parvo_frame)
        magno_frames.append(retina.getMagno())

    clip = ImageSequenceClip(parvo_frames, fps=30)
    clip.write_gif(parvo_save_path, fps=30)
    print "[MESSAGE] Parvo frames is saved at: %s" % (parvo_save_path)

    clip = ImageSequenceClip(magno_frames, fps=30)
    clip.write_gif(magno_save_path, fps=30)
    print "[MESSAGE] Magno frames is saved at: %s" % (magno_save_path)

    clip = ImageSequenceClip(origin_frames, fps=30)
    clip.write_gif(video_save_path, fps=30)
    print "[MESSAGE] Original frames is saved at: %s" % (video_save_path)