Esempio n. 1
0
 def render(self, mode='human'):
     """Render every sub-environment as one tiled image.

     'human' shows the tiled frame in the viewer; 'rgb_array' returns
     the frame as an array instead. Any other mode is unsupported.
     """
     tiled = tile_images(self.get_images())
     if mode == 'rgb_array':
         return tiled
     if mode == 'human':
         self.get_viewer().imshow(tiled)
         return None
     raise NotImplementedError
Esempio n. 2
0
 def render(self, mode='human'):
     """Show or return a tiled image of all sub-environments.

     Returns the viewer's open state in 'human' mode and the raw tiled
     frame in 'rgb_array' mode; raises for any other mode.
     """
     tiled = tile_images(self.get_images())
     if mode == 'rgb_array':
         return tiled
     if mode == 'human':
         self.get_viewer().imshow(tiled)
         return self.get_viewer().isopen
     raise NotImplementedError
Esempio n. 3
0
 def render(self, mode='human'):
     """Request a frame from each worker and display/return the tiling."""
     for remote in self.remotes:
         remote.send(('render', None))
     frames = [remote.recv() for remote in self.remotes]
     tiled = tile_images(frames)
     if mode == 'rgb_array':
         return tiled
     if mode != 'human':
         raise NotImplementedError
     import cv2
     # cv2.imshow expects BGR channel order, hence the channel reversal.
     cv2.imshow('vecenv', tiled[:, :, ::-1])
     cv2.waitKey(1)
Esempio n. 4
0
    def render(self, mode='human'):
        """Render the tiled frames of all parallel environments.

        Parameters
        ----------
        mode : str
            'human' shows the tiled frame in a SimpleImageViewer;
            'rgb_array' returns it as an array. Anything else raises
            NotImplementedError.
        """
        for pipe in self.parent_pipes:
            pipe.send(('render', None))
        imgs = [pipe.recv() for pipe in self.parent_pipes]
        bigimg = tile_images(imgs)
        if mode == 'human':
            if self.viewer is None:
                # Fix: `rendering` was used without a visible import.
                # Import lazily at the point of use — consistent with the
                # other viewer-based variant in this file — so headless
                # runs never touch gym's classic_control rendering.
                from gym.envs.classic_control import rendering
                self.viewer = rendering.SimpleImageViewer()

            # NOTE(review): channel reversal before SimpleImageViewer —
            # presumably the worker frames arrive BGR; confirm.
            self.viewer.imshow(bigimg[:, :, ::-1])
        elif mode == 'rgb_array':
            return bigimg
        else:
            raise NotImplementedError
Esempio n. 5
0
    def render(self, mode='human'):
        """Collect one frame per worker and show or return the tiling."""
        for remote in self.remotes:
            remote.send(('render', None))
        frames = [remote.recv() for remote in self.remotes]
        tiled = tile_images(frames)

        if mode == 'rgb_array':
            return tiled

        if mode != 'human':
            raise NotImplementedError

        if self.viewer is None:
            # Lazy import keeps headless runs free of the gym viewer dep.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.SimpleImageViewer()
        # NOTE(review): channel reversal before SimpleImageViewer —
        # presumably the worker frames arrive BGR; confirm.
        self.viewer.imshow(tiled[:, :, ::-1])
 def render(self, mode='human'):
     """Render the vectorized env; delegates to the single env when serial.

     With at most one worker the call is forwarded to ``self.envs.render``.
     Otherwise each remote returns its batch of frames, which are
     flattened into one batch, tiled, and either shown ('human') or
     returned ('rgb_array').
     """
     if self.num_workers <= 1:
         return self.envs.render(mode=mode)
     for remote in self.remotes:
         remote.send(('render', None))
     frames = np.asarray([remote.recv() for remote in self.remotes])
     # Collapse the (worker, envs-per-worker) leading dims into one batch.
     frames = frames.reshape((-1, *frames.shape[2:]))
     tiled = tile_images(frames)
     if mode == 'rgb_array':
         return tiled
     if mode != 'human':
         raise NotImplementedError
     import cv2
     # cv2.imshow expects BGR channel order, hence the channel reversal.
     cv2.imshow('vecenv', tiled[:, :, ::-1])
     cv2.waitKey(1)
Esempio n. 7
0
 def render(self, mode='human'):
     """Tile the workers' frames and show ('human') or return ('rgb_array') them.

     Fix: the original ``np.squeeze(imgs, axis=1)`` raised whenever a
     worker returned more than one frame per message; reshaping the two
     leading (worker, frames-per-worker) dims into a single batch dim is
     equivalent for the one-frame case and robust otherwise — consistent
     with the other vec-env variant in this file.
     """
     for pipe in self.remotes:
         pipe.send(('render', None))
     imgs = np.asarray([pipe.recv() for pipe in self.remotes],
                       dtype=np.uint8)
     # tile_images expects a 4-dimensional (batch, h, w, c) array.
     imgs = imgs.reshape((-1, *imgs.shape[2:]))
     bigimg = tile_images(imgs)
     if mode == 'human':
         import cv2
         # cv2.imshow expects BGR channel order, hence the channel reversal.
         cv2.imshow('vecenv', bigimg[:, :, ::-1])
         cv2.waitKey(1)
     elif mode == 'rgb_array':
         return bigimg.astype(np.uint8)
     else:
         raise NotImplementedError
Esempio n. 8
0
def evaluate_model(
    model, network_name: str,
    env_name: str, num_env: int, seed: int,
    evals_per_env=1,
    eval_dir: Path = None,
    use_logger=True,
    max_eplen=None,
    frame_stack_size=4,
    noop_reset=True,
    fire_reset=True,
    return_raw=False,
    return_prc=False,
    video_recorder=False,
    progress=False):
    """Evaluate ``model`` on ``num_env`` parallel instances of ``env_name``.

    Creates the evaluation environments, runs ``evals_per_env`` episodes
    in each via ``run_model``, and — when observations are requested —
    renders the model's attention into saliency maps and (if ``eval_dir``
    is set) a ``perception.mkv`` video.

    Parameters
    ----------
    model : passed straight through to ``run_model``.
    network_name : key into
        ``models_baselines.attention_visualization_params`` selecting the
        visualization settings for this architecture.
    env_name, num_env, seed, max_eplen, frame_stack_size, noop_reset,
    fire_reset, use_logger, video_recorder : forwarded to ``make_envs``.
    evals_per_env : episodes to run in every environment instance.
    eval_dir : if given, created (with parents) and used to store
        ``results.json`` and the perception video.
    return_raw, return_prc : keep raw (210x160x3) / preprocessed
        (84x84 x ``frame_stack_size``) observations; either flag also
        requests attention maps from ``run_model``.
    progress : show tqdm-style progress.

    Returns
    -------
    ``(eval_results, raw_observations, prc_observations, saliency_maps)``;
    ``saliency_maps`` is ``[]`` when neither observation flag is set.
    """
    # Presumably returns tqdm or a no-op pass-through when progress is
    # disabled — confirm against maybe_tqdm's definition.
    tqdm = maybe_tqdm(progress)

    if eval_dir is not None:
        eval_dir.mkdir(exist_ok=True, parents=True)

    logging.debug(f'Creating {num_env} instances of {env_name}...')
    # closing() guarantees the (possibly subprocess-backed) envs are shut
    # down even if evaluation raises.
    with closing(make_envs(
        env_name=env_name,
        num_env=num_env,
        seed=seed,
        max_eplen=max_eplen,
        frame_stack_size=frame_stack_size,
        noop_reset=noop_reset,
        fire_reset=fire_reset,
        eval_dir=eval_dir,
        use_logger=use_logger,
        video_recorder=video_recorder,
    )) as eval_envs:
        logging.debug(f'Done creating envs. Running each for {evals_per_env} episodes, at most {max_eplen} each...')
        eval_results, raw_observations, prc_observations, attention = run_model(
            model,
            eval_envs,
            evals_per_env=evals_per_env,
            return_raw=return_raw,
            return_prc=return_prc,
            # Attention maps are only needed to render the saliency video.
            return_attn=return_raw or return_prc,
            progress=progress,
        )

    if eval_dir is not None:
        with (eval_dir / 'results.json').open('w') as fp:
            json.dump(eval_results, fp, indent=4)

    if return_raw or return_prc:
        logging.debug(f'Rendering {len(attention)} frames of attention...')
        # Expected frame geometry: raw RGB frames (rh, rw, rc) and the
        # preprocessed, frame-stacked observations (ph, pw, pc).
        rh, rw, rc = 210, 160, 3
        ph, pw, pc = 84, 84, frame_stack_size
        if return_raw:
            assert raw_observations[0].shape == (num_env, rh, rw, rc)
        if return_prc:
            assert prc_observations[0].shape == (num_env, ph, pw, pc)

        saliency_maps = render_attention(
            attention,
            (num_env, ph, pw, pc),
            **models_baselines.attention_visualization_params[network_name]
        )
        logging.debug('Done rendering.')

        if eval_dir is not None:
            with VideoWriter(eval_dir / 'perception.mkv') as writer:
                it = tqdm(
                    render_perception(raw_observations, prc_observations, saliency_maps),
                    postfix='writing video',
                    total=len(saliency_maps)
                )
                for frame in it:
                    frame = tile_images(frame)
                    # Frames come out of render_perception in [0, 1];
                    # scale to 8-bit before encoding — presumably float,
                    # confirm against render_perception's output.
                    frame = (frame * 255).astype(np.uint8)
                    # NOTE(review): converts BGR->RGB before writing —
                    # confirm VideoWriter expects RGB input.
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    writer.write(frame)
    else:
        saliency_maps = []

    return eval_results, raw_observations, prc_observations, saliency_maps