Example #1
    def render_observations(self, pose):
        '''Render all environment observations; called inside every step().
        Input:
            @pose: current robot pose
        Return:
            @observations: dict whose keys are the output components the user
            specified in the config file
        TODO:
            @hzyjerry: add noise to observation
        '''

        self.render_nonviz_sensor = self.robot.calc_state()

        if self._require_camera_input:
            self.r_camera_rgb.setNewPose(pose)
            all_dist, all_pos = self.r_camera_rgb.getAllPoseDist(pose)
            top_k = self.find_best_k_views(pose[0], all_dist, all_pos, avoid_block=False)
            with Profiler("Render to screen"):
                self.render_rgb_filled, self.render_depth, self.render_semantics, self.render_normal, self.render_prefilled = self.r_camera_rgb.renderOffScreen(pose, top_k)

        observations = {}
        for output in self.config["output"]:
            try:
                observations[output] = getattr(self, "render_" + output)
            except AttributeError:
                raise Exception("Output component {} is not available".format(output))

        #visuals = np.concatenate(visuals, 2)
        return observations
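
For reference, here is a minimal, self-contained sketch of the dispatch pattern used above: each entry under the config's "output" key is looked up as a "render_<name>" attribute. The FakeEnv class, the attribute shapes, and the output names are illustrative only, not the exact Gibson config schema.

import numpy as np

class FakeEnv:
    def __init__(self):
        # hypothetical config: which observation components to expose
        self.config = {"output": ["rgb_filled", "depth", "nonviz_sensor"]}
        self.render_rgb_filled = np.zeros((256, 256, 3), dtype=np.uint8)
        self.render_depth = np.zeros((256, 256, 1), dtype=np.float32)
        self.render_nonviz_sensor = np.zeros(10, dtype=np.float32)

    def render_observations(self):
        observations = {}
        for output in self.config["output"]:
            try:
                # each requested component must exist as self.render_<output>
                observations[output] = getattr(self, "render_" + output)
            except AttributeError:
                raise Exception("Output component {} is not available".format(output))
        return observations

print({k: v.shape for k, v in FakeEnv().render_observations().items()})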
Example #2
    def render_physics(self):
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)

        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60,
            aspect=float(self._render_width) / self._render_height,
            nearVal=0.1,
            farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(width=self._render_width,
                                                height=self._render_height,
                                                viewMatrix=view_matrix,
                                                projectionMatrix=proj_matrix,
                                                renderer=p.ER_TINY_RENDERER)
        # getCameraImage returns the pixel buffer in (height, width) order
        rgb_array = np.array(px).reshape(
            (self._render_height, self._render_width, -1))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
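
The camera calls above are standard pybullet API. A self-contained sketch of the same pipeline, tracking a simple URDF body instead of the Gibson robot (all numeric values are illustrative):

import numpy as np
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)  # headless physics server
p.setAdditionalSearchPath(pybullet_data.getDataPath())
body_id = p.loadURDF("r2d2.urdf", [0, 0, 0.5])

target_pos, _ = p.getBasePositionAndOrientation(body_id)
view_matrix = p.computeViewMatrixFromYawPitchRoll(
    cameraTargetPosition=target_pos,
    distance=2.0, yaw=0, pitch=-30, roll=0, upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
    fov=60, aspect=320.0 / 240.0, nearVal=0.1, farVal=100.0)

width, height = 320, 240
_, _, px, _, _ = p.getCameraImage(width=width, height=height,
                                  viewMatrix=view_matrix,
                                  projectionMatrix=proj_matrix,
                                  renderer=p.ER_TINY_RENDERER)
# getCameraImage returns RGBA pixels; reshape as (height, width, 4), drop alpha
rgb = np.reshape(px, (height, width, 4))[:, :, :3]
print(rgb.shape)
p.disconnect()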
Example #3
    def _step(self, a):
        sensor_state, sensor_reward, done, sensor_meta = BaseRobotEnv._step(
            self, a)
        pose = [sensor_meta['eye_pos'], sensor_meta['eye_quat']]
        sensor_meta.pop("eye_pos", None)
        sensor_meta.pop("eye_quat", None)
        sensor_meta["sensor"] = sensor_state

        if not self._require_camera_input or self.test_env:
            visuals = self.get_blank_visuals()
            return visuals, sensor_reward, done, sensor_meta

        ## Select the nearest points
        all_dist, all_pos = self.r_camera_rgb.rankPosesByDistance(pose)
        top_k = self.find_best_k_views(pose[0], all_dist, all_pos)

        # Speed bottleneck
        t = time.time()

        self.render_rgb, self.render_depth, self.render_semantics, self.render_normal, self.render_unfilled = self.r_camera_rgb.renderOffScreen(
            pose, top_k)

        dt = time.time() - t
        self.fps = 0.9 * self.fps + 0.1 * 1 / dt

        # Add quantifications
        visuals = self.get_visuals(self.render_rgb, self.render_depth)
        if self.config["show_diagnostics"]:
            self.render_rgb = self.add_text(self.render_rgb)

        if self.config["display_ui"]:
            with Profiler("Rendering visuals: render to visuals"):
                self.render_to_UI()
                self.save_frame += 1

        elif self.gui:
            # Speed bottleneck 2, 116fps
            self.r_camera_rgb.renderToScreen()

        robot_pos = self.robot.get_position()
        p.resetBasePositionAndOrientation(self.robot_mapId, [
            robot_pos[0] / self.robot.mjcf_scaling,
            robot_pos[1] / self.robot.mjcf_scaling, 6
        ], [0, 0, 0, 1])

        debugmode = 0
        if debugmode:
            print("Eye position", sensor_meta['eye_pos'])
        debugmode = 0
        if debugmode:
            print("Environment visuals shape", visuals.shape)
        return visuals, sensor_reward, done, sensor_meta
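
One small detail worth noting: self.fps is tracked with an exponential moving average, weighting the previous estimate by 0.9 and the instantaneous rate by 0.1. A self-contained toy version of that bookkeeping (the sleep stands in for the renderOffScreen() call):

import time

fps = 1.0
for _ in range(5):
    t = time.time()
    time.sleep(0.01)              # stand-in for the expensive render call
    dt = time.time() - t
    fps = 0.9 * fps + 0.1 * 1 / dt
print("smoothed fps: %.1f" % fps)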
Example #4
from gibson.envs.husky_env import HuskyNavigateEnv
from gibson.utils.play import play
from gibson.core.render.profiler import Profiler
import os

config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                           'configs', 'benchmark.yaml')
print(config_file)

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', type=str, default=config_file)
    parser.add_argument('--gpu', type=int, default=0)

    args = parser.parse_args()

    #env = HuskyNavigateEnv(human=True, timestep=timestep, frame_skip=frame_skip, mode="RGB", is_discrete = True, resolution=args.resolution)
    env = HuskyNavigateEnv(config=args.config, gpu_idx=args.gpu)
    env.reset()
    frame = 0
    while frame < 10000:
        with Profiler("env step"):
            env.step(2)

        frame += 1
        print("frame {}".format(frame))
Example #5
def play(env, transpose=True, zoom=None, callback=None, keys_to_action=None):
    """Allows one to play the game using keyboard.

    To simply play the game use:

        play(gym.make("Pong-v3"))
        play(env)

    The above code also works if env is wrapped, so it's particularly useful for
    verifying that the frame-level preprocessing does not render the game
    unplayable.

    If you wish to plot real-time statistics as you play, you can use
    gym.utils.play.PlayPlot. Here's sample code for plotting the reward
    for the last 5 seconds of gameplay.

        def callback(obs_t, obs_tp1, action, rew, done, info):
            return [rew,]
        env_plotter = PlayPlot(callback, 30 * 5, ["reward"])

        env = gym.make("Pong-v3")
        play(env, callback=env_plotter.callback)


    Arguments
    ---------
    env: gym.Env
        Environment to use for playing.
    transpose: bool
        If True, the observation output is transposed.
        Defaults to True.
    zoom: float
        Scale the rendered screen by this factor.
    callback: lambda or None
        If a callback is provided, it will be executed after
        every step. It takes the following inputs:
            obs_t: observation before performing action
            obs_tp1: observation after performing action
            action: action that was executed
            rew: reward that was received
            done: whether the environment is done or not
            info: debug info
    keys_to_action: dict: tuple(int) -> int or None
        Mapping from keys pressed to action performed.
        For example, if pressing 'w' and space at the same time is supposed
        to trigger action number 2, then the keys_to_action dict would look like this:

            {
                # ...
                tuple(sorted((ord('w'), ord(' ')))): 2,
                # ...
            }
        If None, the default keys_to_action mapping for that env is used, if provided.
    """

    obs_s = env.observation_space
    #assert type(obs_s) == gym.spaces.box.Box
    #assert len(obs_s.shape) == 2 or (len(obs_s.shape) == 3 and obs_s.shape[2] in [1,3])

    if keys_to_action is None:
        if hasattr(env, 'get_keys_to_action'):
            keys_to_action = env.get_keys_to_action()
        elif hasattr(env.unwrapped, 'get_keys_to_action'):
            keys_to_action = env.unwrapped.get_keys_to_action()
        #else:
        #    assert False, env.spec.id + " does not have explicit key to action mapping, " + \
        #                  "please specify one manually"
    relevant_keys = set(sum(map(list, keys_to_action.keys()), []))
    relevant_keys.add(ord('r'))
    '''
    if transpose:
        video_size = env.observation_space.shape[1], env.observation_space.shape[0]
    else:
        video_size = env.observation_space.shape[0], env.observation_space.shape[1]

    if zoom is not None:
        video_size = int(video_size[0] * zoom), int(video_size[1] * zoom)
    '''
    pressed_keys = []
    running = True
    env_done = True

    record_num = 0
    record_total = 0
    obs = env.reset()
    do_restart = False
    while running:
        if do_restart:
            do_restart = False
            env.reset()
            pressed_keys = []
            continue
        if len(pressed_keys) == 0:
            action = keys_to_action[()]
            prev_obs = obs  # keep prev_obs defined for the callback below
            with Profiler("Play Env: step"):
                start = time.time()
                obs, rew, env_done, info = env.step(action)
                record_total += time.time() - start
                record_num += 1
            #print(info['sensor'])
            print("Play mode: reward %f" % rew)
        for p_key in pressed_keys:
            action = keys_to_action[(p_key, )]
            prev_obs = obs
            with Profiler("Play Env: step"):
                start = time.time()
                obs, rew, env_done, info = env.step(action)
                record_total += time.time() - start
                record_num += 1
            print("Play mode: reward %f" % rew)
        if callback is not None:
            callback(prev_obs, obs, action, rew, env_done, info)
        # process pygame events
        key_codes = env.get_key_pressed(relevant_keys)
        #print("Key codes", key_codes)
        pressed_keys = []

        for key in key_codes:
            print(key)
            if key == ord('r'):
                do_restart = True
            if key == ord('j'):
                env.robot.turn_left()
                continue
            if key == ord('l'):
                env.robot.turn_right()
                continue
            if key == ord('i'):
                env.robot.move_forward()
                continue
            if key == ord('k'):
                env.robot.move_backward()
                continue
            if key not in relevant_keys:
                continue
            # test events, set key states
            #print(relevant_keys)
            pressed_keys.append(key)

            #print(pressed_keys)
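To make the expected inputs concrete, here is a hedged sketch of a keys_to_action mapping and a callback in the form play() consumes; the key bindings and action ids are illustrative, not the Husky defaults.

keys_to_action = {
    (): 0,                                    # no key held -> idle action
    (ord('w'),): 1,                           # single key
    tuple(sorted((ord(' '), ord('w')))): 2,   # chord: space + 'w'
}

def reward_logger(obs_t, obs_tp1, action, rew, done, info):
    # executed after every step; could plot or log the reward instead
    print("reward:", rew)

# play(env, callback=reward_logger, keys_to_action=keys_to_action)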
Example #6
    do_restart = False

    vel = obs["nonviz_sensor"][3:6]

    while not rospy.is_shutdown():
        pub_vel_x.publish(vel[0])
        pub_vel_y.publish(vel[1])

        if do_restart:
            do_restart = False
            env.reset()
            pressed_keys = []
            continue
        if len(pressed_keys) == 0:
            action = keys_to_action[()]
            with Profiler("Play Env: step"):
                start = time.time()
                obs, rew, env_done, info = env.step(action)
                vel = obs["nonviz_sensor"][3:6]
                record_total += time.time() - start
                record_num += 1
            # print(info['sensor'])
            print("Play mode: reward %f" % rew)
        for p_key in pressed_keys:
            action = keys_to_action[(p_key, )]
            prev_obs = obs
            with Profiler("Play Env: step"):
                start = time.time()
                obs, rew, env_done, info = env.step(action)
                vel = obs["nonviz_sensor"][3:6]
                record_total += time.time() - start