Example #1
    def __init__(self,
                 task_class,
                 observation_mode='state',
                 render_mode: Union[None, str] = None):
        self._observation_mode = observation_mode
        self._render_mode = render_mode
        obs_config = ObservationConfig()
        if observation_mode == 'state':
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        elif observation_mode == 'vision':
            obs_config.set_all(True)
        else:
            raise ValueError('Unrecognised observation_mode: %s.' %
                             observation_mode)
        action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
        self.env = Environment(action_mode,
                               obs_config=obs_config,
                               headless=True)
        self.env.launch()
        self.task = self.env.get_task(task_class)

        _, obs = self.task.reset()

        self.action_space = spaces.Box(low=-1.0,
                                       high=1.0,
                                       shape=(self.env.action_size, ))

        if observation_mode == 'state':
            self.observation_space = spaces.Box(
                low=-np.inf, high=np.inf, shape=obs.get_low_dim_data().shape)
        elif observation_mode == 'vision':
            self.observation_space = spaces.Dict({
                "state":
                spaces.Box(low=-np.inf,
                           high=np.inf,
                           shape=obs.get_low_dim_data().shape),
                "left_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.left_shoulder_rgb.shape),
                "right_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.right_shoulder_rgb.shape),
                "wrist_rgb":
                spaces.Box(low=0, high=1, shape=obs.wrist_rgb.shape),
                "front_rgb":
                spaces.Box(low=0, high=1, shape=obs.front_rgb.shape),
            })

        if render_mode is not None:
            # Add the camera to the scene
            cam_placeholder = Dummy('cam_cinematic_placeholder')
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            if render_mode == 'human':
                self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            else:
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)
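
Example #1 only shows the constructor; below is a minimal sketch of the companion methods such a wrapper is typically paired with (the method bodies and the `_extract_obs` helper are assumptions, not the original author's code):

    # A sketch, assuming the old RLBench API in which task.step(action)
    # returns (obs, reward, terminate). `_extract_obs` is a hypothetical
    # helper mapping an RLBench observation to the observation_space above.
    def reset(self):
        descriptions, obs = self.task.reset()
        del descriptions  # not used
        return self._extract_obs(obs)

    def step(self, action):
        obs, reward, terminate = self.task.step(action)
        return self._extract_obs(obs), reward, terminate, {}

    def close(self):
        self.env.shutdown()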
Example #2
    def get_task(self, task_class, arm_action_mode):
        obs_config = ObservationConfig()
        obs_config.set_all(False)
        obs_config.set_all_low_dim(True)
        obs_config.right_shoulder_camera.rgb = True
        action_mode = ActionMode(arm_action_mode)
        self.env = Environment(
            action_mode, ASSET_DIR, obs_config, headless=True)
        self.env.launch()
        return self.env.get_task(task_class)
Example #3
    def __init__(self,
                 task_class,
                 state_dim,
                 n_features,
                 n_cluster,
                 n_latent,
                 parameters=None,
                 headless=False,
                 cov_reg=1E-8,
                 n_samples=50,
                 dense_reward=False,
                 imitation_noise=0.03):

        obs_config = ObservationConfig()
        obs_config.set_all_low_dim(True)
        obs_config.set_all_high_dim(False)
        self._obs_config = obs_config

        self._state_dim = state_dim
        self._headless = headless

        action_mode = ActionMode(ArmActionMode.ABS_JOINT_POSITION)
        self._task_class = task_class
        self._action_mode = action_mode
        self.env = Environment(action_mode, "", obs_config, headless=headless)
        self.env.launch()

        self.task = self.env.get_task(task_class)
        if parameters is None:
            self.parameters = np.load(
                "parameters/%s_%d.npy" %
                (self.task.get_name(), n_samples if False else n_features))[:n_samples]
        else:
            # without this branch, a user-supplied `parameters` array was
            # silently ignored and the attribute below was never set
            self.parameters = parameters
        # parameters = np.concatenate([parameters for _ in range(20)])
        self.imitation_noise = imitation_noise
        self.parameters[:, :3] += imitation_noise * np.random.normal(
            size=self.parameters[:, :3].shape)
        self.mppca = MPPCA(n_cluster,
                           n_latent,
                           n_iterations=30,
                           cov_reg=cov_reg,
                           n_init=500)
        self.mppca.fit(self.parameters)

        self.rlmppca = None
        self.dense_reward = dense_reward

        print(np.exp(self.mppca.log_pi))

        group = Group("rlbench", ["d%d" % i for i in range(7)] + ["gripper"])
        self.space = ClassicSpace(group, n_features=n_features)

        print("mppca learned")
Example #4
    def get_task(self, randomize_every, frequency, visual_config=None,
                 dynamics_config=None):
        cam_config = CameraConfig(render_mode=RenderMode.OPENGL)
        obs_config = ObservationConfig(right_shoulder_camera=cam_config)
        obs_config.set_all(False)
        obs_config.set_all_low_dim(True)
        obs_config.right_shoulder_camera.rgb = True
        mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
        self.env = DomainRandomizationEnvironment(
            mode, ASSET_DIR, obs_config, True, True,
            randomize_every, frequency, visual_config, dynamics_config)
        self.env.launch()
        return self.env.get_task(OpenMicrowave)
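
Example #4 leaves `visual_config` and `dynamics_config` to the caller. A minimal sketch of building the visual config with RLBench's domain-randomization API (the import path and exact constructor signature vary between RLBench versions, and the texture directory is a placeholder):

    from rlbench.sim2real.domain_randomization import (
        RandomizeEvery, VisualRandomizationConfig)

    # Point image_directory at a folder of texture images to sample from.
    visual_config = VisualRandomizationConfig(image_directory='./textures')
    randomize_every = RandomizeEvery.EPISODE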
Example #5
    def test_get_stored_demos_images_without_init_sim(self):
        obs_config = ObservationConfig()
        obs_config.set_all(False)
        obs_config.set_all_low_dim(True)
        obs_config.right_shoulder_camera.rgb = True
        action_mode = ActionMode()
        self.env = environment.Environment(
            action_mode, ASSET_DIR, obs_config, headless=True)
        demos = self.env.get_demos('reach_target', 2)
        self.assertEqual(len(demos), 2)
        self.assertGreater(len(demos[0]), 0)
        self.assertIsInstance(demos[0][0].right_shoulder_rgb, np.ndarray)
        self.assertIsNone(demos[0][0].left_shoulder_rgb)
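
Each demo returned by `get_demos` is a sequence of per-step observations. A short sketch of consuming one, using only the fields the config above enables:

    demos = self.env.get_demos('reach_target', 2)
    for obs in demos[0]:
        rgb = obs.right_shoulder_rgb      # numpy image array (enabled above)
        low_dim = obs.get_low_dim_data()  # flat low-dim state vector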
Example #6
    def get_env(self, task=None):
        task = task if task else self.config.env_type
        if self.use_gym:
            # NOTE : When using gym, the task has to be represented as a string.
            assert type(task) == str
            assert self.observation_mode in ['vision', 'state']

            env = gym.make(
                task,
                observation_mode=self.config.env_args['observation_mode'],
                render_mode=self.config.env_args['render_mode'])
            self.env_obj = env
        else:
            obs_config = ObservationConfig()
            if self.observation_mode == 'vision':
                obs_config.set_all(True)
            elif self.observation_mode == 'state':
                obs_config.set_all_high_dim(False)
                obs_config.set_all_low_dim(True)
            else:
                obs_config_dict = {
                    self.left_obv_key: obs_config.left_shoulder_camera,
                    self.right_obv_key: obs_config.right_shoulder_camera,
                    self.wrist_obv_key: obs_config.wrist_camera,
                    self.front_obv_key: obs_config.front_camera
                }

                assert self.observation_mode in obs_config_dict.keys()

                obs_config.set_all_high_dim(False)
                obs_config_dict[self.observation_mode].set_all(True)
                obs_config.set_all_low_dim(True)

            # TODO : Write code to change it from env_args
            action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
            self.env_obj = Environment(action_mode,
                                       obs_config=obs_config,
                                       headless=True)

            task = task if task else ReachTarget
            if type(task) == str:
                task = task.split('-')[0]
                task = self.env_obj._string_to_task(task)

            self.env_obj.launch()
            # NOTE : `env` is referred to as `task` in the RLBench docs.
            env = self.env_obj.get_task(task)
        return env
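
Whichever branch runs, the returned `env` is an RLBench task object. A minimal rollout sketch in the old API used throughout these examples (`runner` is a hypothetical holder of `get_env`; the zero action assumes ABS_JOINT_VELOCITY on a Panda, i.e. 7 joint velocities plus one gripper value):

    env = runner.get_env()
    descriptions, obs = env.reset()
    action = np.zeros((8,))  # hold still, keep the gripper closed
    obs, reward, terminate = env.step(action)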
Example #7
    def __init__(self, task_class, observation_mode='state'):
        self._observation_mode = observation_mode
        obs_config = ObservationConfig()
        if observation_mode == 'state':
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        elif observation_mode == 'vision':
            obs_config.set_all(True)
        else:
            raise ValueError('Unrecognised observation_mode: %s.' %
                             observation_mode)
        action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
        self.env = Environment(action_mode,
                               obs_config=obs_config,
                               headless=True)
        self.env.launch()
        self.task = self.env.get_task(task_class)

        _, obs = self.task.reset()

        self.action_space = spaces.Box(low=-1.0,
                                       high=1.0,
                                       shape=(action_mode.action_size, ))

        if observation_mode == 'state':
            self.observation_space = spaces.Box(
                low=-np.inf, high=np.inf, shape=obs.get_low_dim_data().shape)
        elif observation_mode == 'vision':
            self.observation_space = spaces.Dict({
                "state":
                spaces.Box(low=-np.inf,
                           high=np.inf,
                           shape=obs.get_low_dim_data().shape),
                "left_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.left_shoulder_rgb.shape),
                "right_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.right_shoulder_rgb.shape),
                "wrist_rgb":
                spaces.Box(low=0, high=1, shape=obs.wrist_rgb.shape),
            })

        self._gym_cam = None
Example #8
    def __init__(self, task_class, state_dim, n_features, headless=True):

        super().__init__(n_features)
        self._group = Group("rlbench", ["d%d" % i for i in range(7)] + ["gripper"])
        self._space = ClassicSpace(self._group, n_features)

        obs_config = ObservationConfig()
        obs_config.set_all_low_dim(True)
        obs_config.set_all_high_dim(False)
        self._obs_config = obs_config

        self._state_dim = state_dim
        self._headless = headless

        action_mode = ActionMode(ArmActionMode.ABS_JOINT_POSITION)
        self._task_class = task_class
        self._action_mode = action_mode
        self.env = Environment(
            action_mode, "", obs_config, headless=headless)
        self.env.launch()

        self.task = self.env.get_task(task_class)
        self._obs = None
Example #9
class RLBenchEnv(gym.Env):
    """A gym wrapper for RLBench."""

    metadata = {'render.modes': ['human']}

    def __init__(self,
                 task_class,
                 observation_mode='state',
                 action_mode='joint',
                 multi_action_space=False,
                 discrete_action_space=False):
        self.num_skip_control = 20
        self.epi_obs = []
        self.obs_record = True
        self.obs_record_id = None

        self._observation_mode = observation_mode
        self.obs_config = ObservationConfig()
        if observation_mode == 'state':
            self.obs_config.set_all_high_dim(False)
            self.obs_config.set_all_low_dim(True)
        elif observation_mode == 'state-goal':
            self.obs_config.set_all_high_dim(False)
            self.obs_config.set_all_low_dim(True)
            self.obs_config.set_goal_info(True)
        elif observation_mode == 'vision':
            self.obs_config.set_all(False)
            self.obs_config.set_camera_rgb(True)
        elif observation_mode == 'both':
            self.obs_config.set_all(True)
        else:
            raise ValueError('Unrecognised observation_mode: %s.' %
                             observation_mode)

        self._action_mode = action_mode
        self.ac_config = None
        if action_mode == 'joint':
            self.ac_config = ActionConfig(
                SnakeRobotActionConfig.ABS_JOINT_POSITION)
        elif action_mode == 'trigon':
            self.ac_config = ActionConfig(
                SnakeRobotActionConfig.TRIGON_MODEL_PARAM,
                is_discrete=discrete_action_space)
        else:
            raise ValueError('Unrecognised action_mode: %s.' % action_mode)

        self.env = Environment(action_config=self.ac_config,
                               obs_config=self.obs_config,
                               headless=True)
        self.env.launch()
        self.task = self.env.get_task(task_class)
        self.max_episode_steps = self.task.episode_len
        _, obs = self.task.reset()

        if action_mode == 'joint':
            self.action_space = spaces.Box(
                low=-1.7,
                high=1.7,
                shape=(self.ac_config.action_size, ),
                dtype=np.float32)
        elif action_mode == 'trigon':
            if multi_action_space:
                # action_space1 = spaces.MultiBinary(n=1)
                low1 = np.array([-0.8, -0.8])
                high1 = np.array([0.8, 0.8])
                action_space1 = spaces.Box(low=low1,
                                           high=high1,
                                           dtype=np.float32)
                # low = np.array([-0.8, -0.8, 1.0, 3.0, -50, -10, -0.1, -0.1])
                # high = np.array([0.8, 0.8, 3.0, 5.0, 50, 10, 0.1, 0.1])
                low2 = np.array([1.0])
                high2 = np.array([3.0])
                action_space2 = spaces.Box(low=low2,
                                           high=high2,
                                           dtype=np.float32)
                self.action_space = spaces.Tuple(
                    (action_space1, action_space2))
            elif discrete_action_space:
                self.action_space = spaces.MultiDiscrete([4, 4, 4])
            else:
                # low = np.array([0.0, -0.8, -0.8, 1.0, 3.0, -50, -10, -0.1, -0.1])
                # high = np.array([1.0, 0.8, 0.8, 3.0, 5.0, 50, 10, 0.1, 0.1])
                low = np.array([-1, -1, -1])
                high = np.array([1, 1, 1])
                self.action_space = spaces.Box(low=low,
                                               high=high,
                                               dtype=np.float32)

        if observation_mode == 'state' or observation_mode == 'state-goal':
            self.observation_space = spaces.Box(
                low=-np.inf,
                high=np.inf,
                shape=(obs.get_low_dim_data().shape[0] *
                       self.num_skip_control, ))
            if observation_mode == 'state-goal':
                self.goal_dim = obs.get_goal_dim()
        elif observation_mode == 'vision':
            self.observation_space = spaces.Box(
                low=0, high=1, shape=obs.head_camera_rgb.shape)
        elif observation_mode == 'both':
            self.observation_space = spaces.Dict({
                "state":
                spaces.Box(low=-np.inf,
                           high=np.inf,
                           shape=obs.get_low_dim_data().shape),
                "rattler_eye_rgb":
                spaces.Box(low=0, high=1, shape=obs.head_camera_rgb.shape),
                "rattler_eye_depth":
                spaces.Box(low=0, high=1, shape=obs.head_camera_depth.shape),
            })

        self._gym_cam = None

    def _extract_obs(self, obs):
        if self._observation_mode == 'state':
            return obs.get_low_dim_data()
        elif self._observation_mode == 'state-goal':
            obs_goal = {'observation': obs.get_low_dim_data()}
            obs_goal.update(obs.get_goal_data())
            return obs_goal
        elif self._observation_mode == 'vision':
            return obs.head_camera_rgb
        elif self._observation_mode == 'both':
            return {
                "state": obs.get_low_dim_data(),
                "rattler_eye_rgb": obs.head_camera_rgb,
                "rattler_eye_depth": obs.head_camera_depth,
            }

    def render(self, mode='human'):
        if self._gym_cam is None:
            # Add the camera to the scene
            self._gym_cam = VisionSensor('monitor')
            self._gym_cam.set_resolution([640, 640])
            self._gym_cam.set_render_mode(RenderMode.EXTERNAL_WINDOWED)

    def reset(self):
        obs_data_group = []
        obs_data_dict = {
            'observation': [],
            'desired_goal': None,
            'achieved_goal': None
        }
        descriptions, obs = self.task.reset()
        self.epi_obs.append(obs)
        obs_data = self._extract_obs(obs)
        for _ in range(self.num_skip_control):
            # obs_data_group.extend(obs_data)
            if isinstance(obs_data, (list, np.ndarray)):
                obs_data_group.extend(obs_data)
            elif isinstance(obs_data, dict):
                obs_data_dict['observation'].extend(obs_data['observation'])
                obs_data_dict['desired_goal'] = obs_data['desired_goal']
                obs_data_dict['achieved_goal'] = obs_data['achieved_goal']
        ret_obs = obs_data_group if len(obs_data_group) else obs_data_dict
        del descriptions  # Not used
        return ret_obs

    def step(self, action):
        obs_data_group = []
        obs_data_dict = {
            'observation': [],
            'desired_goal': None,
            'achieved_goal': None
        }
        reward_group = []
        terminate = False
        for _ in range(self.num_skip_control):
            obs, reward, step_terminate, success = self.task.step(action)
            self.epi_obs.append(obs)
            obs_data = self._extract_obs(obs)
            if isinstance(obs_data, (list, np.ndarray)):
                obs_data_group.extend(obs_data)
            elif isinstance(obs_data, dict):
                # used by hierarchical reinforcement learning algorithms
                obs_data_dict['observation'].extend(obs_data['observation'])
                obs_data_dict['desired_goal'] = obs_data['desired_goal']
                obs_data_dict['achieved_goal'] = obs_data['achieved_goal']
            reward_group.append(reward)
            terminate |= step_terminate
            if terminate:
                if self.obs_record and success:  # record a successful experience
                    self.record_obs("RobotPos")
                self.epi_obs = []
                break
        ret_obs = obs_data_group if len(obs_data_group) else obs_data_dict
        return ret_obs, np.mean(reward_group), terminate, {}

    def close(self):
        self.env.shutdown()

    # def load_env_param(self):
    #     self.env.load_env_param()

    def compute_reward(self, achieved_goal=None, desired_goal=None, info=None):
        assert achieved_goal is not None
        assert desired_goal is not None
        return self.task.compute_reward(achieved_goal, desired_goal)

    def record_obs(self, obs_part):
        if self.obs_record_id is None:
            record_filenames = glob.glob("./obs_record/obs_record_*.txt")
            record_filenames.sort(key=lambda filename: int(
                filename.split('_')[-1].split('.')[0]))
            if len(record_filenames) == 0:
                self.obs_record_id = 1
            else:
                last_id = int(
                    record_filenames[-1].split('_')[-1].split('.')[0])
                self.obs_record_id = last_id + 1
        else:
            self.obs_record_id += 1
        filename = './obs_record/obs_record_%d.txt' % self.obs_record_id
        with open(filename, 'w') as obs_record_file:
            if obs_part == 'All':
                pass
            if obs_part == 'RobotPos':
                robot_pos_arr = []
                for obs in self.epi_obs:
                    robot_pos = obs.get_2d_robot_pos()
                    robot_pos_arr.append(robot_pos)
                # The last row records the target position
                target_pos = self.task.get_goal()
                robot_pos_arr.append(target_pos)
                robot_pos_arr = np.array(robot_pos_arr)
                np.savetxt(obs_record_file, robot_pos_arr, fmt="%f")
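
Because `step` repeats each command `num_skip_control` times and concatenates the per-step observations, the flat observation in 'state' mode is 20x the single-step size. A short usage sketch (`MySnakeTask` is a placeholder task class):

    env = RLBenchEnv(MySnakeTask, observation_mode='state', action_mode='joint')
    obs = env.reset()  # concatenated over num_skip_control inner steps
    obs, reward, done, info = env.step(env.action_space.sample())
    env.close()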
Example #10
    def __init__(self, task_class, observation_mode='state',
                 randomization_mode="none", rand_config=None, img_size=256,
                 special_start=[], fixed_grip=-1, force_randomly_place=False,
                 force_change_position=False, sparse=False, not_special_p=0,
                 ground_p=0, special_is_grip=False, altview=False,
                 procedural_ind=0, procedural_mode='same', procedural_set=[],
                 is_mesh_obs=False, blank=False, COLOR_TUPLE=[1, 0, 0]):
        # blank: whether to use an all-zeros ("blank") agent observation
        self.blank = blank
        # altview: whether to add a second camera angle. True/False, or
        # "both" to concatenate the two observations.
        self.altview = altview
        self.img_size = img_size
        self.sparse = sparse
        self.task_class = task_class
        self._observation_mode = observation_mode
        self._randomization_mode = randomization_mode
        # special_start is a list of actions to take at the beginning
        self.special_start = special_start
        # fixed_grip is a temporary hack for keeping the gripper a certain
        # way (change later): 0 for fixed closed, 0.1 for fixed open, -1 for
        # not fixed
        self.fixed_grip = fixed_grip
        # force the task to be randomly placed
        self.force_randomly_place = force_randomly_place
        # force the task to change position in addition to rotation
        self.force_change_position = force_change_position

        obs_config = ObservationConfig()
        if observation_mode == 'state':
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        elif observation_mode in ('vision', 'visiondepth', 'visiondepthmask'):
            # obs_config.set_all(True)
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        else:
            raise ValueError(
                'Unrecognised observation_mode: %s.' % observation_mode)

        action_mode = ActionMode(ArmActionMode.DELTA_EE_POSE_PLAN_NOQ)
        print("using delta pose plan")

        if randomization_mode == "random":
            objs = ['target', 'boundary', 'Floor', 'Roof', 'Wall1', 'Wall2',
                    'Wall3', 'Wall4', 'diningTable_visible']
            assert rand_config is not None
            self.env = DomainRandomizationEnvironment(
                action_mode, obs_config=obs_config, headless=True,
                randomize_every=RandomizeEvery.EPISODE, frequency=1,
                visual_randomization_config=rand_config
            )
        else:
            self.env = Environment(
                action_mode, obs_config=obs_config, headless=True
            )
        self.env.launch()
        self.task = self.env.get_task(task_class)

        # Probability that the pick-and-lift task starts the gripper off at a
        # certain location (should probably be called non_special_p).
        self.task._task.not_special_p = not_special_p
        # Probability that the goal is on the ground.
        self.task._task.ground_p = ground_p
        # For the "special" case, whether to grip the object or just hover
        # above it.
        self.task._task.special_is_grip = special_is_grip
        # for the procedural env
        self.task._task.procedural_ind = procedural_ind
        # procedural mode: same, increase, or random
        self.task._task.procedural_mode = procedural_mode
        # ideally a list-like object giving the indices to sample from each
        # episode
        self.task._task.procedural_set = procedural_set
        # whether the state observation is a mesh observation
        self.task._task.is_mesh_obs = is_mesh_obs

        self.task._task.sparse = sparse
        self.task._task.COLOR_TUPLE = COLOR_TUPLE

        _, obs = self.task.reset()

        cam_placeholder = Dummy('cam_cinematic_placeholder')
        cam_pose = cam_placeholder.get_pose().copy()
        # custom pose (overrides the placeholder pose)
        cam_pose = [1.59999931, 0., 2.27999949,
                    0.65328157, -0.65328145, -0.27059814, 0.27059814]
        cam_pose[0] = 1
        cam_pose[2] = 1.5
        self.frontcam = VisionSensor.create([img_size, img_size])
        self.frontcam.set_pose(cam_pose)

        self.frontcam.set_render_mode(RenderMode.OPENGL)
        self.frontcam.set_perspective_angle(60)
        self.frontcam.set_explicit_handling(1)

        self.frontcam_mask = VisionSensor.create([img_size, img_size])
        self.frontcam_mask.set_pose(cam_pose)
        self.frontcam_mask.set_render_mode(RenderMode.OPENGL_COLOR_CODED)
        self.frontcam_mask.set_perspective_angle(60)
        self.frontcam_mask.set_explicit_handling(1)

        if altview:
            alt_pose = [0.25, -0.65, 1.5, 0, 0.93879825, 0.34169483, 0]
            self.altcam = VisionSensor.create([img_size, img_size])
            self.altcam.set_pose(alt_pose)
            self.altcam.set_render_mode(RenderMode.OPENGL)
            self.altcam.set_perspective_angle(60)
            self.altcam.set_explicit_handling(1)
            
            self.altcam_mask = VisionSensor.create([img_size, img_size])
            self.altcam_mask.set_pose(alt_pose)
            self.altcam_mask.set_render_mode(RenderMode.OPENGL_COLOR_CODED)
            self.altcam_mask.set_perspective_angle(60)
            self.altcam_mask.set_explicit_handling(1)


        self.action_space = spaces.Box(
            low=-1.0, high=1.0, shape=(action_mode.action_size,))

        if observation_mode == 'state':
            self.observation_space = spaces.Dict({
                "observation": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_state_obs().shape),
                "achieved_goal": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_achieved_goal().shape),
                'desired_goal': spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_desired_goal().shape)
            })
        # Use the front vision cam
        elif observation_mode == 'vision':
            self.frontcam.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                "observation": spaces.Box(
                    low=0, high=1,
                    shape=self.frontcam.capture_rgb().transpose(
                        2, 0, 1).flatten().shape),
            })
            if altview == "both":
                example = self.frontcam.capture_rgb().transpose(2, 0, 1).flatten()
                self.observation_space = spaces.Dict({
                    "state": spaces.Box(
                        low=-np.inf, high=np.inf,
                        shape=obs.get_low_dim_data().shape),
                    "observation": spaces.Box(
                        low=0, high=1,
                        shape=np.array([example, example]).shape),
                })
        elif observation_mode == 'visiondepth':
            self.frontcam.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                # not flattening the shape here, because this mode is
                # primarily used for dataset generation, not for training
                "observation": spaces.Box(
                    low=0, high=1,
                    shape=np.array([
                        self.frontcam.capture_rgb().transpose(2, 0, 1),
                        self.frontcam.capture_depth()[None, ...]
                    ]).shape),
            })
        elif observation_mode == 'visiondepthmask':
            self.frontcam.handle_explicitly()
            self.frontcam_mask.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                # not flattening the shape here, because this mode is
                # primarily used for dataset generation, not for training
                "observation": spaces.Box(
                    low=0, high=1,
                    shape=np.array([
                        self.frontcam.capture_rgb().transpose(2, 0, 1),
                        self.frontcam.capture_depth()[None, ...],
                        rgb_handles_to_mask(self.frontcam_mask.capture_rgb())
                    ]).shape),
            })
            if altview == "both":
                self.observation_space = spaces.Dict({
                    "state": spaces.Box(
                        low=-np.inf, high=np.inf,
                        shape=obs.get_low_dim_data().shape),
                    "observation": spaces.Box(
                        low=0, high=1,
                        shape=np.array([
                            self.frontcam.capture_rgb().transpose(2, 0, 1),
                            self.frontcam.capture_rgb().transpose(2, 0, 1),
                            self.frontcam.capture_depth()[None, ...],
                            rgb_handles_to_mask(
                                self.frontcam_mask.capture_rgb())
                        ]).shape),
                })

        self._gym_cam = None
Example #11
    def act(self, obs):
        # Fragment of a sinusoidal gait policy. The enclosing class is not
        # shown in the source; the method name, the `joints` initialisation
        # and a `self.clk` phase clock starting at 0 are assumptions.
        joints = np.zeros((self.action_size,))
        for i in range(self.action_size):
            if i % 2 == 0:
                # joints[i] = 0.7 * np.sin(2 * self.clk + (i / 2) * 30) + 0
                joints[i] = 0.5 * np.sin(2 * self.clk + (i / 2) * 30)
            else:
                # joints[i] = 0.7 * np.sin(2 * self.clk + (i / 2) * 30 + 12) + 0
                joints[i] = 0.5 * np.sin(4 * self.clk + (i / 2) * 30 + np.pi)

        # return (np.random.normal(-0.7, 0.7, size=(self.action_size,))).tolist()
        self.clk += 0.05
        return joints.tolist()


obs_config = ObservationConfig()
obs_config.set_all(False)
obs_config.set_all_low_dim(True)

action_mode = ActionConfig(SnakeRobotActionConfig.ABS_JOINT_POSITION)
env = Environment(
    action_mode, obs_config=obs_config, headless=False)
env.launch()

task = env.get_task(ReachTarget)

agent = Agent(action_mode.action_size)

training_steps = 200000
episode_length = 100000
obs = None
for i in range(training_steps):
    if i % episode_length == 0:
        # (truncated in the source; completed with the usual example loop,
        # assuming the same task.step signature as in Example #9)
        print('Reset Episode')
        descriptions, obs = task.reset()
    action = agent.act(obs)
    obs, reward, terminate, success = task.step(action)

env.shutdown()
Example #12
class FakeRLBenchEnv(Environment):

    ROBOT_NAME = SUPPORTED_ROBOTS.keys()
    OBSERVATION_MODE = ("state", "vision", "all")
    ACTION_MODE = {
        "joint velocity": ArmActionMode.ABS_JOINT_VELOCITY,
        "delta joint velocity": ArmActionMode.DELTA_JOINT_VELOCITY,
        "joint position": ArmActionMode.ABS_JOINT_POSITION,
        "delta joint position": ArmActionMode.DELTA_JOINT_POSITION,
        "joint torque": ArmActionMode.ABS_JOINT_TORQUE,
        "delta joint torque": ArmActionMode.DELTA_JOINT_TORQUE,
        "effector velocity": ArmActionMode.ABS_EE_VELOCITY,
        "delta effector velocity": ArmActionMode.DELTA_EE_VELOCITY,
        "effector position": ArmActionMode.ABS_EE_POSE,
        "delta effector position": ArmActionMode.DELTA_EE_POSE
    }

    def __init__(self,
                 task_name: str,
                 observation_mode: str = "state",
                 action_mode: str = "delta joint position",
                 robot_name: str = "panda"):
        super(FakeRLBenchEnv, self).__init__(task_name)
        if task_name not in all_class_names(tasks):
            raise KeyError(f"Error: unknown task name {task_name}")
        if observation_mode not in FakeRLBenchEnv.OBSERVATION_MODE:
            raise KeyError(
                f"Error: unknown observation mode {observation_mode}, available: {FakeRLBenchEnv.OBSERVATION_MODE}"
            )
        if action_mode not in FakeRLBenchEnv.ACTION_MODE:
            raise KeyError(
                f"Error: unknown action mode {action_mode}, available: {FakeRLBenchEnv.ACTION_MODE.keys()}"
            )
        if robot_name not in FakeRLBenchEnv.ROBOT_NAME:
            raise KeyError(
                f"Error: unknown robot name {robot_name}, available: {FakeRLBenchEnv.ROBOT_NAME}"
            )

        # TODO: modify the task/robot/arm/gripper to support early instantiation before v-rep launched
        self._observation_mode = observation_mode
        self._action_mode = action_mode
        self._task_name = task_name
        self._robot_name = robot_name

        self._observation_config = ObservationConfig()
        if self._observation_mode == "state":
            self._observation_config.set_all_low_dim(True)
            self._observation_config.set_all_high_dim(False)
        elif self._observation_mode == "vision":
            self._observation_config.set_all_low_dim(False)
            self._observation_config.set_all_high_dim(True)
        elif self._observation_mode == "all":
            self._observation_config.set_all(True)

        self._action_config = ActionMode(
            FakeRLBenchEnv.ACTION_MODE[self._action_mode])

        self.env = None
        self.task = None

        self._update_info_dict()

    def init(self, display=False):
        with suppress_stdout():
            self.env = RLEnvironment(action_mode=self._action_config,
                                     obs_config=self._observation_config,
                                     headless=not display,
                                     robot_configuration=self._robot_name)
            self.env.launch()
            self.task = self.env.get_task(
                get_named_class(self._task_name, tasks))

    def reset(self, random: bool = True) -> StepDict:
        if not random:
            np.random.seed(0)
        self.task._static_positions = not random
        descriptions, obs = self.task.reset()
        # Returns a list of descriptions and the first observation
        next_step = {"opt": descriptions}

        if self._observation_mode == "state" or self._observation_mode == "all":
            next_step['s'] = obs.get_low_dim_data()
        if self._observation_mode == "vision" or self._observation_mode == "all":
            next_step["left_shoulder_rgb"] = obs.left_shoulder_rgb
            next_step["right_shoulder_rgb"] = obs.right_shoulder_rgb
            next_step["wrist_rgb"] = obs.wrist_rgb
        return next_step

    def step(self, last_step: StepDict) -> (StepDict, StepDict, bool):
        assert 'a' in last_step, "Key 'a' for action not in last_step; did you pass the wrong dict?"

        obs, reward, terminate = self.task.step(last_step['a'])
        last_step['r'] = reward
        last_step["info"] = {}
        next_step = {"opt": None}

        if self._observation_mode == "state" or self._observation_mode == "all":
            next_step['s'] = obs.get_low_dim_data()
        if self._observation_mode == "vision" or self._observation_mode == "all":
            next_step["left_shoulder_rgb"] = obs.left_shoulder_rgb
            next_step["right_shoulder_rgb"] = obs.right_shoulder_rgb
            next_step["wrist_rgb"] = obs.wrist_rgb
        return last_step, next_step, terminate

    def finalize(self) -> bool:
        with suppress_stdout():
            self.env.shutdown()
        self.task = None
        self.env = None
        return True

    def name(self) -> str:
        return self._task_name

    # ------------- private methods ------------- #

    def _update_info_dict(self):
        # update info dict
        self._info["action mode"] = self._action_mode
        self._info["observation mode"] = self._observation_mode
        # TODO: action dim should related to robot, not action mode, here we fixed it temporally
        self._info["action dim"] = (self._action_config.action_size, )
        self._info["action low"] = -np.ones(self._info["action dim"],
                                            dtype=np.float32)
        self._info["action high"] = np.ones(self._info["action dim"],
                                            dtype=np.float32)
        if self._observation_mode == "state" or self._observation_mode == "all":
            # TODO: observation should be determined without init the entire environment
            with suppress_stdout():
                env = RLEnvironment(action_mode=self._action_config,
                                    obs_config=self._observation_config,
                                    headless=True,
                                    robot_configuration=self._robot_name)
                env.launch()
                task = env.get_task(get_named_class(self._task_name, tasks))
                _, obs = task.reset()
                env.shutdown()
                del task
                del env
            self._info["time step"] = _DT
            self._info["state dim"] = tuple(obs.get_low_dim_data().shape)
            self._info["state low"] = np.ones(self._info["state dim"],
                                              dtype=np.float32) * -np.inf
            self._info["state high"] = np.ones(self._info["state dim"],
                                               dtype=np.float32) * np.inf
        if self._observation_mode == "vision" or self._observation_mode == "all":
            self._info["left shoulder rgb dim"] = tuple(
                self._observation_config.left_shoulder_camera.image_size) + (
                    3, )
            self._info["left shoulder rgb low"] = np.zeros(
                self._info["left shoulder rgb dim"], dtype=np.float32)
            self._info["left shoulder rgb high"] = np.ones(
                self._info["left shoulder rgb dim"], dtype=np.float32)
            self._info["right shoulder rgb dim"] = tuple(
                self._observation_config.right_shoulder_camera.image_size) + (
                    3, )
            self._info["right shoulder rgb low"] = np.zeros(
                self._info["right shoulder rgb dim"], dtype=np.float32)
            self._info["right shoulder rgb high"] = np.ones(
                self._info["right shoulder rgb dim"], dtype=np.float32)
            self._info["wrist rgb dim"] = tuple(
                self._observation_config.wrist_camera.image_size) + (3, )
            self._info["wrist rgb low"] = np.zeros(self._info["wrist rgb dim"],
                                                   dtype=np.float32)
            self._info["wrist rgb high"] = np.ones(self._info["wrist rgb dim"],
                                                   dtype=np.float32)
        self._info["reward low"] = -np.inf
        self._info["reward high"] = np.inf

    def live_demo(self, amount: int, random: bool = True) -> SampleBatch:
        """
        :param amount: number of demonstration trajectories to generate
        :param random: whether the starting position is random
        :return: observation list: [amount x [(steps-1) x [s, a] + [s_term, None]]]
                 WARNING: the actions here are reconstructed from observations;
                 when executed they may introduce some inaccuracy
        """
        seeds = [rnd.randint(0, 4096) if random else 0 for _ in range(amount)]
        self.task._static_positions = not random

        demo_pack = []
        for seed in seeds:
            np.random.seed(seed)
            pack = self.task.get_demos(1, True)[0]

            demo_traj = []
            np.random.seed(seed)
            desc, obs = self.task.reset()
            v_tar = 0.
            for o_tar in pack[1:]:
                action = []
                if self._action_config.arm == ArmActionMode.ABS_JOINT_VELOCITY:
                    action.extend(
                        (o_tar.joint_positions - obs.joint_positions) / _DT)
                elif self._action_config.arm == ArmActionMode.ABS_JOINT_POSITION:
                    action.extend(o_tar.joint_positions)
                elif self._action_config.arm == ArmActionMode.ABS_JOINT_TORQUE:
                    action.extend(o_tar.joint_forces)
                    raise TypeError(
                        "Warning, abs_joint_torque is not currently supported")
                elif self._action_config.arm == ArmActionMode.ABS_EE_POSE:
                    action.extend(o_tar.gripper_pose)
                elif self._action_config.arm == ArmActionMode.ABS_EE_VELOCITY:
                    # WARNING: this calculation is not exact, since rotations
                    # cannot simply be 'added' together; the original RLBench
                    # does it this way, so we follow it
                    action.extend(
                        (o_tar.gripper_pose - obs.gripper_pose) / _DT)
                elif self._action_config.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
                    v_tar = (o_tar.joint_positions - obs.joint_positions) / _DT
                    action.extend(v_tar - obs.joint_velocities)
                    raise TypeError(
                        "Warning, delta_joint_velocity is not currently supported"
                    )
                elif self._action_config.arm == ArmActionMode.DELTA_JOINT_POSITION:
                    action.extend(o_tar.joint_positions - obs.joint_positions)
                elif self._action_config.arm == ArmActionMode.DELTA_JOINT_TORQUE:
                    action.extend(o_tar.joint_forces - obs.joint_forces)
                    raise TypeError(
                        "Warning, delta_joint_torque is not currently supported"
                    )
                elif self._action_config.arm == ArmActionMode.DELTA_EE_POSE:
                    action.extend(o_tar.gripper_pose[:3] -
                                  obs.gripper_pose[:3])
                    q = Quaternion(o_tar.gripper_pose[3:7]) * Quaternion(
                        obs.gripper_pose[3:7]).conjugate
                    action.extend(list(q))
                elif self._action_config.arm == ArmActionMode.DELTA_EE_VELOCITY:
                    # WARNING: this calculation is not exact, since rotations
                    # cannot simply be 'added' together; the original RLBench
                    # does it this way, so we follow it
                    v_tar_new = (o_tar.gripper_pose - obs.gripper_pose) / _DT
                    action.extend(v_tar_new - v_tar)
                    v_tar = v_tar_new
                    raise TypeError(
                        "Warning, delta_ee_velocity is not currently supported"
                    )

                action.append(1.0 if o_tar.gripper_open > 0.9 else 0.0)
                action = np.asarray(action, dtype=np.float32)
                demo_traj.append({
                    'observation': obs,
                    'a': action,
                    's': obs.get_low_dim_data()
                })
                obs, reward, done = self.task.step(action)
                demo_traj[-1]['r'] = reward

            demo_pack.append(demo_traj)
        return {
            "trajectory": demo_pack,
            "config": "default",
            "policy": "hand-coding",
            "env class": self.__class__.__name__,
            "env name": self._task_name,
            "env config": "default",
            "observation config": self._observation_mode,
            "robot config": self._robot_name,
            "action config": self._action_mode
        }
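
A minimal driving sketch for `FakeRLBenchEnv` (assumes the usual `numpy` import; the zero action simply holds the arm still):

    env = FakeRLBenchEnv('ReachTarget', observation_mode='state',
                         action_mode='delta joint position')
    env.init(display=False)
    step = env.reset(random=True)
    step['a'] = np.zeros(env._info['action dim'], dtype=np.float32)
    step, next_step, done = env.step(step)
    env.finalize()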
Example #13
import sys

from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.observation_config import ObservationConfig
from rlbench.tasks.reach_target import ReachTarget
from agents.td3 import TD3

# set the observation configuration
obs_config = ObservationConfig()

# use only low-dim observations
obs_only_low_dim = True  # currently only low-dim supported
obs_config.set_all_high_dim(not obs_only_low_dim)
obs_config.set_all_low_dim(obs_only_low_dim)

# define action mode
action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)

# create an agent
agent = TD3(argv=sys.argv[1:],
            action_mode=action_mode,
            obs_config=obs_config,
            task_class=ReachTarget)

# run agent
agent.run_real_world()
Example #14
    def __init__(self, task_class, n_features, load, n_movements):
        """
        Learn the movement from demonstration.

        :param task_class: task that we aim to learn
        :param n_features: number of radial basis functions (RBFs) in the space
        :param load: load demonstrations from saved data instead of running live demos
        :param n_movements: how many movements we want to learn
        """

        frequency = 200

        # To use 'saved' demos, set the path below, and set live_demos=False
        live_demos = not load
        DATASET = '' if live_demos else 'datasets'

        obs_config = ObservationConfig()
        obs_config.set_all_low_dim(True)
        obs_config.set_all_high_dim(False)
        self._task_class = task_class
        action_mode = ActionMode(ArmActionMode.ABS_JOINT_POSITION)

        group = Group("rlbench", ["d%d" % i for i in range(7)] + ["gripper"])

        env = Environment(action_mode, DATASET, obs_config, headless=True)
        env.launch()

        task = env.get_task(task_class)

        trajectories = []
        states = []

        lengths = []

        print("Start Demo")
        demos = task.get_demos(n_movements, live_demos=live_demos)
        print("End Demo")

        init = True
        for demo in demos:
            trajectory = NamedTrajectory(*group.refs)
            t = 0
            for ob in demo:
                if t == 0:
                    if init:
                        print("State dim: %d" % ob.task_low_dim_state.shape[0])
                        init = False
                    states.append(ob.task_low_dim_state)
                kwargs = {
                    "d%d" % i: ob.joint_positions[i]
                    for i in range(ob.joint_positions.shape[0])
                }
                kwargs["gripper"] = ob.gripper_open
                trajectory.notify(duration=1 / frequency, **kwargs)
                t += 1
            lengths.append(t / frequency)
            trajectories.append(trajectory)

        space = ClassicSpace(group,
                             n_features=n_features,
                             regularization=1E-15)
        z = np.linspace(-0.2, 1.2, 1000)
        Phi = space.get_phi(z)
        for i in range(n_features):
            plt.plot(z, Phi[:, i])
        plt.show()
        parameters = np.array([
            np.concatenate([
                s,
                np.array([l]),
                LearnTrajectory(space, traj).get_block_params()
            ]) for s, l, traj in zip(states, lengths, trajectories)
        ])
        np.save("parameters/%s_%d.npy" % (task.get_name(), n_features),
                parameters)
        env.shutdown()
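
The parameters saved here under parameters/<task>_<n_features>.npy are the same files Example #3 loads back at construction time. A minimal check sketch (`task` and `n_features` as in the constructor above):

    params = np.load("parameters/%s_%d.npy" % (task.get_name(), n_features))
    print(params.shape)  # one row per demonstrated movement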