Example #1
    def reset(self):
        # Pick point: the blocking object; drop point: the tray.
        self.pick_point = bullet.get_object_position(
            self.env.blocking_object)[0]
        self.pick_point[2] = self.pick_point_z
        self.drop_point = bullet.get_object_position(self.env.tray_id)[0]
        self.drop_point[2] = -0.2

        if self.suboptimal and np.random.uniform() > 0.5:
            # Occasionally perturb the drop point to produce suboptimal placements.
            self.drop_point[0] += np.random.uniform(-0.2, 0.0)
            self.drop_point[1] += np.random.uniform(0.0, 0.2)

        self.place_attempted = False
        self.neutral_taken = False
        self.drawer_policy.reset()
Example #2
    def get_action(self):
        ee_pos, _ = bullet.get_link_state(
            self.env.robot_id, self.env.end_effector_index)
        object_pos, _ = bullet.get_object_position(
            self.env.objects[self.object_to_target])
        object_lifted = object_pos[2] > self.pick_height_thresh_noisy
        gripper_droppoint_dist = np.linalg.norm(self.drop_point - ee_pos)
        done = False

        if gripper_droppoint_dist > 0.02:
            # lifted; now move toward the container
            action_xyz = (self.drop_point - ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [0]
        else:
            # already moved above the container; drop object
            action_xyz = (0., 0., 0.)
            action_angles = [0., 0., 0.]
            action_gripper = [0.7]
            self.place_attempted = True

        agent_info = dict(place_attempted=self.place_attempted, done=done)
        neutral_action = [0.]
        action = np.concatenate(
            (action_xyz, action_angles, action_gripper, neutral_action))
        return action, agent_info
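These scripted policies are driven episode by episode: reset() recomputes the pick and drop points after each environment reset, and get_action() is queried once per control step. Below is a minimal rollout sketch, assuming a gym-style environment; the names env, policy, and max_steps are illustrative and not taken from the source.

def rollout(env, policy, max_steps=50):
    # Illustrative driver loop (an assumption, not part of the source code).
    obs = env.reset()
    policy.reset()  # recompute pick/drop points for the new episode
    agent_info = {}
    for _ in range(max_steps):
        action, agent_info = policy.get_action()
        obs, reward, done, info = env.step(action)
        if done or agent_info.get('done', False):
            break
    return obs, agent_info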
Example #3
    def get_action(self):
        ee_pos, _ = bullet.get_link_state(self.env.robot_id,
                                          self.env.end_effector_index)
        object_pos, _ = bullet.get_object_position(self.env.blocking_object)
        object_lifted = object_pos[2] > self.pick_height_thresh_noisy
        gripper_pickpoint_dist = np.linalg.norm(self.pick_point - ee_pos)
        gripper_droppoint_dist = np.linalg.norm(self.drop_point - ee_pos)
        done = False
        neutral_action = [0.]

        if self.place_attempted:
            # Return to neutral, then open the drawer.
            if self.neutral_taken:
                action, info = self.drawer_policy.get_action()
                action_xyz = action[:3]
                action_angles = action[3:6]
                action_gripper = [action[6]]
                neutral_action = [action[7]]
                done = info['done']
            else:
                action_xyz = [0., 0., 0.]
                action_angles = [0., 0., 0.]
                action_gripper = [0.0]
                neutral_action = [0.7]
                self.neutral_taken = True
        elif gripper_pickpoint_dist > 0.02 and self.env.is_gripper_open:
            # move near the object
            action_xyz = (self.pick_point - ee_pos) * self.xyz_action_scale
            xy_diff = np.linalg.norm(action_xyz[:2] / self.xyz_action_scale)
            if xy_diff > 0.03:
                action_xyz[2] = 0.0
            action_angles = [0., 0., 0.]
            action_gripper = [0.0]
        elif self.env.is_gripper_open:
            # close enough to the object; perform the grasping action
            action_xyz = (self.pick_point - ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [-0.7]
        elif not object_lifted:
            # lift the object above the picking height threshold
            action_xyz = (self.env.ee_pos_init -
                          ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [0.]
        elif gripper_droppoint_dist > 0.02:
            # lifted; now move toward the container
            action_xyz = (self.drop_point - ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [0.]
        else:
            # already moved above the container; drop object
            action_xyz = [0., 0., 0.]
            action_angles = [0., 0., 0.]
            action_gripper = [0.7]
            self.place_attempted = True

        agent_info = dict(place_attempted=self.place_attempted, done=done)
        action = np.concatenate(
            (action_xyz, action_angles, action_gripper, neutral_action))
        return action, agent_info
Example #4
    def get_action(self):
        ee_pos, _ = bullet.get_link_state(self.env.robot_id,
                                          self.env.end_effector_index)
        object_pos, _ = bullet.get_object_position(
            self.env.objects[self.object_to_target])
        object_lifted = object_pos[2] > self.pick_height_thresh_noisy
        object_gripper_dist = np.linalg.norm(object_pos - ee_pos)

        container_pos = self.env.container_position
        target_pos = np.append(container_pos[:2], container_pos[2] + 0.15)
        target_pos = target_pos + np.random.normal(scale=0.01)
        gripper_target_dist = np.linalg.norm(target_pos - ee_pos)
        gripper_target_threshold = 0.03

        done = False

        if self.place_attempted:
            # Avoid picking and placing the object again after one attempt
            action_xyz = [0., 0., 0.]
            action_angles = [0., 0., 0.]
            action_gripper = [0.]
        elif object_gripper_dist > self.dist_thresh and self.env.is_gripper_open:
            # move near the object
            action_xyz = (object_pos - ee_pos) * self.xyz_action_scale
            xy_diff = np.linalg.norm(action_xyz[:2] / self.xyz_action_scale)
            if xy_diff > 0.03:
                action_xyz[2] = 0.0
            action_angles = [0., 0., 0.]
            action_gripper = [0.0]
        elif self.env.is_gripper_open:
            # close enough to the object; perform the grasping action
            action_xyz = (object_pos - ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [-0.7]
        elif not object_lifted:
            # lift the object above the picking height threshold
            action_xyz = (self.env.ee_pos_init -
                          ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [0.]
        elif gripper_target_dist > gripper_target_threshold:
            # lifted; now move toward the container
            action_xyz = (target_pos - ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [0.]
        else:
            # already moved above the container; drop object
            action_xyz = (0., 0., 0.)
            action_angles = [0., 0., 0.]
            action_gripper = [0.7]
            self.place_attempted = True

        agent_info = dict(place_attempted=self.place_attempted, done=done)
        action = np.concatenate((action_xyz, action_angles, action_gripper))
        return action, agent_info
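Across the examples, get_action() concatenates an end-effector position delta, three angle terms (held at zero everywhere above), and a gripper command, optionally followed by a neutral-pose flag (Examples #2, #3, and #10). As a reading aid, here is a hypothetical helper that splits such an action back into its components; the function name and dictionary keys are illustrative, not from the source.

import numpy as np

def split_action(action):
    # Hypothetical helper: unpack the concatenated action
    # [dx, dy, dz, d_roll, d_pitch, d_yaw, gripper(, neutral)].
    action = np.asarray(action)
    parts = {
        'xyz': action[:3],       # end-effector position delta
        'angles': action[3:6],   # angle terms (zero in every example above)
        'gripper': action[6],    # 0.7 opens, -0.7 closes in the examples above
    }
    if action.shape[0] > 7:
        parts['neutral'] = action[7]  # 0.7 requests the return-to-neutral motion
    return parts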
Example #5
    def reset(self):
        # self.dist_thresh = 0.06 + np.random.normal(scale=0.01)
        self.object_to_target = self.env.object_names[np.random.randint(
            self.env.num_objects)]
        self.pick_point = bullet.get_object_position(
            self.env.objects[self.object_to_target])[0]
        if self.object_to_target in GRASP_OFFSETS.keys():
            self.pick_point += np.asarray(GRASP_OFFSETS[self.object_to_target])
        self.pick_point += np.random.normal(scale=self.pick_point_noise,
                                            size=(3, ))
        self.pick_point[2] = self.pick_point_z + np.random.normal(scale=0.01)
Example #6
    def reset(self):
        # self.dist_thresh = 0.06 + np.random.normal(scale=0.01)
        self.object_to_target = self.env.object_names[np.random.randint(
            self.env.num_objects)]
        self.pick_point = bullet.get_object_position(
            self.env.objects[self.object_to_target])[0]
        if self.object_to_target in GRASP_OFFSETS.keys():
            self.pick_point += np.asarray(GRASP_OFFSETS[self.object_to_target])
        self.pick_point[2] = -0.32
        self.drop_point = self.env.container_position
        self.drop_point[2] = -0.2
        self.place_attempted = False
Example #7
    def reset(self):
        # self.dist_thresh = 0.06 + np.random.normal(scale=0.01)
        self.object_to_target = self.env.object_names[np.random.randint(
            self.env.num_objects)]
        if self.suboptimal and np.random.uniform() > 0.5:
            self.pick_point = np.random.uniform(self.env.object_position_low,
                                                self.env.object_position_high)
        else:
            self.pick_point = bullet.get_object_position(
                self.env.objects[self.object_to_target])[0]
        if self.object_to_target in GRASP_OFFSETS.keys():
            self.pick_point += np.asarray(GRASP_OFFSETS[self.object_to_target])
        self.pick_point += np.random.normal(scale=self.pick_point_noise,
                                            size=(3, ))
        self.pick_point[2] = -0.32 + np.random.normal(scale=0.01)
Example #8
    def reset(self):
        # self.dist_thresh = 0.06 + np.random.normal(scale=0.01)
        self.drawer_never_opened = True
        # x-offset sign depends on which side the drawer opens toward.
        offset_coeff = (-1)**(1 - self.env.left_opening)
        self.handle_offset = np.array([offset_coeff * 0.01, 0.0, -0.01])

        self.object_to_target = self.env.object_names[np.random.randint(
            self.env.num_objects)]
        print(self.env.num_objects)
        print(self.object_to_target)
        self.pick_point = bullet.get_object_position(
            self.env.objects[self.object_to_target])[0]
        if self.object_to_target in GRASP_OFFSETS.keys():
            self.pick_point += np.asarray(GRASP_OFFSETS[self.object_to_target])
        self.pick_point += np.random.normal(scale=self.pick_point_noise,
                                            size=(3, ))
        self.pick_point[2] = self.pick_point_z + np.random.normal(scale=0.01)
        self.t_upward = 4
        self.reached_grasp = False
Example #9
    def get_observation(self):
        gripper_state = self.get_gripper_state()
        gripper_binary_state = [float(self.is_gripper_open)]
        ee_pos, ee_quat = bullet.get_link_state(
            self.robot_id, self.end_effector_index)
        object_position, object_orientation = bullet.get_object_position(
            self.objects[self.target_object])
        if self.observation_mode == 'pixels':
            # Flatten the rendered image and scale pixel values to [0, 1].
            image_observation = self.render_obs()
            image_observation = np.float32(image_observation.flatten()) / 255.0
            observation = {
                'object_position': object_position,
                'object_orientation': object_orientation,
                'state': np.concatenate(
                    (ee_pos, ee_quat, gripper_state, gripper_binary_state)),
                'image': image_observation
            }
        else:
            raise NotImplementedError

        return observation
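The 'pixels' observation stores the rendered image flattened and scaled to [0, 1]. The sketch below is a hypothetical inspection helper, not part of the source; the render size image_dim is an assumption and must match whatever render_obs() actually produces.

import numpy as np

def unflatten_image(flat_image, image_dim=48):
    # Hypothetical helper: recover an HxWx3 uint8 image from the flattened
    # float observation produced by get_observation() above.
    img = np.asarray(flat_image).reshape(image_dim, image_dim, 3)
    return (img * 255).astype(np.uint8)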
Example #10
    def get_action(self):
        ee_pos, _ = bullet.get_link_state(self.env.robot_id,
                                          self.env.end_effector_index)
        object_pos, _ = bullet.get_object_position(
            self.env.objects[self.object_to_target])
        object_lifted = object_pos[2] > self.pick_height_thresh
        gripper_pickpoint_dist = np.linalg.norm(self.pick_point - ee_pos)
        done = False
        neutral_action = [0.]

        if gripper_pickpoint_dist > 0.02 and self.env.is_gripper_open:
            # move near the object
            action_xyz = (self.pick_point - ee_pos) * self.xyz_action_scale
            xy_diff = np.linalg.norm(action_xyz[:2] / self.xyz_action_scale)
            if xy_diff > 0.03:
                action_xyz[2] = 0.0
            action_angles = [0., 0., 0.]
            action_gripper = [0.]
        elif self.env.is_gripper_open:
            # close enough to the object; perform the grasping action
            action_xyz = (self.pick_point - ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [-0.7]
        elif not object_lifted:
            # lift the object above the picking height threshold
            action_xyz = (self.env.ee_pos_init -
                          ee_pos) * self.xyz_action_scale
            action_angles = [0., 0., 0.]
            action_gripper = [0.]
            neutral_action = [0.7]
        else:
            # Hold
            action_xyz = (0., 0., 0.)
            action_angles = [0., 0., 0.]
            action_gripper = [0.]

        agent_info = dict(done=done)
        action = np.concatenate(
            (action_xyz, action_angles, action_gripper, neutral_action))
        return action, agent_info
Example #11
    def get_action(self):
        ee_pos, _ = bullet.get_link_state(self.env.robot_id,
                                          self.env.end_effector_index)
        handle_pos = self.env.get_drawer_handle_pos() + self.handle_offset
        gripper_handle_dist = np.linalg.norm(handle_pos - ee_pos)
        gripper_handle_xy_dist = np.linalg.norm(handle_pos[:2] - ee_pos[:2])
        object_pos, _ = bullet.get_object_position(
            self.env.objects[self.object_to_target])
        object_lifted = object_pos[2] > self.pick_height_thresh
        gripper_pickpoint_dist = np.linalg.norm(self.pick_point - ee_pos)
        done = False

        if (gripper_handle_xy_dist > self.gripper_xy_dist_thresh
                and not self.env.is_drawer_open()) and not self.reached_grasp:
            print('xy - approaching handle')
            action_xyz = (handle_pos - ee_pos) * 7.0
            action_xyz = list(action_xyz[:2]) + [0.]  # don't droop down.
            action_angles = [0., 0., 0.]
            action_gripper = [0.0]
        elif (gripper_handle_dist > self.gripper_dist_thresh
              and not self.env.is_drawer_open()) and not self.reached_grasp:
            # moving down toward handle
            action_xyz = (handle_pos - ee_pos) * 7.0
            action_angles = [0., 0., 0.]
            action_gripper = [0.0]
        elif not self.env.is_drawer_open() and not self.reached_grasp:
            print("opening drawer")
            x_command = (-1)**(1 - self.env.left_opening)
            action_xyz = np.array([x_command, 0, 0])
            # action = np.asarray([0., 0., 0.7])
            action_angles = [0., 0., 0.]
            action_gripper = [0.0]
        elif (np.abs(ee_pos[2] - self.ending_height_thresh) >
              self.gripper_dist_thresh) and self.t_upward >= 0:
            # print("Lift upward")
            self.drawer_never_opened = False
            action_xyz = np.array([0, 0, 0.7])  # force upward action
            action_angles = [0., 0., 0.]
            action_gripper = [0.0]
            self.t_upward -= 1
        else:
            self.reached_grasp = True
            if gripper_pickpoint_dist > 0.02 and self.env.is_gripper_open:
                # move near the object
                print('move to object')
                print(self.pick_point)
                action_xyz = (self.pick_point - ee_pos) * self.xyz_action_scale
                xy_diff = np.linalg.norm(action_xyz[:2] /
                                         self.xyz_action_scale)
                if xy_diff > 0.03:
                    action_xyz[2] = 0.0
                action_angles = [0., 0., 0.]
                action_gripper = [0.]
            elif self.env.is_gripper_open:
                # close enough to the object; perform the grasping action
                print('grasp')
                action_xyz = (self.pick_point - ee_pos) * self.xyz_action_scale
                action_angles = [0., 0., 0.]
                action_gripper = [-0.7]
            elif not object_lifted:
                # lift the object above the picking height threshold
                action_xyz = (self.env.ee_pos_init -
                              ee_pos) * self.xyz_action_scale
                action_angles = [0., 0., 0.]
                action_gripper = [0.]
            else:
                # Hold
                action_xyz = (0., 0., 0.)
                action_angles = [0., 0., 0.]
                action_gripper = [0.]

        agent_info = dict(done=done)
        action = np.concatenate((action_xyz, action_angles, action_gripper))
        return action, agent_info