Example #1
class GraspEnv(object):
    def __init__(self, headless, control_mode='joint_velocity'):
        self.headless = headless
        self.reward_offset = 10.0
        self.reward_range = self.reward_offset
        self.penalty_offset = 1
        #self.penalty_offset = 1.
        self.fall_down_offset = 0.1
        self.metadata = []  #gym env argument
        self.control_mode = control_mode

        # launch and set up the scene, and set proxy variables to represent their counterparts in the scene
        self.pr = PyRep()
        if control_mode == 'end_position':
            SCENE_FILE = join(dirname(abspath(__file__)),
                              './scenes/UnicarRobot_ik.ttt')
        elif control_mode == 'joint_velocity':
            SCENE_FILE = join(dirname(abspath(__file__)),
                              './scenes/UnicarRobot.ttt')
        self.pr.launch(SCENE_FILE, headless=headless)
        self.pr.start()
        self.agent = UnicarRobotArm()  # turntable (Drehkranz) + UR10
        #self.gripper = UnicarRobotGreifer()#suction
        #self.suction = UnicarRobotGreifer()
        self.suction = Shape("UnicarRobotGreifer_body_sub0")
        self.proximity_sensor = ProximitySensor('UnicarRobotGreifer_sensor')
        self.table = Shape('UnicarRobotTable')
        self.carbody = Shape('UnicarRobotCarbody')

        if control_mode == 'end_position':
            self.agent.set_control_loop_enabled(True)
            self.action_space = np.zeros(4)
        elif control_mode == 'joint_velocity':
            self.agent.set_control_loop_enabled(False)
            self.action_space = np.zeros(7)
        else:
            raise NotImplementedError
        #self.observation_space = np.zeros(17)
        self.observation_space = np.zeros(20)
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.target = Shape("UnicarRobotTarget")  #Box
        self.agent_ee_tip = self.agent.get_tip()
        self.tip_target = Dummy('UnicarRobotArm_target')
        self.tip_pos = self.agent_ee_tip.get_position()

        # set a proper initial robot gesture or tip position
        if control_mode == 'end_position':
            initial_pos = [0, 1.5, 1.6]
            self.tip_target.set_position(initial_pos)
            #one big step for rotation is enough; with reset_dynamics=True, set the rotation instantaneously
            self.tip_target.set_orientation([0, 0, 0], reset_dynamics=True)
        elif control_mode == 'joint_velocity':
            self.initial_joint_positions = [0, 0, 0, 0, 0, 0, 0]
            self.agent.set_joint_positions(self.initial_joint_positions)
        self.pr.step()
        self.initial_tip_positions = self.agent_ee_tip.get_position()
        self.initial_target_positions = self.target.get_position()

    def _get_state(self):
        '''
        Return state containing arm joint positions/velocities & tip position/orientation.
        '''
        return np.array(self.agent.get_joint_positions() +
                        self.agent.get_joint_velocities() +
                        self.agent_ee_tip.get_position() +
                        self.agent_ee_tip.get_orientation())  #all 20

    def _is_holding(self):
        '''
        Return whether the UnicarRobotTarget is currently held, i.e. detected by the proximity sensor (bool).
        '''
        return self.proximity_sensor.is_detected(self.target)

    def _move(self,
              action,
              bounding_offset=0.15,
              step_factor=0.2,
              max_itr=20,
              max_error=0.05,
              rotation_norm=5):
        pos = self.suction.get_position()

        if pos[0]+action[0] > POS_MIN[0]-bounding_offset and pos[0]+action[0] < POS_MAX[0]-bounding_offset \
            and pos[1]+action[1] > POS_MIN[1]-bounding_offset and pos[1]+action[1] < POS_MAX[1]-bounding_offset \
            and pos[2]+action[2] > POS_MIN[2]-2*bounding_offset:

            ori_z = -self.agent_ee_tip.get_orientation(
            )[2]  # the minus is because the mismatch between the set_orientation() and get_orientation()
            #ori_z = self.agent_ee_tip.get_orientation()[2]
            target_pos = np.array(self.agent_ee_tip.get_position() +
                                  np.array(action[:3]))
            diff = 1
            itr = 0
            while np.sum(np.abs(diff)) > max_error and itr < max_itr:
                itr += 1
                # set pos in small step
                cur_pos = self.agent_ee_tip.get_position()
                diff = target_pos - cur_pos
                pos = cur_pos + step_factor * diff
                self.tip_target.set_position(pos.tolist())
                self.pr.step()

            ori_z += rotation_norm * action[3]
            self.tip_target.set_orientation([0, np.pi, ori_z])
            self.pr.step()

        else:
            print("Potantial Movement Out of the Bounding Box!")
            self.pr.step()

    def reinit(self):
        self.shutdown()
        self.__init__(self.headless, self.control_mode)  # re-initialize with the same headless and control modes

    def reset(self, random_target=False):
        '''
        Get a random position within a cuboid and set the target position
        '''
        # set target object
        if random_target:
            pos = list(np.random.uniform(POS_MIN, POS_MAX))
            self.target.set_position(pos)
        else:
            self.target.set_position(self.initial_target_positions)
        self.target.set_orientation([0, 0, 0])
        self.pr.step()

        #set end position to be initialized
        if self.control_mode == 'end_position':
            self.agent.set_control_loop_enabled(True)  # IK mode
            self.tip_target.set_position(self.initial_tip_positions)
            self.pr.step()
            itr = 0
            max_itr = 10
            while np.sum(
                    np.abs(
                        np.array(self.agent_ee_tip.get_position() -
                                 np.array(self.initial_tip_positions)))
            ) > 0.1 and itr < max_itr:
                itr += 1
                self.step(np.random.uniform(-0.2, 0.2, 4))
                self.pr.step()
        elif self.control_mode == 'joint_velocity':  #JointMode Force
            self.agent.set_joint_positions(self.initial_joint_positions)
            self.pr.step()

        #set collidable, for collision detection
        #self.gripper.set_collidable(True)
        self.suction.set_collidable(True)
        self.target.set_collidable(True)

        return self._get_state()  #return the current state of the environment

    def step(self, action):
        '''
        Move the robot arm according to the control mode:
        if control_mode == 'end_position', action is 3 dims of tip (end of robot arm) position values + 1 dim of suction rotation;
        if control_mode == 'joint_velocity', action is 7 dims of joint velocities + 1 dim of suction rotation.
        '''
        #initialization
        done = False  #episode finishes
        reward = 0
        hold_flag = False  #hold the object or not
        if self.control_mode == 'end_position':
            if action is None or action.shape[0] != 4:  #check action is valid
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-0.1, 0.1, 4))
            self._move(action)
        elif self.control_mode == 'joint_velocity':
            if action is None or action.shape[0] != 7:  # check if action is valid
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-1, 1, 7))
            self.agent.set_joint_target_velocities(action)
            self.pr.step()

        else:
            raise NotImplementedError

        #ax,ay,az = self.gripper.get_position()#gripper position
        ax, ay, az = self.suction.get_position()  #suction position
        if math.isnan(
                ax):  #capture the broken suction cases during exploration
            print("Suction position is nan.")
            self.reinit()
            done = True

        desired_position_tip = [0.0, 1.5513, 1.74]
        desired_orientation_tip = [-np.pi, 0, 0.001567]
        tip_x, tip_y, tip_z = self.agent_ee_tip.get_position(
        )  #end_effector position
        tip_row, tip_pitch, tip_yaw = self.agent_ee_tip.get_orientation(
        )  #end_effector orientation
        tx, ty, tz = self.target.get_position()  #box position
        offset = 0.312  # augmented reward: offset of target position above the target object
        # distance between the tip pose and the desired tip pose
        # (note: despite the names below, these are Euclidean norms, not squared distances)
        sqr_distance = np.sqrt((tip_x - desired_position_tip[0])**2 +
                               (tip_y - desired_position_tip[1])**2 +
                               (tip_z - desired_position_tip[2])**2)
        sqr_orientation = np.sqrt((tip_row - desired_orientation_tip[0])**2 +
                                  (tip_pitch - desired_orientation_tip[1])**2 +
                                  (tip_yaw - desired_orientation_tip[2])**2)
        ''' for visual-based control only, large time consumption! '''
        # current_vision = self.vision_sensor.capture_rgb()  # capture a screenshot of the view with vision sensor
        # plt.imshow(current_vision)
        # plt.savefig('./img/vision.png')
        #Enable the suction if close enough to the object and the object is detected with the proximity sensor
        if sqr_distance < 0.001 and self.proximity_sensor.is_detected(
                self.target) == True and sqr_orientation < 0.001:
            # make sure the suction is not already engaged before grasping
            self.suction.release()
            self.pr.step()
            self.suction.grasp(self.target)
            self.pr.step()

            if self._is_holding():
                reward += self.reward_offset
                done = True
                hold_flag = True
            else:
                self.suction.release()
                self.pr.step()
        else:
            pass

        #the base reward is negative distance from suction to target
        #reward -= (np.sqrt(sqr_distance))

        #case when the object falls off the table
        if tz < self.initial_target_positions[
                2] - self.fall_down_offset:  #tz is target(box) position in z direction
            done = True
            reward -= self.reward_offset

        # Augmented reward for orientation: better grasping gesture if the suction has vertical orientation to the target object

        desired_position_tip = [0.0, 1.5513, 1.74]
        tip_x, tip_y, tip_z = self.agent_ee_tip.get_position()
        tip_row, tip_pitch, tip_yaw = self.agent_ee_tip.get_orientation()

        reward -= (np.sqrt((tip_x - desired_position_tip[0])**2 +
                           (tip_y - desired_position_tip[1])**2 +
                           (tip_z - desired_position_tip[2])**2) +
                   np.sqrt((tip_row - desired_orientation_tip[0])**2 +
                           (tip_pitch - desired_orientation_tip[1])**2 +
                           (tip_yaw - desired_orientation_tip[2])**2))

        #Penalty for collision with the table
        if self.suction.check_collision(
                self.table) or self.suction.check_collision(
                    self.carbody) or self.agent.check_collision(
                        self.table) or self.suction.check_collision(
                            self.target) or self.agent.check_collision(
                                self.target):
            reward -= self.penalty_offset

        if math.isnan(reward):
            reward = 0.

        return self._get_state(), reward, done, {'finished': hold_flag}

    def shutdown(self):
        '''close the simulator'''
        self.pr.stop()
        self.pr.shutdown()
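
A minimal smoke-test rollout for the environment above, assuming the class and its scene files are importable from the surrounding module; the episode count, rollout length, and action range here are illustrative, not from the source:

import numpy as np

if __name__ == '__main__':
    env = GraspEnv(headless=True, control_mode='joint_velocity')
    for episode in range(2):
        state = env.reset(random_target=False)
        for t in range(30):
            # 7-dim joint velocities, matching env.action_space above
            action = np.random.uniform(-1., 1., env.action_space.shape[0])
            state, reward, done, info = env.step(action)
            if done:
                break
    env.shutdown()
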
Example #2
    def test_check_collision_all(self):
        c1 = Shape('colliding_cube0')
        self.assertTrue(c1.check_collision(None))
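
For context, a short sketch of the two call forms, as we read the PyRep Object API: passing None (as in the test above) checks collision against all collidable objects in the scene, while passing another object checks only that pair.

cube = Shape('colliding_cube0')
other = Shape('colliding_cube1')
hit_anything = cube.check_collision(None)   # True if colliding with any collidable object
hit_other = cube.check_collision(other)     # True only if colliding with `other`
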
Example #3
class ReacherEnv(object):
    def __init__(self,
                 headless,
                 control_mode='end_position',
                 visual_control=False):
        '''
        :visual_control: bool, whether the agent is controlled via the visual state (otherwise the vector state).
        '''
        self.reward_offset = 10.0
        self.reward_range = self.reward_offset  # reward range for register gym env when using vectorized env wrapper
        self.fall_down_offset = 0.1  # for judging the target object fall off the table
        self.metadata = []  # gym env format
        self.visual_control = visual_control
        self.control_mode = control_mode
        self.pr = PyRep()
        if control_mode == 'end_position':  # need to use different scene, the one with all joints in inverse kinematics mode
            SCENE_FILE = join(dirname(abspath(__file__)),
                              './scenes/sawyer_reacher_rl_new_ik.ttt')
        elif control_mode == 'joint_velocity':  # the scene with all joints in force/torque mode for forward kinematics
            SCENE_FILE = join(dirname(abspath(__file__)),
                              './scenes/sawyer_reacher_rl_new.ttt')
        self.pr.launch(SCENE_FILE, headless=headless)
        self.pr.start()
        self.agent = Sawyer()
        self.gripper = BaxterGripper()
        self.gripper_left_pad = Shape(
            'BaxterGripper_leftPad')  # the pad on the gripper finger
        self.proximity_sensor = ProximitySensor(
            'BaxterGripper_attachProxSensor'
        )  # need the name of the sensor here
        self.vision_sensor = VisionSensor(
            'Vision_sensor')  # need the name of the sensor here
        if control_mode == 'end_position':
            self.agent.set_control_loop_enabled(True)  # if False, inverse kinematics won't work
            self.action_space = np.zeros(
                4)  # 3 DOF end position control + 1 rotation of gripper
        elif control_mode == 'joint_velocity':
            self.agent.set_control_loop_enabled(False)
            self.action_space = np.zeros(
                8)  # 7 DOF velocity control + 1 rotation of gripper
        else:
            raise NotImplementedError
        if self.visual_control == False:
            self.observation_space = np.zeros(
                17
            )  # position and velocity of 7 joints + position of the target
        else:
            self.observation_space = np.zeros(100)  # dim of img!
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.target = Shape('target')  # object
        self.tip_target = Dummy(
            'Sawyer_target')  # the target point of the tip to move towards
        # self.table = Shape('diningTable')
        self.agent_ee_tip = self.agent.get_tip()
        self.tip_pos = self.agent_ee_tip.get_position()
        self.tip_quat = self.agent_ee_tip.get_quaternion(
        )  # tip rotation as quaternion, used when the rotation is not controlled

        # set a proper initial gesture/tip position
        if control_mode == 'end_position':
            # agent_position=self.agent.get_position()
            # initial_pos_offset = [0.4, 0.3, 0.2]  # initial relative position of gripper to the whole arm
            # initial_pos = [(a + o) for (a, o) in zip(agent_position, initial_pos_offset)]
            initial_pos = [0.3, 0.1, 0.9]
            self.tip_target.set_position(initial_pos)
            # one big step for rotation setting is enough, with reset_dynamics=True, set the rotation instantaneously
            self.tip_target.set_orientation(
                [0, 3.1415, 1.5707],
                reset_dynamics=True)  # make gripper face downwards
            self.pr.step()
        elif control_mode == 'joint_velocity':
            self.initial_joint_positions = [
                0.001815199851989746, -1.4224984645843506, 0.704303503036499,
                2.54307222366333, 2.972468852996826, -0.4989511966705322,
                4.105560302734375
            ]
            self.agent.set_joint_positions(self.initial_joint_positions)

        self.initial_joint_positions = self.agent.get_joint_positions()
        self.initial_tip_positions = self.agent_ee_tip.get_position()
        self.initial_target_positions = self.target.get_position()

    def _get_state(self):
        # Return state containing arm joint angles/velocities & target position
        return np.array(self.agent.get_joint_positions() +
                        self.agent.get_joint_velocities() +
                        self.target.get_position())

    def _is_holding(self):
        '''
        Return whether the target is currently held (bool).
        '''
        # Note that the collision check is not always accurate:
        # for a continuous collision, maybe only the first 4-5 frames of collision can be detected
        pad_collide_object = self.gripper_left_pad.check_collision(self.target)
        return pad_collide_object and self.proximity_sensor.is_detected(self.target)

    def _get_visual_state(self):
        # Return a numpy array of size (width, height, 3)
        return self.vision_sensor.capture_rgb(
        )  # A numpy array of size (width, height, 3)


    # def _move(self, action):
    #     '''
    #     Move the tip according to the action with inverse kinematics for 'end_position' control;
    #     with control of tip target in inverse kinematics mode instead of using .solve_ik() in forward kinematics mode.
    #     '''
    #     robot_moving_unit=0.01  # the amount of single step move of robot, not accurate
    #     moving_loop_itr=int(np.sum(np.abs(action[:3]))/robot_moving_unit)+1  # adaptive number of moving steps, with minimal of 1 step.
    #     # print(moving_loop_itr)
    #     small_step = list(1./moving_loop_itr*np.array(action))  # break the action into small steps, as the robot cannot move to the target position within one frame
    #     pos=self.agent_ee_tip.get_position()

    #     '''
    #     there is a mismatch between the object set_orientation() and get_orientation():
    #     the (x,y,z) in set_orientation() will be (y,x,-z) in get_orientation().
    #     '''
    #     ori_z=-self.agent_ee_tip.get_orientation()[2] # the minus is because the mismatch between the set and get
    #     assert len(small_step) == len(pos)+1  # 3 values for position, 1 value for rotation

    #     for _ in range(moving_loop_itr):
    #         for idx in range(len(pos)):
    #             pos[idx] += small_step[idx]
    #         self.tip_target.set_position(pos)
    #         self.pr.step()
    #         ''' deprecated! no need to use small steps for the rotation with reset_dynamics=True'''
    #         # ori_z+=small_step[3]  # change the orientation along z-axis with a small step
    #         # self.tip_target.set_orientation([0,3.1415,ori_z], reset_dynamics=True)  # make gripper face downwards
    #         # self.pr.step()
    #     ''' one big step for z-rotation is enough, with reset_dynamics=True, set the rotation instantaneously '''
    #     ori_z+=action[3]
    #     self.tip_target.set_orientation([0,3.1415,ori_z], reset_dynamics=True)  # make gripper face downwards
    #     self.pr.step()

    def _move(self, action):
        ''' 
        Move the tip according to the action with inverse kinematics for 'end_position' control;
        with control of tip target in inverse kinematics mode instead of using .solve_ik() in forward kinematics mode.
        Mode 2: a closed-loop control, using ik.
        '''
        pos = self.gripper.get_position()
        bounding_offset = 0.1
        step_factor = 0.2  # small step factor multiplied on the gradient step calculated by inverse kinematics
        max_itr = 20  # maximum moving iterations
        max_error = 0.1  # upper bound of distance error for movement at each call
        rotation_norm = 5.  # factor for normalization of rotation values
        # check if state+action will be within of the bounding box, if so, move normally; else no action.
        #  x_min < x < x_max  and  y_min < y < y_max  and  z > z_min
        if pos[0]+action[0]>POS_MIN[0]-bounding_offset and pos[0]+action[0]<POS_MAX[0]+bounding_offset  \
            and pos[1]+action[1] > POS_MIN[1]-bounding_offset and pos[1]+action[1] < POS_MAX[1]+bounding_offset  \
            and pos[2]+action[2] > POS_MIN[2]-bounding_offset:
            ''' 
            there is a mismatch between the object set_orientation() and get_orientation():
            the (x,y,z) in set_orientation() will be (y,x,-z) in get_orientation().
            '''
            ori_z = -self.agent_ee_tip.get_orientation()[
                2]  # the minus is because the mismatch between the set and get
            target_pos = np.array(self.agent_ee_tip.get_position()) + np.array(
                action[:3])
            diff = 1
            itr = 0
            # print('before: ', ori_z)
            while np.sum(np.abs(diff)) > max_error and itr < max_itr:
                itr += 1
                # set pos in small step
                cur_pos = self.agent_ee_tip.get_position()
                diff = target_pos - cur_pos
                pos = cur_pos + step_factor * diff
                self.tip_target.set_position(pos.tolist())
                self.pr.step()
            ''' one big step for z-rotation is enough '''
            ori_z += rotation_norm * action[3]
            self.tip_target.set_orientation([0, np.pi, ori_z
                                             ])  # make gripper face downwards
            self.pr.step()

        else:
            # print("Potential Movement Out of the Bounding Box!")
            pass  # no action if potentially out of the bounding box

    def reset(self):
        # Get a random position within a cuboid and set the target position
        max_itr = 10
        pos = list(np.random.uniform(POS_MIN, POS_MAX))
        self.target.set_position(pos)
        self.target.set_orientation([0, 0, 0])
        self.pr.step()
        # changing the color or texture for domain randomization
        self.target.set_color(
            np.random.uniform(low=0, high=1, size=3).tolist()
        )  # set [r,g,b] 3 channel values of object color
        # set end position to be initialized
        if self.control_mode == 'end_position':  # JointMode.IK
            self.agent.set_control_loop_enabled(True)
            self.tip_target.set_position(
                self.initial_tip_positions
            )  # cannot set joint positions directly because the arm is in IK mode, not force/torque mode
            self.pr.step()
            # prevent stuck case
            itr = 0
            while np.sum(
                    np.abs(
                        np.array(self.agent_ee_tip.get_position() -
                                 np.array(self.initial_tip_positions)))
            ) > 0.1 and itr < max_itr:
                itr += 1
                self.step(np.random.uniform(
                    -0.2, 0.2,
                    4))  # take random actions for preventing the stuck cases
                self.pr.step()
        elif self.control_mode == 'joint_velocity':  # JointMode.FORCE
            self.agent.set_joint_positions(
                self.initial_joint_positions
            )  # sometimes the gripper gets stuck and cannot return to the initial position
            self.pr.step()

        # self.table.set_collidable(True)
        self.gripper_left_pad.set_collidable(
            True
        )  # set the pad on the gripper to be collidable, so as to check collision
        self.target.set_collidable(True)
        if np.sum(self.gripper.get_open_amount()) < 1.5:
            self.gripper.actuate(1, velocity=0.5)  # open the gripper
            self.pr.step()
        if self.visual_control:
            return self._get_visual_state()
        else:
            return self._get_state()

    def step(self, action):
        '''
        Move the robot arm according to the action.
        If control_mode=='joint_velocity', action is 7 dim of joint velocity values + 1 dim of gripper rotation.
        if control_mode=='end_position', action is 3 dim of tip (end of robot arm) position values + 1 dim of gripper rotation.
        '''
        if self.control_mode == 'end_position':
            if action is None or action.shape[0] != 4:
                action = list(np.random.uniform(-0.1, 0.1, 4))  # random
            self._move(action)
        elif self.control_mode == 'joint_velocity':
            if action is None or action.shape[0] != 7:
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-0.1, 0.1, 7))  # random
            self.agent.set_joint_target_velocities(
                action)  # Execute action on arm
            self.pr.step()
        else:
            raise NotImplementedError

        ax, ay, az = self.gripper.get_position()
        tx, ty, tz = self.target.get_position()
        # Reward is negative distance to target
        distance = (ax - tx)**2 + (ay - ty)**2 + (az - tz)**2
        done = False

        # print(self.proximity_sensor.is_detected(self.target))
        current_vision = self.vision_sensor.capture_rgb(
        )  # capture a screenshot of the view with the vision sensor (large time consumption)
        plt.imshow(current_vision)
        plt.savefig('./img/vision.png')

        reward = 0
        # close the gripper if close enough to the object and the object is detected with the proximity sensor
        if distance < 0.1 and self.proximity_sensor.is_detected(
                self.target) == True:
            # make sure the gripper is open before grasping
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

            self.gripper.actuate(
                0, velocity=0.5
            )  # if done, close the hand, 0 for close and 1 for open.
            self.pr.step()  # Step the physics simulation

            if self._is_holding():
                # reward for hold here!
                reward += 10
                done = True

            else:
                self.gripper.actuate(1, velocity=0.5)
                self.pr.step()

        elif np.sum(
                self.gripper.get_open_amount()
        ) < 1.5:  # if the gripper is closed due to collision or else, open it; .get_open_amount() returns a list of gripper joint values
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

        else:
            pass

        reward -= np.sqrt(distance)

        if tz < self.initial_target_positions[
                2] - self.fall_down_offset:  # the object fall off the table
            done = True
            reward = -self.reward_offset

        if self.visual_control:
            return self._get_visual_state(), reward, done, {}
        else:
            return self._get_state(), reward, done, {}

    def shutdown(self):
        self.pr.stop()
        self.pr.shutdown()
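
A hedged usage sketch for ReacherEnv, assuming the class and its dependencies (the Sawyer scene, POS_MIN/POS_MAX) come from the surrounding module. Note that step() validates a 7-dim action in 'joint_velocity' mode even though action_space is declared with 8 dims above, so this sketch passes 7 values:

import numpy as np

if __name__ == '__main__':
    env = ReacherEnv(headless=True, control_mode='joint_velocity',
                     visual_control=False)
    state = env.reset()
    for t in range(50):
        action = np.random.uniform(-0.1, 0.1, 7)  # 7-dim joint velocities
        state, reward, done, _ = env.step(action)
        if done:
            break
    env.shutdown()
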
Example #4
    def test_check_collision(self):
        c1 = Shape('colliding_cube0')
        c2 = Shape('colliding_cube1')
        self.assertTrue(c1.check_collision(c2))
Example #5
class EnvPollos(Env):
    def __init__(self, ep_length=100):
        """
        Pollos environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episodes in timesteps
        """
        logging.basicConfig(level=logging.DEBUG)
        # they have to be symmetric; we interpret actions as angular increments
        #self.action_space = Box(low=np.array([-0.1, -1.7, -2.5, -1.5, 0.0, 0.0]),
        #                        high=np.array([0.1, 1.7, 2.5, 1.5, 0.0, 0.0]))
        inc = 0.1
        self.action_space = Box(low=np.array(
            [-inc, -inc, -inc, -inc, 0.0, 0.0]),
                                high=np.array([inc, inc, inc, inc, 0.0, 0.0]))
        self.observation_space = Box(low=0,
                                     high=255,
                                     shape=(480, 640, 1),
                                     dtype=np.uint8)

        # self.observation_space = Box(low=np.array([-0.1, -0.2, 0, -1.5, 0.0, 0.0, -0.5, -0.2, -0.2]),
        #                              high=np.array([0.1, 1.7, 2.5, 0, 0.0, 0.0, 0.5, 0.2, 1.0]))

        #
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.agent = UR10()
        self.agent.max_velocity = 1.2
        self.agent.set_control_loop_enabled(True)
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.joints = [
            Joint('UR10_joint1'),
            Joint('UR10_joint2'),
            Joint('UR10_joint3'),
            Joint('UR10_joint4'),
            Joint('UR10_joint5'),
            Joint('UR10_joint6')
        ]
        #self.joints_limits = [[j.get_joint_interval()[1][0],j.get_joint_interval()[1][0]+j.get_joint_interval()[1][1]]
        #                      for j in self.joints]
        self.high_joints_limits = [0.1, 1.7, 2.7, 0.0, 0.1, 0.1]
        self.low_joints_limits = [-0.1, -0.2, 0.0, -1.5, -0.1, -0.1]
        self.agent_hand = Shape('hand')

        self.initial_agent_tip_position = self.agent.get_tip().get_position()
        self.initial_agent_tip_quaternion = self.agent.get_tip(
        ).get_quaternion()
        self.target = Dummy('UR10_target')
        self.initial_joint_positions = self.agent.get_joint_positions()
        self.pollo_target = Dummy('pollo_target')
        self.pollo = Shape('pollo')
        self.table_target = Dummy('table_target')
        self.initial_pollo_position = self.pollo.get_position()
        self.initial_pollo_orientation = self.pollo.get_quaternion()
        self.table_target = Dummy('table_target')
        # objects
        self.scene_objects = [
            Shape('table0'),
            Shape('Plane'),
            Shape('Plane0'),
            Shape('ConcretBlock')
        ]
        #self.agent_tip = self.agent.get_tip()
        self.initial_distance = np.linalg.norm(
            np.array(self.initial_pollo_position) -
            np.array(self.initial_agent_tip_position))

        # camera
        self.camera = VisionSensor('kinect_depth')
        self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(
            self.camera.get_handle(), -1)
        self.np_camera_matrix_extrinsics = np.delete(
            np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
        width = 640.0
        height = 480.0
        angle = math.radians(57.0)
        focalx_px = (width / 2.0) / math.tan(angle / 2.0)
        focaly_px = (height / 2.0) / math.tan(angle / 2.0)
        self.np_camera_matrix_intrinsics = np.array([[-focalx_px, 0.0, 320.0],
                                                     [0.0, -focalx_px, 240.0],
                                                     [0.0, 0.0, 1.0]])

        self.reset()

    def reset(self):
        pos = list(
            np.random.uniform([-0.1, -0.1, 0.0], [0.1, 0.1, 0.1]) +
            self.initial_pollo_position)
        self.pollo.set_position(pos)
        self.pollo.set_quaternion(self.initial_pollo_orientation)
        self.agent.set_joint_positions(self.initial_joint_positions)
        self.initial_epoch_time = time.time()
        while True:  # wait for arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1):  # all joints have (nearly) stopped
                break
        # after roughly two thousand resets, the ROBOT has to be reloaded because it falls apart
        #if self.num_resets > 2000:

        self.total_reward = 0.0
        print('----------------')

        return self._get_state()

    def step(self, action):
        if action is None:
            self.pr.step()
            return self._get_state(), 0.0, False, {}

        # check for nan
        if np.any(np.isnan(action)):
            print(action)
            return self._get_state(), -1.0, False, {}

        # action[1] = np.interp(action[1], [-1.7,1.7], [-0.2,1.7])
        # action[2] = np.interp(action[2], [-2.5,2.5], [0,2.5])
        # action[3] = np.interp(action[3], [-1.5,1.5], [-1.5,0])

        # add actions as increments to current joints value
        new_joints = np.array(
            self.agent.get_joint_positions()) + np.array(action)

        # check limits
        if np.any(np.greater(new_joints, self.high_joints_limits)) or np.any(
                np.less(new_joints, self.low_joints_limits)):
            logging.debug("New joints value out of limits r=-10")
            return self._get_state(), -10.0, True, {}

        # move the robot and wait to stop
        self.agent.set_joint_target_positions(new_joints)
        reloj = time.time()
        while True:  # wait for arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1) or (time.time() - reloj) > 3:
                break  # arm has stopped, or 3-second timeout

        # compute relevant magnitudes
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_robot_tip_position = np.array(self.agent.get_tip().get_position())
        dist = np.linalg.norm(np_pollo_target - np_robot_tip_position)
        altura = np.clip((np_pollo_target[2] - self.initial_pollo_position[2]),
                         0, 0.5)

        for obj in self.scene_objects:  # collision with the table or other scene objects
            if self.agent_hand.check_collision(obj):
                logging.debug("Collision with objects r=-10")
                self.total_reward = self.total_reward + -10.0
                print(self.total_reward)
                return self._get_state(), -10.0, True, {}
        if altura > 0.3:  # chicken in hand (lifted high enough)
            logging.debug("Height reached !!! r=30")
            self.total_reward = self.total_reward + 30.0
            print(self.total_reward)
            return self._get_state(), 30.0, True, {}
        elif dist > self.initial_distance:  # hand is moving away from the chicken
            logging.debug("Hand moving away from chicken r=-10")
            self.total_reward = self.total_reward + -10.0
            print(self.total_reward)
            return self._get_state(), -10.0, True, {}
        used_time = time.time() - self.initial_epoch_time
        if used_time > 5:  # too much time
            logging.debug("Time out. End of epoch r=-10")
            if altura > 0.01:
                self.total_reward = self.total_reward + altura
            else:
                self.total_reward = self.total_reward - 10
            print(self.total_reward)
            return self._get_state(), -10.0, True, {}

        # Reward
        #pollo_height = np.exp(-altura*20)  # for 0.3 m, pollo_height = 0.002; for 0 m, pollo_height = 1
        reward = altura - dist - used_time
        self.total_reward = self.total_reward + reward
        #print(self.total_reward)
        return self._get_state(), reward, False, {}

    def close(self):
        self.pr.stop()
        self.pr.shutdown()

    def render(self):
        print("RENDER")
        np_pollo_en_camara = self.getPolloEnCamara()

        # capture an image from the camera with capture_rgb() (displayed below with a heat colormap)
        depth = self.camera.capture_rgb()
        circ = plt.Circle(
            (int(np_pollo_en_camara[0]), int(np_pollo_en_camara[1])), 10)
        plt.clf()
        fig = plt.gcf()
        ax = fig.gca()
        ax.add_artist(circ)
        ax.imshow(depth, cmap='hot')
        plt.pause(0.000001)

    # Aux

    # transform env.pollo_target.get_position() to camera coordinates and project it to image coordinates
    def getPolloEnCamara(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_target_cam = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        np_pollo_en_camara = self.np_camera_matrix_intrinsics.dot(
            np_pollo_target_cam)
        np_pollo_en_camara = np_pollo_en_camara / np_pollo_en_camara[2]
        np_pollo_en_camara = np.delete(np_pollo_en_camara, 2)
        return np_pollo_en_camara

    def getPolloEnCamaraEx(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_en_camara = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        return np_pollo_en_camara

    def _get_state(self):
        depth = self.camera.capture_depth()
        scaler = MinMaxScaler(feature_range=(0, 250))
        scaler = scaler.fit(depth)
        depth_scaled = scaler.transform(depth)
        depth_scaled = np.array(depth_scaled).reshape(480, 640, 1)
        return depth_scaled.astype('uint8')

    def vrepToNP(self, c):
        return np.array([[c[0], c[4], c[8], c[3]], [c[1], c[5], c[9], c[7]],
                         [c[2], c[6], c[10], c[11]], [0, 0, 0, 1]])
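
The projection in getPolloEnCamara() is a standard pinhole model: transform the world point with the 3x4 extrinsic matrix, multiply by the 3x3 intrinsics, then divide by depth. A standalone sketch of the same arithmetic (the function and parameter names are assumptions; the matrices correspond to those built in __init__ above):

import numpy as np

def project_world_point_to_pixel(p_world, extrinsics_3x4, intrinsics_3x3):
    p_cam = extrinsics_3x4.dot(np.append(p_world, 1.0))  # world -> camera frame
    p_img = intrinsics_3x3.dot(p_cam)                    # camera -> homogeneous pixel coords
    p_img = p_img / p_img[2]                             # perspective divide
    return p_img[:2]                                     # (u, v) pixel coordinates
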
Example #6
class GraspEnv(object):
    ''' Sawyer robot grasping a cuboid '''
    def __init__(self, headless, control_mode='joint_velocity'):
        '''
        parameters:
        :headless: bool, if True, no visualization, else with visualization.
        :control mode: str, 'end_position' or 'joint_velocity'.
        '''
        # set public variables
        self.headless = headless  # if headless is True, no visualization
        self.reward_offset = 10.0  # reward of achieving the grasping object
        self.reward_range = self.reward_offset  # reward range for register gym env when using vectorized env wrapper
        self.penalty_offset = 1.  # penalty value for undesired cases
        self.fall_down_offset = 0.1  # distance for judging the target object fall off the table
        self.metadata = []  # gym env argument
        self.control_mode = control_mode  # the control mode of robotic arm: 'end_position' or 'joint_velocity'

        # launch and set up the scene, and set proxy variables to represent their counterparts in the scene
        self.pr = PyRep()  # call the PyRep
        if control_mode == 'end_position':  # need to use different scene, the one with all joints in inverse kinematics mode
            SCENE_FILE = join(
                dirname(abspath(__file__)),
                './scenes/sawyer_reacher_rl_new_ik.ttt'
            )  # scene with joints controlled by ik (inverse kinematics)
        elif control_mode == 'joint_velocity':  # the scene with all joints in force/torque mode for forward kinematics
            SCENE_FILE = join(
                dirname(abspath(__file__)),
                './scenes/sawyer_reacher_rl_new.ttt'
            )  # scene with joints controlled by forward kinematics
        self.pr.launch(SCENE_FILE, headless=headless
                       )  # launch the scene; headless means no visualization
        self.pr.start()  # start the scene
        self.agent = Sawyer()  # get the robot arm in the scene
        self.gripper = BaxterGripper()  # get the gripper in the scene
        self.gripper_left_pad = Shape(
            'BaxterGripper_leftPad')  # the left pad on the gripper finger
        self.proximity_sensor = ProximitySensor(
            'BaxterGripper_attachProxSensor'
        )  # need the name of the sensor here
        self.vision_sensor = VisionSensor(
            'Vision_sensor')  # need the name of the sensor here
        self.table = Shape(
            'diningTable')  # the table in the scene for checking collision
        if control_mode == 'end_position':  # control the robot arm by the position of its end using inverse kinematics
            self.agent.set_control_loop_enabled(
                True)  # if false, inverse kinematics won't work
            self.action_space = np.zeros(
                4)  # 3 DOF end position control + 1 rotation of gripper
        elif control_mode == 'joint_velocity':  # control the robot arm by directly setting velocity values on each joint, using forward kinematics
            self.agent.set_control_loop_enabled(False)
            self.action_space = np.zeros(
                7
            )  # 7 DOF velocity control, no need for extra control of end rotation, the 7th joint controls it.
        else:
            raise NotImplementedError
        self.observation_space = np.zeros(
            17)  # position and velocity of 7 joints + position of the target
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.target = Shape('target')  # get the target object
        self.agent_ee_tip = self.agent.get_tip(
        )  # a part of robot as the end of inverse kinematics chain for controlling
        self.tip_target = Dummy(
            'Sawyer_target'
        )  # the target point of the tip (end of the robot arm) to move towards
        self.tip_pos = self.agent_ee_tip.get_position()  # tip x,y,z position

        # set a proper initial robot gesture or tip position
        if control_mode == 'end_position':
            initial_pos = [0.3, 0.1, 0.9]
            self.tip_target.set_position(initial_pos)
            # one big step for rotation setting is enough, with reset_dynamics=True, set the rotation instantaneously
            self.tip_target.set_orientation(
                [0, np.pi, np.pi / 2], reset_dynamics=True
            )  # first two dimensions along x and y axis make gripper face downwards
        elif control_mode == 'joint_velocity':
            self.initial_joint_positions = [0.001815199851989746, -1.4224984645843506, \
                0.704303503036499, 2.54307222366333, 2.972468852996826, -0.4989511966705322, 4.105560302734375] # a proper initial gesture
            self.agent.set_joint_positions(self.initial_joint_positions)
        self.pr.step()
        self.initial_tip_positions = self.agent_ee_tip.get_position()
        self.initial_target_positions = self.target.get_position()

    def _get_state(self):
        '''
        Return state containing arm joint positions/velocities & target position.
        '''
        return np.array(self.agent.get_joint_positions() +  # list, dim=7
                        self.agent.get_joint_velocities() +  # list, dim=7
                        self.target.get_position())  # list, dim=3

    def _is_holding(self):
        '''
         Return the state of holding the target or not, return bool.
        '''
        # Note that the collision check is not always accurate all the time,
        # for continuous collision frames, maybe only the first 4-5 frames of collision can be detected.
        pad_collide_object = self.gripper_left_pad.check_collision(self.target)
        return pad_collide_object and self.proximity_sensor.is_detected(self.target)

    def _move(self,
              action,
              bounding_offset=0.15,
              step_factor=0.2,
              max_itr=20,
              max_error=0.05,
              rotation_norm=5.):
        ''' 
        Move the end effector on robot arm according to the action with inverse kinematics for 'end_position' control mode;
        Inverse kinematics mode control is achieved through setting the tip target instead of using .solve_ik(), 
        because sometimes the .solve_ik() does not function correctly.
        Mode: a closed-loop proportional control, using ik.

        parameters:
        :bounding_offset: offset of bounding box outside the valid target position range, as valid and safe range of action
        :step_factor: small step factor multiplied on the difference of current and desired position, i.e. proportional factor
        :max_itr: maximum moving iterations
        :max_error: upper bound of distance error for movement at each call
        :rotation_norm: factor for normalization of rotation values, since the actions are of the same scale for each dimension
        '''
        pos = self.gripper.get_position()

        # check if state+action stays within the bounding box; if so, move normally, otherwise the action is not conducted.
        #  i.e. x_min < x < x_max  and  y_min < y < y_max  and  z > z_min
        if pos[0]+action[0]>POS_MIN[0]-bounding_offset and pos[0]+action[0]<POS_MAX[0]+bounding_offset  \
            and pos[1]+action[1] > POS_MIN[1]-bounding_offset and pos[1]+action[1] < POS_MAX[1]+2*bounding_offset  \
            and pos[2]+action[2] > POS_MIN[2]-2*bounding_offset:  # larger offset in z axis

            # there is a mismatch between the object set_orientation() and get_orientation():
            # the (x,y,z) in set_orientation() will be (y,x,-z) in get_orientation().
            ori_z = -self.agent_ee_tip.get_orientation(
            )[2]  # the minus is because the mismatch between the set_orientation() and get_orientation()
            target_pos = np.array(self.agent_ee_tip.get_position()) + np.array(
                action[:3])
            diff = 1  # initialization
            itr = 0
            while np.sum(np.abs(diff)) > max_error and itr < max_itr:
                itr += 1
                # set pos in small step
                cur_pos = self.agent_ee_tip.get_position()
                diff = target_pos - cur_pos  # difference of current and target position, close-loop control
                pos = cur_pos + step_factor * diff  # step small step according to current difference, to prevent that ik cannot be solved
                self.tip_target.set_position(pos.tolist())
                self.pr.step(
                )  # every time when setting target tip, need to call simulation step to achieve it

            # one big step for z-rotation is enough, but small error still exists due to the ik solver
            ori_z += rotation_norm * action[
                3]  # normalize the rotation values, as usually same action range is used in policy for both rotation and position
            self.tip_target.set_orientation([
                0, np.pi, ori_z
            ])  # make gripper face downwards and rotate ori_z along z axis
            self.pr.step()

        else:
            print("Potential Movement Out of the Bounding Box!")
            pass  # no action if potentially moving out of the bounding box

    def reinit(self):
        '''
        Reinitialize the environment, e.g. when the gripper is broken during exploration.
        '''
        self.shutdown()  # shutdown the original env first
        self.__init__(self.headless, self.control_mode)  # re-initialize with the same headless and control modes

    def reset(self, random_target=False):
        '''
        Get a random position within a cuboid and set the target position.
        '''
        # set target object
        if random_target:  # randomize
            pos = list(np.random.uniform(
                POS_MIN, POS_MAX))  # sample from uniform in valid range
            self.target.set_position(pos)  # random position
        else:  # non-randomize
            self.target.set_position(
                self.initial_target_positions)  # fixed position
        self.target.set_orientation([0, 0, 0])
        self.pr.step()

        # set end position to be initialized
        if self.control_mode == 'end_position':  # JointMode.IK
            self.agent.set_control_loop_enabled(True)  # ik mode
            self.tip_target.set_position(
                self.initial_tip_positions
            )  # cannot set joint positions directly because the arm is in IK mode, not force/torque mode
            self.pr.step()
            # prevent stuck cases: since IK is used for moving, getting stuck can make the IK unsolvable and the reset fail,
            # so take some random actions when the desired position is not reached.
            itr = 0
            max_itr = 10
            while np.sum(
                    np.abs(
                        np.array(self.agent_ee_tip.get_position() -
                                 np.array(self.initial_tip_positions)))
            ) > 0.1 and itr < max_itr:
                itr += 1
                self.step(np.random.uniform(
                    -0.2, 0.2,
                    4))  # take random actions for preventing the stuck cases
                self.pr.step()

        elif self.control_mode == 'joint_velocity':  # JointMode.FORCE
            self.agent.set_joint_positions(self.initial_joint_positions)
            self.pr.step()

        # set collidable, for collision detection
        self.gripper_left_pad.set_collidable(
            True
        )  # set the pad on the gripper to be collidable, so as to check collision
        self.target.set_collidable(True)
        # open the gripper if it's not fully open
        if np.sum(self.gripper.get_open_amount()) < 1.5:
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

        return self._get_state()  # return current state of the environment

    def step(self, action):
        '''
        Move the robot arm according to the action.
        If control_mode=='joint_velocity', action is 7 dim of joint velocity values + 1 dim rotation of gripper;
        if control_mode=='end_position', action is 3 dim of tip (end of robot arm) position values + 1 dim rotation of gripper;
        '''
        # initialization
        done = False  # episode finishes
        reward = 0
        hold_flag = False  # holding the object or not
        if self.control_mode == 'end_position':
            if action is None or action.shape[
                    0] != 4:  # check if action is valid
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-0.1, 0.1, 4))  # random
            self._move(action)

        elif self.control_mode == 'joint_velocity':
            if action is None or action.shape[
                    0] != 7:  # check if action is valid
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-0.1, 0.1, 7))  # random
            self.agent.set_joint_target_velocities(
                action)  # Execute action on arm
            self.pr.step()

        else:
            raise NotImplementedError

        ax, ay, az = self.gripper.get_position()
        if math.isnan(
                ax):  # capture the broken gripper cases during exploration
            print('Gripper position is nan.')
            self.reinit()
            done = True
        tx, ty, tz = self.target.get_position()
        offset = 0.08  # augmented reward: offset of target position above the target object
        sqr_distance = (ax - tx)**2 + (ay - ty)**2 + (
            az - (tz + offset)
        )**2  # squared distance between the gripper and the target object
        ''' for visual-based control only, large time consumption! '''
        # current_vision = self.vision_sensor.capture_rgb()  # capture a screenshot of the view with vision sensor
        # plt.imshow(current_vision)
        # plt.savefig('./img/vision.png')

        # close the gripper if close enough to the object and the object is detected with the proximity sensor
        if sqr_distance < 0.1 and self.proximity_sensor.is_detected(
                self.target) == True:
            # make sure the gripper is open before grasping
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()
            self.gripper.actuate(
                0, velocity=0.5
            )  # if done, close the hand, 0 for close and 1 for open; velocity 0.5 ensures the gripper closes within one frame
            self.pr.step()  # Step the physics simulation

            if self._is_holding():
                reward += self.reward_offset  # extra reward for grasping the object
                done = True
                hold_flag = True
            else:
                self.gripper.actuate(1, velocity=0.5)
                self.pr.step()
        elif np.sum(
                self.gripper.get_open_amount()
        ) < 1.5:  # if the gripper is closed (not fully open) due to collision or else, open it; get_open_amount() returns a list of gripper joint values
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()
        else:
            pass

        # the base reward is negative distance to target
        reward -= np.sqrt(sqr_distance)

        # case when the object fall off the table
        if tz < self.initial_target_positions[2] - self.fall_down_offset:
            done = True
            reward = -self.reward_offset

        # Augmented reward for orientation: better grasping gesture if the gripper has vertical orientation to the target object.
        # Note: the frame of gripper has a difference of pi/2 in z orientation as the frame of target.
        desired_orientation = np.concatenate(
            ([np.pi,
              0], [self.target.get_orientation()[2]
                   ]))  # gripper vertical to target in z and facing downwards,
        rotation_penalty = -np.sum(
            np.abs(
                np.array(self.agent_ee_tip.get_orientation()) -
                desired_orientation))
        rotation_norm = 0.02
        reward += rotation_norm * rotation_penalty

        # Penalty for collision with the table
        if self.gripper_left_pad.check_collision(self.table):
            reward -= self.penalty_offset
            #print('Penalize collision with table.')

        if math.isnan(reward):  # capture the cases of numerical problem
            reward = 0.

        return self._get_state(), reward, done, {'finished': hold_flag}

    def shutdown(self):
        ''' Close the simulator '''
        self.pr.stop()
        self.pr.shutdown()
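
Since the class stores gym-style fields (reward_range, metadata) for registering a gym env, a thin adapter using the classic gym.Env API could look like the following sketch; the wrapper name and space bounds are assumptions, and the shapes follow the np.zeros(...) placeholders used in __init__ above:

import gym
import numpy as np
from gym import spaces

class GraspGymWrapper(gym.Env):
    def __init__(self, headless=True):
        self.env = GraspEnv(headless=headless, control_mode='joint_velocity')
        self.action_space = spaces.Box(-1., 1., shape=(7,), dtype=np.float32)
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(17,),
                                            dtype=np.float32)
        self.reward_range = (-self.env.reward_offset, self.env.reward_offset)
        self.metadata = {}

    def reset(self):
        return self.env.reset().astype(np.float32)

    def step(self, action):
        state, reward, done, info = self.env.step(np.asarray(action))
        return state.astype(np.float32), float(reward), done, info

    def close(self):
        self.env.shutdown()
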
Example #7
class ReacherEnv(object):
    def __init__(self, headless, control_mode='joint_velocity'):
        '''
        :headless: bool, if True, no visualization, else with visualization.
        :control mode: str, 'end_position' or 'joint_velocity'.
        '''
        self.headless = headless
        self.reward_offset = 10.0  # reward of achieving the grasping object
        self.reward_range = self.reward_offset  # reward range for register gym env when using vectorized env wrapper
        self.fall_down_offset = 0.1  # for judging the target object fall off the table
        self.metadata = []  # gym env format
        self.control_mode = control_mode  # the control mode of robotic arm: 'end_position' or 'joint_velocity'
        self.pr = PyRep()
        if control_mode == 'end_position':  # need to use different scene, the one with all joints in inverse kinematics mode
            SCENE_FILE = join(
                dirname(abspath(__file__)),
                './scenes/sawyer_reacher_rl_new_ik.ttt'
            )  # scene with joints controlled by ik (inverse kinematics)
        elif control_mode == 'joint_velocity':  # the scene with all joints in force/torque mode for forward kinematics
            SCENE_FILE = join(
                dirname(abspath(__file__)),
                './scenes/sawyer_reacher_rl_new.ttt'
            )  # scene with joints controlled by forward kinematics
        self.pr.launch(SCENE_FILE, headless=headless
                       )  # launch the scene; headless means no visualization
        self.pr.start()  # start the scene
        self.agent = Sawyer()  # get the robot arm in the scene
        self.gripper = BaxterGripper()  # get the gripper in the scene
        self.gripper_left_pad = Shape(
            'BaxterGripper_leftPad')  # the pad on the gripper finger
        self.proximity_sensor = ProximitySensor(
            'BaxterGripper_attachProxSensor'
        )  # need the name of the sensor here
        self.vision_sensor = VisionSensor(
            'Vision_sensor')  # need the name of the sensor here
        self.initial_joint_positions = [
            0.162, -1.109, 0.259, 1.866, 2.949, -0.811, 4.440
        ]  # a good starting posture for the robot, avoiding getting stuck or colliding
        self.agent.set_joint_positions(self.initial_joint_positions)
        if control_mode == 'end_position':  # control the robot arm by the position of its end using inverse kinematics
            self.agent.set_control_loop_enabled(
                True)  # if false, ik won't work
            self.action_space = np.zeros(
                4)  # 3 DOF end position control + 1 rotation of gripper
        elif control_mode == 'joint_velocity':  # control the robot arm by directly setting velocity values on each joint, forward kinematics
            self.agent.set_control_loop_enabled(False)
            self.action_space = np.zeros(
                7
            )  # 7 DOF velocity control, no need for extra control of end rotation, the 7th joint controls it.
        else:
            raise NotImplementedError
        self.observation_space = np.zeros(
            17)  # position and velocity of 7 joints + position of the target
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.target = Shape('target')  # get the target object
        self.agent_ee_tip = self.agent.get_tip(
        )  # the part of the robot at the end of the inverse kinematics chain, used for control
        self.tip_target = Dummy(
            'Sawyer_target'
        )  # the target point of the tip (end of the robot arm) to move towards
        self.tip_pos = self.agent_ee_tip.get_position()  # tip x,y,z position
        self.tip_quat = self.agent_ee_tip.get_quaternion(
        )  # tip rotation as a quaternion, used when the rotation is not controlled
        # reference for reset
        self.initial_tip_positions = [0.3, 0.1, 1.]
        self.initial_z_rotation = np.pi / 2.
        self.initial_target_positions = self.target.get_position()

    def _get_state(self):
        '''
        Return state containing arm joint angles/velocities & target position.
        '''
        return np.array(self.agent.get_joint_positions() +  # list
                        self.agent.get_joint_velocities() +  # list
                        self.target.get_position())  # list

    def _is_holding(self):
        '''
        Return whether the target is being held, as a bool.
        '''

        # Note that the collision check is not always accurate;
        # for a continuous collision, maybe only the first 4-5 frames of contact are detected.
        pad_collide_object = self.gripper_left_pad.check_collision(self.target)
        return pad_collide_object and self.proximity_sensor.is_detected(self.target)

    # def _move(self, action):
    #     ''' Mode 0: as given by original examples in Pyrep repo '''
    #     # Deprecated: the ik always fails! Move the tip according to the action with inverse kinematics for 'end_position' control.
    #     action=list(action)
    #     pos=self.agent_ee_tip.get_position()
    #     assert len(action) == len(pos)
    #     for idx in range(len(pos)):
    #         pos[idx] += action[idx]
    #     print('pos: ', pos)
    #     new_joint_angles = self.agent.solve_ik(pos, quaternion=self.tip_quat)
    #     self.agent.set_joint_target_positions(new_joint_angles)

    # def _move(self, action):
    #     '''
    #     Deprecated!
    #     Move the tip according to the action with inverse kinematics for 'end_position' control;
    #     with control of tip target in inverse kinematics mode instead of using .solve_ik() in forward kinematics mode.
    #     Mode 1: an open-loop control, using ik; not as accurate as the closed-loop control.
    #     '''
    #     pos=self.gripper.get_position()
    #     bounding_offset=0.1
    #     robot_moving_unit=0.01  # the amount of single step move of robot, not accurate; the smaller the value, the smoother the movement.
    #     # check if state+action will be within the bounding box; if so, move normally, else no action.
    #     #  x_min < x < x_max  and  y_min < y < y_max  and  z > z_min
    #     if pos[0]+action[0]>POS_MIN[0]-bounding_offset and pos[0]+action[0]<POS_MAX[0]+bounding_offset  \
    #         and pos[1]+action[1] > POS_MIN[1]-bounding_offset and pos[1]+action[1] < POS_MAX[1]+bounding_offset  \
    #         and pos[2]+action[2] > POS_MIN[2]-bounding_offset:

    #         moving_loop_itr=int(np.sum(np.abs(action[:3]))/robot_moving_unit)+1  # adaptive number of moving steps, with minimal of 1 step; the larger it is, the more accurate for each movement.
    #         small_step = list(1./moving_loop_itr*np.array(action))  # break the action into small steps, as the robot cannot move to the target position within one frame

    #         '''
    #         there is a mismatch between the object set_orientation() and get_orientation():
    #         the (x,y,z) in set_orientation() will be (y,x,-z) in get_orientation().
    #         '''
    #         ori_z=-self.agent_ee_tip.get_orientation()[2] # the minus is because the mismatch between the set and get
    #         assert len(small_step) == len(pos)+1  # 3 values for position, 1 value for rotation

    #         for _ in range(moving_loop_itr):
    #             for idx in range(len(pos)):
    #                 pos[idx] += small_step[idx]
    #             self.tip_target.set_position(pos)
    #             self.pr.step()
    #             ''' deprecated! no need to use small steps for the rotation with reset_dynamics=True'''
    #             ori_z+=small_step[3]  # change the orientation along z-axis with a small step
    #             self.tip_target.set_orientation([0, np.pi, ori_z], reset_dynamics=True)  # make gripper face downwards
    #             self.pr.step()
    #         ''' one big step for z-rotation is enough, with reset_dynamics=True, set the rotation instantaneously '''
    #         # ori_z+=action[3]
    #         # self.tip_target.set_orientation([0, np.pi, ori_z], reset_dynamics=True)  # make gripper face downwards
    #         # self.pr.step()

    #     else:
    #         print("Potential Movement Out of the Bounding Box!")
    #         pass # no action if potentially out of the bounding box

    def _move(self,
              action,
              bounding_offset=0.15,
              step_factor=0.4,
              max_itr=40,
              max_error=0.01,
              rotation_norm=5.):
        '''
        Move the tip according to the action with inverse kinematics for 'end_position' control,
        by setting the tip target in inverse kinematics mode instead of using .solve_ik() in forward kinematics mode.
        Mode 2: closed-loop control, using ik.

        parameters:
        :bounding_offset: offset between the bounding box and the valid target position range; the bounding box is larger
        :step_factor: small step factor multiplied on the gradient step calculated by inverse kinematics
        :max_itr: maximum number of moving iterations
        :max_error: upper bound of the distance error for the movement at each call
        :rotation_norm: factor for normalization of rotation values
        '''
        pos = self.gripper.get_position()

        # check if state+action will be within the bounding box; if so, move normally, else the action is not executed.
        #  i.e. x_min < x < x_max  and  y_min < y < y_max  and  z > z_min
        if pos[0]+action[0]>POS_MIN[0]-bounding_offset and pos[0]+action[0]<POS_MAX[0]+bounding_offset  \
            and pos[1]+action[1] > POS_MIN[1]-bounding_offset and pos[1]+action[1] < POS_MAX[1]+2*bounding_offset  \
            and pos[2]+action[2] > POS_MIN[2]-2*bounding_offset:  # larger offset in z axis
            '''
            there is a mismatch between set_orientation() and get_orientation():
            the (x,y,z) in set_orientation() will be (y,x,-z) in get_orientation().
            '''
            ori_z = -self.agent_ee_tip.get_orientation()[
                2]  # the minus sign is due to the mismatch between set and get
            target_pos = np.array(self.agent_ee_tip.get_position()) + np.array(
                action[:3])
            diff = 1
            itr = 0
            while np.sum(np.abs(diff)) > max_error and itr < max_itr:
                # print(action[:3], np.sum(np.abs(diff)))
                itr += 1
                # set pos in small step
                cur_pos = self.agent_ee_tip.get_position()
                diff = target_pos - cur_pos  # difference between current and target position, closed-loop control
                pos = cur_pos + step_factor * diff  # take a small step towards the target, to keep each ik sub-problem solvable
                self.tip_target.set_position(pos.tolist())
                self.pr.step(
                )  # after setting the tip target, a simulation step is needed for the move to take effect
            ''' one big step for z-rotation is enough, but still small error exists '''
            ori_z += rotation_norm * action[
                3]  # normalize the rotation value, as the policy usually uses the same action range for rotation and position
            self.tip_target.set_orientation(
                [0, np.pi,
                 ori_z])  # make the gripper face downwards and rotate by ori_z
            self.pr.step()
            # print('after: ', ori_z, -self.agent_ee_tip.get_orientation()[2])

        else:
            print("Potential Movement Out of the Bounding Box!")
            pass  # no action if potentially moving out of the bounding box
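    # A short note on the closed-loop rule in _move() above: each iteration commands
    #     pos = cur_pos + step_factor * (target_pos - cur_pos),
    # so the remaining error shrinks roughly by a factor of (1 - step_factor) per step
    # (e.g. with step_factor=0.4, about 0.6^10 ~ 0.6% of the error remains after
    # 10 iterations), keeping each ik sub-target small enough to stay solvable.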

    def reset(self, random_target=False):
        '''
        Reset with the _move() function.
        Get a random position within a cuboid and set the target position.
        '''
        if random_target:  # randomly set the target position
            pos = list(np.random.uniform(
                POS_MIN, POS_MAX))  # sample from uniform in valid range
            self.target.set_position(pos)  # random position
        else:
            self.target.set_position(
                self.initial_target_positions)  # fixed position
        self.target.set_orientation([0, 0, 0])
        self.pr.step()

        # set end position to be initialized
        if self.control_mode == 'end_position':  # JointMode.IK
            curr_pos = self.agent_ee_tip.get_position()
            delta_pos = np.array(self.initial_tip_positions) - np.array(curr_pos)
            self._move(np.concatenate((delta_pos, [self.initial_z_rotation])),
                       max_itr=80)

            # prevent stuck cases: when moving with ik, getting stuck can make the ik unsolvable, so the reset would not complete correctly
            itr = 0
            max_itr = 10
            while np.sum(
                    np.abs(
                        np.array(self.agent_ee_tip.get_position()) -
                        np.array(self.initial_tip_positions))
            ) > 0.1 and itr < max_itr:
                itr += 1
                self.step(np.random.uniform(
                    -0.2, 0.2,
                    4))  # take random actions for preventing the stuck cases
                self.pr.step()
        elif self.control_mode == 'joint_velocity':  # JointMode.FORCE
            self.agent.set_joint_positions(
                self.initial_joint_positions
            )  # sometimes the gripper is stuck, cannot get back to initial
            self.pr.step()

        # set collidable, for collision detection
        self.gripper_left_pad.set_collidable(
            True
        )  # set the pad on the gripper to be collidable, so as to check collision
        self.target.set_collidable(True)

        # open the gripper if it's not fully open
        if np.sum(self.gripper.get_open_amount()) < 1.5:
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

        return self._get_state()  # return current state of the environment

    # def reset(self, random_target=False):
    #     '''
    #     Deprecated!
    #     Get a random position within a cuboid and set the target position.
    #     '''
    #     print('reset')
    #     if random_target:  # randomly set the target position
    #         pos = list(np.random.uniform(POS_MIN, POS_MAX))  # sample from uniform in valid range
    #         self.target.set_position(pos)  # random position
    #     else:
    #         self.target.set_position(self.initial_target_positions) # fixed position
    #     self.target.set_orientation([0,0,0])

    #     # set end position to be initialized
    #     if self.control_mode == 'end_position':  # JointMode.IK
    #         self.agent.set_control_loop_enabled(True)
    #         self.tip_target.set_position(self.initial_tip_positions)  # cannot set joint positions directly, since the joints are in ik mode rather than force/torque mode
    #         self.pr.step()
    #         # prevent stuck cases: when moving with ik, getting stuck can make the ik unsolvable, so the reset would not complete correctly
    #         itr=0
    #         max_itr=10
    #         while np.sum(np.abs(np.array(self.agent_ee_tip.get_position()-np.array(self.initial_tip_positions))))>0.1 and itr<max_itr:
    #             itr+=1
    #             self.step(np.random.uniform(-0.2,0.2,4))  # take random actions for preventing the stuck cases
    #             self.pr.step()
    #     elif self.control_mode == 'joint_velocity': # JointMode.FORCE
    #         self.agent.set_joint_positions(self.initial_joint_positions)  # sometimes the gripper is stuck, cannot get back to initial

    #     # set collidable, for collision detection
    #     self.gripper_left_pad.set_collidable(True)  # set the pad on the gripper to be collidable, so as to check collision
    #     self.target.set_collidable(True)

    #     # open the gripper if it's not fully open
    #     if np.sum(self.gripper.get_open_amount())<1.5:
    #         self.gripper.actuate(1, velocity=0.5)
    #         self.pr.step()

    #     return self._get_state()  # return current state of the environment

    def step(self, action):
        '''
        Move the robot arm according to the action.
        If control_mode=='joint_velocity', the action is 7 dim of joint velocity values (the 7th joint also controls the end rotation);
        if control_mode=='end_position', the action is 3 dim of tip (end of robot arm) position values + 1 dim rotation of the gripper.
        '''
        done = False
        if self.control_mode == 'end_position':
            if action is None or action.shape[0] != 4:
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-0.1, 0.1, 4))  # random
            # print(action, self.agent_ee_tip.get_position())
            self._move(action)

        elif self.control_mode == 'joint_velocity':
            if action is None or action.shape[0] != 7:
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-0.1, 0.1, 7))  # random
            self.agent.set_joint_target_velocities(
                action)  # Execute action on arm
            self.pr.step()  # without pr.step() the action is not executed

        else:
            raise NotImplementedError

        ax, ay, az = self.gripper.get_position()
        if math.isnan(
                ax
        ):  # capture the case when the gripper breaks during exploration
            self.__init__(headless=self.headless)
            done = True

        tx, ty, tz = self.target.get_position()

        distance = (ax - tx)**2 + (ay - ty)**2 + (
            az - tz)**2  # squared distance between the gripper and the target object
        ''' for visual-based control only, large time consumption! '''
        # current_vision = self.vision_sensor.capture_rgb()  # capture a screenshot of the view with vision sensor
        # plt.imshow(current_vision)
        # plt.savefig('./img/vision.png')

        reward = 0
        # close the gripper if close enough to the object and the object is detected by the proximity sensor
        if distance < 0.1 and self.proximity_sensor.is_detected(self.target):
            # make sure the gripper is open before grasping
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

            self.gripper.actuate(
                0, velocity=0.5
            )  # close the hand: 0 for close, 1 for open; velocity 0.5 ensures the gripper closes within one frame
            self.pr.step()  # Step the physics simulation

            if self._is_holding():
                # reward for hold here!
                reward += self.reward_offset  # extra reward for grasping the object
                done = True
            else:
                self.gripper.actuate(1, velocity=0.5)
                self.pr.step()

        elif np.sum(
                self.gripper.get_open_amount()
        ) < 1.5:  # if the gripper is closed (not fully open) due to collision or else, open it; .get_open_amount() returns a list of gripper joint values
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

        else:
            pass

        reward -= np.clip(
            np.sqrt(distance), 0, self.reward_offset
        )  # Reward is negative distance to target; clipped for removing abnormal cases

        if tz < self.initial_target_positions[
                2] - self.fall_down_offset:  # the object falls off the table
            done = True
            reward = -self.reward_offset

        # could also add an orientation reward: same orientation for target and gripper (they are vertical here, so grasping works)
        # reward += np.sqrt(np.array(self.agent_ee_tip.get_orientation())-np.array(self.target.get_orientation()))

        return self._get_state(), reward, done, {}

    def shutdown(self):
        ''' Close the simulator '''
        self.pr.stop()
        self.pr.shutdown()
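# --- Usage sketch (illustration, not part of the original example): a minimal
# random-action rollout for ReacherEnv, assuming the scene files referenced in
# __init__ are available.
env = ReacherEnv(headless=True, control_mode='joint_velocity')
state = env.reset(random_target=True)
for _ in range(100):
    action = np.random.uniform(-0.5, 0.5, len(env.action_space))  # 7-dim joint velocities
    state, reward, done, info = env.step(action)
    if done:
        state = env.reset(random_target=True)
env.shutdown()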
Example #8
class EnvPollos(Env):
    def __init__(self, ep_length=100):
        """
        Pollos environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episodes in timesteps
        """
        # 5 points in 3D space forming the trajectory
        self.action_space = Box(low=-0.3, high=0.3, shape=(5,3), dtype=np.float32)
        self.observation_space = Box(low=np.array([-0.1, -0.2, 0, -1.5, 0.0, 0.0, -0.5, -0.2, -0.2]), 
                                     high=np.array([0.1, 1.7, 2.5, 0, 0.0, 0.0, 0.5, 0.2, 1.0]),
                                     dtype=np.float32)
        
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.agent = UR10()
        self.agent.max_velocity = 1.2
        self.joints = [Joint('UR10_joint1'), Joint('UR10_joint2'), Joint('UR10_joint3'), Joint('UR10_joint4'), Joint('UR10_joint5'), Joint('UR10_joint6')]
        self.joints_limits = [[j.get_joint_interval()[1][0],
                               j.get_joint_interval()[1][0] + j.get_joint_interval()[1][1]]
                              for j in self.joints]  # get_joint_interval() returns (cyclic, [min, range]) -> [lower, upper]
        print(self.joints_limits)
        self.agent_hand = Shape('hand')
        self.agent.set_control_loop_enabled(True)
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.initial_agent_tip_position = self.agent.get_tip().get_position()
        self.initial_agent_tip_quaternion = self.agent.get_tip().get_quaternion()
        self.target = Dummy('UR10_target')
        self.initial_joint_positions = self.agent.get_joint_positions()
        self.pollo_target = Dummy('pollo_target')
        self.pollo = Shape('pollo')
        self.table_target = Dummy('table_target')
        self.initial_pollo_position = self.pollo.get_position()
        self.initial_pollo_orientation = self.pollo.get_quaternion()
        # objects
        self.scene_objects = [Shape('table0'), Shape('Plane'), Shape('Plane0'), Shape('ConcretBlock')]
        #self.agent_tip = self.agent.get_tip()
        self.initial_distance = np.linalg.norm(np.array(self.initial_pollo_position)-np.array(self.initial_agent_tip_position))
        
        # camera 
        self.camera = VisionSensor('kinect_depth')
        self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(self.camera.get_handle(),-1)
        self.np_camera_matrix_extrinsics = np.delete(np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
        width = 640.0
        height = 480.0
        angle = math.radians(57.0)
        focalx_px = (width/2.0) / math.tan(angle/2.0)
        focaly_px = (height/2.0) / math.tan(angle/2.0)
        self.np_camera_matrix_intrinsics = np.array([[-focalx_px, 0.0, 320.0],
                                                     [0.0, -focalx_px, 240.0],
                                                     [0.0, 0.0, 1.0]])
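        # Pinhole intrinsics: focal length in pixels f = (size / 2) / tan(fov / 2),
        # here with a 57-degree FOV (Kinect-like). Note that focaly_px is computed
        # above but unused: the matrix reuses focalx_px for both axes, which is
        # consistent with square pixels.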
                                        
        self.reset()

    def reset(self):
        pos = list(np.random.uniform( [-0.1, -0.1, 0.0],  [0.1, 0.1, 0.1]) + self.initial_pollo_position)
        self.pollo.set_position(pos)
        self.pollo.set_quaternion(self.initial_pollo_orientation)
        self.agent.set_joint_positions(self.initial_joint_positions)
        self.initial_epoch_time = time.time()
        while True:         # wait for arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1):  # all joint velocities are (nearly) zero
                break
        return self._get_state()

    def step(self, action): 
        if action is None:
            self.pr.step()
            return self._get_state(), 0.0, False, {}
        print(action.shape, action)  # debug print; placed after the None check to avoid an AttributeError
        
        if np.any(np.isnan(action)):
            print(action)
            return self._get_state(), -1.0, False, {}

        # create a path with the tip and the pollo at its endpoints, plus adjustable intermediate points taken from the action (note: action[3] is unused below)
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_robot_tip_position = np.array(self.agent.get_tip().get_position())
        np_robot_tip_orientation = np.array(self.agent.get_tip().get_orientation())
        c_path = CartesianPath.create()     
        c_path.insert_control_points(action[4])         # point after pollo to secure the grasp
        c_path.insert_control_points([list(np_pollo_target) + list(np_robot_tip_orientation)])       # pollo
        c_path.insert_control_points(action[0:3])   
        c_path.insert_control_points([list(np_robot_tip_position) + list(np_robot_tip_orientation)]) # robot hand
        try:
            self.path = self.agent.get_path_from_cartesian_path(c_path)
        except IKError as e:
            print('Agent::grasp    Could not find joint values')  
            return self._get_state(), -1, True, {}   
    
        # go through path
        reloj = time.time()
        while self.path.step():
            self.pr.step()                                   # Step the physics simulation
            if (time.time()-reloj) > 4:              
                return self._get_state(), -10.0, True, {}    # Too much time
            if any((self.agent_hand.check_collision(obj) for obj in self.scene_objects)):   # collision with the table / scene objects
                return self._get_state(), -10.0, True, {}   
        
        # path ok, compute reward
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_robot_tip_position = np.array(self.agent.get_tip().get_position())
        dist = np.linalg.norm(np_pollo_target-np_robot_tip_position)
        altura = np.clip((np_pollo_target[2] - self.initial_pollo_position[2]), 0, 0.5)
        
        if altura > 0.3:                                                     # chicken in hand
            return self._get_state(), altura, True, {}
        elif dist > self.initial_distance:                                   # hand moves away from the chicken
            return self._get_state(), -10.0, True, {}
        if time.time() - self.initial_epoch_time > 5:                        # too much time
            return self._get_state(), -10.0, True, {}
        
        # Reward 
        pollo_height = np.exp(-altura*10)  # 1 at altura=0, decaying to exp(-5) ~ 0.007 at the 0.5 m clip
        reward = - pollo_height - dist
        return self._get_state(), reward, False, {}

    def close(self):
        self.pr.stop()
        self.pr.shutdown()
    
    def render(self):
        print("RENDER")
        np_pollo_en_camara = self.getPolloEnCamara()
         
        # capture an image from the vision sensor (note: capture_rgb() is used, despite the 'depth' variable name)
        depth = self.camera.capture_rgb()
        circ = plt.Circle((int(np_pollo_en_camara[0]), int(np_pollo_en_camara[1])),10)
        plt.clf()
        fig = plt.gcf()
        ax = fig.gca()
        ax.add_artist(circ)
        ax.imshow(depth, cmap = 'hot')
        plt.pause(0.000001)

    # Aux

    # transform env.pollo_target.get_position() to camera coordinates and project it to image coordinates
    def getPolloEnCamara(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_target_cam = self.np_camera_matrix_extrinsics.dot(np.append([np_pollo_target],1.0))
        np_pollo_en_camara = self.np_camera_matrix_intrinsics.dot(np_pollo_target_cam)
        np_pollo_en_camara = np_pollo_en_camara / np_pollo_en_camara[2]
        np_pollo_en_camara = np.delete(np_pollo_en_camara,2)
        return np_pollo_en_camara

    def getPolloEnCamaraEx(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_en_camara = self.np_camera_matrix_extrinsics.dot(np.append([np_pollo_target],1.0))
        return np_pollo_en_camara


    def _get_state(self):
        # Return state containing arm joint angles/velocities & target position
        # return (self.agent.get_joint_positions() + 
        #         self.agent.get_joint_velocities() +
        #         self.pollo_target.get_position())
        p = self.getPolloEnCamaraEx()
        j = self.agent.get_joint_positions()
        r = np.array([j[0],j[1],j[2],j[3],j[4],j[5],p[0],p[1],p[2]])
        return r
    
    def vrepToNP(self, c):
        # reshape the 12-value object matrix from simGetObjectMatrix into a 4x4 homogeneous matrix
        return np.array([[c[0],c[4],c[8],c[3]],
                         [c[1],c[5],c[9],c[7]],
                         [c[2],c[6],c[10],c[11]],
                         [0,   0,   0,    1]])
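# --- Usage sketch (illustration, not part of the original example): a minimal
# random-action rollout for EnvPollos, assuming SCENE_FILE and the named scene
# objects ('pollo', 'UR10', ...) exist. Each action is a (5, 3) array of
# trajectory control points in [-0.3, 0.3].
env = EnvPollos(ep_length=100)
obs = env.reset()
for _ in range(10):
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()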