def test_solve_ik_via_sampling(self):
     arm = Panda()
     waypoint = Dummy('Panda_waypoint')
     configs = arm.solve_ik_via_sampling(
         waypoint.get_position(), waypoint.get_orientation(), max_configs=5)
     self.assertIsNotNone(configs)
     self.assertEqual(len(configs), 5)
     current_config = arm.get_joint_positions()
     # Checks correct config (last)
     arm.set_joint_positions(configs[-1])
     self.assertTrue(np.allclose(
         arm.get_tip().get_pose(), waypoint.get_pose(), atol=0.001))
     # Checks correct config (first)
     arm.set_joint_positions(configs[0])
     self.assertTrue(np.allclose(
         arm.get_tip().get_pose(), waypoint.get_pose(), atol=0.001))
     # Checks order
     prev_config_dist = 0
     for config in configs:
         config_dist = sum(
             [(c - f)**2 for c, f in zip(current_config, config)])
         # This test requires that the metric scale for each joint remains
         # at 1.0 in the _getConfigDistance Lua function
         self.assertLessEqual(prev_config_dist, config_dist)
         prev_config_dist = config_dist
Example #2
 def render(self, mode='human'):
     if self._gym_cam is None:
         # Add the camera to the scene
         cam_placeholder = Dummy('cam_cinematic_placeholder')
         self._gym_cam = VisionSensor.create([640, 360])
         self._gym_cam.set_pose(cam_placeholder.get_pose())
         self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
 def test_solve_ik_via_jacobian(self):
     arm = Panda()
     waypoint = Dummy('Panda_waypoint')
     new_config = arm.solve_ik_via_jacobian(
         waypoint.get_position(), waypoint.get_orientation())
     arm.set_joint_positions(new_config)
     self.assertTrue(np.allclose(
         arm.get_tip().get_pose(), waypoint.get_pose(), atol=0.001))
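
A hedged sketch contrasting the two solvers exercised in these tests (the Panda scene is assumed to be launched, as in the tests; the 0.05 m threshold is illustrative, not from the source): solve_ik_via_jacobian suits targets close to the current tip pose, while solve_ik_via_sampling copes with distant ones.

import numpy as np
from pyrep.objects.dummy import Dummy
from pyrep.robots.arms.panda import Panda

arm = Panda()
waypoint = Dummy('Panda_waypoint')
# Pick a solver based on how far the tip currently is from the target.
dist = np.linalg.norm(np.array(waypoint.get_position()) -
                      np.array(arm.get_tip().get_position()))
if dist < 0.05:
    config = arm.solve_ik_via_jacobian(
        waypoint.get_position(), waypoint.get_orientation())
else:
    config = arm.solve_ik_via_sampling(
        waypoint.get_position(), waypoint.get_orientation())[0]
arm.set_joint_positions(config)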
Example #4
    def __init__(self,
                 task_class,
                 observation_mode='state',
                 render_mode: Union[None, str] = None):
        self._observation_mode = observation_mode
        self._render_mode = render_mode
        obs_config = ObservationConfig()
        if observation_mode == 'state':
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        elif observation_mode == 'vision':
            obs_config.set_all(True)
        else:
            raise ValueError('Unrecognised observation_mode: %s.' %
                             observation_mode)
        action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
        self.env = Environment(action_mode,
                               obs_config=obs_config,
                               headless=True)
        self.env.launch()
        self.task = self.env.get_task(task_class)

        _, obs = self.task.reset()

        self.action_space = spaces.Box(low=-1.0,
                                       high=1.0,
                                       shape=(self.env.action_size, ))

        if observation_mode == 'state':
            self.observation_space = spaces.Box(
                low=-np.inf, high=np.inf, shape=obs.get_low_dim_data().shape)
        elif observation_mode == 'vision':
            self.observation_space = spaces.Dict({
                "state":
                spaces.Box(low=-np.inf,
                           high=np.inf,
                           shape=obs.get_low_dim_data().shape),
                "left_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.left_shoulder_rgb.shape),
                "right_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.right_shoulder_rgb.shape),
                "wrist_rgb":
                spaces.Box(low=0, high=1, shape=obs.wrist_rgb.shape),
                "front_rgb":
                spaces.Box(low=0, high=1, shape=obs.front_rgb.shape),
            })

        if render_mode is not None:
            # Add the camera to the scene
            cam_placeholder = Dummy('cam_cinematic_placeholder')
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            if render_mode == 'human':
                self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            else:
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)
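
A hedged driver sketch for the wrapper above; the class name `RLBenchGymEnv` and the `reset`/`step`/`close` methods are assumptions (they are not shown in this excerpt), and `ReachTarget` is a standard RLBench task class.

from rlbench.tasks import ReachTarget

env = RLBenchGymEnv(ReachTarget, observation_mode='state')  # name assumed
obs = env.reset()  # reset()/step()/close() assumed defined elsewhere
for _ in range(100):
    obs, reward, done, _ = env.step(env.action_space.sample())
    if done:
        break
env.close()  # assumed to call self.env.shutdown() internally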
Example #5
def main(argv):

    obs_config = ObservationConfig(record_gripper_closing=True)
    obs_config.set_all(False)

    vrc = rand_every = None
    frequency = 0
    if FLAGS.domain_randomization:
        vrc = VisualRandomizationConfig(FLAGS.textures_path)
        rand_every = RandomizeEvery.TRANSITION
        frequency = 10

    env = Environment(ActionMode(), obs_config=obs_config,
                      randomize_every=rand_every, frequency=frequency,
                      visual_randomization_config=vrc, headless=FLAGS.headless)
    env.launch()

    # Add the camera to the scene
    cam_placeholder = Dummy('cam_cinematic_placeholder')
    cam = VisionSensor.create(FLAGS.camera_resolution)
    cam.set_pose(cam_placeholder.get_pose())
    cam.set_parent(cam_placeholder)

    cam_motion = CircleCameraMotion(cam, Dummy('cam_cinematic_base'), 0.005)
    tr = TaskRecorder(env, cam_motion, fps=30)

    if len(FLAGS.tasks) > 0:
        task_names = FLAGS.tasks
    else:
        task_names = [t.split('.py')[0] for t in os.listdir(TASKS_PATH)
                      if t != '__init__.py' and t.endswith('.py')]
    task_classes = [task_file_to_task_class(
        task_file) for task_file in task_names]

    for i, (name, cls) in enumerate(zip(task_names, task_classes)):
        good = tr.record_task(cls)
        if FLAGS.individual and good:
            tr.save(os.path.join(FLAGS.save_dir, '%s.avi' % name))

    if not FLAGS.individual:
        tr.save(os.path.join(FLAGS.save_dir, 'recorded_tasks.avi'))
    env.shutdown()
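
A hedged invocation sketch for this recorder; the script filename is an assumption, while the flag names match the FLAGS referenced above.

# python record_tasks.py --save_dir=/tmp/task_videos --individual=True \
#     --headless=True --camera_resolution=1280,720 --tasks=reach_target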
Example #6
 def __init__(self, machine, camera, state="state"):
     super(CustomEnv, self).__init__()
     # Define action and observation space
     # They must be gym.spaces objects
     self.machine = machine
     self.camera = camera
     self.action_space = spaces.Discrete(N_ACTIONS)
     if state == "state":
         self.observation_space = spaces.Box(low=-10,
                                             high=10,
                                             shape=(1, 15))
     elif state == "vision":
         self.observation_space = spaces.Box(low=0,
                                             high=1,
                                             shape=(128, 128, 3))
     self.min_force = 1
     cam_placeholder = Dummy('cam_cinematic_placeholder')
     self._gym_cam = VisionSensor.create([640, 360])
     self._gym_cam.set_pose(cam_placeholder.get_pose())
     self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
     # self._gym_cam.set_render_mode(RenderMode.OPENGL3)
     self.state_rep = state
Example #7
    def render(self, mode='human'):
        # todo render available at any time
        if self._render_mode is None:
            self._render_mode = mode
            # Add the camera to the scene
            cam_placeholder = Dummy('cam_cinematic_placeholder')
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            if mode == 'human':
                self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            else:
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)

        if mode != self._render_mode:
            raise ValueError(
                'The render mode must match the render mode selected in the '
                'constructor. \nI.e. if you want "human" render mode, then '
                'create the env by calling: '
                'gym.make("reach_target-state-v0", render_mode="human").\n'
                'You passed in mode %s, but expected %s.' %
                (mode, self._render_mode))
        if mode == 'rgb_array':
            return self._gym_cam.capture_rgb()
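
A hedged usage sketch for this render method; the environment id comes from the error message above, and the frame handling is illustrative.

import gym

env = gym.make('reach_target-state-v0', render_mode='rgb_array')
env.reset()
frame = env.render(mode='rgb_array')  # float RGB array from capture_rgb()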
Example #8
class Arm(RobotComponent):
    """Base class representing a robot arm with path planning support.
    """
    def __init__(self,
                 count: int,
                 name: str,
                 num_joints: int,
                 base_name: str = None,
                 max_velocity=1.0,
                 max_acceleration=4.0,
                 max_jerk=1000):
        """Count is used for when we have multiple copies of arms"""
        joint_names = ['%s_joint%d' % (name, i + 1) for i in range(num_joints)]
        super().__init__(count, name, joint_names, base_name)

        # Used for motion planning
        self.max_velocity = max_velocity
        self.max_acceleration = max_acceleration
        self.max_jerk = max_jerk

        # Motion planning handles
        suffix = '' if count == 0 else '#%d' % (count - 1)
        self._ik_target = Dummy('%s_target%s' % (name, suffix))
        self._ik_tip = Dummy('%s_tip%s' % (name, suffix))
        self._ik_group = vrep.simGetIkGroupHandle('%s_ik%s' % (name, suffix))
        self._collision_collection = vrep.simGetCollectionHandle(
            '%s_arm%s' % (name, suffix))

    def get_configs_for_tip_pose(self,
                                 position: List[float],
                                 euler: List[float] = None,
                                 quaternion: List[float] = None,
                                 ignore_collisions=False,
                                 trials=300,
                                 max_configs=60) -> List[List[float]]:
        """Gets a valid joint configuration for a desired end effector pose.
        Must specify either rotation in euler or quaternions, but not both!
        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs.
        :param max_configs: The maximum number of configurations we want to
            generate before ranking them.
        :raises: ConfigurationError if no joint configuration could be found.
        :return: A list of valid joint configurations for the desired end effector pose.
        """

        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationPathError(
                'Specify either euler or quaternion values, but not both.')

        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position)
        if euler is not None:
            self._ik_target.set_orientation(euler)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion)

        handles = [j.get_handle() for j in self.joints]

        # Despite verbosity being set to 0, OMPL spits out a lot of text
        with utils.suppress_std_out_and_err():
            _, ret_floats, _, _ = utils.script_call(
                'findSeveralCollisionFreeConfigsAndCheckApproach@PyRep',
                PYREP_SCRIPT_TYPE,
                ints=[
                    self._ik_group, self._collision_collection,
                    int(ignore_collisions), trials, max_configs
                ] + handles)
        self._ik_target.set_pose(prev_pose)

        if len(ret_floats) == 0:
            raise ConfigurationError(
                'Could not find a valid joint configuration for desired end effector pose.'
            )

        num_configs = int(len(ret_floats) / len(handles))
        return [[
            ret_floats[len(handles) * i + j] for j in range(len(handles))
        ] for i in range(num_configs)]
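
    # Usage sketch (hedged): assuming `arm` is an Arm instance in a launched
    # scene, the ranked configurations might be consumed like this (pose
    # values are illustrative, not from the source):
    #
    #   configs = arm.get_configs_for_tip_pose(
    #       position=[0.3, 0.0, 0.8], euler=[0.0, 3.14, 0.0], max_configs=10)
    #   arm.set_joint_positions(configs[0])  # configurations come ranked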

    def solve_ik(self,
                 position: List[float],
                 euler: List[float] = None,
                 quaternion: List[float] = None) -> List[float]:
        """Solves an IK group and returns the calculated joint values.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :return: A list containing the calculated joint values.
        """
        self._ik_target.set_position(position)
        if euler is not None:
            self._ik_target.set_orientation(euler)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion)

        ik_result, joint_values = vrep.simCheckIkGroup(
            self._ik_group, [j.get_handle() for j in self.joints])
        if ik_result == vrep.sim_ikresult_fail:
            raise IKError(
                'IK failed. Perhaps the distance between the tip '
                'and target was too large.')
        elif ik_result == vrep.sim_ikresult_not_performed:
            raise IKError('IK not performed.')
        return joint_values
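
    # Usage sketch (hedged): illustrative values, not from the source:
    #
    #   try:
    #       joint_values = arm.solve_ik(
    #           [0.3, 0.0, 0.8], quaternion=[0.0, 0.0, 0.0, 1.0])
    #       arm.set_joint_positions(joint_values)
    #   except IKError:
    #       pass  # tip too far from the target, or IK not performed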

    def get_path_from_cartesian_path(
            self, path: CartesianPath) -> ArmConfigurationPath:
        """Translate a path from cartesian space, to arm configuration space.

        Note: It must be possible to reach the start of the path via a linear
        path, otherwise an error will be raised.

        :param path: A :py:class:`CartesianPath` instance to be translated to
            a configuration-space path.
        :raises: ConfigurationPathError if no path could be created.

        :return: A path in the arm configuration space.
        """
        handles = [j.get_handle() for j in self.joints]
        _, ret_floats, _, _ = utils.script_call(
            'getPathFromCartesianPath@PyRep',
            PYREP_SCRIPT_TYPE,
            ints=[
                path.get_handle(), self._ik_group,
                self._ik_target.get_handle()
            ] + handles)
        if len(ret_floats) == 0:
            raise ConfigurationPathError(
                'Could not create a path from cartesian path.')
        return ArmConfigurationPath(self, ret_floats)

    def get_linear_path(self,
                        position: List[float],
                        euler: List[float] = None,
                        quaternion: List[float] = None,
                        steps=50,
                        ignore_collisions=False) -> ArmConfigurationPath:
        """Gets a linear configuration path given a target pose.

        Generates a path that drives a robot from its current configuration
        to its target dummy in a straight line (i.e. shortest path in Cartesian
        space).

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param steps: The desired number of path points. Each path point
            contains a robot configuration. A minimum of two path points is
            required. If the target pose distance is large, a larger number
            of steps leads to better results for this function.
        :param ignore_collisions: If collision checking should be disabled.
        :raises: ConfigurationPathError if no path could be created.

        :return: A linear path in the arm configuration space.
        """
        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationPathError(
                'Specify either euler or quaternion values, but not both.')

        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position)
        if euler is not None:
            self._ik_target.set_orientation(euler)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion)
        handles = [j.get_handle() for j in self.joints]

        # Despite verbosity being set to 0, OMPL spits out a lot of text
        with utils.suppress_std_out_and_err():
            _, ret_floats, _, _ = utils.script_call(
                'getLinearPath@PyRep',
                PYREP_SCRIPT_TYPE,
                ints=[
                    steps, self._ik_group, self._collision_collection,
                    int(ignore_collisions)
                ] + handles)
        self._ik_target.set_pose(prev_pose)

        if len(ret_floats) == 0:
            raise ConfigurationPathError('Could not create path.')
        return ArmConfigurationPath(self, ret_floats)
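
    # Execution sketch (hedged): PyRep configuration paths are usually driven
    # by stepping both the path and the simulation; `pr` stands for the
    # launched PyRep instance (an assumption, not shown in this class):
    #
    #   path = arm.get_linear_path([0.3, 0.0, 0.8], euler=[0.0, 3.14, 0.0])
    #   done = False
    #   while not done:
    #       done = path.step()  # feed the next configuration to the joints
    #       pr.step()           # advance the physics simulation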

    def get_nonlinear_path(self,
                           position: List[float],
                           euler: List[float] = None,
                           quaternion: List[float] = None,
                           ignore_collisions=False,
                           trials=100,
                           max_configs=60,
                           trials_per_goal=6,
                           algorithm=Algos.SBL) -> ArmConfigurationPath:
        """Gets a non-linear (planned) configuration path given a target pose.

        A path is generated by finding several configs for a pose, and ranking
        them according to the distance in configuration space (smaller is
        better).

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs.
        :param max_configs: The maximum number of configurations we want to
            generate before ranking them.
        :param trials_per_goal: The number of paths per config we want to trial.
        :param algorithm: The algorithm for path planning to use.
        :raises: ConfigurationPathError if no path could be created.

        :return: A non-linear path in the arm configuration space.
        """

        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationPathError(
                'Specify either euler or quaternion values, but not both.')

        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position)
        if euler is not None:
            self._ik_target.set_orientation(euler)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion)

        handles = [j.get_handle() for j in self.joints]

        # Despite verbosity being set to 0, OMPL spits out a lot of text
        with utils.suppress_std_out_and_err():
            _, ret_floats, _, _ = utils.script_call(
                'getNonlinearPath@PyRep',
                PYREP_SCRIPT_TYPE,
                ints=[
                    self._ik_group, self._collision_collection,
                    int(ignore_collisions), trials, max_configs,
                    trials_per_goal
                ] + handles,
                strings=[algorithm.value])
        self._ik_target.set_pose(prev_pose)

        if len(ret_floats) == 0:
            raise ConfigurationPathError('Could not create path.')
        return ArmConfigurationPath(self, ret_floats)

    def get_path(self,
                 position: List[float],
                 euler: List[float] = None,
                 quaternion: List[float] = None,
                 ignore_collisions=False,
                 trials=100,
                 max_configs=60,
                 trials_per_goal=6,
                 algorithm=Algos.SBL) -> ArmConfigurationPath:
        """Tries to get a linear path, failing that tries a non-linear path.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs.
            (Only applicable if a non-linear path is needed)
        :param max_configs: The maximum number of configurations we want to
            generate before ranking them.
            (Only applicable if a non-linear path is needed)
        :param trials_per_goal: The number of paths per config we want to trial.
            (Only applicable if a non-linear path is needed)
        :param algorithm: The algorithm for path planning to use.
            (Only applicable if a non-linear path is needed)

        :raises: ConfigurationPathError if neither a linear or non-linear path
            can be created.
        :return: A linear or non-linear path in the arm configuration space.
        """
        try:
            p = self.get_linear_path(position,
                                     euler,
                                     quaternion,
                                     ignore_collisions=ignore_collisions)
            return p
        except ConfigurationPathError:
            pass  # Allowed. Try again, but with non-linear.

        # This time if an exception is thrown, we don't want to catch it.
        p = self.get_nonlinear_path(position, euler, quaternion,
                                    ignore_collisions, trials, max_configs,
                                    trials_per_goal, algorithm)
        return p

    def get_tip(self) -> Dummy:
        """Gets the tip of the arm.

        Each arm is required to have a tip for path planning.

        :return: The tip of the arm.
        """
        return self._ik_tip

    def get_jacobian(self):
        """Calculates the Jacobian.

        :return: the row-major Jacobian matrix.
        """
        self._ik_target.set_matrix(self._ik_tip.get_matrix())
        vrep.simCheckIkGroup(self._ik_group,
                             [j.get_handle() for j in self.joints])
        jacobian, (rows, cols) = vrep.simGetIkGroupMatrix(self._ik_group, 0)
        jacobian = np.array(jacobian).reshape((rows, cols), order='F')
        return jacobian
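
Since the Jacobian maps joint velocities to tip velocities, a hedged sanity sketch (assuming `arm` is an Arm instance as above; the row/column convention of simGetIkGroupMatrix depends on the simulator, so the shape check below is deliberate):

import numpy as np

J = arm.get_jacobian()
q_dot = np.array(arm.get_joint_velocities())
# Check which axis corresponds to the joints before multiplying.
if J.shape[1] == q_dot.size:
    tip_twist = J @ q_dot    # columns correspond to joints
else:
    tip_twist = J.T @ q_dot  # rows correspond to joints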
Example #9
    def __init__(self, task_class, observation_mode='state',
                 randomization_mode='none', rand_config=None, img_size=256,
                 special_start=[], fixed_grip=-1, force_randomly_place=False,
                 force_change_position=False, sparse=False, not_special_p=0,
                 ground_p=0, special_is_grip=False, altview=False,
                 procedural_ind=0, procedural_mode='same', procedural_set=[],
                 is_mesh_obs=False, blank=False, COLOR_TUPLE=[1, 0, 0]):
        # blank: use an all-zeros agent.
        self.blank = blank
        # altview: whether to add a second camera angle. True/False, or
        # "both" to concatenate the observations from both views.
        self.altview = altview
        self.img_size = img_size
        self.sparse = sparse
        self.task_class = task_class
        self._observation_mode = observation_mode
        self._randomization_mode = randomization_mode
        # special_start is a list of actions to take at the beginning.
        self.special_start = special_start
        # fixed_grip: temporary hack to keep the gripper in a fixed state.
        # 0 for fixed closed, 0.1 for fixed open, -1 for not fixed.
        self.fixed_grip = fixed_grip
        # force the task to be randomly placed
        self.force_randomly_place = force_randomly_place
        # force the task to change position in addition to rotation
        self.force_change_position = force_change_position

        obs_config = ObservationConfig()
        if observation_mode == 'state':
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        elif observation_mode == 'vision' or observation_mode=="visiondepth" or observation_mode=="visiondepthmask":
            # obs_config.set_all(True)
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        else:
            raise ValueError(
                'Unrecognised observation_mode: %s.' % observation_mode)

        action_mode = ActionMode(ArmActionMode.DELTA_EE_POSE_PLAN_NOQ)
        print("using delta pose pan")

        if randomization_mode == "random":
            objs = ['target', 'boundary', 'Floor', 'Roof', 'Wall1', 'Wall2', 'Wall3', 'Wall4', 'diningTable_visible']
            if rand_config is None:
                raise ValueError('rand_config must be provided when '
                                 'randomization_mode is "random".')
            self.env = DomainRandomizationEnvironment(
                action_mode, obs_config=obs_config, headless=True,
                randomize_every=RandomizeEvery.EPISODE, frequency=1,
                visual_randomization_config=rand_config
            )
        else:
            self.env = Environment(
                action_mode, obs_config=obs_config, headless=True
            )
        self.env.launch()
        self.task = self.env.get_task(task_class)

        # Probability that the pick-and-lift task starts the gripper away
        # from the special location (arguably better named non_special_p).
        self.task._task.not_special_p = not_special_p
        # Probability that the goal is on the ground.
        self.task._task.ground_p = ground_p
        # For the "special" case, whether to grip the object or just hover above it. 
        self.task._task.special_is_grip = special_is_grip
        # for procedural env
        self.task._task.procedural_ind = procedural_ind
        # procedural mode: same, increase, or random. 
        self.task._task.procedural_mode = procedural_mode
        # ideally a list-like object dictating the indices to sample from
        # each episode.
        self.task._task.procedural_set = procedural_set
        # if state obs is mesh obs
        self.task._task.is_mesh_obs = is_mesh_obs

        self.task._task.sparse = sparse
        self.task._task.COLOR_TUPLE = COLOR_TUPLE
        
        _, obs = self.task.reset()

        cam_placeholder = Dummy('cam_cinematic_placeholder')
        cam_pose = cam_placeholder.get_pose().copy()
        # custom pose overriding the placeholder pose
        cam_pose = [1.59999931, 0., 2.27999949, 0.65328157, -0.65328145,
                    -0.27059814, 0.27059814]
        cam_pose[0] = 1
        cam_pose[2] = 1.5
        self.frontcam = VisionSensor.create([img_size, img_size])
        self.frontcam.set_pose(cam_pose)

        self.frontcam.set_render_mode(RenderMode.OPENGL)
        self.frontcam.set_perspective_angle(60)
        self.frontcam.set_explicit_handling(1)

        self.frontcam_mask = VisionSensor.create([img_size, img_size])
        self.frontcam_mask.set_pose(cam_pose)
        self.frontcam_mask.set_render_mode(RenderMode.OPENGL_COLOR_CODED)
        self.frontcam_mask.set_perspective_angle(60)
        self.frontcam_mask.set_explicit_handling(1)

        if altview:
            alt_pose = [0.25, -0.65, 1.5, 0, 0.93879825, 0.34169483, 0]
            self.altcam = VisionSensor.create([img_size, img_size])
            self.altcam.set_pose(alt_pose)
            self.altcam.set_render_mode(RenderMode.OPENGL)
            self.altcam.set_perspective_angle(60)
            self.altcam.set_explicit_handling(1)
            
            self.altcam_mask = VisionSensor.create([img_size, img_size])
            self.altcam_mask.set_pose(alt_pose)
            self.altcam_mask.set_render_mode(RenderMode.OPENGL_COLOR_CODED)
            self.altcam_mask.set_perspective_angle(60)
            self.altcam_mask.set_explicit_handling(1)


        self.action_space = spaces.Box(
            low=-1.0, high=1.0, shape=(action_mode.action_size,))

        if observation_mode == 'state':
            self.observation_space = spaces.Dict({
                "observation": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_state_obs().shape),
                "achieved_goal": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_achieved_goal().shape),
                "desired_goal": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_desired_goal().shape)
            })
        # Use the front vision cam
        elif observation_mode == 'vision':
            self.frontcam.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                "observation": spaces.Box(
                    low=0, high=1, shape=self.frontcam.capture_rgb().transpose(2,0,1).flatten().shape,
                    )
                })
            if altview == "both":
                example = self.frontcam.capture_rgb().transpose(2,0,1).flatten()
                self.observation_space = spaces.Dict({
                    "state": spaces.Box(
                        low=-np.inf, high=np.inf,
                        shape=obs.get_low_dim_data().shape),
                    "observation": spaces.Box(
                        low=0, high=1, shape=np.array([example,example]).shape,
                        )
                    })
        elif observation_mode == 'visiondepth':

            self.frontcam.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                "observation": spaces.Box(
                    #thinking about not flattening the shape, because primarily used for dataset and not for training
                    low=0, high=1, shape=np.array([self.frontcam.capture_rgb().transpose(2,0,1), self.frontcam.capture_depth()[None,...]]).shape,
                    )
                })
        elif observation_mode == 'visiondepthmask':

            self.frontcam.handle_explicitly()
            self.frontcam_mask.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                "observation": spaces.Box(
                    #thinking about not flattening the shape, because primarily used for dataset and not for training
                    low=0, high=1, shape=np.array([self.frontcam.capture_rgb().transpose(2,0,1), self.frontcam.capture_depth()[None,...], rgb_handles_to_mask(self.frontcam_mask.capture_rgb())]).shape,
                    )
                })
            if altview == "both":
                self.observation_space = spaces.Dict({
                    "state": spaces.Box(
                        low=-np.inf, high=np.inf,
                        shape=obs.get_low_dim_data().shape),
                    "observation": spaces.Box(
                        #thinking about not flattening the shape, because primarily used for dataset and not for training
                        low=0, high=1, shape=np.array([self.frontcam.capture_rgb().transpose(2,0,1), self.frontcam.capture_rgb().transpose(2,0,1), self.frontcam.capture_depth()[None,...], rgb_handles_to_mask(self.frontcam_mask.capture_rgb())]).shape,
                        )
                    })

        self._gym_cam = None
Example #10
class ArmPSM(PyRep):
    def __init__(self, pr, armNumber=1):
        """self.pr = PyRep()
        self.pr.launch(scenePath)
        self.pr.start()
        self.pr.step()"""
        self.pr = pr
        self.psm = armNumber
        self.ik_mode = 1

        self.base_handle = Shape('RCM_PSM{}'.format(self.psm))
        self.j1_handle = Joint('J1_PSM{}'.format(self.psm))
        self.j2_handle = Joint('J2_PSM{}'.format(self.psm))
        self.j3_handle = Joint('J3_PSM{}'.format(self.psm))
        self.j4_handle = Joint('J1_TOOL{}'.format(self.psm))
        self.j5_handle = Joint('J2_TOOL{}'.format(self.psm))
        self.j6d_handle = Joint('J3_dx_TOOL{}'.format(self.psm))
        self.j6s_handle = Joint('J3_sx_TOOL{}'.format(self.psm))

        self.j5_dummy_handle = Dummy('J2_virtual_TOOL{}'.format(self.psm))

        self.j6d_tip_dummy_handle = Dummy('J3_dx_tip_TOOL{}'.format(self.psm))
        self.j6s_tip_dummy_handle = Dummy('J3_sx_tip_TOOL{}'.format(self.psm))

        self.ik_target_dx_dummy_handle = Dummy('IK_target_dx_PSM{}'.format(
            self.psm))
        self.ik_target_sx_dummy_handle = Dummy('IK_target_sx_PSM{}'.format(
            self.psm))
        self.marker = Shape('yaw_{}'.format(self.psm))
        self.EE_virtual_handle = Dummy('EE_virtual_TOOL{}'.format(self.psm))

        self.ik_signal = IntegerSignal("run_IK_PSM{}".format(self.psm))
        self.dyn_signal = IntegerSignal("run_dyn_PSM{}".format(self.psm))

        # Set IK mode off to save on computation for V-REP:
        self.setIkMode(0)
        # Set dynamics mode off to save on computation time for V-REP:
        self.setDynamicsMode(0)

    #dyn_mode = 1 turns on dynamics
    #dyn_mode = 0 turns off dynamics
    def setDynamicsMode(self, dyn_mode):
        self.dyn_mode = dyn_mode
        self.dyn_signal.set(self.dyn_mode)

    #ik_mode = 1 turns on ik_mode
    #ik_mode = 0 turns off ik_mode
    def setIkMode(self, ik_mode):
        self.ik_mode = ik_mode
        self.ik_signal.set(self.ik_mode)

    def posquat2Matrix(self, pos, quat):
        T = np.eye(4)
        T[0:3,
          0:3] = quaternions.quat2mat([quat[-1], quat[0], quat[1], quat[2]])
        T[0:3, 3] = pos

        return np.array(T)

    def matrix2posquat(self, T):
        pos = T[0:3, 3]
        quat = quaternions.mat2quat(T[0:3, 0:3])
        quat = [quat[1], quat[2], quat[3], quat[0]]

        return np.array(pos), np.array(quat)
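
    # Round-trip sanity check (hedged), assuming `psm` is an ArmPSM instance:
    #
    #   pos = np.zeros(3)
    #   quat = np.array([0.0, 0.0, 0.0, 1.0])   # identity, (x, y, z, w)
    #   T = psm.posquat2Matrix(pos, quat)
    #   pos2, quat2 = psm.matrix2posquat(T)
    #   assert np.allclose(pos, pos2) and np.allclose(quat, quat2)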

    def getJawAngle(self):
        pos6d = self.j6d_handle.get_joint_position()
        pos6s = self.j6s_handle.get_joint_position()
        jawAngle = 0.5 * (pos6d + pos6s) / 0.4106
        return jawAngle

    def getJointAngles(self):
        pos1 = self.j1_handle.get_joint_position()
        pos2 = self.j2_handle.get_joint_position()
        pos3 = self.j3_handle.get_joint_position()
        pos4 = self.j4_handle.get_joint_position()
        pos5 = self.j5_handle.get_joint_position()
        pos6s = self.j6s_handle.get_joint_position()
        pos6d = self.j6d_handle.get_joint_position()

        pos6 = 0.5 * (pos6d - pos6s)
        jawAngle = 0.5 * (pos6d + pos6s) / 0.4106

        jointAngles = np.array([pos1, pos2, pos3, pos4, pos5, pos6])

        return jointAngles, jawAngle

    def getJointVelocities(self):
        vel1 = self.j1_handle.get_joint_velocity()
        vel2 = self.j2_handle.get_joint_velocity()
        vel3 = self.j3_handle.get_joint_velocity()
        vel4 = self.j4_handle.get_joint_velocity()
        vel5 = self.j5_handle.get_joint_velocity()
        vel6s = self.j6s_handle.get_joint_velocity()
        vel6d = self.j6d_handle.get_joint_velocity()

        vel6 = 0.5 * (vel6s - vel6d)
        jawVel = 0.5 * (vel6s + vel6d) / 0.4106

        jointVelocities = np.array([vel1, vel2, vel3, vel4, vel5, vel6])

        return jointVelocities, jawVel

    def setJointAngles(self, jointAngles, jawAngle):

        self.j1_handle.set_joint_position(jointAngles[0])
        self.j2_handle.set_joint_position(jointAngles[1])
        self.j3_handle.set_joint_position(jointAngles[2])
        self.j4_handle.set_joint_position(jointAngles[3])
        self.j5_handle.set_joint_position(jointAngles[4])

        pos6s = 0.4106 * jawAngle - jointAngles[5]
        pos6d = 0.4106 * jawAngle + jointAngles[5]

        self.j6s_handle.set_joint_position(pos6s)
        self.j6d_handle.set_joint_position(pos6d)
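
    # The two tool joints (J3_sx, J3_dx) jointly encode the last wrist angle
    # and the jaw opening; setJointAngles/getJointAngles solve that 2x2
    # coupling. A hedged check of the algebra with illustrative values:
    #
    #   j6, jaw = 0.3, 0.5
    #   pos6s = 0.4106 * jaw - j6
    #   pos6d = 0.4106 * jaw + j6
    #   assert abs(0.5 * (pos6d - pos6s) - j6) < 1e-9
    #   assert abs(0.5 * (pos6d + pos6s) / 0.4106 - jaw) < 1e-9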

    def getPoseAtJoint(self, j):
        if j == 0:
            pose = self.base_handle.get_pose()
            pos, quat = pose[0:3], pose[3:]
        elif j == 1:
            pose = self.j2_handle.get_pose(relative_to=self.base_handle)
            pos, quat = pose[0:3], pose[3:]
            T = self.posquat2Matrix(pos, quat)
            rot90x = [[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]
            pos, quat = self.matrix2posquat(np.dot(T, rot90x))
        elif j == 2:
            pose = self.j3_handle.get_pose(relative_to=self.base_handle)
            pos, quat = pose[0:3], pose[3:]
            T = self.posquat2Matrix(pos, quat)
            rot = [[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]]
            pos, quat = self.matrix2posquat(np.dot(T, rot))
        elif j == 3:
            pose = self.j4_handle.get_pose(relative_to=self.base_handle)
            pos, quat = pose[0:3], pose[3:]
            T = self.posquat2Matrix(pos, quat)
            rot = [[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
            pos, quat = self.matrix2posquat(np.dot(T, rot))
        elif j == 4:
            pose = self.j5_handle.get_pose(relative_to=self.base_handle)
            pos, quat = pose[0:3], pose[3:]
            T = self.posquat2Matrix(pos, quat)
            rot = [[0, 0, -1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]]
            pos, quat = self.matrix2posquat(np.dot(T, rot))
        elif j == 5:
            pose = self.j5_dummy_handle.get_pose(relative_to=self.base_handle)
            pos, quat = pose[0:3], pose[3:]
        else:
            pose = self.EE_virtual_handle.get_pose(
                relative_to=self.base_handle)
            pos, quat = pose[0:3], pose[3:]
            if j != 6:
                T = self.posquat2Matrix(pos, quat)

                ct = np.cos(0)
                st = np.sin(0)

                ca = np.cos(-np.pi / 2.0)
                sa = np.sin(-np.pi / 2.0)

                T_x = np.array([[1, 0, 0, 0], [0, ca, -sa, 0], [0, sa, ca, 0],
                                [0, 0, 0, 1]])
                T_z = np.array([[ct, -st, 0, 0], [st, ct, 0, 0],
                                [0, 0, 1, 0.0102], [0, 0, 0, 1]])
                T = np.dot(np.dot(T, T_x), T_z)

                pos, quat = self.matrix2posquat(T)

        return np.array(pos), np.array(quat)

    def getPoseAtEE(self):
        return self.getPoseAtJoint(6)

    #def getVelocityAtEE(self):
    #   return self.EE_virtual_handle.get_velocity()

    def setPoseAtEE(self, pos, quat, jawAngle):
        theta = 0.4106 * jawAngle

        b_T_ee = self.posquat2Matrix(pos, quat)

        ee_T_sx = np.array([
            [9.99191168e-01, 4.02120491e-02, -5.31786338e-06, 4.17232513e-07],
            [-4.01793160e-02, 9.98383134e-01, 4.02087139e-02, -1.16467476e-04],
            [1.62218404e-03, -4.01759782e-02, 9.99191303e-01, -3.61323357e-04],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]
        ])

        ee_T_dx = np.array([
            [-9.99191251e-01, -4.02099858e-02, -1.98098369e-06, 4.17232513e-07],
            [-4.01773877e-02, 9.98383193e-01, -4.02091818e-02, -1.16467476e-04],
            [1.61878841e-03, -4.01765831e-02, -9.99191284e-01, -3.61323357e-04],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]
        ])

        b_T_sx = np.dot(b_T_ee, ee_T_sx)
        b_T_dx = np.dot(b_T_ee, ee_T_dx)

        ct = np.cos(theta)
        st = np.sin(theta)

        x_T_ts = np.array([[ct, -st, 0, -st * 0.009],
                           [st, ct, 0, ct * 0.009],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]])

        pos_sx, quat_sx = self.matrix2posquat(np.dot(b_T_sx, x_T_ts))
        pos_dx, quat_dx = self.matrix2posquat(np.dot(b_T_dx, x_T_ts))

        self.ik_target_dx_dummy_handle.set_pose(np.r_[pos_dx, quat_dx],
                                                relative_to=self.base_handle)
        self.ik_target_sx_dummy_handle.set_pose(np.r_[pos_sx, quat_sx],
                                                relative_to=self.base_handle)

    def get_marker_position(self, relative_to=None):
        return self.marker.get_position(relative_to)

    """def stopSim(self):
Example #11
class SpecificWorker(GenericWorker):
    def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)

    def __del__(self):
        print('SpecificWorker destructor')
        self.pr.stop()
        self.pr.shutdown()

    def setParams(self, params):
        SCENE_FILE = params["scene_dir"]
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()

        self.cameras = {}
        cam = VisionSensor("Camera_Shoulder")
        self.cameras["Camera_Shoulder"] = {
            "handle": cam,
            "id": 1,
            "angle": np.radians(cam.get_perspective_angle()),
            "width": cam.get_resolution()[0],
            "height": cam.get_resolution()[1],
            "focal": (cam.get_resolution()[0] /
                      np.tan(np.radians(cam.get_perspective_angle()))),
            "position": cam.get_position(),
            "rotation": cam.get_quaternion(),
            "image_rgb": np.array(0),
            "image_rgbd": np.array(0),
            # note: the original dict also listed "depth": 3, which this
            # entry shadowed; the channel count is passed literally below
            "depth": np.ndarray(0)
        }

        self.grasping_objects = {}
        can = Shape("can")
        self.grasping_objects["002_master_chef_can"] = {
            "handle": can,
            "sim_pose": None,
            "pred_pose_rgb": None,
            "pred_pose_rgbd": None
        }

        with (open("objects_pcl.pickle", "rb")) as file:
            self.object_pcl = pickle.load(file)

        self.intrinsics = np.array([
            [self.cameras["Camera_Shoulder"]["focal"], 0.0,
             self.cameras["Camera_Shoulder"]["width"] / 2.0],
            [0.0, self.cameras["Camera_Shoulder"]["focal"],
             self.cameras["Camera_Shoulder"]["height"] / 2.0],
            [0.0, 0.0, 1.0]])

        self.arm_ops = {
            "MoveToHome": 1,
            "MoveToObj": 2,
            "CloseGripper": 3,
            "OpenGripper": 4
        }

        self.grasping_iter = 10

        self.arm_base = Shape("gen3")
        self.arm_target = Dummy("target")
        self.gripper = Joint("RG2_openCloseJoint")

    def compute(self):
        print('SpecificWorker.compute...')
        try:
            self.pr.step()

            # open the arm gripper
            self.move_gripper(self.arm_ops["OpenGripper"])

            # read arm camera RGB signal
            cam = self.cameras["Camera_Shoulder"]
            image_float = cam["handle"].capture_rgb()
            depth = cam["handle"].capture_depth(in_meters=False)
            image = cv2.normalize(src=image_float,
                                  dst=None,
                                  alpha=0,
                                  beta=255,
                                  norm_type=cv2.NORM_MINMAX,
                                  dtype=cv2.CV_8U)
            cam["image_rgb"] = RoboCompCameraRGBDSimple.TImage(
                width=cam["width"],
                height=cam["height"],
                depth=3,
                focalx=cam["focal"],
                focaly=cam["focal"],
                image=image.tobytes())
            cam["image_rgbd"] = RoboCompCameraRGBDSimple.TImage(
                width=cam["width"],
                height=cam["height"],
                depth=3,
                focalx=cam["focal"],
                focaly=cam["focal"],
                image=image.tobytes())
            cam["depth"] = RoboCompCameraRGBDSimple.TDepth(
                width=cam["width"],
                height=cam["height"],
                depthFactor=1.0,
                depth=depth.tobytes())

            # get objects' poses from the simulator
            for obj_name in self.grasping_objects.keys():
                self.grasping_objects[obj_name]["sim_pose"] = \
                    self.grasping_objects[obj_name]["handle"].get_pose()

            # get objects' poses from RGB
            pred_poses = self.objectposeestimationrgb_proxy.getObjectPose(
                cam["image_rgb"])
            self.visualize_poses(image_float, pred_poses, "rgb_pose.png")
            for pose in pred_poses:
                if pose.objectname in self.grasping_objects.keys():
                    obj_trans = [pose.x, pose.y, pose.z]
                    obj_quat = [pose.qx, pose.qy, pose.qz, pose.qw]
                    obj_pose = self.process_pose(obj_trans, obj_quat)
                    self.grasping_objects[
                        pose.objectname]["pred_pose_rgb"] = obj_pose

            # get objects' poses from RGBD
            pred_poses = self.objectposeestimationrgbd_proxy.getObjectPose(
                cam["image_rgbd"], cam["depth"])
            self.visualize_poses(image_float, pred_poses, "rgbd_pose.png")
            for pose in pred_poses:
                if pose.objectname in self.grasping_objects.keys():
                    obj_trans = [pose.x, pose.y, pose.z]
                    obj_quat = [pose.qx, pose.qy, pose.qz, pose.qw]
                    obj_pose = self.process_pose(obj_trans, obj_quat)
                    self.grasping_objects[
                        pose.objectname]["pred_pose_rgbd"] = obj_pose

            # create a dummy for arm path planning
            approach_dummy = Dummy.create()
            approach_dummy.set_name("approach_dummy")

            # initialize approach dummy in embedded lua scripts
            call_ret = self.pr.script_call(
                "initDummy@gen3", vrepConst.sim_scripttype_childscript)

            # set object pose for the arm to follow
            # NOTE : choose simulator or predicted pose
            dest_pose = self.grasping_objects["002_master_chef_can"][
                "pred_pose_rgbd"]
            # add a small offset along the z-axis to grasp the object from the top
            dest_pose[2] += 0.04

            # set dummy pose with the pose of object to be grasped
            approach_dummy.set_pose(dest_pose)

            # move gen3 arm to the object
            self.move_arm(approach_dummy, self.arm_ops["MoveToObj"])

            # close the arm gripper
            self.move_gripper(self.arm_ops["CloseGripper"])

            # change approach dummy pose to the final destination pose
            dest_pose[2] += 0.4
            approach_dummy.set_pose(dest_pose)

            # move gen3 arm to the final destination
            self.move_arm(approach_dummy, self.arm_ops["MoveToObj"])

            # remove the created approach dummy
            approach_dummy.remove()

        except Exception as e:
            print(e)
        return True

    def process_pose(self, obj_trans, obj_rot):
        # convert an object pose from camera frame to world frame
        # define camera pose and z-axis flip matrix
        cam_trans = self.cameras["Camera_Shoulder"]["position"]
        cam_rot_mat = R.from_quat(self.cameras["Camera_Shoulder"]["rotation"])
        z_flip = R.from_matrix(np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]))
        # get object position in world coordinates
        obj_trans = np.dot(
            cam_rot_mat.as_matrix(),
            np.dot(z_flip.as_matrix(),
                   np.array(obj_trans).reshape(-1, )))
        final_trans = obj_trans + cam_trans
        # get object orientation in world coordinates
        obj_rot_mat = R.from_quat(obj_rot)
        final_rot_mat = obj_rot_mat * z_flip * cam_rot_mat
        final_rot = final_rot_mat.as_quat()
        # return final object pose in world coordinates
        final_pose = list(final_trans)
        final_pose.extend(list(final_rot))
        return final_pose

    def visualize_poses(self, image, poses, img_name):
        # visualize the predicted poses on RGB image
        image = np.uint8(image * 255.0)
        for pose in poses:
            # visualize only defined objects
            if pose.objectname not in self.grasping_objects.keys():
                continue
            obj_pcl = self.object_pcl[pose.objectname]
            obj_trans = np.array([pose.x, pose.y, pose.z])
            if img_name == "rgb_pose.png":
                obj_trans[2] -= 0.2
            obj_rot = R.from_quat([pose.qx, pose.qy, pose.qz,
                                   pose.qw]).as_matrix()
            proj_pcl = self.vertices_reprojection(obj_pcl, obj_rot, obj_trans,
                                                  self.intrinsics)
            image = self.draw_pcl(image,
                                  proj_pcl,
                                  r=1,
                                  color=(randint(0, 255), randint(0, 255),
                                         randint(0, 255)))
        cv2.imwrite(os.path.join("output", img_name), image)

    def vertices_reprojection(self, vertices, r, t, k):
        # re-project vertices in pixel space
        p = np.matmul(k, np.matmul(r, vertices.T) + t.reshape(-1, 1))
        p[0] = p[0] / (p[2] + 1e-5)
        p[1] = p[1] / (p[2] + 1e-5)
        return p[:2].T
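
    # Usage sketch (hedged): projecting one toy point with assumed intrinsics
    # (fx = fy = 500, principal point at (320, 240)); `worker` stands for a
    # SpecificWorker instance:
    #
    #   K = np.array([[500.0, 0.0, 320.0],
    #                 [0.0, 500.0, 240.0],
    #                 [0.0, 0.0, 1.0]])
    #   pts = np.array([[0.1, 0.0, 1.0]])  # one point 1 m ahead of the camera
    #   uv = worker.vertices_reprojection(pts, np.eye(3), np.zeros(3), K)
    #   # uv is approximately [[370.0, 240.0]]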

    def draw_pcl(self, img, p2ds, r=1, color=(255, 0, 0)):
        # draw object point cloud on RGB image
        h, w = img.shape[0], img.shape[1]
        for pt_2d in p2ds:
            pt_2d[0] = np.clip(pt_2d[0], 0, w)
            pt_2d[1] = np.clip(pt_2d[1], 0, h)
            img = cv2.circle(img, (int(pt_2d[0]), int(pt_2d[1])), r, color, -1)
        return img

    def move_arm(self, dummy_dest, func_number):
        # move arm to destination
        # NOTE : this function is using remote lua scripts embedded in the arm
        # for better path planning, so make sure to use the correct arm model
        call_function = True
        init_pose = np.array(
            self.arm_target.get_pose(relative_to=self.arm_base))
        # loop until the arm reached the object
        while True:
            # step the simulation
            self.pr.step()
            # set function index to the desired operation
            if call_function:
                try:
                    # call the threaded child Lua scripts via PyRep
                    call_ret = self.pr.script_call(
                        "setFunction@gen3",
                        vrepConst.sim_scripttype_childscript,
                        ints=[func_number])
                except Exception as e:
                    print(e)
            # get current poses to compare
            actual_pose = self.arm_target.get_pose(relative_to=self.arm_base)
            object_pose = dummy_dest.get_pose(relative_to=self.arm_base)
            # compare poses to check for operation end
            pose_diff = np.abs(np.array(actual_pose) - np.array(init_pose))
            if call_function and (pose_diff[0] > 0.01 or pose_diff[1] > 0.01
                                  or pose_diff[2] > 0.01):
                call_function = False
            # check whether the arm reached the target
            dest_pose_diff = np.abs(
                np.array(actual_pose) - np.array(object_pose))
            if (dest_pose_diff[0] < 0.015 and dest_pose_diff[1] < 0.015
                    and dest_pose_diff[2] < 0.015):
                break

    def move_gripper(self, func_number):
        # open or close the arm gripper
        # NOTE : this function is using remote lua scripts embedded in the arm
        # for better path planning, so make sure to use the correct arm model
        call_function = True
        open_percentage = init_position = self.gripper.get_joint_position()
        # loop until the gripper is completely open (or closed)
        for iter in range(self.grasping_iter):
            # step the simulation
            self.pr.step()
            # set function index to the desired operation
            if call_function:
                try:
                    # call the threaded child Lua scripts via PyRep
                    call_ret = self.pr.script_call(
                        "setFunction@gen3",
                        vrepConst.sim_scripttype_childscript,
                        ints=[func_number])
                except Exception as e:
                    print(e)
                # compare the gripper position to determine whether the gripper moved
                if abs(self.gripper.get_joint_position() -
                       init_position) > 0.005:
                    call_function = False
            # compare the gripper position to determine whether the gripper closed or opened
            if (not call_function and
                    abs(open_percentage -
                        self.gripper.get_joint_position()) < 0.003):
                break
            # update the opening percentage
            open_percentage = self.gripper.get_joint_position()
Example #12
class Arm(RobotComponent):
    """Base class representing a robot arm with path planning support.
    """

    def __init__(self, count: int, name: str, num_joints: int,
                 base_name: str = None,
                 max_velocity=1.0, max_acceleration=4.0, max_jerk=1000):
        """Count is used for when we have multiple copies of arms"""
        joint_names = ['%s_joint%d' % (name, i+1) for i in range(num_joints)]
        super().__init__(count, name, joint_names, base_name)

        # Used for motion planning
        self.max_velocity = max_velocity
        self.max_acceleration = max_acceleration
        self.max_jerk = max_jerk

        # Motion planning handles
        suffix = '' if count == 0 else '#%d' % (count - 1)
        self._ik_target = Dummy('%s_target%s' % (name, suffix))
        self._ik_tip = Dummy('%s_tip%s' % (name, suffix))
        self._ik_group = sim.simGetIkGroupHandle('%s_ik%s' % (name, suffix))
        self._collision_collection = sim.simGetCollectionHandle(
            '%s_arm%s' % (name, suffix))

    def set_ik_element_properties(self, constraint_x=True, constraint_y=True,
                                  constraint_z=True,
                                  constraint_alpha_beta=True,
                                  constraint_gamma=True) -> None:
        constraints = 0
        if constraint_x:
            constraints |= sim.sim_ik_x_constraint
        if constraint_y:
            constraints |= sim.sim_ik_y_constraint
        if constraint_z:
            constraints |= sim.sim_ik_z_constraint
        if constraint_alpha_beta:
            constraints |= sim.sim_ik_alpha_beta_constraint
        if constraint_gamma:
            constraints |= sim.sim_ik_gamma_constraint
        sim.simSetIkElementProperties(
            ikGroupHandle=self._ik_group,
            tipDummyHandle=self._ik_tip.get_handle(),
            constraints=constraints,
            precision=None,
            weight=None,
        )

    def set_ik_group_properties(self, resolution_method='pseudo_inverse',
                                max_iterations=6, dls_damping=0.1) -> None:
        try:
            res_method = {
                'pseudo_inverse': sim.sim_ik_pseudo_inverse_method,
                'damped_least_squares': sim.sim_ik_damped_least_squares_method,
                'jacobian_transpose': sim.sim_ik_jacobian_transpose_method,
            }[resolution_method]
        except KeyError:
            raise Exception(
                'Invalid resolution method. Must be one of '
                '["pseudo_inverse" | "damped_least_squares" | '
                '"jacobian_transpose"].')
        sim.simSetIkGroupProperties(
            ikGroupHandle=self._ik_group,
            resolutionMethod=res_method,
            maxIterations=max_iterations,
            damping=dls_damping
        )
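
    # Configuration sketch (hedged): damped least squares is generally more
    # stable near singularities than the pseudo-inverse; values illustrative:
    #
    #   arm.set_ik_group_properties(
    #       resolution_method='damped_least_squares',
    #       max_iterations=10, dls_damping=0.05)
    #   arm.set_ik_element_properties(constraint_gamma=False)  # free tip yaw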

    def solve_ik_via_sampling(self,
                              position: Union[List[float], np.ndarray],
                              euler: Union[List[float], np.ndarray] = None,
                              quaternion: Union[List[float], np.ndarray] = None,
                              ignore_collisions: bool = False,
                              trials: int = 300,
                              max_configs: int = 1,
                              distance_threshold: float = 0.65,
                              max_time_ms: int = 10,
                              relative_to: Object = None
                              ) -> np.ndarray:
        """Solves an IK group and returns the calculated joint values.

        This IK method performs a random search for manipulator
        configurations that match the given end-effector pose in space.
        When the tip pose is close enough, IK is computed to try to bring
        the tip onto the target. This is the method to use when the start
        pose is far from the end pose.

        We generate up to 'max_configs' samples within 'trials' attempts,
        before ranking them according to angular distance.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs.
        :param max_configs: The maximum number of configurations we want to
            generate before sorting them.
        :param distance_threshold: Distance indicating when IK should be
            computed in order to try to bring the tip onto the target.
        :param max_time_ms: Maximum time in ms spent searching for
            each configuration.
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationError if no joint configuration could be found.

        :return: 'max_configs' number of joint configurations, ranked according
            to angular distance.
        """
        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationError(
                'Specify either euler or quaternion values, but not both.')

        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position, relative_to)
        if euler is not None:
            self._ik_target.set_orientation(euler, relative_to)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion, relative_to)

        handles = [j.get_handle() for j in self.joints]
        cyclics, intervals = self.get_joint_intervals()
        low_limits, max_limits = list(zip(*intervals))
        # If there are huge intervals, then limit them
        low_limits = np.maximum(low_limits, -np.pi*2).tolist()
        max_limits = np.minimum(max_limits, np.pi*2).tolist()

        collision_pairs = []
        if not ignore_collisions:
            collision_pairs = [self._collision_collection, sim.sim_handle_all]

        metric = joint_options = None
        valid_joint_positions = []
        for i in range(trials):
            config = sim.simGetConfigForTipPose(
                self._ik_group, handles, distance_threshold, int(max_time_ms),
                metric, collision_pairs, joint_options, low_limits, max_limits)
            if len(config) > 0:
                valid_joint_positions.append(config)
            if len(valid_joint_positions) >= max_configs:
                break

        self._ik_target.set_pose(prev_pose)
        if len(valid_joint_positions) == 0:
            raise ConfigurationError(
                'Could not find a valid joint configuration for desired '
                'end effector pose.')

        if len(valid_joint_positions) > 1:
            current_config = np.array(self.get_joint_positions())
            # Sort based on angular distance
            valid_joint_positions.sort(
                key=lambda x: np.linalg.norm(current_config - x))

        return np.array(valid_joint_positions)
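    # Hedged usage sketch (assumes a scene with a Panda arm and a target
    # dummy named 'Panda_waypoint'): sample several configurations, then
    # move to the one ranked closest to the current configuration:
    #
    #   arm = Panda()
    #   target = Dummy('Panda_waypoint')
    #   configs = arm.solve_ik_via_sampling(
    #       target.get_position(), euler=target.get_orientation(),
    #       max_configs=10)
    #   arm.set_joint_positions(configs[0])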
    def get_configs_for_tip_pose(self,
                                 position: Union[List[float], np.ndarray],
                                 euler: Union[List[float], np.ndarray] = None,
                                 quaternion: Union[List[float], np.ndarray] = None,
                                 ignore_collisions=False,
                                 trials=300, max_configs=60,
                                 relative_to: Object = None
                                 ) -> List[List[float]]:
        """Gets a valid joint configuration for a desired end effector pose.
        Must specify either rotation in euler or quaternions, but not both!
        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs
        :param max_configs: The maximum number of configurations we want to
            generate before ranking them.
        :param relative_to: Indicates relative to which reference frame we want
        the target pose. Specify None to retrieve the absolute pose,
        or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationError if no joint configuration could be found.
        :return: A list of valid joint configurations for the desired
        end effector pose.
        """

        warnings.warn("Please use 'solve_ik_via_sampling' instead.",
                      DeprecationWarning)
        return list(self.solve_ik_via_sampling(
            position, euler, quaternion, ignore_collisions, trials,
            max_configs, relative_to=relative_to))

    def solve_ik_via_jacobian(
            self, position: Union[List[float], np.ndarray],
            euler: Union[List[float], np.ndarray] = None,
            quaternion: Union[List[float], np.ndarray] = None,
            relative_to: Object = None) -> List[float]:
        """Solves an IK group and returns the calculated joint values.

        This IK method performs a linearisation around the current robot
        configuration via the Jacobian. The linearisation is valid only when
        the start and goal poses are close together; beyond a certain
        distance it breaks down, and the user is better off using
        'solve_ik_via_sampling'.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :return: A list containing the calculated joint values.
        """
        self._ik_target.set_position(position, relative_to)
        if euler is not None:
            self._ik_target.set_orientation(euler, relative_to)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion, relative_to)

        ik_result, joint_values = sim.simCheckIkGroup(
            self._ik_group, [j.get_handle() for j in self.joints])
        if ik_result == sim.sim_ikresult_fail:
            raise IKError('IK failed. Perhaps the distance between the tip '
                          'and target was too large.')
        elif ik_result == sim.sim_ikresult_not_performed:
            raise IKError('IK not performed.')
        return joint_values
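    # Hedged usage sketch: the Jacobian solver is cheap for small tip
    # displacements; fall back to sampling when it raises IKError:
    #
    #   try:
    #       q = arm.solve_ik_via_jacobian(
    #           target.get_position(), euler=target.get_orientation())
    #   except IKError:
    #       q = arm.solve_ik_via_sampling(
    #           target.get_position(), euler=target.get_orientation())[0]
    #   arm.set_joint_positions(q)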

    def solve_ik(self, position: Union[List[float], np.ndarray],
                 euler: Union[List[float], np.ndarray] = None,
                 quaternion: Union[List[float], np.ndarray] = None,
                 relative_to: Object = None) -> List[float]:
        """Solves an IK group and returns the calculated joint values.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :return: A list containing the calculated joint values.
        """
        warnings.warn("Please use 'solve_ik_via_jacobian' instead.",
                      DeprecationWarning)
        return self.solve_ik_via_jacobian(
            position, euler, quaternion, relative_to)

    def get_path_from_cartesian_path(self, path: CartesianPath
                                     ) -> ArmConfigurationPath:
        """Translate a path from cartesian space, to arm configuration space.

        Note: It must be possible to reach the start of the path via a linear
        path, otherwise an error will be raised.

        :param path: A :py:class:`CartesianPath` instance to be translated to
            a configuration-space path.
        :raises: ConfigurationPathError if no path could be created.

        :return: A path in the arm configuration space.
        """
        handles = [j.get_handle() for j in self.joints]
        _, ret_floats, _, _ = utils.script_call(
            'getPathFromCartesianPath@PyRep', PYREP_SCRIPT_TYPE,
            ints=[path.get_handle(), self._ik_group,
                  self._ik_target.get_handle()] + handles)
        if len(ret_floats) == 0:
            raise ConfigurationPathError(
                'Could not create a path from cartesian path.')
        return ArmConfigurationPath(self, ret_floats)
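    # Hedged usage sketch (the CartesianPath name 'tool_path' is
    # hypothetical; the scene must already contain such a path object):
    #
    #   cart_path = CartesianPath('tool_path')
    #   arm_path = arm.get_path_from_cartesian_path(cart_path)
    #   done = False
    #   while not done:
    #       done = arm_path.step()
    #       pr.step()  # 'pr' is the running PyRep instance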

    def get_linear_path(self, position: Union[List[float], np.ndarray],
                        euler: Union[List[float], np.ndarray] = None,
                        quaternion: Union[List[float], np.ndarray] = None,
                        steps=50, ignore_collisions=False,
                        relative_to: Object = None) -> ArmConfigurationPath:
        """Gets a linear configuration path given a target pose.

        Generates a path that drives a robot from its current configuration
        to its target dummy in a straight line (i.e. shortest path in Cartesian
        space).

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param steps: The desired number of path points. Each path point
            contains a robot configuration. A minimum of two path points is
            required. If the target pose distance is large, a larger number
            of steps leads to better results for this function.
        :param ignore_collisions: If collision checking should be disabled.
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationPathError if no path could be created.

        :return: A linear path in the arm configuration space.
        """
        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationPathError(
                'Specify either euler or quaternion values, but not both.')

        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position, relative_to)
        if euler is not None:
            self._ik_target.set_orientation(euler, relative_to)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion, relative_to)
        handles = [j.get_handle() for j in self.joints]

        collision_pairs = []
        if not ignore_collisions:
            collision_pairs = [self._collision_collection, sim.sim_handle_all]
        joint_options = None
        ret_floats = sim.generateIkPath(
            self._ik_group, handles, steps, collision_pairs, joint_options)
        self._ik_target.set_pose(prev_pose)
        if len(ret_floats) == 0:
            raise ConfigurationPathError('Could not create path.')
        return ArmConfigurationPath(self, ret_floats)
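    # Hedged usage sketch, following the usual PyRep path-stepping pattern:
    # drive the tip 10 cm straight up from its current position (the euler
    # target below is an illustrative assumption):
    #
    #   import math
    #   pos = arm.get_tip().get_position()
    #   pos[2] += 0.1
    #   path = arm.get_linear_path(pos, euler=[0.0, math.radians(180), 0.0])
    #   done = False
    #   while not done:
    #       done = path.step()
    #       pr.step()  # advance the simulation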

    def get_nonlinear_path(self, position: Union[List[float], np.ndarray],
                           euler: Union[List[float], np.ndarray] = None,
                           quaternion: Union[List[float], np.ndarray] = None,
                           ignore_collisions=False,
                           trials=300,
                           max_configs=1,
                           distance_threshold: float = 0.65,
                           max_time_ms: int = 10,
                           trials_per_goal=1,
                           algorithm=Algos.RRTConnect,
                           relative_to: Object = None
                           ) -> ArmConfigurationPath:
        """Gets a non-linear (planned) configuration path given a target pose.

        A path is generated by finding several configs for a pose, and ranking
        them according to the distance in configuration space (smaller is
        better).

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs.
            See 'solve_ik_via_sampling'.
        :param max_configs: The maximum number of configurations we want to
            generate before sorting them. See 'solve_ik_via_sampling'.
        :param distance_threshold: Distance indicating when IK should be
            computed in order to try to bring the tip onto the target.
            See 'solve_ik_via_sampling'.
        :param max_time_ms: Maximum time in ms spent searching for
            each configuration. See 'solve_ik_via_sampling'.
        :param trials_per_goal: The number of paths per config we want to trial.
        :param algorithm: The algorithm for path planning to use.
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationPathError if no path could be created.

        :return: A non-linear path in the arm configuration space.
        """

        handles = [j.get_handle() for j in self.joints]

        try:
            configs = self.solve_ik_via_sampling(
                position, euler, quaternion, ignore_collisions, trials,
                max_configs, distance_threshold, max_time_ms, relative_to)
        except ConfigurationError as e:
            raise ConfigurationPathError('Could not create path.') from e

        _, ret_floats, _, _ = utils.script_call(
            'getNonlinearPath@PyRep', PYREP_SCRIPT_TYPE,
            ints=[self._collision_collection, int(ignore_collisions),
                  trials_per_goal] + handles,
            floats=configs.flatten().tolist(),
            strings=[algorithm.value])

        if len(ret_floats) == 0:
            raise ConfigurationPathError('Could not create path.')
        return ArmConfigurationPath(self, ret_floats)
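    # Hedged usage sketch: plan a collision-free path with the default
    # RRT-Connect planner, catching the failure case:
    #
    #   try:
    #       path = arm.get_nonlinear_path(
    #           target.get_position(), euler=target.get_orientation(),
    #           algorithm=Algos.RRTConnect)
    #   except ConfigurationPathError:
    #       print('No collision-free path found.')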

    def get_path(self, position: Union[List[float], np.ndarray],
                 euler: Union[List[float], np.ndarray] = None,
                 quaternion: Union[List[float], np.ndarray] = None,
                 ignore_collisions=False,
                 trials=300,
                 max_configs=1,
                 distance_threshold: float = 0.65,
                 max_time_ms: int = 10,
                 trials_per_goal=1,
                 algorithm=Algos.RRTConnect,
                 relative_to: Object = None
                 ) -> ArmConfigurationPath:
        """Tries to get a linear path, failing that tries a non-linear path.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs.
            See 'solve_ik_via_sampling'.
        :param max_configs: The maximum number of configurations we want to
            generate before sorting them. See 'solve_ik_via_sampling'.
        :param distance_threshold: Distance indicating when IK should be
            computed in order to try to bring the tip onto the target.
            See 'solve_ik_via_sampling'.
        :param max_time_ms: Maximum time in ms spent searching for
            each configuration. See 'solve_ik_via_sampling'.
        :param trials_per_goal: The number of paths per config we want to trial.
        :param algorithm: The algorithm for path planning to use.
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.

        :raises: ConfigurationPathError if neither a linear nor a non-linear
            path can be created.
        :return: A linear or non-linear path in the arm configuration space.
        """
        try:
            p = self.get_linear_path(position, euler, quaternion,
                                     ignore_collisions=ignore_collisions,
                                     relative_to=relative_to)
            return p
        except ConfigurationPathError:
            pass  # Allowed. Try again, but with non-linear.

        # This time if an exception is thrown, we don't want to catch it.
        p = self.get_nonlinear_path(
            position, euler, quaternion, ignore_collisions, trials, max_configs,
            distance_threshold, max_time_ms, trials_per_goal, algorithm,
            relative_to)
        return p
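    # Hedged usage sketch: 'get_path' is the usual entry point, since it
    # transparently falls back from a linear to a planned path:
    #
    #   path = arm.get_path(target.get_position(),
    #                       euler=target.get_orientation())
    #   path.visualize()  # optional: draw the path in the scene
    #   done = False
    #   while not done:
    #       done = path.step()
    #       pr.step()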

    def get_tip(self) -> Dummy:
        """Gets the tip of the arm.

        Each arm is required to have a tip for path planning.

        :return: The tip of the arm.
        """
        return self._ik_tip

    def get_jacobian(self):
        """Calculates the Jacobian.

        :return: The row-major Jacobian matrix.
        """
        self._ik_target.set_matrix(self._ik_tip.get_matrix())
        sim.simCheckIkGroup(self._ik_group,
                            [j.get_handle() for j in self.joints])
        jacobian, (rows, cols) = sim.simGetIkGroupMatrix(self._ik_group, 0)
        jacobian = np.array(jacobian).reshape((rows, cols), order='F')
        return jacobian
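    # Hedged sketch of a damped-least-squares velocity step built on the
    # Jacobian above. The row/column convention (one row per joint) is an
    # assumption; check J.shape at runtime and transpose if it differs:
    #
    #   J = arm.get_jacobian().T  # -> 6 x n task-space layout (assumed)
    #   err = compute_tip_pose_error()  # hypothetical 6-D error vector
    #   lam = 0.01  # damping factor
    #   dq = J.T @ np.linalg.solve(
    #       J @ J.T + (lam ** 2) * np.eye(J.shape[0]), err)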

    def check_arm_collision(self, obj: 'Object' = None) -> bool:
        """Checks whether two entities are colliding.

        :param obj: The other collidable object to check collision against,
            or None to check against all collidable objects. Note that objects
            must be marked as collidable!
        :return: If the object is colliding.
        """
        handle = sim.sim_handle_all if obj is None else obj.get_handle()
        return sim.simCheckCollision(self._collision_collection, handle) == 1
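    # Hedged usage sketch (the shape name 'obstacle' is hypothetical):
    #
    #   if arm.check_arm_collision():
    #       print('Arm collides with some collidable object.')
    #   if arm.check_arm_collision(Shape('obstacle')):
    #       print('Arm collides with the obstacle.')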
Example #13
class Object_:
    def __init__(self, name):
        self.obj_frame = Dummy(name + '_frame')
        self.obj_visible = Shape(name + '_visible')
        self.obj_respondable = Shape(name + '_respondable')
        self.obj_mask = Shape(name + '_mask')
        # The property accessors below delegate to a single shape; it is
        # assumed here that the respondable shape is the one carrying the
        # physics properties.
        self.shape = self.obj_respondable

    #region transform -> self.obj_frame
    def get_pose(self, relative_to=None):
        return self.obj_frame.get_pose(relative_to=relative_to)

    def set_pose(self, pose, relative_to=None):
        """
        :param pose: An array containing the (X,Y,Z,Qx,Qy,Qz,Qw) pose of
            the object.
        """
        self.obj_frame.set_parent(None)
        self.obj_respondable.set_parent(self.obj_frame)
        # self.obj_visible.set_parent(self.obj_respondable)
        self.obj_frame.set_pose(pose, relative_to=relative_to)
        self.obj_respondable.set_parent(None)
        self.obj_frame.set_parent(self.obj_respondable)
        # self.obj_visible.set_parent(self.obj_respondable)

    #endregion

    #region physics and shape property -> self.shape
    def is_dynamic(self):
        return self.shape.is_dynamic()

    def set_dynamic(self, value: bool):
        self.shape.set_dynamic(value)

    def is_respondable(self):
        return self.shape.is_respondable()

    def set_respondable(self, value: bool):
        self.shape.set_respondable(value)

    def is_collidable(self):
        return self.shape.is_collidable()

    def set_collidable(self, value: bool):
        self.shape.set_collidable(value)

    def is_detectable(self):
        return self.shape.is_detectable()

    def set_detectable(self, value: bool):
        self.shape.set_detectable(value)

    def is_renderable(self):
        return self.shape.is_renderable()

    def set_renderable(self, value: bool):
        self.shape.set_renderable(value)

    def check_collision(self, obj=None):
        return self.shape.check_collision(obj)

    #endregion
    def remove(self):
        self.shape.remove()
        self.obj_frame.remove()
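
# Hedged usage sketch for Object_ (the scene-object name 'cup' is
# hypothetical; the class expects '<name>_frame', '<name>_visible',
# '<name>_respondable' and '<name>_mask' objects to exist in the scene):
#
#   cup = Object_('cup')
#   pose = cup.get_pose()
#   cup.set_dynamic(False)
#   cup.set_pose([0.25, 0.0, 0.8, 0.0, 0.0, 0.0, 1.0])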