Example #1
    def _compute_waypoints(self, action):
        """Convert action of a single step to waypoints.

        Args:
            action: The action of a single step.

        Returns:
            List of waypoints of a single step.
        """
        action = np.reshape(action, [2, 2])
        start = action[0, :]
        motion = action[1, :]

        # Start.
        x = start[0] * self.start_range[0] + self.start_offset[0]
        y = start[1] * self.start_range[1] + self.start_offset[1]
        z = self.start_z
        angle = 0.0
        start = Pose([[x, y, z],
                      [np.pi, 0, (angle + np.pi) % (2 * np.pi) - np.pi]])

        # End.
        delta_x = motion[0] * self.config.ACTION.MOTION.TRANSLATION_X
        delta_y = motion[1] * self.config.ACTION.MOTION.TRANSLATION_Y
        x = x + delta_x
        y = y + delta_y
        x = np.clip(x, self.cspace.low[0], self.cspace.high[0])
        y = np.clip(y, self.cspace.low[1], self.cspace.high[1])
        end = Pose([[x, y, z],
                    [np.pi, 0, (angle + np.pi) % (2 * np.pi) - np.pi]])

        waypoints = [start, end]
        return waypoints
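
A minimal standalone sketch of the same normalized-action-to-waypoint mapping,
written with plain NumPy. The start range, start offset, translation scale, and
workspace bounds below are hypothetical stand-ins for the config values used
above, and the Pose construction is omitted.

import numpy as np

# Hypothetical workspace parameters; the real ones come from the config.
START_RANGE = np.array([0.1, 0.1])
START_OFFSET = np.array([0.5, 0.0])
TRANSLATION = np.array([0.15, 0.15])
CSPACE_LOW = np.array([0.3, -0.3])
CSPACE_HIGH = np.array([0.7, 0.3])


def action_to_xy_waypoints(action):
    """Map a flat 4-D action in [-1, 1] to start and end XY positions."""
    action = np.reshape(action, [2, 2])
    start_xy = action[0] * START_RANGE + START_OFFSET
    end_xy = np.clip(start_xy + action[1] * TRANSLATION,
                     CSPACE_LOW, CSPACE_HIGH)
    return start_xy, end_xy


print(action_to_xy_waypoints([0.2, -0.1, 1.0, 0.5]))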
Example #2
    def compute_inverse_kinematics(self,
                                   link_uid,
                                   link_pose,
                                   upper_limits=None,
                                   lower_limits=None,
                                   ranges=None,
                                   damping=None,
                                   neutral_positions=None):
        """Compute the inverse kinematics.

        Args:
            link_uid: The unique ID of the link.
            link_pose: The target pose of the link.
            upper_limits: The upper limits of joints.
            lower_limits: The lower limits of joints.
            ranges: The ranges of joints.
            damping: The list of joint damping parameters.
            neutral_positions: The neutral joint positions.

        Returns:
            target_positions: The list of target joint positions.
        """
        body_uid, link_ind = link_uid

        if not isinstance(link_pose, Pose):
            link_pose = Pose(link_pose)

        position = link_pose.position
        quaternion = link_pose.quaternion

        kwargs = dict()

        kwargs['bodyUniqueId'] = body_uid
        kwargs['endEffectorLinkIndex'] = link_ind
        kwargs['targetPosition'] = list(position)

        if quaternion is not None:
            kwargs['targetOrientation'] = list(quaternion)

        # TODO(kuanfang): Not sure if those are necessary for computing IK.
        if lower_limits is not None:
            kwargs['lowerLimits'] = lower_limits

        if upper_limits is not None:
            kwargs['upperLimits'] = upper_limits

        if ranges is not None:
            kwargs['jointRanges'] = ranges

        if damping is not None:
            kwargs['jointDamping'] = damping

        if neutral_positions is not None:
            kwargs['restPoses'] = neutral_positions

        kwargs['physicsClientId'] = self.uid

        target_positions = pybullet.calculateInverseKinematics(**kwargs)

        return target_positions
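
For reference, a hedged usage sketch that calls
pybullet.calculateInverseKinematics directly on one of the example robots
shipped with pybullet_data; the end-effector link index and target pose are
illustrative only.

import math

import pybullet
import pybullet_data

client = pybullet.connect(pybullet.DIRECT)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
robot_uid = pybullet.loadURDF('kuka_iiwa/model.urdf', physicsClientId=client)

# Solve IK for a downward-facing end effector at an illustrative position.
target_positions = pybullet.calculateInverseKinematics(
    bodyUniqueId=robot_uid,
    endEffectorLinkIndex=6,
    targetPosition=[0.5, 0.0, 0.5],
    targetOrientation=pybullet.getQuaternionFromEuler([math.pi, 0, 0]),
    physicsClientId=client)
print(target_positions)
pybullet.disconnect(client)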
Example #3
    def __init__(self,
                 simulator,
                 pose=[[0, 0, 0], [0, 0, 0]],
                 joint_positions=None,
                 config=None):
        """Initialize.

        Args:
            simulator: The simulator instance for the robot.
            pose: The initial pose of the robot base.
            joint_positions: The list of initial joint positions.
            config: The configuration as a dictionary.
        """
        super(SawyerSim, self).__init__(config=config)

        self._simulator = simulator

        self._arm_pose = Pose(pose)

        self._arm = None
        self._base = None
        self._head = None

        if joint_positions is None:
            self._initial_joint_positions = self.config.LIMB_NEUTRAL_POSITIONS
        else:
            self._initial_joint_positions = joint_positions

        self.reboot()
Example #4
    def __call__(self, event):
        """Call.

        Args:
            event: A clicking event.
        """
        pixel = [event.xdata, event.ydata]
        z = self.depth[int(pixel[1]), int(pixel[0])]
        position = self.cli.camera.deproject_pixel(pixel, z)
        # Move to the clicked (x, y) position at the fixed end-effector height.
        position.z = self.eef_z
        pose = Pose([position, [np.pi, 0, 0]])

        while not (self.cli.robot.is_limb_ready()
                   and self.cli.robot.is_gripper_ready()):
            if self.cli.mode == 'sim':
                self.cli.simulator.step()

        print('Clicked pixel: %r. Moving end effector to: %s' %
              (pixel + [z], position))
        self.cli.robot.move_to_gripper_pose(pose)
        while not (self.cli.robot.is_limb_ready()
                   and self.cli.robot.is_gripper_ready()):
            if self.cli.mode == 'sim':
                self.cli.simulator.step()

        self.show_image()
        plt.scatter(pixel[0], pixel[1], c='r')

        return pixel
Example #5
    def end_effector(self):
        """Return the pose of the end effector as an instance of Pose."""
        ros_pose = self._limb.endpoint_pose()
        ros_position = ros_pose['position']
        ros_quaternion = ros_pose['orientation']

        position = [ros_position.x, ros_position.y, ros_position.z]
        orientation = [
            ros_quaternion.x, ros_quaternion.y, ros_quaternion.z,
            ros_quaternion.w
        ]

        return Pose([position, orientation])
Example #6
    def get_constraint_pose(self, constraint_uid):
        """Get the constraint pose.

        Args:
            constraint_uid: The constraint unique ID.

        Returns:
            An instance of Pose.
        """
        _, _, _, _, _, _, _, position, _, quaternion, _, _, _, _, _ = (
            pybullet.getConstraintInfo(constraintUniqueId=constraint_uid,
                                       physicsClientId=self.uid))
        return Pose([position, quaternion])
Example #7
    def get_body_pose(self, body_uid):
        """Get the pose of the body.

        The pose of the body is defined as the pose of the base of the body.

        Args:
            body_uid: The body Unique ID.

        Returns:
            An instance of Pose.
        """
        position, quaternion = pybullet.getBasePositionAndOrientation(
            bodyUniqueId=body_uid, physicsClientId=self.uid)
        return Pose([position, quaternion])
Example #8
    def set_body_pose(self, body_uid, pose):
        """Set the pose of the body.

        Args:
            body_uid: The body Unique ID.
            pose: An instance of Pose.
        """
        pose = Pose(pose)
        position = list(pose.position)
        quaternion = list(pose.quaternion)
        pybullet.resetBasePositionAndOrientation(bodyUniqueId=body_uid,
                                                 posObj=position,
                                                 ornObj=quaternion,
                                                 physicsClientId=self.uid)
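
A hedged round-trip sketch of the two methods above using raw pybullet calls
and no Pose wrapper; the body file and pose values are illustrative.

import pybullet
import pybullet_data

client = pybullet.connect(pybullet.DIRECT)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
body_uid = pybullet.loadURDF('r2d2.urdf', physicsClientId=client)

# Set the base pose, then read it back.
pybullet.resetBasePositionAndOrientation(
    bodyUniqueId=body_uid,
    posObj=[0.2, 0.0, 0.5],
    ornObj=pybullet.getQuaternionFromEuler([0, 0, 1.57]),
    physicsClientId=client)
position, quaternion = pybullet.getBasePositionAndOrientation(
    bodyUniqueId=body_uid, physicsClientId=client)
print(position, quaternion)
pybullet.disconnect(client)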
Example #9
    def _sample_body_poses(self, num_samples, body_config, max_attemps=32):
        """Sample body poses.

        Args:
            num_samples: Number of samples.
            body_config: Configuration of the body.
            max_attemps: Maximum number of attempts to find a feasible
                placement.

        Returns:
            List of poses.
        """
        while True:
            movable_poses = []

            for i in range(num_samples):
                num_attemps = 0
                is_valid = False
                while not is_valid and num_attemps <= max_attemps:
                    pose = Pose.uniform(x=body_config.POSE.X,
                                        y=body_config.POSE.Y,
                                        z=body_config.POSE.Z,
                                        roll=body_config.POSE.ROLL,
                                        pitch=body_config.POSE.PITCH,
                                        yaw=body_config.POSE.YAW)

                    # Check if the new pose is distant from other bodies.
                    is_valid = True
                    for other_pose in movable_poses:
                        dist = np.linalg.norm(pose.position[:2] -
                                              other_pose.position[:2])

                        if dist < body_config.MARGIN:
                            is_valid = False
                            num_attemps += 1
                            break

                if not is_valid:
                    logger.info(
                        'Cannot find the placement of body %d. '
                        'Start re-sampling.', i)
                    break
                else:
                    movable_poses.append(pose)

            # Stop once all bodies have been placed successfully.
            if len(movable_poses) == num_samples:
                break

        return movable_poses
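
The margin test above can be isolated into a small NumPy sketch. The workspace
bounds and margin are hypothetical, and the re-sampling loop is replaced by an
explicit error once the attempt budget is exhausted.

import numpy as np


def sample_xy_positions(num_samples, low, high, margin, max_attempts=32):
    """Rejection-sample XY positions that keep a minimum pairwise distance."""
    positions = []
    for _ in range(num_samples):
        for _ in range(max_attempts):
            candidate = np.random.uniform(low, high, size=2)
            if all(np.linalg.norm(candidate - p) >= margin for p in positions):
                positions.append(candidate)
                break
        else:
            raise RuntimeError('Could not place all bodies; re-sample.')
    return positions


print(sample_xy_positions(3, low=[0.3, -0.3], high=[0.7, 0.3], margin=0.1))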
Example #10
    def get_link_center_of_mass(self, link_uid):
        """Get the center of mass of the link.

        Args:
            link_uid: A tuple of the body Unique ID and the link index.

        Returns:
            An instance of Pose.
        """
        body_uid, link_ind = link_uid
        position, quaternion, _, _, _, _ = pybullet.getLinkState(
            bodyUniqueId=body_uid,
            linkIndex=link_ind,
            physicsClientId=self.uid)
        return Pose([position, quaternion])
Example #11
    def as_4dof(self):
        """Computes the 4-DOF pose of the grasp in the world frame.

        Returns:
            The 4-DOF gripper pose in the world.
        """
        angle = (self.angle + np.pi / 2)
        grasp_pose_in_camera = Pose([self.pose.position, [0, 0, angle]])
        grasp_pose_in_world = get_transform(source=self.camera.pose).transform(
            grasp_pose_in_camera)

        x, y, z = grasp_pose_in_world.position
        angle = grasp_pose_in_world.euler[2]

        return [x, y, z, angle]
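
A pure-NumPy sketch of the same camera-to-world conversion, assuming the camera
pose is given as a rotation matrix and a translation vector instead of a Pose
object; the example values at the bottom are illustrative.

import numpy as np


def grasp_to_4dof_world(position_cam, yaw_cam, camera_rotation,
                        camera_translation):
    """Transform a grasp (position and yaw) from the camera to the world."""
    position_world = camera_rotation.dot(position_cam) + camera_translation
    # Compose the in-plane grasp rotation with the camera rotation.
    cos_a, sin_a = np.cos(yaw_cam), np.sin(yaw_cam)
    rotation_cam = np.array([[cos_a, -sin_a, 0],
                             [sin_a, cos_a, 0],
                             [0, 0, 1]])
    rotation_world = camera_rotation.dot(rotation_cam)
    yaw_world = np.arctan2(rotation_world[1, 0], rotation_world[0, 0])
    x, y, z = position_world
    return [x, y, z, yaw_world]


print(grasp_to_4dof_world(np.array([0.0, 0.1, 0.6]), 0.3,
                          np.eye(3), np.array([0.5, 0.0, 0.4])))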
Example #12
    def load_calibration(self, path, robot_pose=[[0, 0, 0], [0, 0, 0]]):
        """Set the camera by using the camera calibration results.

        Args:
            path: The data directory of the calibration results.
            robot_pose: The pose of the robot base in the world frame.

        Returns:
            The camera intrinsics, and the translation and rotation of the
            world origin in the camera frame.
        """
        intrinsics_path = os.path.join(path, 'IR_intrinsics.npy')
        intrinsics = np.load(intrinsics_path, encoding='latin1')
        translation_path = os.path.join(path, 'robot_IR_translation.npy')
        translation = np.load(translation_path, encoding='latin1')
        rotation_path = os.path.join(path, 'robot_IR_rotation.npy')
        rotation = np.load(rotation_path, encoding='latin1')

        # Convert the extrinsics from the robot frame to the world frame.
        from_robot_to_world = get_transform(source=robot_pose)
        robot_pose_in_camera = Pose([translation, rotation])
        camera_pose_in_robot = robot_pose_in_camera.inverse()
        camera_pose_in_world = from_robot_to_world.transform(
                camera_pose_in_robot)
        world_origin_in_camera = camera_pose_in_world.inverse()
        translation = world_origin_in_camera.position
        rotation = world_origin_in_camera.matrix3

        return intrinsics, translation, rotation
Example #13
    def _reset_scene(self):
        """Reset the scene in simulation or the real world.
        """
        super(Grasp4DofEnv, self)._reset_scene()

        # Reload graspable object.
        if self.config.SIM.GRASPABLE.RESAMPLE_N_EPISODES:
            if (self.num_episodes %
                    self.config.SIM.GRASPABLE.RESAMPLE_N_EPISODES == 0):
                self.graspable_path = None

        if self.graspable_path is None:
            if self.config.SIM.GRASPABLE.USE_RANDOM_SAMPLE:
                self.graspable_path = random.choice(self.all_graspable_paths)
            else:
                self.graspable_index = ((self.graspable_index + 1) %
                                        len(self.all_graspable_paths))
                self.graspable_path = (
                    self.all_graspable_paths[self.graspable_index])

        pose = Pose.uniform(x=self.config.SIM.GRASPABLE.POSE.X,
                            y=self.config.SIM.GRASPABLE.POSE.Y,
                            z=self.config.SIM.GRASPABLE.POSE.Z,
                            roll=self.config.SIM.GRASPABLE.POSE.ROLL,
                            pitch=self.config.SIM.GRASPABLE.POSE.PITCH,
                            yaw=self.config.SIM.GRASPABLE.POSE.YAW)
        pose = get_transform(source=self.table_pose).transform(pose)
        scale = np.random.uniform(*self.config.SIM.GRASPABLE.SCALE)
        logger.info('Loaded the graspable object from %s with scale %.2f...',
                    self.graspable_path, scale)
        self.graspable = self.simulator.add_body(self.graspable_path,
                                                 pose,
                                                 scale=scale,
                                                 name=GRASPABLE_NAME)
        logger.debug('Waiting for graspable objects to be stable...')
        self.simulator.wait_until_stable(self.graspable)

        # Reset camera.
        self._reset_camera(
            self.camera,
            intrinsics=self.config.KINECT2.DEPTH.INTRINSICS,
            translation=self.config.KINECT2.DEPTH.TRANSLATION,
            rotation=self.config.KINECT2.DEPTH.ROTATION,
            intrinsics_noise=self.config.KINECT2.DEPTH.INTRINSICS_NOISE,
            translation_noise=self.config.KINECT2.DEPTH.TRANSLATION_NOISE,
            rotation_noise=self.config.KINECT2.DEPTH.ROTATION_NOISE)
Example #14
def convert_pose_for_ros(pose):
    """Convert an instance from robovat.math.Pose into geometry_msgs.msg.Pose.

    Args:
        pose: An instance of robovat.math.Pose.

    Returns:
        An instance of geometry_msgs.msg.Pose.
    """
    # The orientation is defined as (x, y, z, w).
    pose = Pose(pose)
    position = pose.position.tolist()
    orientation = pose.quaternion.tolist()

    return geometry_msgs.msg.Pose(
            position=geometry_msgs.msg.Point(*position),
            orientation=geometry_msgs.msg.Quaternion(*orientation))
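
A hedged sketch of the inverse conversion; convert_pose_from_ros is a
hypothetical helper name, and only the standard geometry_msgs.msg.Pose fields
are assumed. The returned pair has the same [position, quaternion] layout that
the Pose constructor accepts in the snippets above.

def convert_pose_from_ros(ros_pose):
    """Convert a geometry_msgs.msg.Pose into a [position, quaternion] pair."""
    position = [ros_pose.position.x, ros_pose.position.y, ros_pose.position.z]
    quaternion = [ros_pose.orientation.x, ros_pose.orientation.y,
                  ros_pose.orientation.z, ros_pose.orientation.w]
    return [position, quaternion]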
Example #15
    def set_constraint_pose(self, constraint_uid, pose):
        """Set the constraint pose.

        Args:
            constraint_uid: The constraint unique ID.
            pose: An instance of Pose.
        """
        if not isinstance(pose, Pose):
            pose = Pose(pose)

        position = list(pose.position)
        quaternion = list(pose.quaternion)

        pybullet.changeConstraint(userConstraintUniqueId=constraint_uid,
                                  jointChildPivot=position,
                                  jointChildFrameOrientation=quaternion,
                                  physicsClientId=self.uid)
Example #16
    def add_body(self, filename, pose, scale=1.0, is_static=False):
        """Load a body into the simulation.

        Args:
            filename: The path to the body file. The currently supported file
                format is URDF.
            pose: The pose of the base, as an instance of robovat.math.Pose or
                a tuple of position and orientation.
            scale: The global scaling factor.
            is_static: If True, the base of the body is fixed.

        Returns:
            body_uid: The unique ID of the body.
        """
        filename = os.path.abspath(filename)
        assert os.path.exists(filename), 'File %s does not exist.' % filename
        _, ext = os.path.splitext(filename)

        pose = Pose(pose)
        position = list(pose.position)
        quaternion = list(pose.quaternion)

        if ext == '.urdf':
            # Do not use pybullet.URDF_USE_SELF_COLLISION since it will cause
            # problems for the motor control in Bullet.
            pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 0)
            body_uid = pybullet.loadURDF(
                fileName=filename,
                basePosition=position,
                baseOrientation=quaternion,
                globalScaling=scale,
                useFixedBase=is_static,
                physicsClientId=self.uid,
                flags=pybullet.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT,
            )
            pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 1)
        else:
            raise ValueError('Unrecognized extension %s.' % ext)

        return int(body_uid)
Example #17
    def pose(self):
        """Computes the 3D pose of the grasp relative to the camera.

        If an approach direction is not specified then the camera
        optical axis is used.

        Returns:
            The pose of the grasp in the camera frame.
        """
        if self.camera is None:
            raise ValueError('Must specify camera intrinsics to compute 3D '
                             'grasp pose.')

        # Compute 3D grasp center in camera basis.
        grasp_center_camera = self.camera.deproject_pixel(
                self.center, self.depth, is_world_frame=False)

        # Compute 3D grasp axis in camera basis.
        grasp_axis_image = self.axis
        grasp_axis_image = grasp_axis_image / np.linalg.norm(grasp_axis_image)
        grasp_axis_camera = np.array(
            [grasp_axis_image[0], grasp_axis_image[1], 0])
        grasp_axis_camera = (
            grasp_axis_camera / np.linalg.norm(grasp_axis_camera))

        # Aligned with camera Z axis.
        grasp_x_camera = np.array([0, 0, 1])
        grasp_y_camera = grasp_axis_camera
        grasp_z_camera = np.cross(grasp_x_camera, grasp_y_camera)
        grasp_x_camera = np.cross(grasp_z_camera, grasp_y_camera)
        grasp_rot_camera = np.array(
            [grasp_x_camera, grasp_y_camera, grasp_z_camera]).T

        if np.linalg.det(grasp_rot_camera) < 0:
            # Flip the first axis to keep the rotation matrix right-handed.
            grasp_rot_camera[:, 0] = -grasp_rot_camera[:, 0]

        pose = Pose([grasp_center_camera, grasp_rot_camera])

        return pose
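
The frame construction above can be checked in isolation; a NumPy sketch,
assuming the grasp axis is a 2D direction in the image plane.

import numpy as np


def grasp_rotation_from_axis(axis_2d):
    """Build a right-handed rotation whose Y axis is the image grasp axis."""
    grasp_y = np.append(axis_2d / np.linalg.norm(axis_2d), 0.0)
    grasp_x = np.array([0.0, 0.0, 1.0])  # Camera optical axis.
    grasp_z = np.cross(grasp_x, grasp_y)
    grasp_x = np.cross(grasp_z, grasp_y)
    rotation = np.array([grasp_x, grasp_y, grasp_z]).T
    if np.linalg.det(rotation) < 0:
        rotation[:, 0] = -rotation[:, 0]  # Enforce a right-handed frame.
    return rotation


rotation = grasp_rotation_from_axis(np.array([1.0, 1.0]))
print(np.linalg.det(rotation))  # Approximately 1.0.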
Example #18
    def _reset_scene(self):
        """Reset the scene in simulation or the real world."""
        self.table_pose = Pose(self.config.SIM.TABLE.POSE)
        self.table_pose.position.z += np.random.uniform(
            *self.config.SIM.TABLE.HEIGHT_RANGE)

        if self.is_simulation:
            self.ground = self.simulator.add_body(self.config.SIM.GROUND.PATH,
                                                  self.config.SIM.GROUND.POSE,
                                                  is_static=True,
                                                  name='ground')

            self.table = self.simulator.add_body(self.config.SIM.TABLE.PATH,
                                                 self.table_pose,
                                                 is_static=True,
                                                 name='table')

            if self.config.SIM.WALL.USE:
                self.wall = self.simulator.add_body(self.config.SIM.WALL.PATH,
                                                    self.config.SIM.WALL.POSE,
                                                    is_static=True,
                                                    name='wall')
Example #19
    def __init__(self, mode, config, debug, assets_dir):
        """Initialize.

        Args:
            mode: 'sim' or 'real'.
            config: The configuration file for the environment.
            debug: True if it is debugging mode, False otherwise.
            assets_dir: The assets directory.
        """

        self.mode = mode
        self.config = YamlConfig(config).as_easydict()
        self.debug = debug
        self.assets_dir = assets_dir

        # Command line client input history.
        readline.parse_and_bind('tab: complete')
        history_file = os.path.join('.python_history')

        try:
            readline.read_history_file(history_file)
        except IOError:
            pass

        atexit.register(readline.write_history_file, history_file)

        # Set up the scene.
        if self.mode == 'sim':
            print('Setting up the environment in simulation...')
            self.simulator = Simulator(use_visualizer=self.debug,
                                       assets_dir=self.assets_dir)
            self.simulator.reset()
            self.simulator.start()

            self.ground = self.simulator.add_body(self.config.SIM.GROUND.PATH,
                                                  self.config.SIM.GROUND.POSE,
                                                  is_static=True,
                                                  name='ground')

            self.table_pose = Pose(self.config.SIM.TABLE.POSE)
            self.table_pose.position.z += np.random.uniform(
                *self.config.SIM.TABLE.HEIGHT_RANGE)
            self.simulator.add_body(self.config.SIM.TABLE.PATH,
                                    self.table_pose,
                                    is_static=True,
                                    name='table')

            # Camera.
            self.camera = BulletCamera(simulator=self.simulator, distance=1.0)

        elif self.mode == 'real':
            print('Setting up the environment in the real world...')
            self.table_pose = Pose(self.config.SIM.TABLE.POSE)
            self.table_pose.position.z += np.random.uniform(
                *self.config.SIM.TABLE.HEIGHT_RANGE)
            self.camera = Kinect2(packet_pipeline_mode=0,
                                  device_num=0,
                                  skip_registration=False,
                                  use_inpaint=True)
            self.simulator = None

        else:
            raise ValueError('Unrecognized mode: %r' % mode)

        # Set up the robot.
        self.robot = franka_panda.factory(simulator=self.simulator,
                                          config=self.config.SIM.ARM.CONFIG)

        # Start the camera.
        self.camera.set_calibration(
            intrinsics=self.config.KINECT2.DEPTH.INTRINSICS,
            translation=self.config.KINECT2.DEPTH.TRANSLATION,
            rotation=self.config.KINECT2.DEPTH.ROTATION)
        self.camera.start()

        if self.simulator:
            camera_pose = [
                self.config.KINECT2.DEPTH.TRANSLATION,
                self.config.KINECT2.DEPTH.ROTATION
            ]
            camera_pose = Pose(camera_pose).inverse()
            self.simulator.plot_pose(camera_pose, axis_length=0.05)
Example #20
    def move_to_gripper_pose(self,
                             pose,
                             speed=None,
                             timeout=None,
                             threshold=None,
                             straight_line=False):
        """Move the arm to the specified joint positions.


        Please refer to:
        https://rethinkrobotics.github.io/intera_sdk_docs/5.0.4/intera_interface/html/intera_interface.limb.Limb-class.html

        See the parent class.
        """
        if speed is None:
            speed = self.config.LIMB_MAX_VELOCITY_RATIO

        if timeout is None:
            timeout = self.config.LIMB_TIMEOUT

        if threshold is None:
            threshold = self.config.LIMB_POSITION_THRESHOLD

        if straight_line:
            start_pose = self.end_effector
            end_pose = pose

            if self._motion_planning is None:
                # A hacky way of straight line motion without motion planning.
                rospy.logwarn('No motion planning is available. Use the hacky '
                              'way of straight line motion.')
                delta_position = end_pose.position - start_pose.position
                end_effector_step = 0.1
                num_waypoints = int(np.linalg.norm(delta_position)
                                    / end_effector_step)
                waypoints = []

                for i in range(num_waypoints):
                    scale = float(i) / float(num_waypoints)
                    position = start_pose.position + delta_position * scale
                    euler = end_pose.euler
                    waypoint = Pose([position, euler])
                    waypoints.append(waypoint)

                waypoints.append(end_pose)

            else:
                # With motion planning, the path is always a straight line.
                waypoints = [start_pose, end_pose]

            self.move_along_gripper_path(waypoints, speed=speed)

        else:
            positions = self._compute_inverse_kinematics(
                self.config.END_EFFCTOR_NAME, pose)

            if positions is None:
                rospy.logerr("IK response is not valid.")
            else:
                self.move_to_joint_positions(
                    positions=positions,
                    speed=speed,
                    timeout=timeout,
                    threshold=threshold)
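
The straight-line fallback above reduces to linear interpolation of positions;
a standalone sketch with a hypothetical step size of 0.1 m.

import numpy as np


def interpolate_positions(start, end, step=0.1):
    """Return intermediate positions roughly one step apart, plus the end."""
    start = np.asarray(start, dtype=float)
    end = np.asarray(end, dtype=float)
    delta = end - start
    num_waypoints = int(np.linalg.norm(delta) / step)
    waypoints = [start + delta * (float(i) / num_waypoints)
                 for i in range(num_waypoints)]
    waypoints.append(end)
    return waypoints


print(interpolate_positions([0.4, 0.0, 0.3], [0.4, 0.0, 0.05]))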
Example #21
    def _reset_scene(self):
        """Reset the scene in simulation or the real world.
        """

        # Configure debug visualizer
        if self._debug and self.is_simulation:
            visualizer_info = self.simulator.physics.get_debug_visualizer_info(
                ['yaw', 'pitch', 'dist', 'target'])

            # Adjust the camera view to your own need.
            visualizer_info['dist'] = 1.5
            visualizer_info['yaw'] = 50
            visualizer_info['pitch'] = -20

            self.simulator.physics.reset_debug_visualizer(
                camera_distance=visualizer_info['dist'],
                camera_yaw=visualizer_info['yaw'],
                camera_pitch=visualizer_info['pitch'],
                camera_target_position=visualizer_info['target'])

        super(FrankaPandaGraspEnv, self)._reset_scene()

        # Reload graspable object.
        if self.config.SIM.GRASPABLE.RESAMPLE_N_EPISODES:
            if (self.num_episodes %
                    self.config.SIM.GRASPABLE.RESAMPLE_N_EPISODES == 0):
                self.graspable_path = None

        if self.graspable_path is None:
            if self.config.SIM.GRASPABLE.USE_RANDOM_SAMPLE:
                self.graspable_path = random.choice(
                    self.all_graspable_paths)
            else:
                self.graspable_index = ((self.graspable_index + 1) %
                                        len(self.all_graspable_paths))
                self.graspable_path = (
                    self.all_graspable_paths[self.graspable_index])

        pose = Pose.uniform(x=self.config.SIM.GRASPABLE.POSE.X,
                            y=self.config.SIM.GRASPABLE.POSE.Y,
                            z=self.config.SIM.GRASPABLE.POSE.Z,
                            roll=self.config.SIM.GRASPABLE.POSE.ROLL,
                            pitch=self.config.SIM.GRASPABLE.POSE.PITCH,
                            yaw=self.config.SIM.GRASPABLE.POSE.YAW)
        pose = get_transform(source=self.table_pose).transform(pose)
        scale = np.random.uniform(*self.config.SIM.GRASPABLE.SCALE)
        logger.info('Loaded the graspable object from %s with scale %.2f...',
                    self.graspable_path, scale)
        self.graspable = self.simulator.add_body(self.graspable_path,
                                                 pose,
                                                 scale=scale,
                                                 name=GRASPABLE_NAME,
                                                 baseMass=0.1)
        
        logger.debug('Waiting for graspable objects to be stable...')
        # Change some properties of the object to compensate for
        # simulation artifacts.
        self.table.set_dynamics(contact_damping=100., contact_stiffness=100.)
        self.graspable.set_dynamics(
            lateral_friction=1.,
            rolling_friction=0.001,
            spinning_friction=0.001,
            contact_damping=100.,
            contact_stiffness=100.)
        self.simulator.wait_until_stable(self.graspable)

        # Reset camera.
        self._reset_camera(
            self.camera,
            intrinsics=self.config.KINECT2.DEPTH.INTRINSICS,
            translation=self.config.KINECT2.DEPTH.TRANSLATION,
            rotation=self.config.KINECT2.DEPTH.ROTATION,
            intrinsics_noise=self.config.KINECT2.DEPTH.INTRINSICS_NOISE,
            translation_noise=self.config.KINECT2.DEPTH.TRANSLATION_NOISE,
            rotation_noise=self.config.KINECT2.DEPTH.ROTATION_NOISE)
Example #22
    def move_to_gripper_pose(self,
                             pose,
                             speed=None,
                             timeout=None,
                             threshold=None,
                             straight_line=False):
        """Move the arm to the specified gripper pose.

        See the parent class.
        """
        if speed is None:
            speed = self.config.LIMB_MAX_VELOCITY_RATIO

        if timeout is None:
            timeout = self.config.LIMB_TIMEOUT

        if threshold is None:
            threshold = self.config.LIMB_POSITION_THRESHOLD

        self._arm.reset_targets()

        pose = Pose(pose)

        if straight_line:
            start_pose = self.end_effector.pose
            end_pose = pose
            delta_position = end_pose.position - start_pose.position
            num_waypoints = int(np.linalg.norm(delta_position)
                                / self.config.END_EFFECTOR_STEP)
            waypoints = []

            for i in range(num_waypoints):
                scale = float(i) / float(num_waypoints)
                position = start_pose.position + delta_position * scale
                euler = end_pose.euler
                waypoint = Pose([position, euler])
                waypoints.append(waypoint)

            waypoints.append(end_pose)

            self.move_along_gripper_path(waypoints, speed=speed)

        else:
            # Set the maximal joint velocities.
            joint_velocities = dict([
                (joint.name, speed * joint.max_velocity)
                for joint in self._limb_joints
                ])
            kwargs = {
                'joint_velocities': joint_velocities,
            }

            robot_command = RobotCommand(
                component=self._arm.name,
                command_type='set_max_joint_velocities',
                arguments=kwargs)

            self._send_robot_command(robot_command)

            # Command the IK control.
            kwargs = {
                'link_ind': self._end_effector.index,
                'link_pose': pose,
                'timeout': timeout,
                'threshold': threshold,
            }

            robot_command = RobotCommand(
                component=self._arm.name,
                command_type='set_target_link_pose',
                arguments=kwargs)

            self._send_robot_command(robot_command)
Example #23
    def add_constraint(self,
                       parent_uid,
                       child_uid,
                       joint_type='fixed',
                       joint_axis=[0, 0, 0],
                       parent_frame_pose=None,
                       child_frame_pose=None):
        """Add the constraint.

        A constraint limits the relative movements between two entities using a
        joint. Different types of joints have different degrees of freedom.

        Args:
            parent_uid: The unique ID of the parent entity.
            child_uid: The unique ID of the child entity.
            joint_type: The type of the joint.
            joint_axis: The axis of the joint.
            parent_frame_pose: The pose of the joint in the parent frame.
            child_frame_pose: The pose of the joint in the child frame.

        Returns:
            The constraint unique ID.
        """
        if isinstance(parent_uid, six.integer_types):
            # The parent entity is a body.
            parent_body_uid = parent_uid
            parent_link_ind = -1
        elif isinstance(parent_uid, (tuple, list)):
            # The parent entity is a link.
            parent_body_uid, parent_link_ind = parent_uid
        else:
            raise ValueError

        if isinstance(child_uid, six.integer_types):
            # The child entity is a body.
            child_body_uid = child_uid
            child_link_ind = -1
        elif isinstance(child_uid, (tuple, list)):
            # The child entity is a link.
            child_body_uid, child_link_ind = child_uid
        elif child_uid is None:
            # The child entity is ignored.
            child_body_uid = -1
            child_link_ind = -1
        else:
            raise ValueError

        if parent_frame_pose is None:
            parent_frame_position = [0, 0, 0]
            parent_frame_quaternion = None
        else:
            if not isinstance(parent_frame_pose, Pose):
                parent_frame_pose = Pose(parent_frame_pose)
            parent_frame_position = parent_frame_pose.position
            parent_frame_quaternion = parent_frame_pose.quaternion

        if child_frame_pose is None:
            child_frame_position = [0, 0, 0]
            child_frame_quaternion = None
        else:
            if not isinstance(child_frame_pose, Pose):
                child_frame_pose = Pose(child_frame_pose)
            child_frame_position = child_frame_pose.position
            child_frame_quaternion = child_frame_pose.quaternion

        joint_type = JOINT_TYPES_MAPPING[joint_type]
        joint_axis = list(joint_axis)

        kwargs = dict()
        kwargs['physicsClientId'] = self.uid
        kwargs['parentBodyUniqueId'] = parent_body_uid
        kwargs['parentLinkIndex'] = parent_link_ind
        kwargs['childBodyUniqueId'] = child_body_uid
        kwargs['childLinkIndex'] = child_link_ind
        kwargs['jointType'] = joint_type
        kwargs['jointAxis'] = joint_axis
        kwargs['parentFramePosition'] = parent_frame_position
        kwargs['childFramePosition'] = child_frame_position
        if parent_frame_quaternion is not None:
            kwargs['parentFrameOrientation'] = parent_frame_quaternion
        if child_frame_quaternion is not None:
            kwargs['childFrameOrientation'] = child_frame_quaternion

        constraint_uid = pybullet.createConstraint(**kwargs)

        return constraint_uid
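
A hedged sketch of the underlying pybullet.createConstraint call, pinning a
bundled example body to the world with a fixed joint; the body file and pivot
values are illustrative.

import pybullet
import pybullet_data

client = pybullet.connect(pybullet.DIRECT)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
cube_uid = pybullet.loadURDF('cube_small.urdf', basePosition=[0, 0, 0.5],
                             physicsClientId=client)

# A fixed constraint between the cube base and the world (child body -1).
constraint_uid = pybullet.createConstraint(
    parentBodyUniqueId=cube_uid,
    parentLinkIndex=-1,
    childBodyUniqueId=-1,
    childLinkIndex=-1,
    jointType=pybullet.JOINT_FIXED,
    jointAxis=[0, 0, 0],
    parentFramePosition=[0, 0, 0],
    childFramePosition=[0, 0, 0.5],
    physicsClientId=client)
print(constraint_uid)
pybullet.disconnect(client)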
Example #24
    def _sample_body_poses_on_tiles(self,
                                    num_samples,
                                    body_config,
                                    layout,
                                    safe_drop_height=0.2,
                                    max_attemps=32):
        """Sample tile poses on the tiles.

        Args:
            num_samples: Number of samples.
            body_config: Configuration of the body.
            layout: Configuration of the layout.
            safe_drop_height: Dropping height of the body.
            max_attemps: Maximum number of attempts to find a feasible
                placement.

        Returns:
            List of poses.
        """
        tile_size = layout.size
        tile_offset = layout.offset

        while True:
            movable_poses = []

            for i in range(num_samples):
                num_attemps = 0
                is_valid = False

                if i == 0 and layout.target is not None:
                    tile_config = layout.target
                else:
                    tile_config = layout.obstacle

                while not is_valid and num_attemps <= max_attemps:
                    num_tiles = len(tile_config)
                    tile_id = np.random.choice(num_tiles)
                    tile_center = tile_config[tile_id]
                    x_range = [
                        tile_offset[0] + (tile_center[0] - 0.5) * tile_size,
                        tile_offset[0] + (tile_center[0] + 0.5) * tile_size
                    ]
                    y_range = [
                        tile_offset[1] + (tile_center[1] - 0.5) * tile_size,
                        tile_offset[1] + (tile_center[1] + 0.5) * tile_size
                    ]
                    z = self.table_pose.position.z + safe_drop_height
                    pose = Pose.uniform(x=x_range,
                                        y=y_range,
                                        z=z,
                                        roll=[-np.pi, np.pi],
                                        pitch=[-np.pi / 2, np.pi / 2],
                                        yaw=[-np.pi, np.pi])

                    is_valid = True
                    for other_pose in movable_poses:
                        dist = np.linalg.norm(pose.position[:2] -
                                              other_pose.position[:2])

                        if dist < body_config.MARGIN:
                            is_valid = False
                            num_attemps += 1
                            break

                if not is_valid:
                    logger.info(
                        'Cannot find the placement of body %d. '
                        'Start re-sampling.', i)
                    break
                else:
                    movable_poses.append(pose)

            # Stop once all bodies have been placed successfully.
            if len(movable_poses) == num_samples:
                break

        return movable_poses
Example #25
    def _execute_action(self, action):
        """Execute the grasp action.

        Args:
            action: A 4-DoF grasp defined in the image space or the 3D space.
        """
        if self.config.ACTION.TYPE == 'CUBOID':
            x, y, z, angle = action
        elif self.config.ACTION.TYPE == 'IMAGE':
            grasp = Grasp2D.from_vector(action, camera=self.camera)
            x, y, z, angle = grasp.as_4dof()
        else:
            raise ValueError('Unrecognized action type: %r' %
                             (self.config.ACTION.TYPE))

        start = Pose([[x, y, z + self.config.ARM.FINGER_TIP_OFFSET],
                      [0, np.pi, angle]])

        phase = 'initial'

        # Count the simulation steps of the 'start' phase to handle the
        # simulation robustness.
        num_action_steps = 0

        while phase != 'done':

            if self.is_simulation:
                self.simulator.step()
                if phase == 'start':
                    num_action_steps += 1

            if self._is_phase_ready(phase, num_action_steps):
                phase = self._get_next_phase(phase)
                logger.debug('phase: %s', phase)

                if phase == 'overhead':
                    self.robot.move_to_joint_positions(
                        self.config.ARM.OVERHEAD_POSITIONS)
                    # self.robot.grip(0)

                elif phase == 'prestart':
                    prestart = start.copy()
                    prestart.z = self.config.ARM.GRIPPER_SAFE_HEIGHT
                    self.robot.move_to_gripper_pose(prestart)

                elif phase == 'start':
                    self.robot.move_to_gripper_pose(start, straight_line=True)

                    # Prevent problems caused by unrealistic frictions.
                    if self.is_simulation:
                        self.robot.l_finger_tip.set_dynamics(
                            lateral_friction=0.001, spinning_friction=0.001)
                        self.robot.r_finger_tip.set_dynamics(
                            lateral_friction=0.001, spinning_friction=0.001)
                        self.table.set_dynamics(lateral_friction=100)

                elif phase == 'end':
                    self.robot.grip(1)

                elif phase == 'postend':
                    postend = self.robot.end_effector.pose
                    postend.z = self.config.ARM.GRIPPER_SAFE_HEIGHT
                    self.robot.move_to_gripper_pose(postend,
                                                    straight_line=True)

                    # Prevent problems caused by unrealistic frictions.
                    if self.is_simulation:
                        self.robot.l_finger_tip.set_dynamics(
                            lateral_friction=100,
                            rolling_friction=10,
                            spinning_friction=10)
                        self.robot.r_finger_tip.set_dynamics(
                            lateral_friction=100,
                            rolling_friction=10,
                            spinning_friction=10)
                        self.table.set_dynamics(lateral_friction=1)
Example #26
    def pose(self):
        """Return the camera pose in the world frame as an instance of Pose."""
        world_origin_in_camera = Pose([self._translation, self._rotation])
        return world_origin_in_camera.inverse()
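
The same inversion written with plain NumPy, assuming the stored extrinsics are
a rotation matrix and a translation that map world points into the camera
frame.

import numpy as np


def camera_pose_from_extrinsics(rotation, translation):
    """Invert world-to-camera extrinsics to get the camera pose in the world."""
    rotation_world = rotation.T
    position_world = -rotation.T.dot(translation)
    return position_world, rotation_world


# Identity rotation: the camera sits at (0.8, 0, 0.6), aligned with the world.
print(camera_pose_from_extrinsics(np.eye(3), np.array([-0.8, 0.0, -0.6])))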
Example #27
    def run_command(self, command):  # NOQA
        """Run the input command.

        Args:
            command: An input string command.
        """
        command = command.replace(',', '').replace('[', '').replace(']', '')
        words = command.split(' ')
        command_type = words[0]

        # Print the help information.
        if command_type == 'help' or command_type == 'h':
            print(HELP)

        # Reset the robot joint positions.
        elif command_type == 'reset' or command_type == 'r':
            self.robot.reset(self.config.ARM.OFFSTAGE_POSITIONS)

        # Visualize the camera image.
        elif command_type == 'visualize' or command_type == 'v':
            results = self.camera.frames()
            image = results['rgb']
            depth = results['depth']

            plt.figure(figsize=(20, 10))
            plt.subplot(121)
            plt.imshow(image)
            plt.title('RGB Image')
            plt.subplot(122)
            plt.imshow(depth)
            plt.title('Depth Image')

            plt.show()

        # Visualize the table.
        elif command_type == 'table' or command_type == 't':
            results = self.camera.frames()
            image = results['rgb']
            depth = results['depth']

            table_points = [
                [0, 0, 0],
                [0, -0.61, 0],
                [0, 0.61, 0],
                [-0.38, 0, 0],
                [0.38, 0, 0],
                [-0.38, -0.61, 0],
                [-0.38, 0.61, 0],
                [0.38, -0.61, 0],
                [0.38, 0.61, 0],
            ]
            table_offset = self.table_pose.position
            table_points = np.array(table_points) + table_offset
            table_pixels = self.camera.project_point(table_points)

            plt.figure(figsize=(20, 10))
            plt.subplot(121)
            plt.imshow(image)
            plt.scatter(table_pixels[:, 0], table_pixels[:, 1], c='r')
            plt.title('RGB Image')
            plt.subplot(122)
            plt.imshow(depth)
            plt.scatter(table_pixels[:, 0], table_pixels[:, 1], c='r')
            plt.title('Depth Image')

            plt.show()

        # Visualize the layout.
        elif command_type == 'layout' or command_type == 'l':
            results = self.camera.frames()
            image = results['rgb']

            layout_name = words[1]
            layout_config = self.config.LAYOUT[layout_name]

            tile_config = layout_config.REGION
            size = layout_config.SIZE
            offset = layout_config.OFFSET

            plt.figure(figsize=(10, 10))
            plt.subplot(111)
            plt.imshow(image)

            for i, center in enumerate(tile_config.CENTERS):
                position = np.array(offset) + np.array(center) * size
                x = position[0]
                y = position[1]
                z = self.table_pose.position.z
                corners = [[x - 0.5 * size, y - 0.5 * size, z],
                           [x + 0.5 * size, y - 0.5 * size, z],
                           [x - 0.5 * size, y + 0.5 * size, z],
                           [x + 0.5 * size, y + 0.5 * size, z]]

                pixels = self.camera.project_point(corners)

                color = 'green'
                plt.plot([pixels[0, 0], pixels[1, 0]],
                         [pixels[0, 1], pixels[1, 1]],
                         color=color,
                         linewidth=2)
                plt.plot([pixels[0, 0], pixels[2, 0]],
                         [pixels[0, 1], pixels[2, 1]],
                         color=color,
                         linewidth=2)
                plt.plot([pixels[1, 0], pixels[3, 0]],
                         [pixels[1, 1], pixels[3, 1]],
                         color=color,
                         linewidth=2)
                plt.plot([pixels[2, 0], pixels[3, 0]],
                         [pixels[2, 1], pixels[3, 1]],
                         color=color,
                         linewidth=2)

            plt.show()

        # Visualize the point cloud.
        elif command_type == 'pointcloud' or command_type == 'pc':
            if len(words) == 1:
                num_clusters = 0
            else:
                num_clusters = int(words[1])

            images = self.camera.frames()
            image = images['rgb']
            depth = images['depth']
            point_cloud = self.camera.deproject_depth_image(depth)

            fig = plt.figure(figsize=(20, 5))

            ax1 = fig.add_subplot(141)
            ax1.imshow(depth)

            if (self.config.OBS.CROP_MIN is not None
                    and self.config.OBS.CROP_MAX is not None):
                crop_max = np.array(self.config.OBS.CROP_MAX)[np.newaxis, :]
                crop_min = np.array(self.config.OBS.CROP_MIN)[np.newaxis, :]
                crop_mask = np.logical_and(
                    np.all(point_cloud >= crop_min, axis=-1),
                    np.all(point_cloud <= crop_max, axis=-1))
                point_cloud = point_cloud[crop_mask]

            ax2 = fig.add_subplot(142, projection='3d')
            downsampled_point_cloud = pc_utils.downsample(point_cloud,
                                                          num_samples=4096)
            pc_utils.show(downsampled_point_cloud, ax2, axis_range=1.0)

            if num_clusters > 0:
                point_cloud = pc_utils.remove_table(point_cloud)

                segmask = pc_utils.cluster(point_cloud,
                                           num_clusters=num_clusters,
                                           method='dbscan')
                point_cloud = point_cloud[segmask != -1]

                segmask = pc_utils.cluster(point_cloud,
                                           num_clusters=num_clusters)
                point_cloud = pc_utils.group_by_labels(point_cloud, segmask,
                                                       num_clusters, 256)

                ax3 = fig.add_subplot(143, projection='3d')
                pc_utils.show(point_cloud, ax3, axis_range=1.0)

                ax4 = fig.add_subplot(144)
                pc_utils.show2d(point_cloud, self.camera, ax4, image=image)

            plt.show()

        # Visualize the RGB image masked by the point cloud.
        elif command_type == 'rgbd':
            images = self.camera.frames()
            image = images['rgb']
            depth = images['depth']
            point_cloud = self.camera.deproject_depth_image(depth)

            fig = plt.figure(figsize=(5, 5))
            ax = fig.add_subplot(111)

            if (self.config.OBS.CROP_MIN is not None
                    and self.config.OBS.CROP_MAX is not None):
                crop_max = np.array(self.config.OBS.CROP_MAX)[np.newaxis, :]
                crop_min = np.array(self.config.OBS.CROP_MIN)[np.newaxis, :]
                crop_mask = np.logical_and(
                    np.all(point_cloud >= crop_min, axis=-1),
                    np.all(point_cloud <= crop_max, axis=-1))
                point_cloud = point_cloud[crop_mask]

            point_cloud = pc_utils.remove_table(point_cloud)
            point_cloud = pc_utils.downsample(point_cloud, num_samples=4096)
            pixels = self.camera.project_point(point_cloud)
            pixels = np.array(pixels, dtype=np.int32)

            background = np.zeros_like(image)
            background[pixels[:, 1], pixels[:, 0]] = (image[pixels[:, 1],
                                                            pixels[:, 0]])

            plt.imshow(background)

            plt.show()

        # Move the gripper to the clicked pixel position.
        elif command_type == 'click' or command_type == 'c':

            fig, ax = plt.subplots(figsize=(20, 10))
            onclick = EndEffectorClickController(self, ax)

            results = self.camera.frames()
            plt.imshow(results['rgb'])
            plt.title('Image')
            fig.canvas.mpl_connect('button_press_event', onclick)
            plt.show()

        # Move joints to the target positions.
        elif command_type == 'joints' or command_type == 'j':
            joint_positions = [float(ch) for ch in words[1:]]
            print('Moving to joint positions: %s ...' % joint_positions)
            self.robot.move_to_joint_positions(joint_positions)

        # Move the end effector to the target pose.
        elif command_type == 'end_effector' or command_type == 'e':
            pose = [float(ch) for ch in words[1:]]
            if len(pose) == 6 or len(pose) == 7:
                pose = Pose([pose[:3], pose[3:]])
            elif len(pose) == 3:
                end_effector_pose = self.robot.end_effector
                pose = Pose([pose, end_effector_pose.orientation])
            else:
                print('The format of the input pose is wrong.')
                return

            print('Moving to end effector pose: %s ...' % pose)
            self.robot.move_to_gripper_pose(pose)

        # Move the end effector to the target pose.
        elif command_type == 'end_effector_line' or command_type == 'el':
            pose = [float(ch) for ch in words[1:]]
            if len(pose) == 6 or len(pose) == 7:
                pose = Pose([pose[:3], pose[3:]])
            elif len(pose) == 3:
                end_effector_pose = self.robot.end_effector
                pose = Pose([pose, end_effector_pose.orientation])
            else:
                print('The format of the input pose is wrong.')
                return

            print('Moving to end effector pose: %s ...' % pose)
            self.robot.move_to_gripper_pose(pose, straight_line=True)

        # Open the gripper.
        elif command_type == 'open' or command_type == 'o':
            joint_positions = self.robot.grip(0)

        # Close the gripper.
        elif command_type == 'grasp' or command_type == 'g':
            joint_positions = self.robot.grip(1)

        # Print the current robot status.
        elif command_type == 'print' or command_type == 'p':
            joint_positions = self.robot.joint_positions
            joint_positions = [
                joint_positions['right_j0'],
                joint_positions['right_j1'],
                joint_positions['right_j2'],
                joint_positions['right_j3'],
                joint_positions['right_j4'],
                joint_positions['right_j5'],
                joint_positions['right_j6'],
            ]
            print('Joint positions: %s' % (joint_positions))

            end_effector_pose = self.robot.end_effector
            print('End Effector position: %s, %s' %
                  (end_effector_pose.position, end_effector_pose.euler))

        else:
            print('Unrecognized command: %s' % command)