Example #1
 def get_fiducial_origin(self):
     """
     Get an SE3Pose proto defining the origin of the fiducial in the world frame.
     The world frame starts at the bottom left of the image, with positive y up, positive x
     to the right, and positive z out of the page.
     :return: the SE3Pose proto defining the fiducial origin in the world frame.
     """
     theta = np.deg2rad(self.fiducial_rotation)
     # Fiducial frame:
     # The columns of rot_matrix are the fiducial's axes expressed in the
     # world frame: the fiducial x axis points directly out of the page
     # (world z), while its z and y axes lie in the image plane.
     # Note that the image origin has z pointing out of the page,
     # y up and x to the right.
     # Therefore the fiducial z axis is (cos(t), sin(t), 0) and the
     # y axis is (sin(t), -cos(t), 0).
     rot_matrix = np.array([[0, np.sin(theta), np.cos(theta)],
                            [0, -np.cos(theta), np.sin(theta)],
                            [1, 0, 0]])
     world_tform_fiducial = SE3Pose(
         rot=Quat.from_matrix(rot_matrix),
         x=self.fiducial_position[0] / self.pixels_per_meter,
         y=self.fiducial_position[1] / self.pixels_per_meter,
         z=0)
     return world_tform_fiducial.to_proto()
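
As a quick sanity check of the rotation construction, here is a minimal numpy-only sketch (the matrix layout mirrors the method above; the 90-degree angle is arbitrary):

import numpy as np

theta = np.deg2rad(90.0)  # fiducial rotated 90 degrees in the image plane
rot_matrix = np.array([[0, np.sin(theta), np.cos(theta)],
                       [0, -np.cos(theta), np.sin(theta)],
                       [1, 0, 0]])

# Columns are the fiducial axes expressed in the world frame.
assert np.allclose(rot_matrix @ rot_matrix.T, np.eye(3))  # orthonormal
print(rot_matrix[:, 0])  # fiducial x axis -> (0, 0, 1), out of the page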
Example #2
    def _run(self, robot, options):
        cameras = robot.ensure_client(
            MediaLogClient.default_service_name).list_cameras()
        cameras = [c for c in cameras if c.name not in ('pano', 'ptz')]

        # Format is dictated by Spot CAM
        with open(options.calibration_file, 'w') as out_file:
            for ring_index in range(5):
                out_file.write('cam{}\n'.format(ring_index))
                camera = cameras[ring_index]

                out_file.write('fx= {:.8f}\n'.format(
                    camera.pinhole.focal_length.x))
                out_file.write('fy= {:.8f}\n'.format(
                    camera.pinhole.focal_length.y))
                out_file.write('cx= {:.8f}\n'.format(
                    camera.pinhole.center_point.x))
                out_file.write('cy= {:.8f}\n'.format(
                    camera.pinhole.center_point.y))
                out_file.write('k1= {:.8f}\n'.format(camera.pinhole.k1))
                out_file.write('k2= {:.8f}\n'.format(camera.pinhole.k2))
                out_file.write('k3= {:.8f}\n'.format(camera.pinhole.k3))
                out_file.write('k4= {:.8f}\n'.format(camera.pinhole.k4))
                q = Quat(camera.base_tfrom_sensor.rotation.w,
                         camera.base_tfrom_sensor.rotation.x,
                         camera.base_tfrom_sensor.rotation.y,
                         camera.base_tfrom_sensor.rotation.z)
                R = q.to_matrix()
                T = [
                    camera.base_tfrom_sensor.position.x,
                    camera.base_tfrom_sensor.position.y,
                    camera.base_tfrom_sensor.position.z
                ]

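                # Write the 3x4 extrinsics [R | T] row-major as t00..t23.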
                for i in range(3):
                    for j in range(3):
                        out_file.write('t{}{}= {:.8f}\n'.format(i, j, R[i][j]))
                    out_file.write('t{}{}= {:.8f}\n'.format(i, 3, T[i]))

                out_file.write('\n')

        return cameras
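
The file this produces is a flat list of 'key= value' lines grouped under 'cam{i}' headers. A minimal sketch of reading it back (this parser and its dict layout are my own, not part of the SDK):

def read_calibration(path):
    """Parse the cam{i} blocks written above into a list of dicts."""
    cameras, current = [], None
    with open(path) as in_file:
        for line in in_file:
            line = line.strip()
            if not line:
                continue
            if line.startswith('cam'):
                current = {'name': line}
                cameras.append(current)
            else:
                key, value = line.split('=')
                current[key.strip()] = float(value)
    return cameras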
Example #3
    def _get_transform(self, from_wp, to_wp):
        '''Get transform from from-waypoint to to-waypoint.'''

        from_se3 = from_wp.waypoint_tform_ko
        from_tf = SE3Pose(
            from_se3.position.x, from_se3.position.y, from_se3.position.z,
            Quat(w=from_se3.rotation.w,
                 x=from_se3.rotation.x,
                 y=from_se3.rotation.y,
                 z=from_se3.rotation.z))

        to_se3 = to_wp.waypoint_tform_ko
        to_tf = SE3Pose(
            to_se3.position.x, to_se3.position.y, to_se3.position.z,
            Quat(w=to_se3.rotation.w,
                 x=to_se3.rotation.x,
                 y=to_se3.rotation.y,
                 z=to_se3.rotation.z))

        from_T_to = from_tf.mult(to_tf.inverse())
        return from_T_to.to_proto()
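
Since math_helpers.SE3Pose can be built straight from a proto-like object (as Example #4 does with SE3Pose.from_obj), the same transform can be written more compactly; a minimal sketch assuming the same waypoint fields:

from bosdyn.client.math_helpers import SE3Pose

def get_transform(from_wp, to_wp):
    # from_T_to = from_tform_ko * (to_tform_ko)^-1
    from_tf = SE3Pose.from_obj(from_wp.waypoint_tform_ko)
    to_tf = SE3Pose.from_obj(to_wp.waypoint_tform_ko)
    return from_tf.mult(to_tf.inverse()).to_proto()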
Example #4
def main(argv):
    # The last argument should be the hostname or IP address of the robot. The app uses
    # the robot's service directory to find the Velodyne point-cloud service and starts
    # streaming data from it.
    parser = argparse.ArgumentParser()
    bosdyn.client.util.add_common_arguments(parser)
    options = parser.parse_args(argv)

    sdk = bosdyn.client.create_standard_sdk('VelodyneClient')
    robot = sdk.create_robot(options.hostname)
    robot.authenticate(options.username, options.password)
    robot.sync_with_directory()

    _point_cloud_client = robot.ensure_client('velodyne-point-cloud')
    _robot_state_client = robot.ensure_client(
        RobotStateClient.default_service_name)
    _point_cloud_task = AsyncPointCloud(_point_cloud_client)
    _robot_state_task = AsyncRobotState(_robot_state_client)
    _task_list = [_point_cloud_task, _robot_state_task]
    _async_tasks = AsyncTasks(_task_list)
    print('Connected.')

    update_thread = threading.Thread(target=_update_thread,
                                     args=[_async_tasks])
    update_thread.daemon = True
    update_thread.start()

    # Wait for the first responses.
    while any(task.proto is None for task in _task_list):
        time.sleep(0.1)
    fig = plt.figure()

    body_tform_butt = SE3Pose(-0.5, 0, 0, Quat())
    body_tform_head = SE3Pose(0.5, 0, 0, Quat())

    # Plot the point cloud as an animation.
    ax = fig.add_subplot(111, projection='3d')
    aggregate_data = collections.deque(maxlen=5)
    while True:
        if _point_cloud_task.proto[0].point_cloud:
            data = np.frombuffer(_point_cloud_task.proto[0].point_cloud.data,
                                 dtype=np.float32)
            aggregate_data.append(data)
            plot_data = np.concatenate(aggregate_data)
            ax.clear()
            ax.set_xlabel('X (m)')
            ax.set_ylabel('Y (m)')
            ax.set_zlabel('Z (m)')

            # Draw robot position and orientation on plot
            if _robot_state_task.proto:
                odom_tform_body = get_odom_tform_body(
                    _robot_state_task.proto.kinematic_state.transforms_snapshot
                ).to_proto()
                helper_se3 = SE3Pose.from_obj(odom_tform_body)
                odom_tform_butt = helper_se3.mult(body_tform_butt)
                odom_tform_head = helper_se3.mult(body_tform_head)
                ax.plot([odom_tform_butt.x], [odom_tform_butt.y],
                        [odom_tform_butt.z],
                        'o',
                        color=SPOT_YELLOW,
                        markersize=7,
                        label='robot_butt')
                ax.plot([odom_tform_head.x], [odom_tform_head.y],
                        [odom_tform_head.z],
                        'o',
                        color=SPOT_YELLOW,
                        markersize=7,
                        label='robot_head')
                ax.text(odom_tform_butt.x,
                        odom_tform_butt.y,
                        odom_tform_butt.z,
                        'Spot Butt',
                        size=TEXT_SIZE,
                        zorder=1,
                        color='k')
                ax.text(odom_tform_head.x,
                        odom_tform_head.y,
                        odom_tform_head.z,
                        'Spot Head',
                        size=TEXT_SIZE,
                        zorder=1,
                        color='k')
                ax.plot([odom_tform_butt.x, odom_tform_head.x],
                        [odom_tform_butt.y, odom_tform_head.y],
                        zs=[odom_tform_butt.z, odom_tform_head.z],
                        linewidth=6,
                        color=SPOT_YELLOW)

            # Plot point cloud data
            ax.plot(plot_data[0::3], plot_data[1::3], plot_data[2::3], '.')
            set_axes_equal(ax)
            plt.draw()
            plt.pause(0.016)
            if window_closed(ax):
                sys.exit(0)
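
_update_thread is referenced above but not defined in this snippet; a minimal sketch of what it needs to do, assuming only the AsyncTasks.update() call from bosdyn.client.async_tasks (the 0.01 s poll period is an assumption):

import time

def _update_thread(async_task):
    # Poll the task group so each task's .proto field stays fresh.
    while True:
        async_task.update()
        time.sleep(0.01)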
Example #5
 def get_desired_angle(self, xhat):
     """Compute heading based on the vector from robot to object."""
     zhat = [0.0, 0.0, 1.0]
     yhat = np.cross(zhat, xhat)
     mat = np.array([xhat, yhat, zhat]).transpose()
     return Quat.from_matrix(mat).to_yaw()
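
A minimal usage sketch, pointing the heading at a target position (the helper name and coordinates are hypothetical; xhat must be a unit vector in the world XY plane for the matrix to be a valid rotation):

import numpy as np
from bosdyn.client.math_helpers import Quat

def heading_toward(robot_xy, target_xy):
    # Unit vector from the robot to the target in the world XY plane.
    vec = np.array([target_xy[0] - robot_xy[0], target_xy[1] - robot_xy[1], 0.0])
    xhat = vec / np.linalg.norm(vec)
    zhat = [0.0, 0.0, 1.0]
    yhat = np.cross(zhat, xhat)
    mat = np.array([xhat, yhat, zhat]).transpose()
    return Quat.from_matrix(mat).to_yaw()

print(heading_toward((0.0, 0.0), (1.0, 1.0)))  # ~0.785 rad (45 degrees)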
Example #6
def get_object_position(world_tform_cam, visual_image, depth_image,
                        bounding_box, rotation_angle):
    """
    Extract the bounding box, then find the mode in that region.

    Args:
        world_tform_cam (SE3Pose): SE3 transform from world to camera frame
        visual_image (ImageResponse): From a visual camera
        depth_image (ImageResponse): From a depth camera corresponding to the visual_image
        bounding_box (list): Bounding box from tensorflow
        rotation_angle (float): Angle (in degrees) to rotate the depth image to match the visual image rotation

    Returns:
        SE3Pose: world_tform_object, or None/False if no valid depth data was found in the box.
    """

    # Make sure there are two images.
    if visual_image is None or depth_image is None:
        # Fail.
        return

    # Rotate bounding box back to original frame
    points = [(bounding_box[1], bounding_box[0]),
              (bounding_box[3], bounding_box[0]),
              (bounding_box[3], bounding_box[2]),
              (bounding_box[1], bounding_box[2])]

    origin = (visual_image.shot.image.cols / 2,
              visual_image.shot.image.rows / 2)

    points_rot = [
        rotate_about_origin_degrees(origin, point, rotation_angle)
        for point in points
    ]

    # Get the bounding box corners.
    y_min = max(0, min([point[1] for point in points_rot]))
    x_min = max(0, min([point[0] for point in points_rot]))
    y_max = min(visual_image.shot.image.rows,
                max([point[1] for point in points_rot]))
    x_max = min(visual_image.shot.image.cols,
                max([point[0] for point in points_rot]))

    # Check that the bounding box is valid.
    if (x_min < 0 or y_min < 0 or x_max > visual_image.shot.image.cols
            or y_max > visual_image.shot.image.rows):
        print(
            f'Bounding box is invalid: ({x_min}, {y_min}) | ({x_max}, {y_max})'
        )
        print(
            f'Bounds: ({visual_image.shot.image.cols}, {visual_image.shot.image.rows})'
        )
        return

    # Unpack the images.
    try:
        if depth_image.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_DEPTH_U16:
            dtype = np.uint16
        else:
            dtype = np.uint8
        img = np.frombuffer(depth_image.shot.image.data, dtype=dtype)
        if depth_image.shot.image.format == image_pb2.Image.FORMAT_RAW:
            img = img.reshape(depth_image.shot.image.rows,
                              depth_image.shot.image.cols)
        else:
            img = cv2.imdecode(img, -1)
        depth_image_pixels = img

        # Get the depth data from the region in the bounding box.
        depth = get_distance_to_closest_object_depth(
            x_min, x_max, y_min, y_max, depth_image.source.depth_scale,
            depth_image_pixels)

        if depth >= 4.0:
            # Not enough depth data.
            print('Not enough depth data.')
            return False

        # Calculate the transform to the center point of the object using camera intrinsics
        # and depth calculated earlier in the function
        focal_x = depth_image.source.pinhole.intrinsics.focal_length.x
        principal_x = depth_image.source.pinhole.intrinsics.principal_point.x

        focal_y = depth_image.source.pinhole.intrinsics.focal_length.y
        principal_y = depth_image.source.pinhole.intrinsics.principal_point.y

        center_x = round((x_max - x_min) / 2.0 + x_min)
        center_y = round((y_max - y_min) / 2.0 + y_min)

        tform_x = depth * (center_x - principal_x) / focal_x
        tform_y = depth * (center_y - principal_y) / focal_y
        tform_z = depth
        obj_tform_camera = SE3Pose(tform_x, tform_y, tform_z, Quat())

        return world_tform_cam * obj_tform_camera
    except Exception as exc:  # pylint: disable=broad-except
        print(f'Error getting object position: {exc}')
        return
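
The back-projection at the end is plain pinhole geometry; isolated below as a minimal sketch (all numbers are made up; depth is in meters, focal lengths and principal point in pixels):

depth = 2.0                              # from the depth image
focal_x, focal_y = 552.0, 552.0          # hypothetical focal lengths
principal_x, principal_y = 320.0, 240.0  # hypothetical principal point
center_x, center_y = 400.0, 200.0        # bounding-box center

# Ray through the pixel, scaled by depth, in the camera frame.
tform_x = depth * (center_x - principal_x) / focal_x
tform_y = depth * (center_y - principal_y) / focal_y
tform_z = depth
print(tform_x, tform_y, tform_z)  # ~(0.29, -0.145, 2.0)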
Example #7
def _get_heading(xhat):
    """Return the yaw of a heading frame whose x axis is the horizontal unit vector xhat."""
    zhat = [0.0, 0.0, 1.0]
    yhat = np.cross(zhat, xhat)
    mat = np.array([xhat, yhat, zhat]).transpose()
    return Quat.from_matrix(mat).to_yaw()
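
For a horizontal unit xhat, the quaternion round-trip reduces to atan2; a minimal check sketch:

import math
import numpy as np
from bosdyn.client.math_helpers import Quat

xhat = [0.6, 0.8, 0.0]  # unit vector in the world XY plane
zhat = [0.0, 0.0, 1.0]
yhat = np.cross(zhat, xhat)
mat = np.array([xhat, yhat, zhat]).transpose()
assert math.isclose(Quat.from_matrix(mat).to_yaw(),
                    math.atan2(xhat[1], xhat[0]), abs_tol=1e-6)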
Example #8
    def _navigate_to_anchor(self, *args):
        """Navigate to a pose in seed frame, using anchors."""
        # The following options are accepted for arguments: [x, y], [x, y, yaw], [x, y, z, yaw],
        # [x, y, z, qw, qx, qy, qz].
        # When a value for z is not specified, we use the current z height.
        # When only yaw is specified, the quaternion is constructed from the yaw.
        # When yaw is not specified, an identity quaternion is used.

        if len(args) < 1 or len(args[0]) not in [2, 3, 4, 7]:
            print("Invalid arguments supplied.")
            return

        seed_T_goal = SE3Pose(float(args[0][0]), float(args[0][1]), 0.0,
                              Quat())

        if len(args[0]) in [4, 7]:
            seed_T_goal.z = float(args[0][2])
        else:
            localization_state = self._graph_nav_client.get_localization_state()
            if not localization_state.localization.waypoint_id:
                print("Robot not localized")
                return
            seed_T_goal.z = localization_state.localization.seed_tform_body.position.z

        if len(args[0]) == 3:
            seed_T_goal.rot = Quat.from_yaw(float(args[0][2]))
        elif len(args[0]) == 4:
            seed_T_goal.rot = Quat.from_yaw(float(args[0][3]))
        elif len(args[0]) == 7:
            seed_T_goal.rot = Quat(w=float(args[0][3]),
                                   x=float(args[0][4]),
                                   y=float(args[0][5]),
                                   z=float(args[0][6]))

        self._lease = self._lease_wallet.get_lease()
        if not self.toggle_power(should_power_on=True):
            print(
                "Failed to power on the robot, and cannot complete navigate to request."
            )
            return

        # Stop the lease keepalive and create a new sublease for graph nav.
        self._lease = self._lease_wallet.advance()
        sublease = self._lease.create_sublease()
        self._lease_keepalive.shutdown()
        nav_to_cmd_id = None
        # Navigate to the destination.
        is_finished = False
        while not is_finished:
            # Issue the navigation command about twice a second such that it is easy to terminate the
            # navigation command (with estop or killing the program).
            try:
                nav_to_cmd_id = self._graph_nav_client.navigate_to_anchor(
                    seed_T_goal.to_proto(),
                    1.0,
                    leases=[sublease.lease_proto],
                    command_id=nav_to_cmd_id)
            except ResponseError as e:
                print("Error while navigating {}".format(e))
                break
            # Sleep for half a second to allow for command execution.
            time.sleep(0.5)
            # Poll the robot for feedback to determine if the navigation command is complete. Then sit
            # the robot down once it is finished.
            is_finished = self._check_success(nav_to_cmd_id)

        self._lease = self._lease_wallet.advance()
        self._lease_keepalive = LeaseKeepAlive(self._lease_client)

        # Update the lease and power off the robot if appropriate.
        if self._powered_on and not self._started_powered_on:
            # Sit the robot down + power off after the navigation command is complete.
            self.toggle_power(should_power_on=False)
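
The pose construction above can be lifted into a standalone helper, which makes the four accepted argument shapes easy to exercise; a minimal sketch (the helper name and current_z default are my own):

from bosdyn.client.math_helpers import SE3Pose, Quat

def goal_from_args(vals, current_z=0.0):
    # vals: [x, y], [x, y, yaw], [x, y, z, yaw], or [x, y, z, qw, qx, qy, qz].
    vals = [float(v) for v in vals]
    goal = SE3Pose(vals[0], vals[1], current_z, Quat())
    if len(vals) in (4, 7):
        goal.z = vals[2]
    if len(vals) == 3:
        goal.rot = Quat.from_yaw(vals[2])
    elif len(vals) == 4:
        goal.rot = Quat.from_yaw(vals[3])
    elif len(vals) == 7:
        goal.rot = Quat(w=vals[3], x=vals[4], y=vals[5], z=vals[6])
    return goal

print(goal_from_args(['1.0', '2.0', '0.5']))  # x=1, y=2, yaw=0.5 rad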