Code example #1
def se3pose_proto_to_vtk_tf(se3_pose):
    """Converts an SE3Pose proto into a vtk transform object."""
    pose_obj = SE3Pose.from_obj(se3_pose)
    pose_mat = pose_obj.to_matrix()
    tf = vtk.vtkTransform()
    tf.SetMatrix(pose_mat.flatten())
    return tf
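
A minimal usage sketch for the converter above (assuming bosdyn.client.math_helpers and vtk are installed; the pose values are purely illustrative):

from bosdyn.client.math_helpers import SE3Pose, Quat

# Build an illustrative SE3Pose proto and convert it to a vtkTransform.
pose_proto = SE3Pose(x=1.0, y=0.0, z=0.5, rot=Quat()).to_proto()
tf = se3pose_proto_to_vtk_tf(pose_proto)
print(tf.GetMatrix())  # vtkMatrix4x4 holding the same rigid transform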
Code example #2
    def get_fiducial_origin(self):
        """
        Get an SE3Pose proto defining the origin of the fiducial in the world frame.

        The world frame starts at the bottom left of the image, with positive y up, positive x
        to the right, and positive z out of the page.
        :return: the SE3Pose proto defining the fiducial in this origin.
        """
        theta = np.deg2rad(self.fiducial_rotation)
        # Fiducial frame:
        # Assume x is up, and z points out. The rotation matrix
        # therefore has x pointed directly out of the page, and
        # the z and y vectors pointing to the left and up respectively.
        # Note that the image origin has z pointing out of the page,
        # y up and x to the right.
        # Therefore the z axis is equal to (cos(t), sin(t)) and the y axis is
        # (sin(t), -cos(t)).
        rot_matrix = np.array([[0, np.sin(theta), np.cos(theta)],
                               [0, -np.cos(theta), np.sin(theta)],
                               [1, 0, 0]])
        world_tform_fiducial = SE3Pose(
            rot=Quat.from_matrix(rot_matrix),
            x=self.fiducial_position[0] / self.pixels_per_meter,
            y=self.fiducial_position[1] / self.pixels_per_meter,
            z=0)
        return world_tform_fiducial.to_proto()
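
A numpy-only sanity check of the rotation construction above; the angle is arbitrary, and the columns of the matrix are the fiducial axes expressed in the world frame:

import numpy as np

theta = np.deg2rad(30.0)  # arbitrary test angle
rot = np.array([[0, np.sin(theta), np.cos(theta)],
                [0, -np.cos(theta), np.sin(theta)],
                [1, 0, 0]])
assert np.allclose(rot.T @ rot, np.eye(3))   # columns are orthonormal
assert np.isclose(np.linalg.det(rot), 1.0)   # proper rotation, no reflection
assert np.allclose(rot[:, 0], [0, 0, 1])     # fiducial x points out of the page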
Code example #3
def show_fiducial_origin(opt_info):
    """
    Draws the fiducial as a frame on the map.
    :param opt_info: info about the optimization.
    """
    VEC_LENGTH = 30.0
    world_T_fiducial = opt_info.get_fiducial_origin()
    world_T_fiducial_mat = SE3Pose.from_obj(
        world_T_fiducial).rotation.to_matrix()
    # Fiducial z-axis (third rotation column), drawn in blue.
    plt.plot([
        world_T_fiducial.position.x * opt_info.pixels_per_meter,
        world_T_fiducial.position.x * opt_info.pixels_per_meter +
        world_T_fiducial_mat[0, 2] * VEC_LENGTH
    ], [
        world_T_fiducial.position.y * opt_info.pixels_per_meter,
        world_T_fiducial.position.y * opt_info.pixels_per_meter +
        world_T_fiducial_mat[1, 2] * VEC_LENGTH
    ], 'b-')

    # Fiducial y-axis (second rotation column), drawn in green.
    plt.plot([
        world_T_fiducial.position.x * opt_info.pixels_per_meter,
        world_T_fiducial.position.x * opt_info.pixels_per_meter +
        world_T_fiducial_mat[0, 1] * VEC_LENGTH
    ], [
        world_T_fiducial.position.y * opt_info.pixels_per_meter,
        world_T_fiducial.position.y * opt_info.pixels_per_meter +
        world_T_fiducial_mat[1, 1] * VEC_LENGTH
    ], 'g-')
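
To exercise this plot in isolation, one could use a stand-in object; _FakeOptInfo below is hypothetical (not an SDK type) and assumes get_fiducial_origin from code example #2 is available as a module-level function:

import matplotlib.pyplot as plt

class _FakeOptInfo:
    # Hypothetical stand-in exposing only what the functions above need.
    pixels_per_meter = 50.0
    fiducial_position = (200.0, 150.0)   # pixels
    fiducial_rotation = 45.0             # degrees
    get_fiducial_origin = get_fiducial_origin  # method from code example #2

show_fiducial_origin(_FakeOptInfo())
plt.show()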
Code example #4
    def _get_transform(self, from_wp, to_wp):
        '''Get transform from from-waypoint to to-waypoint.'''

        from_se3 = from_wp.waypoint_tform_ko
        from_tf = SE3Pose(
            from_se3.position.x, from_se3.position.y, from_se3.position.z,
            Quat(w=from_se3.rotation.w,
                 x=from_se3.rotation.x,
                 y=from_se3.rotation.y,
                 z=from_se3.rotation.z))

        to_se3 = to_wp.waypoint_tform_ko
        to_tf = SE3Pose(
            to_se3.position.x, to_se3.position.y, to_se3.position.z,
            Quat(w=to_se3.rotation.w,
                 x=to_se3.rotation.x,
                 y=to_se3.rotation.y,
                 z=to_se3.rotation.z))

        from_T_to = from_tf.mult(to_tf.inverse())
        return from_T_to.to_proto()
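
The manual unpacking above can be compressed with SE3Pose.from_obj, which accepts the SE3Pose proto directly; a minimal equivalent sketch (behavior should match, but verify against your SDK version):

from bosdyn.client.math_helpers import SE3Pose

def _get_transform_compact(from_wp, to_wp):
    """Sketch: from_T_to = from_tform_ko * (to_tform_ko)^-1 = from_tform_ko * ko_tform_to."""
    from_tf = SE3Pose.from_obj(from_wp.waypoint_tform_ko)
    to_tf = SE3Pose.from_obj(to_wp.waypoint_tform_ko)
    return (from_tf * to_tf.inverse()).to_proto()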
Code example #5
File: ros_helpers.py  Project: benjamin-tam/spot_ros
def GetTFFromState(state, spot_wrapper, inverse_target_frame):
    """Maps robot link state data from robot state proto to ROS TFMessage message

    Args:
        state: Robot State proto
        spot_wrapper: A SpotWrapper object
        inverse_target_frame: A frame name to be inverted relative to its parent frame.
    Returns:
        TFMessage message
    """
    tf_msg = TFMessage()

    for frame_name in state.kinematic_state.transforms_snapshot.child_to_parent_edge_map:
        if state.kinematic_state.transforms_snapshot.child_to_parent_edge_map.get(
                frame_name).parent_frame_name:
            try:
                transform = state.kinematic_state.transforms_snapshot.child_to_parent_edge_map.get(
                    frame_name)
                new_tf = TransformStamped()
                local_time = spot_wrapper.robotToLocalTime(
                    state.kinematic_state.acquisition_timestamp)
                new_tf.header.stamp = rospy.Time(local_time.seconds,
                                                 local_time.nanos)
                if inverse_target_frame == frame_name:
                    geo_tform_inversed = SE3Pose.from_obj(
                        transform.parent_tform_child).inverse()
                    new_tf.header.frame_id = frame_name
                    new_tf.child_frame_id = transform.parent_frame_name
                    new_tf.transform.translation.x = geo_tform_inversed.position.x
                    new_tf.transform.translation.y = geo_tform_inversed.position.y
                    new_tf.transform.translation.z = geo_tform_inversed.position.z
                    new_tf.transform.rotation.x = geo_tform_inversed.rotation.x
                    new_tf.transform.rotation.y = geo_tform_inversed.rotation.y
                    new_tf.transform.rotation.z = geo_tform_inversed.rotation.z
                    new_tf.transform.rotation.w = geo_tform_inversed.rotation.w
                else:
                    new_tf.header.frame_id = transform.parent_frame_name
                    new_tf.child_frame_id = frame_name
                    new_tf.transform.translation.x = transform.parent_tform_child.position.x
                    new_tf.transform.translation.y = transform.parent_tform_child.position.y
                    new_tf.transform.translation.z = transform.parent_tform_child.position.z
                    new_tf.transform.rotation.x = transform.parent_tform_child.rotation.x
                    new_tf.transform.rotation.y = transform.parent_tform_child.rotation.y
                    new_tf.transform.rotation.z = transform.parent_tform_child.rotation.z
                    new_tf.transform.rotation.w = transform.parent_tform_child.rotation.w
                tf_msg.transforms.append(new_tf)
            except Exception as e:
                spot_wrapper.logger.error('Error: {}'.format(e))

    return tf_msg
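
The inverse branch above relies on parent_tform_child.inverse() being child_tform_parent, which is why frame_id and child_frame_id swap; a tiny check of that identity (pose values illustrative):

from bosdyn.client.math_helpers import SE3Pose, Quat

parent_tform_child = SE3Pose(1.0, 2.0, 0.0, Quat())
roundtrip = parent_tform_child * parent_tform_child.inverse()  # should be identity
assert abs(roundtrip.x) < 1e-9 and abs(roundtrip.y) < 1e-9 and abs(roundtrip.z) < 1e-9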
Code example #6
def main(argv):
    # The last argument should be the IP address of the robot. The app will use the directory to find
    # the velodyne and start getting data from it.
    parser = argparse.ArgumentParser()
    bosdyn.client.util.add_common_arguments(parser)
    options = parser.parse_args(argv)

    sdk = bosdyn.client.create_standard_sdk('VelodyneClient')
    robot = sdk.create_robot(options.hostname)
    robot.authenticate(options.username, options.password)
    robot.sync_with_directory()

    _point_cloud_client = robot.ensure_client('velodyne-point-cloud')
    _robot_state_client = robot.ensure_client(
        RobotStateClient.default_service_name)
    _point_cloud_task = AsyncPointCloud(_point_cloud_client)
    _robot_state_task = AsyncRobotState(_robot_state_client)
    _task_list = [_point_cloud_task, _robot_state_task]
    _async_tasks = AsyncTasks(_task_list)
    print('Connected.')

    update_thread = threading.Thread(target=_update_thread,
                                     args=[_async_tasks])
    update_thread.daemon = True
    update_thread.start()

    # Wait for the first responses.
    while any(task.proto is None for task in _task_list):
        time.sleep(0.1)
    fig = plt.figure()

    body_tform_butt = SE3Pose(-0.5, 0, 0, Quat())
    body_tform_head = SE3Pose(0.5, 0, 0, Quat())

    # Plot the point cloud as an animation.
    ax = fig.add_subplot(111, projection='3d')
    aggregate_data = collections.deque(maxlen=5)
    while True:
        if _point_cloud_task.proto[0].point_cloud:
            data = np.frombuffer(_point_cloud_task.proto[0].point_cloud.data,
                                 dtype=np.float32)
            aggregate_data.append(data)
            plot_data = np.concatenate(aggregate_data)
            ax.clear()
            ax.set_xlabel('X (m)')
            ax.set_ylabel('Y (m)')
            ax.set_zlabel('Z (m)')

            # Draw robot position and orientation on plot
            if _robot_state_task.proto:
                odom_tform_body = get_odom_tform_body(
                    _robot_state_task.proto.kinematic_state.transforms_snapshot
                ).to_proto()
                helper_se3 = SE3Pose.from_obj(odom_tform_body)
                odom_tform_butt = helper_se3.mult(body_tform_butt)
                odom_tform_head = helper_se3.mult(body_tform_head)
                ax.plot([odom_tform_butt.x], [odom_tform_butt.y],
                        [odom_tform_butt.z],
                        'o',
                        color=SPOT_YELLOW,
                        markersize=7,
                        label='robot_butt')
                ax.plot([odom_tform_head.x], [odom_tform_head.y],
                        [odom_tform_head.z],
                        'o',
                        color=SPOT_YELLOW,
                        markersize=7,
                        label='robot_head')
                ax.text(odom_tform_butt.x,
                        odom_tform_butt.y,
                        odom_tform_butt.z,
                        'Spot Butt',
                        size=TEXT_SIZE,
                        zorder=1,
                        color='k')
                ax.text(odom_tform_head.x,
                        odom_tform_head.y,
                        odom_tform_head.z,
                        'Spot Head',
                        size=TEXT_SIZE,
                        zorder=1,
                        color='k')
                ax.plot([odom_tform_butt.x, odom_tform_head.x],
                        [odom_tform_butt.y, odom_tform_head.y],
                        zs=[odom_tform_butt.z, odom_tform_head.z],
                        linewidth=6,
                        color=SPOT_YELLOW)

            # Plot point cloud data
            ax.plot(plot_data[0::3], plot_data[1::3], plot_data[2::3], '.')
            set_axes_equal(ax)
            plt.draw()
            plt.pause(0.016)
            if window_closed(ax):
                sys.exit(0)
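
The strided slicing plot_data[0::3], plot_data[1::3], plot_data[2::3] above assumes the cloud is a flat float32 buffer of x, y, z triples; a reshape-based equivalent (unpack_xyz is a hypothetical helper name):

import numpy as np

def unpack_xyz(raw_bytes):
    # raw_bytes: point_cloud.data as received; 12 bytes (three float32s) per point.
    points = np.frombuffer(raw_bytes, dtype=np.float32).reshape(-1, 3)
    return points[:, 0], points[:, 1], points[:, 2]  # xs, ys, zs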
Code example #7
File: ros_helpers.py  Project: benjamin-tam/spot_ros
def getImageMsg(data, spot_wrapper, inverse_target_frame):
    """Takes the image, camera, and TF data and populates the necessary ROS messages

    Args:
        data: Image proto
        spot_wrapper: A SpotWrapper object
        inverse_target_frame: A frame name to be inverted relative to its parent frame.
    Returns:
        (tuple):
            * Image: message of the image captured
            * CameraInfo: message to define the state and config of the camera that took the image
            * TFMessage: with the transforms necessary to locate the image frames
    """
    tf_msg = TFMessage()
    for frame_name in data.shot.transforms_snapshot.child_to_parent_edge_map:
        if data.shot.transforms_snapshot.child_to_parent_edge_map.get(
                frame_name).parent_frame_name:
            try:
                transform = data.shot.transforms_snapshot.child_to_parent_edge_map.get(
                    frame_name)
                new_tf = TransformStamped()
                local_time = spot_wrapper.robotToLocalTime(
                    data.shot.acquisition_time)
                new_tf.header.stamp = rospy.Time(local_time.seconds,
                                                 local_time.nanos)
                parent = transform.parent_frame_name
                child = frame_name
                if inverse_target_frame == frame_name:
                    geo_tform_inversed = SE3Pose.from_obj(
                        transform.parent_tform_child).inverse()
                    new_tf.header.frame_id = frame_name
                    new_tf.child_frame_id = transform.parent_frame_name
                    new_tf.transform.translation.x = geo_tform_inversed.position.x
                    new_tf.transform.translation.y = geo_tform_inversed.position.y
                    new_tf.transform.translation.z = geo_tform_inversed.position.z
                    new_tf.transform.rotation.x = geo_tform_inversed.rotation.x
                    new_tf.transform.rotation.y = geo_tform_inversed.rotation.y
                    new_tf.transform.rotation.z = geo_tform_inversed.rotation.z
                    new_tf.transform.rotation.w = geo_tform_inversed.rotation.w
                else:
                    new_tf.header.frame_id = transform.parent_frame_name
                    new_tf.child_frame_id = frame_name
                    new_tf.transform.translation.x = transform.parent_tform_child.position.x
                    new_tf.transform.translation.y = transform.parent_tform_child.position.y
                    new_tf.transform.translation.z = transform.parent_tform_child.position.z
                    new_tf.transform.rotation.x = transform.parent_tform_child.rotation.x
                    new_tf.transform.rotation.y = transform.parent_tform_child.rotation.y
                    new_tf.transform.rotation.z = transform.parent_tform_child.rotation.z
                    new_tf.transform.rotation.w = transform.parent_tform_child.rotation.w
                tf_msg.transforms.append(new_tf)
            except Exception as e:
                spot_wrapper.logger.error('Error: {}'.format(e))

    image_msg = Image()
    local_time = spot_wrapper.robotToLocalTime(data.shot.acquisition_time)
    image_msg.header.stamp = rospy.Time(local_time.seconds, local_time.nanos)
    image_msg.header.frame_id = data.shot.frame_name_image_sensor
    image_msg.height = data.shot.image.rows
    image_msg.width = data.shot.image.cols

    # Color/greyscale formats.
    # JPEG format
    if data.shot.image.format == image_pb2.Image.FORMAT_JPEG:
        image_msg.encoding = "rgb8"
        image_msg.is_bigendian = True
        image_msg.step = 3 * data.shot.image.cols
        image_msg.data = data.shot.image.data

    # Uncompressed.  Requires pixel_format.
    if data.shot.image.format == image_pb2.Image.FORMAT_RAW:
        # One byte per pixel.
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_GREYSCALE_U8:
            image_msg.encoding = "mono8"
            image_msg.is_bigendian = True
            image_msg.step = data.shot.image.cols
            image_msg.data = data.shot.image.data

        # Three bytes per pixel.
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_RGB_U8:
            image_msg.encoding = "rgb8"
            image_msg.is_bigendian = True
            image_msg.step = 3 * data.shot.image.cols
            image_msg.data = data.shot.image.data

        # Four bytes per pixel.
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_RGBA_U8:
            image_msg.encoding = "rgba8"
            image_msg.is_bigendian = True
            image_msg.step = 4 * data.shot.image.cols
            image_msg.data = data.shot.image.data

        # Little-endian uint16 z-distance from camera (mm).
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_DEPTH_U16:
            image_msg.encoding = "16UC1"
            image_msg.is_bigendian = False
            image_msg.step = 2 * data.shot.image.cols
            image_msg.data = data.shot.image.data

    camera_info_msg = DefaultCameraInfo()
    local_time = spot_wrapper.robotToLocalTime(data.shot.acquisition_time)
    camera_info_msg.header.stamp = rospy.Time(local_time.seconds,
                                              local_time.nanos)
    camera_info_msg.header.frame_id = data.shot.frame_name_image_sensor
    camera_info_msg.height = data.shot.image.rows
    camera_info_msg.width = data.shot.image.cols

    camera_info_msg.K[0] = data.source.pinhole.intrinsics.focal_length.x
    camera_info_msg.K[2] = data.source.pinhole.intrinsics.principal_point.x
    camera_info_msg.K[4] = data.source.pinhole.intrinsics.focal_length.y
    camera_info_msg.K[5] = data.source.pinhole.intrinsics.principal_point.y

    camera_info_msg.P[0] = data.source.pinhole.intrinsics.focal_length.x
    camera_info_msg.P[2] = data.source.pinhole.intrinsics.principal_point.x
    camera_info_msg.P[5] = data.source.pinhole.intrinsics.focal_length.y
    camera_info_msg.P[6] = data.source.pinhole.intrinsics.principal_point.y

    return image_msg, camera_info_msg, tf_msg
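
The flat indices written above follow the row-major layout of ROS CameraInfo: K is 3x3 and P is 3x4, so for a pinhole camera fx, cx, fy, cy land at K[0], K[2], K[4], K[5] and P[0], P[2], P[5], P[6]. A numpy sanity check with illustrative numbers:

import numpy as np

fx, fy, cx, cy = 552.0, 552.0, 320.0, 240.0  # illustrative intrinsics
K = np.array([[fx, 0, cx],
              [0, fy, cy],
              [0, 0, 1]]).flatten()
P = np.array([[fx, 0, cx, 0],
              [0, fy, cy, 0],
              [0, 0, 1, 0]]).flatten()
assert (K[0], K[2], K[4], K[5]) == (fx, cx, fy, cy)
assert (P[0], P[2], P[5], P[6]) == (fx, cx, fy, cy)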
Code example #8
def get_object_position(world_tform_cam, visual_image, depth_image,
                        bounding_box, rotation_angle):
    """
    Extract the bounding box, then find the mode in that region.

    Args:
        world_tform_cam (SE3Pose): SE3 transform from world to camera frame
        visual_image (ImageResponse): From a visual camera
        depth_image (ImageResponse): From a depth camera corresponding to the visual_image
        bounding_box (list): Bounding box from tensorflow
        rotation_angle (float): Angle (in degrees) to rotate depth image to match cam image rotation
    """

    # Make sure there are two images.
    if visual_image is None or depth_image is None:
        # Fail.
        return

    # Rotate bounding box back to original frame
    points = [(bounding_box[1], bounding_box[0]),
              (bounding_box[3], bounding_box[0]),
              (bounding_box[3], bounding_box[2]),
              (bounding_box[1], bounding_box[2])]

    origin = (visual_image.shot.image.cols / 2,
              visual_image.shot.image.rows / 2)

    points_rot = [
        rotate_about_origin_degrees(origin, point, rotation_angle)
        for point in points
    ]

    # Get the bounding box corners.
    y_min = max(0, min([point[1] for point in points_rot]))
    x_min = max(0, min([point[0] for point in points_rot]))
    y_max = min(visual_image.shot.image.rows,
                max([point[1] for point in points_rot]))
    x_max = min(visual_image.shot.image.cols,
                max([point[0] for point in points_rot]))

    # Check that the bounding box is valid.
    if (x_min < 0 or y_min < 0 or x_max > visual_image.shot.image.cols
            or y_max > visual_image.shot.image.rows):
        print(
            f'Bounding box is invalid: ({x_min}, {y_min}) | ({x_max}, {y_max})'
        )
        print(
            f'Bounds: ({visual_image.shot.image.cols}, {visual_image.shot.image.rows})'
        )
        return

    # Unpack the images.
    try:
        if depth_image.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_DEPTH_U16:
            dtype = np.uint16
        else:
            dtype = np.uint8
        img = np.frombuffer(depth_image.shot.image.data, dtype=dtype)
        if depth_image.shot.image.format == image_pb2.Image.FORMAT_RAW:
            img = img.reshape(depth_image.shot.image.rows,
                              depth_image.shot.image.cols)
        else:
            img = cv2.imdecode(img, -1)
        depth_image_pixels = img

        # Get the depth data from the region in the bounding box.
        depth = get_distance_to_closest_object_depth(
            x_min, x_max, y_min, y_max, depth_image.source.depth_scale,
            depth_image_pixels)

        if depth >= 4.0:
            # Not enough depth data.
            print('Not enough depth data.')
            return False

        # Calculate the transform to the center point of the object using camera intrinsics
        # and depth calculated earlier in the function
        focal_x = depth_image.source.pinhole.intrinsics.focal_length.x
        principal_x = depth_image.source.pinhole.intrinsics.principal_point.x

        focal_y = depth_image.source.pinhole.intrinsics.focal_length.y
        principal_y = depth_image.source.pinhole.intrinsics.principal_point.y

        center_x = round((x_max - x_min) / 2.0 + x_min)
        center_y = round((y_max - y_min) / 2.0 + y_min)

        tform_x = depth * (center_x - principal_x) / focal_x
        tform_y = depth * (center_y - principal_y) / focal_y
        tform_z = depth
        obj_tform_camera = SE3Pose(tform_x, tform_y, tform_z, Quat())

        return world_tform_cam * obj_tform_camera
    except Exception as exc:  # pylint: disable=broad-except
        print(f'Error getting object position: {exc}')
        return
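
The last few lines implement the standard pinhole back-projection x = d(u - cx)/fx, y = d(v - cy)/fy, z = d; a worked example with made-up numbers:

depth = 2.0                                          # meters, from the depth image
center_x, principal_x, focal_x = 400.0, 320.0, 552.0  # pixel, pixel, pixels
tform_x = depth * (center_x - principal_x) / focal_x
print(f'{tform_x:.3f} m right of the optical axis')  # ~0.290 m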
Code example #9
def main(argv):
    parser = argparse.ArgumentParser()
    bosdyn.client.util.add_common_arguments(parser)
    parser.add_argument('--protocol',
                        default='tcp',
                        type=str,
                        choices=['tcp', 'udp'],
                        help='IP protocol to use, either tcp or udp.')
    parser.add_argument('--server-port',
                        default=5201,
                        type=int,
                        help='Port number of iperf3 server')
    parser.add_argument('--server-hostname',
                        default='127.0.0.1',
                        type=str,
                        help='IP address of iperf3 server')
    options = parser.parse_args(argv)

    sdk = bosdyn.client.create_standard_sdk('CommsTestingClient',
                                            [MissionClient])
    robot = sdk.create_robot(options.hostname)  #ROBOT_IP
    robot.authenticate(options.username, options.password)
    robot.sync_with_directory()

    _robot_state_client = robot.ensure_client(
        RobotStateClient.default_service_name)
    _mission_client = robot.ensure_client(MissionClient.default_service_name)
    _robot_state_task = AsyncRobotState(_robot_state_client)
    _mission_state_task = AsyncMissionState(_mission_client)
    _task_list = [_robot_state_task, _mission_state_task]
    _async_tasks = AsyncTasks(_task_list)
    print('Connected.')

    update_thread = threading.Thread(target=_update_thread,
                                     args=[_async_tasks])
    update_thread.daemon = True
    update_thread.start()

    # Wait for the first responses.
    while any(task.proto is None for task in _task_list):
        time.sleep(0.1)

    # list to hold all the data
    data_list = []
    curr_fname = ''
    try:
        while True:
            if _mission_state_task.proto.status != mission_proto.State.STATUS_RUNNING:
                # Write out data if it exists
                if len(data_list) > 0:
                    print(
                        f'Finished a mission, data can be found in {curr_fname}'
                    )
                    data_list.clear()
                print(_mission_state_task.proto)
                time.sleep(1)

            else:
                if _robot_state_task.proto:

                    # This script currently uses odometry positioning, which is based on the robot's
                    # position at boot time. Runs across boots will not be easily comparable
                    odom_tform_body = get_odom_tform_body(
                        _robot_state_task.proto.kinematic_state.
                        transforms_snapshot).to_proto()
                    helper_se3 = SE3Pose.from_obj(odom_tform_body)

                    # Check latency with a ping to the iperf3 server.
                    ping_ret = check_ping(options.server_hostname)
                    ping = -1 if ping_ret is None else ping_ret['avg']

                    # Run iperf3 client
                    client = iperf3.Client()
                    client.duration = 1
                    client.server_hostname = options.server_hostname
                    client.protocol = options.protocol
                    client.port = options.server_port
                    result = client.run()

                    # update list of data points
                    if result.error:
                        print(result.error)
                    else:
                        data_entry = {
                            'loc_x': helper_se3.x,
                            'loc_y': helper_se3.y,
                            'loc_z': helper_se3.z,
                            'time': datetime.datetime.now(),
                            'latency(ms)': ping
                        }
                        if options.protocol == 'udp':
                            data_entry.update({
                                'send throughput(Mbps)': result.Mbps,
                                'recv throughput(Mbps)': -1,
                                'jitter(ms)': result.jitter_ms,
                                'lost(%)': result.lost_percent,
                                'retransmits': -1
                            })
                        elif options.protocol == 'tcp':
                            data_entry.update({
                                'send throughput(Mbps)': result.sent_Mbps,
                                'recv throughput(Mbps)': result.received_Mbps,
                                'jitter(ms)': -1,
                                'lost(%)': -1,
                                'retransmits': result.retransmits
                            })
                        data_list.append(data_entry)
                        print(data_list[-1])

                        # Create csv with header if it doesn't exist, then write latest row
                        keys = data_list[0].keys()
                        if curr_fname == '':
                            curr_fname = create_csv_filename(options.protocol)
                            with open(curr_fname, 'w') as output_file:
                                header_writer = csv.DictWriter(
                                    output_file, keys)
                                header_writer.writeheader()
                        with open(curr_fname, 'a') as output_file:
                            dict_writer = csv.DictWriter(output_file, keys)
                            dict_writer.writerow(data_list[-1])

                    del client

    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, exiting")
        return True
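
create_csv_filename is referenced but not shown in this listing; a hypothetical sketch consistent with its call site (the name pattern is an assumption, not the project's):

import datetime

def create_csv_filename(protocol):
    # Hypothetical helper: timestamped CSV name, e.g. comms_test_tcp_20240101_120000.csv
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    return f'comms_test_{protocol}_{stamp}.csv'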
Code example #10
    def _navigate_to_anchor(self, *args):
        """Navigate to a pose in seed frame, using anchors."""
        # The following options are accepted for arguments: [x, y], [x, y, yaw], [x, y, z, yaw],
        # [x, y, z, qw, qx, qy, qz].
        # When a value for z is not specified, we use the current z height.
        # When only yaw is specified, the quaternion is constructed from the yaw.
        # When yaw is not specified, an identity quaternion is used.

        if len(args) < 1 or len(args[0]) not in [2, 3, 4, 7]:
            print("Invalid arguments supplied.")
            return

        seed_T_goal = SE3Pose(float(args[0][0]), float(args[0][1]), 0.0,
                              Quat())

        if len(args[0]) in [4, 7]:
            seed_T_goal.z = float(args[0][2])
        else:
            localization_state = self._graph_nav_client.get_localization_state()
            if not localization_state.localization.waypoint_id:
                print("Robot not localized")
                return
            seed_T_goal.z = localization_state.localization.seed_tform_body.position.z

        if len(args[0]) == 3:
            seed_T_goal.rot = Quat.from_yaw(float(args[0][2]))
        elif len(args[0]) == 4:
            seed_T_goal.rot = Quat.from_yaw(float(args[0][3]))
        elif len(args[0]) == 7:
            seed_T_goal.rot = Quat(w=float(args[0][3]),
                                   x=float(args[0][4]),
                                   y=float(args[0][5]),
                                   z=float(args[0][6]))

        self._lease = self._lease_wallet.get_lease()
        if not self.toggle_power(should_power_on=True):
            print(
                "Failed to power on the robot, and cannot complete navigate to request."
            )
            return

        # Stop the lease keepalive and create a new sublease for graph nav.
        self._lease = self._lease_wallet.advance()
        sublease = self._lease.create_sublease()
        self._lease_keepalive.shutdown()
        nav_to_cmd_id = None
        # Navigate to the destination.
        is_finished = False
        while not is_finished:
            # Issue the navigation command about twice a second such that it is easy to terminate the
            # navigation command (with estop or killing the program).
            try:
                nav_to_cmd_id = self._graph_nav_client.navigate_to_anchor(
                    seed_T_goal.to_proto(),
                    1.0,
                    leases=[sublease.lease_proto],
                    command_id=nav_to_cmd_id)
            except ResponseError as e:
                print("Error while navigating {}".format(e))
                break
            time.sleep(0.5)  # Sleep for half a second to allow for command execution.
            # Poll the robot for feedback to determine if the navigation command is complete. Then sit
            # the robot down once it is finished.
            is_finished = self._check_success(nav_to_cmd_id)

        self._lease = self._lease_wallet.advance()
        self._lease_keepalive = LeaseKeepAlive(self._lease_client)

        # Update the lease and power off the robot if appropriate.
        if self._powered_on and not self._started_powered_on:
            # Sit the robot down + power off after the navigation command is complete.
            self.toggle_power(should_power_on=False)
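
Quat.from_yaw above builds a rotation about +z; a small check of the half-angle construction it should match (w = cos(yaw/2), z = sin(yaw/2)):

import math
from bosdyn.client.math_helpers import Quat

yaw = math.radians(90.0)  # arbitrary test angle
q = Quat.from_yaw(yaw)
assert math.isclose(q.w, math.cos(yaw / 2), abs_tol=1e-9)
assert math.isclose(q.z, math.sin(yaw / 2), abs_tol=1e-9)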