Example #1
    def trajectory_cmd(self,
                       goal_x,
                       goal_y,
                       goal_heading,
                       cmd_duration,
                       frame_name='odom'):
        """Send a trajectory motion command to the robot.

        Args:
            goal_x: Position X coordinate in meters
            goal_y: Position Y coordinate in meters
            goal_heading: Pose heading in radians
            cmd_duration: Time-to-live for the command in seconds.
            frame_name: Frame used to compute the target position: 'odom' or 'vision'
        """
        self._at_goal = False
        self._logger.info("got command duration of {}".format(cmd_duration))
        end_time = time.time() + cmd_duration
        if frame_name == 'vision':
            vision_tform_body = frame_helpers.get_vision_tform_body(
                self._robot_state_client.get_robot_state(
                ).kinematic_state.transforms_snapshot)
            body_tform_goal = math_helpers.SE3Pose(
                x=goal_x,
                y=goal_y,
                z=0,
                rot=math_helpers.Quat.from_yaw(goal_heading))
            vision_tform_goal = vision_tform_body * body_tform_goal
            response = self._robot_command(
                RobotCommandBuilder.trajectory_command(
                    goal_x=vision_tform_goal.x,
                    goal_y=vision_tform_goal.y,
                    goal_heading=vision_tform_goal.rot.to_yaw(),
                    frame_name=frame_helpers.VISION_FRAME_NAME,
                    params=self._mobility_params),
                end_time_secs=end_time)
        elif frame_name == 'odom':
            odom_tform_body = frame_helpers.get_odom_tform_body(
                self._robot_state_client.get_robot_state(
                ).kinematic_state.transforms_snapshot)
            body_tform_goal = math_helpers.SE3Pose(
                x=goal_x,
                y=goal_y,
                z=0,
                rot=math_helpers.Quat.from_yaw(goal_heading))
            odom_tform_goal = odom_tform_body * body_tform_goal
            response = self._robot_command(
                RobotCommandBuilder.trajectory_command(
                    goal_x=odom_tform_goal.x,
                    goal_y=odom_tform_goal.y,
                    goal_heading=odom_tform_goal.rot.to_yaw(),
                    frame_name=frame_helpers.ODOM_FRAME_NAME,
                    params=self._mobility_params),
                end_time_secs=end_time)
        else:
            raise ValueError('frame_name must be \'vision\' or \'odom\'')
        if response[0]:
            self._last_trajectory_command = response[2]
        return response[0], response[1]

    def go_to_tag(self, fiducial_rt_world):
        """Use the position of the april tag in vision world frame and command the robot."""
        # Compute the go-to point (offset by .5m from the fiducial position) and the heading at
        # this point.
        self._current_tag_world_pose, self._angle_desired = self.offset_tag_pose(
            fiducial_rt_world, self._tag_offset)

        # Command the robot to go to the tag in the vision frame
        mobility_params = self.set_mobility_params()
        tag_cmd = RobotCommandBuilder.trajectory_command(
            goal_x=self._current_tag_world_pose[0],
            goal_y=self._current_tag_world_pose[1],
            goal_heading=self._angle_desired,
            frame_name=VISION_FRAME_NAME,
            params=mobility_params,
            body_height=0.0,
            locomotion_hint=spot_command_pb2.HINT_AUTO)
        end_time = 5.0
        if self._movement_on and self._powered_on:
            #Issue the command to the robot
            self._robot_command_client.robot_command(
                lease=None,
                command=tag_cmd,
                end_time_secs=time.time() + end_time)
            # Feedback: check and wait until the robot is in the desired position or timeout
            start_time = time.time()
            current_time = time.time()
            while (not self.final_state()
                   and current_time - start_time < end_time):
                time.sleep(.25)
                current_time = time.time()
        return
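
The trajectory_cmd method above takes a goal expressed relative to the robot's current body pose and re-expresses it in the chosen world frame before issuing the command. A minimal usage sketch, assuming an instance named spot_wrapper exposing this method, and assuming self._robot_command returns a (success, message, command_id) tuple as the indexing above suggests:

import math

# Hypothetical caller: walk 1 m forward and turn 90 degrees left, giving the
# command 10 seconds to complete, expressed via the odometry frame.
success, message = spot_wrapper.trajectory_cmd(
    goal_x=1.0, goal_y=0.0, goal_heading=math.pi / 2,
    cmd_duration=10.0, frame_name='odom')
if not success:
    print('Trajectory command rejected: {}'.format(message))
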
Example #3
def get_go_to(world_tform_object,
              robot_state,
              mobility_params,
              dist_margin=1.2):
    """Gets trajectory command to a goal location

    Args:
        world_tform_object (SE3Pose): Transform from vision frame to target object
        robot_state (RobotState): Current robot state
        mobility_params (MobilityParams): Mobility parameters
        dist_margin (float): Distance margin to target
    """
    vo_tform_robot = get_vision_tform_body(
        robot_state.kinematic_state.transforms_snapshot)
    delta_ewrt_vo = np.array([
        world_tform_object.x - vo_tform_robot.x,
        world_tform_object.y - vo_tform_robot.y, 0
    ])
    norm = np.linalg.norm(delta_ewrt_vo)
    if norm == 0:
        return None
    delta_ewrt_vo_norm = delta_ewrt_vo / norm
    heading = _get_heading(delta_ewrt_vo_norm)
    vo_tform_goal = np.array([
        world_tform_object.x - delta_ewrt_vo_norm[0] * dist_margin,
        world_tform_object.y - delta_ewrt_vo_norm[1] * dist_margin
    ])
    tag_cmd = RobotCommandBuilder.trajectory_command(
        goal_x=vo_tform_goal[0],
        goal_y=vo_tform_goal[1],
        goal_heading=heading,
        frame_name=VISION_FRAME_NAME,
        params=mobility_params)
    return tag_cmd
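
get_go_to relies on a _get_heading helper that is not shown in this snippet. A minimal sketch, assuming it converts a unit XY direction vector (expressed in the vision frame) into the yaw angle that points the body x-axis along that direction:

import numpy as np

def _get_heading(xhat):
    # Hypothetical helper: yaw (rotation about the world z-axis) of a unit
    # direction vector, measured from the world x-axis.
    return float(np.arctan2(xhat[1], xhat[0]))
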
    def trajectory_cmd_srv(self, trajectory):
        '''
        Callback that commands the robot through waypoint(s) (Point) [m] with a final orientation [rad].

        The trajectory is interpreted relative to a named frame, which must be gravity aligned:
        "vision", "odom", or "flat_body". Any other se2_frame_name will be rejected and the
        trajectory command will not be executed.
        '''
        # TODO: Support other reference frames (currently only VISION ref. frame)

        for pose in trajectory.waypoints.poses:
            x = pose.position.x
            y = pose.position.y
            heading = math.atan2(y, x)
            frame = VISION_FRAME_NAME

            cmd = RobotCommandBuilder.trajectory_command(
                goal_x=x,
                goal_y=y,
                goal_heading=heading,
                frame_name=frame,
            )
            self.command_client.robot_command(lease=None, command=cmd, end_time_secs=time.time() + self.TRAJECTORY_CMD_TIMEOUT)
            
        robot_state = self.get_robot_state()[0].vision_tform_body
        final_pose = geometry_msgs.msg.Pose()
        final_pose.position = robot_state.translation
        final_pose.orientation = robot_state.rotation

        return spot_ros_srvs.srv.TrajectoryResponse(final_pose)
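
As written, the loop above fires each waypoint command immediately, so every new command preempts the previous one before the robot has reached the prior waypoint. One way to make the waypoints sequential is to block on feedback between commands; a sketch of the loop body under that change, reusing the block_for_trajectory_cmd helper seen in the later examples (assumed importable from bosdyn.client.robot_command):

            # Inside the for-loop over waypoints, replace the fire-and-forget call:
            cmd_id = self.command_client.robot_command(
                lease=None, command=cmd,
                end_time_secs=time.time() + self.TRAJECTORY_CMD_TIMEOUT)
            # Block until the robot reports it has reached this waypoint (or the
            # command times out) before issuing the next one.
            block_for_trajectory_cmd(self.command_client, cmd_id,
                                     timeout_sec=self.TRAJECTORY_CMD_TIMEOUT)
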
Example #5
    def _return_to_origin(self):
        self._start_robot_command(
            'fwd_and_rotate',
            RobotCommandBuilder.trajectory_command(
                goal_x=0.0, goal_y=0.0, goal_heading=0.0,
                frame_name=ODOM_FRAME_NAME, params=None, body_height=0.0,
                locomotion_hint=spot_command_pb2.HINT_SPEED_SELECT_TROT),
            end_time_secs=time.time() + 20)

def test_trajectory_command():
    goal_x = 1.0
    goal_y = 2.0
    goal_heading = 3.0
    frame = ODOM_FRAME_NAME
    command = RobotCommandBuilder.trajectory_command(goal_x, goal_y, goal_heading, frame)
    _test_has_mobility_deprecated(command)
    assert command.mobility_command.HasField("se2_trajectory_request")
    traj = command.mobility_command.se2_trajectory_request.trajectory
    assert len(traj.points) == 1
    assert traj.points[0].pose.position.x == goal_x
    assert traj.points[0].pose.position.y == goal_y
    assert traj.points[0].pose.angle == goal_heading
    assert command.mobility_command.se2_trajectory_request.se2_frame_name == ODOM_FRAME_NAME
Example #7
def test_trajectory_command():
    goal_x = 1.0
    goal_y = 2.0
    goal_heading = 3.0
    frame = geometry_pb2.Frame(base_frame=geometry_pb2.FRAME_KO)
    command = RobotCommandBuilder.trajectory_command(goal_x, goal_y,
                                                     goal_heading, frame)
    _test_has_mobility(command)
    assert command.mobility_command.HasField("se2_trajectory_request")
    traj = command.mobility_command.se2_trajectory_request.trajectory
    assert len(traj.points) == 1
    assert traj.points[0].pose.position.x == goal_x
    assert traj.points[0].pose.position.y == goal_y
    assert traj.points[0].pose.angle == goal_heading
    assert traj.frame == frame
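
Example #7 targets an older SDK release in which trajectory_command took a geometry_pb2.Frame (here FRAME_KO, the kinematic odometry frame) and the trajectory proto carried its own frame field. Against the current string-based API used in the other examples, the equivalent call would presumably be:

command = RobotCommandBuilder.trajectory_command(
    goal_x=goal_x, goal_y=goal_y, goal_heading=goal_heading,
    frame_name=ODOM_FRAME_NAME)
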
Example #8
def main(argv):
    parser = argparse.ArgumentParser()
    bosdyn.client.util.add_common_arguments(parser)
    parser.add_argument(
        '-s',
        '--ml-service',
        help='Service name of external machine learning server.',
        required=True)
    parser.add_argument('-m',
                        '--model',
                        help='Model name running on the external server.',
                        required=True)
    parser.add_argument(
        '-p',
        '--person-model',
        help='Person detection model name running on the external server.')
    parser.add_argument(
        '-c',
        '--confidence-dogtoy',
        help='Minimum confidence to return an object for the dogtoy (0.0 to 1.0)',
        default=0.5,
        type=float)
    parser.add_argument(
        '-e',
        '--confidence-person',
        help='Minimum confidence for person detection (0.0 to 1.0)',
        default=0.6,
        type=float)
    options = parser.parse_args(argv)

    cv2.namedWindow("Fetch")
    cv2.waitKey(500)

    sdk = bosdyn.client.create_standard_sdk('SpotFetchClient')
    sdk.register_service_client(NetworkComputeBridgeClient)
    robot = sdk.create_robot(options.hostname)
    robot.authenticate(options.username, options.password)

    # Time sync is necessary so that time-based filter requests can be converted
    robot.time_sync.wait_for_sync()

    network_compute_client = robot.ensure_client(
        NetworkComputeBridgeClient.default_service_name)
    robot_state_client = robot.ensure_client(
        RobotStateClient.default_service_name)
    command_client = robot.ensure_client(
        RobotCommandClient.default_service_name)
    lease_client = robot.ensure_client(LeaseClient.default_service_name)
    manipulation_api_client = robot.ensure_client(
        ManipulationApiClient.default_service_name)

    # This script assumes the robot is already standing via the tablet.  We'll take over from the
    # tablet.
    lease = lease_client.take()

    lk = bosdyn.client.lease.LeaseKeepAlive(lease_client)

    # Store the position of the hand at the last toy drop point.
    vision_tform_hand_at_drop = None

    while True:
        holding_toy = False
        while not holding_toy:
            # Capture an image and run ML on it.
            dogtoy, image, vision_tform_dogtoy = get_obj_and_img(
                network_compute_client, options.ml_service, options.model,
                options.confidence_dogtoy, kImageSources, 'dogtoy')

            if dogtoy is None:
                # Didn't find anything, keep searching.
                continue

            # If we have already dropped the toy off, make sure it has moved a sufficient amount before
            # picking it up again
            if vision_tform_hand_at_drop is not None and pose_dist(
                    vision_tform_hand_at_drop, vision_tform_dogtoy) < 0.5:
                print('Found dogtoy, but it hasn\'t moved.  Waiting...')
                time.sleep(1)
                continue

            print('Found dogtoy...')

            # Got a dogtoy.  Request pick up.

            # Stow the arm in case it is deployed
            stow_cmd = RobotCommandBuilder.arm_stow_command()
            command_client.robot_command(stow_cmd)

            # Walk to the object.
            walk_rt_vision, heading_rt_vision = compute_stand_location_and_yaw(
                vision_tform_dogtoy, robot_state_client, distance_margin=1.0)

            move_cmd = RobotCommandBuilder.trajectory_command(
                goal_x=walk_rt_vision[0],
                goal_y=walk_rt_vision[1],
                goal_heading=heading_rt_vision,
                frame_name=frame_helpers.VISION_FRAME_NAME,
                params=get_walking_params(0.5, 0.5))
            end_time = 5.0
            cmd_id = command_client.robot_command(command=move_cmd,
                                                  end_time_secs=time.time() +
                                                  end_time)

            # Wait until the robot reports that it is at the goal.
            block_for_trajectory_cmd(command_client,
                                     cmd_id,
                                     timeout_sec=5,
                                     verbose=True)

            # The ML result is a bounding box.  Find the center.
            (center_px_x,
             center_px_y) = find_center_px(dogtoy.image_properties.coordinates)

            # Request Pick Up on that pixel.
            pick_vec = geometry_pb2.Vec2(x=center_px_x, y=center_px_y)
            grasp = manipulation_api_pb2.PickObjectInImage(
                pixel_xy=pick_vec,
                transforms_snapshot_for_camera=image.shot.transforms_snapshot,
                frame_name_image_sensor=image.shot.frame_name_image_sensor,
                camera_model=image.source.pinhole)

            # We can specify where in the gripper we want to grasp. About halfway is generally good for
            # small objects like this. For a bigger object like a shoe, 0 is better (use the entire
            # gripper)
            grasp.grasp_params.grasp_palm_to_fingertip = 0.6

            # Tell the grasping system that we want a top-down grasp.

            # Add a constraint that requests that the x-axis of the gripper is pointing in the
            # negative-z direction in the vision frame.

            # The axis on the gripper is the x-axis.
            axis_on_gripper_ewrt_gripper = geometry_pb2.Vec3(x=1, y=0, z=0)

            # The axis in the vision frame is the negative z-axis
            axis_to_align_with_ewrt_vision = geometry_pb2.Vec3(x=0, y=0, z=-1)

            # Add the vector constraint to our proto.
            constraint = grasp.grasp_params.allowable_orientation.add()
            constraint.vector_alignment_with_tolerance.axis_on_gripper_ewrt_gripper.CopyFrom(
                axis_on_gripper_ewrt_gripper)
            constraint.vector_alignment_with_tolerance.axis_to_align_with_ewrt_frame.CopyFrom(
                axis_to_align_with_ewrt_vision)

            # We'll take anything within about 15 degrees for top-down or horizontal grasps.
            constraint.vector_alignment_with_tolerance.threshold_radians = 0.25

            # Specify the frame we're using.
            grasp.grasp_params.grasp_params_frame_name = frame_helpers.VISION_FRAME_NAME

            # Build the proto
            grasp_request = manipulation_api_pb2.ManipulationApiRequest(
                pick_object_in_image=grasp)

            # Send the request
            print('Sending grasp request...')
            cmd_response = manipulation_api_client.manipulation_api_command(
                manipulation_api_request=grasp_request)

            # Wait for the grasp to finish
            grasp_done = False
            failed = False
            time_start = time.time()
            while not grasp_done:
                feedback_request = manipulation_api_pb2.ManipulationApiFeedbackRequest(
                    manipulation_cmd_id=cmd_response.manipulation_cmd_id)

                # Send a request for feedback
                response = manipulation_api_client.manipulation_api_feedback_command(
                    manipulation_api_feedback_request=feedback_request)

                current_state = response.current_state
                current_time = time.time() - time_start
                print('Current state ({time:.1f} sec): {state}'.format(
                    time=current_time,
                    state=manipulation_api_pb2.ManipulationFeedbackState.Name(
                        current_state)),
                      end='                \r')
                sys.stdout.flush()

                failed_states = [
                    manipulation_api_pb2.MANIP_STATE_GRASP_FAILED,
                    manipulation_api_pb2.
                    MANIP_STATE_GRASP_PLANNING_NO_SOLUTION,
                    manipulation_api_pb2.
                    MANIP_STATE_GRASP_FAILED_TO_RAYCAST_INTO_MAP,
                    manipulation_api_pb2.
                    MANIP_STATE_GRASP_PLANNING_WAITING_DATA_AT_EDGE
                ]

                failed = current_state in failed_states
                grasp_done = current_state == manipulation_api_pb2.MANIP_STATE_GRASP_SUCCEEDED or failed

                time.sleep(0.1)

            holding_toy = not failed

        # Move the arm to a carry position.
        print('')
        print('Grasp finished, search for a person...')
        carry_cmd = RobotCommandBuilder.arm_carry_command()
        command_client.robot_command(carry_cmd)

        # Wait for the carry command to finish
        time.sleep(0.75)

        person = None
        while person is None:
            # Find a person to deliver the toy to
            person, image, vision_tform_person = get_obj_and_img(
                network_compute_client, options.ml_service,
                options.person_model, options.confidence_person, kImageSources,
                'person')

        # We now have found a person to drop the toy off near.
        drop_position_rt_vision, heading_rt_vision = compute_stand_location_and_yaw(
            vision_tform_person, robot_state_client, distance_margin=2.0)

        wait_position_rt_vision, wait_heading_rt_vision = compute_stand_location_and_yaw(
            vision_tform_person, robot_state_client, distance_margin=3.0)

        # Tell the robot to go there

        # Limit the speed so we don't charge at the person.
        move_cmd = RobotCommandBuilder.trajectory_command(
            goal_x=drop_position_rt_vision[0],
            goal_y=drop_position_rt_vision[1],
            goal_heading=heading_rt_vision,
            frame_name=frame_helpers.VISION_FRAME_NAME,
            params=get_walking_params(0.5, 0.5))
        end_time = 5.0
        cmd_id = command_client.robot_command(command=move_cmd,
                                              end_time_secs=time.time() +
                                              end_time)

        # Wait until the robot reports that it is at the goal.
        block_for_trajectory_cmd(command_client,
                                 cmd_id,
                                 timeout_sec=5,
                                 verbose=True)

        print('Arrived at goal, dropping object...')

        # Do an arm-move to gently put the object down.
        # Build a position to move the arm to (in meters, relative to and expressed in the gravity aligned body frame).
        x = 0.75
        y = 0
        z = -0.25
        hand_ewrt_flat_body = geometry_pb2.Vec3(x=x, y=y, z=z)

        # Point the hand straight down with a quaternion.
        qw = 0.707
        qx = 0
        qy = 0.707
        qz = 0
        flat_body_Q_hand = geometry_pb2.Quaternion(w=qw, x=qx, y=qy, z=qz)

        flat_body_tform_hand = geometry_pb2.SE3Pose(
            position=hand_ewrt_flat_body, rotation=flat_body_Q_hand)

        robot_state = robot_state_client.get_robot_state()
        vision_tform_flat_body = frame_helpers.get_a_tform_b(
            robot_state.kinematic_state.transforms_snapshot,
            frame_helpers.VISION_FRAME_NAME,
            frame_helpers.GRAV_ALIGNED_BODY_FRAME_NAME)

        vision_tform_hand_at_drop = vision_tform_flat_body * math_helpers.SE3Pose.from_obj(
            flat_body_tform_hand)

        # duration in seconds
        seconds = 1

        arm_command = RobotCommandBuilder.arm_pose_command(
            vision_tform_hand_at_drop.x, vision_tform_hand_at_drop.y,
            vision_tform_hand_at_drop.z, vision_tform_hand_at_drop.rot.w,
            vision_tform_hand_at_drop.rot.x, vision_tform_hand_at_drop.rot.y,
            vision_tform_hand_at_drop.rot.z, frame_helpers.VISION_FRAME_NAME,
            seconds)

        # Keep the gripper closed.
        gripper_command = RobotCommandBuilder.claw_gripper_open_fraction_command(
            0.0)

        # Combine the arm and gripper commands into one RobotCommand
        command = RobotCommandBuilder.build_synchro_command(
            gripper_command, arm_command)

        # Send the request
        cmd_id = command_client.robot_command(command)

        # Wait until the arm arrives at the goal.
        block_until_arm_arrives(command_client, cmd_id)

        # Open the gripper
        gripper_command = RobotCommandBuilder.claw_gripper_open_fraction_command(
            1.0)
        command = RobotCommandBuilder.build_synchro_command(gripper_command)
        cmd_id = command_client.robot_command(command)

        # Wait for the dogtoy to fall out
        time.sleep(1.5)

        # Stow the arm.
        stow_cmd = RobotCommandBuilder.arm_stow_command()
        command_client.robot_command(stow_cmd)

        time.sleep(1)

        print('Backing up and waiting...')

        # Back up one meter and wait for the person to throw the object again.
        move_cmd = RobotCommandBuilder.trajectory_command(
            goal_x=wait_position_rt_vision[0],
            goal_y=wait_position_rt_vision[1],
            goal_heading=wait_heading_rt_vision,
            frame_name=frame_helpers.VISION_FRAME_NAME,
            params=get_walking_params(0.5, 0.5))
        end_time = 5.0
        cmd_id = command_client.robot_command(command=move_cmd,
                                              end_time_secs=time.time() +
                                              end_time)

        # Wait until the robot reports that it is at the goal.
        block_for_trajectory_cmd(command_client,
                                 cmd_id,
                                 timeout_sec=5,
                                 verbose=True)

    lease_client.return_lease(lease)
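
Example #8 calls several small helpers that are not reproduced here (get_obj_and_img, compute_stand_location_and_yaw, get_walking_params, find_center_px, pose_dist). The simplest of these, pose_dist, is only used to check how far the toy has moved since the last drop; a minimal sketch under that assumption:

import math

def pose_dist(pose_a, pose_b):
    # Hypothetical helper: straight-line distance between the translations of
    # two SE3Pose objects; rotation is ignored.
    dx = pose_a.x - pose_b.x
    dy = pose_a.y - pose_b.y
    dz = pose_a.z - pose_b.z
    return math.sqrt(dx * dx + dy * dy + dz * dz)
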
Example #9
def main():
    import argparse
    parser = argparse.ArgumentParser()
    bosdyn.client.util.add_common_arguments(parser)
    options = parser.parse_args()

    # Create robot object.
    sdk = bosdyn.client.create_standard_sdk('RobotCommandMaster')
    sdk.load_app_token(options.app_token)
    robot = sdk.create_robot(options.hostname)
    robot.authenticate(options.username, options.password)

    # Check that an estop is connected with the robot so that the robot commands can be executed.
    verify_estop(robot)

    # Create the lease client.
    lease_client = robot.ensure_client(LeaseClient.default_service_name)
    lease = lease_client.acquire()
    robot.time_sync.wait_for_sync()
    lk = bosdyn.client.lease.LeaseKeepAlive(lease_client)

    # Setup clients for the robot state and robot command services.
    robot_state_client = robot.ensure_client(
        RobotStateClient.default_service_name)
    robot_command_client = robot.ensure_client(
        RobotCommandClient.default_service_name)

    # Power on the robot and stand it up.
    robot.power_on()
    blocking_stand(robot_command_client)

    # Get robot state information. Specifically, we are getting the vision_tform_body transform to understand
    # the robot's current position in the vision frame.
    vision_tform_body = get_vision_tform_body(
        robot_state_client.get_robot_state(
        ).kinematic_state.transforms_snapshot)

    # We want to command a trajectory to go forward one meter in the x-direction of the body.
    # It is simple to define this trajectory relative to the body frame, since we know that will be
    # just 1 meter forward in the x-axis of the body.
    # Note that the rotation is just math_helpers.Quat(), which is the identity quaternion. We want the
    # rotation of the body at the goal to match the rotation of the body currently, so we do not need
    # to transform the rotation.
    body_tform_goal = math_helpers.SE3Pose(x=1,
                                           y=0,
                                           z=0,
                                           rot=math_helpers.Quat())
    # We can then transform this transform to get the goal position relative to the vision frame.
    vision_tform_goal = vision_tform_body * body_tform_goal

    # Command the robot to go to the goal point in the vision frame. The command will stop at the new
    # position in the vision frame.
    robot_cmd = RobotCommandBuilder.trajectory_command(
        goal_x=vision_tform_goal.x,
        goal_y=vision_tform_goal.y,
        goal_heading=vision_tform_goal.rot.to_yaw(),
        frame_name=VISION_FRAME_NAME)
    end_time = 2.0
    robot_command_client.robot_command(lease=None,
                                       command=robot_cmd,
                                       end_time_secs=time.time() + end_time)
    time.sleep(end_time)

    # Get new robot state information after moving the robot. Here we are getting the transform odom_tform_body,
    # which describes the robot body's position in the odom frame.
    odom_tform_body = get_odom_tform_body(robot_state_client.get_robot_state().
                                          kinematic_state.transforms_snapshot)

    # We want to command a trajectory to go backwards one meter and to the left one meter.
    # It is simple to define this trajectory relative to the body frame, since we know that will be
    # just 1 meter backwards (negative-value) in the x-axis of the body and one meter left (positive-value)
    # in the y-axis of the body.
    body_tform_goal = math_helpers.SE3Pose(x=-1,
                                           y=1,
                                           z=0,
                                           rot=math_helpers.Quat())
    # We can then transform this transform to get the goal position relative to the odom frame.
    odom_tform_goal = odom_tform_body * body_tform_goal

    # Command the robot to go to the goal point in the odom frame. The command will stop at the new
    # position in the odom frame.
    robot_cmd = RobotCommandBuilder.trajectory_command(
        goal_x=odom_tform_goal.x,
        goal_y=odom_tform_goal.y,
        goal_heading=odom_tform_goal.rot.to_yaw(),
        frame_name=ODOM_FRAME_NAME)
    end_time = 5.0
    robot_command_client.robot_command(lease=None,
                                       command=robot_cmd,
                                       end_time_secs=time.time() + end_time)
    time.sleep(end_time)

    return True
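
Example #9 simply sleeps for the duration of each command. Instead of sleeping, the script could block on trajectory feedback until the robot reports it is at the goal, as Example #8 does; a sketch of the first move written that way (block_for_trajectory_cmd is assumed importable from bosdyn.client.robot_command):

from bosdyn.client.robot_command import block_for_trajectory_cmd

cmd_id = robot_command_client.robot_command(lease=None,
                                            command=robot_cmd,
                                            end_time_secs=time.time() + end_time)
# Poll feedback until the robot reports STATUS_AT_GOAL or end_time passes.
block_for_trajectory_cmd(robot_command_client, cmd_id, timeout_sec=end_time)
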
Example #10
def run_gcode_program(config):
    """A simple example of using the Boston Dynamics API to command a Spot robot."""

    config_parser = configparser.ConfigParser()
    config_parser.read_file(open('gcode.cfg'))
    gcode_file = config_parser.get("General", "gcode_file")
    scale = config_parser.getfloat("General", "scale")
    min_dist_to_goal = config_parser.getfloat("General", "min_dist_to_goal")
    allow_walking = config_parser.getboolean("General", "allow_walking")
    velocity = config_parser.getfloat("General", "velocity")
    press_force_percent = config_parser.getfloat("General", "press_force_percent")
    below_z_is_admittance = config_parser.getfloat("General", "below_z_is_admittance")
    travel_z = config_parser.getfloat("General", "travel_z")
    gcode_start_x = config_parser.getfloat("General", "gcode_start_x")
    gcode_start_y = config_parser.getfloat("General", "gcode_start_y")
    draw_on_wall = config_parser.getboolean("General", "draw_on_wall")
    use_vision_frame = config_parser.getboolean("General", "use_vision_frame")
    use_xy_to_z_cross_term = config_parser.getboolean("General", "use_xy_to_z_cross_term")
    bias_force_x = config_parser.getfloat("General", "bias_force_x")

    if config_parser.has_option("General",
                                "walk_to_at_end_rt_gcode_origin_x") and config_parser.has_option(
                                    "General", "walk_to_at_end_rt_gcode_origin_y"):
        walk_to_at_end_rt_gcode_origin_x = config_parser.getfloat(
            "General", "walk_to_at_end_rt_gcode_origin_x")
        walk_to_at_end_rt_gcode_origin_y = config_parser.getfloat(
            "General", "walk_to_at_end_rt_gcode_origin_y")
    else:
        walk_to_at_end_rt_gcode_origin_x = None
        walk_to_at_end_rt_gcode_origin_y = None

    if velocity <= 0:
        print('Velocity must be greater than 0. Current value:', velocity)
        return

    if use_vision_frame:
        api_send_frame = VISION_FRAME_NAME
    else:
        api_send_frame = ODOM_FRAME_NAME

    # The Boston Dynamics Python library uses Python's logging module to
    # generate output. Applications using the library can specify how
    # the logging information should be output.
    bosdyn.client.util.setup_logging(config.verbose)

    # The SDK object is the primary entry point to the Boston Dynamics API.
    # create_standard_sdk will initialize an SDK object with typical default
    # parameters. The argument passed in is a string identifying the client.
    sdk = bosdyn.client.create_standard_sdk('GcodeClient')

    # A Robot object represents a single robot. Clients using the Boston
    # Dynamics API can manage multiple robots, but this tutorial limits
    # access to just one. The network address of the robot needs to be
    # specified to reach it. This can be done with a DNS name
    # (e.g. spot.intranet.example.com) or an IP literal (e.g. 10.0.63.1)
    robot = sdk.create_robot(config.hostname)

    # Clients need to authenticate to a robot before being able to use it.
    robot.authenticate(config.username, config.password)

    # Establish time sync with the robot. This kicks off a background thread to establish time sync.
    # Time sync is required to issue commands to the robot. After starting time sync thread, block
    # until sync is established.
    robot.time_sync.wait_for_sync()

    # Verify the robot has an arm.
    assert robot.has_arm(), "Robot requires an arm to run the gcode example."

    # Verify the robot is not estopped and that an external application has registered and holds
    # an estop endpoint.
    assert not robot.is_estopped(), "Robot is estopped. Please use an external E-Stop client, " \
                                    "such as the estop SDK example, to configure E-Stop."

    arm_surface_contact_client = robot.ensure_client(ArmSurfaceContactClient.default_service_name)

    # Only one client at a time can operate a robot. Clients acquire a lease to
    # indicate that they want to control a robot. Acquiring may fail if another
    # client is currently controlling the robot. When the client is done
    # controlling the robot, it should return the lease so other clients can
    # control it. Note that the lease is returned as the "finally" condition in this
    # try-catch-finally block.
    lease_client = robot.ensure_client(bosdyn.client.lease.LeaseClient.default_service_name)
    lease = lease_client.acquire()
    try:
        with bosdyn.client.lease.LeaseKeepAlive(lease_client):
            # Now, we are ready to power on the robot. This call will block until the power
            # is on. Commands would fail if this did not happen. We can also check that the robot is
            # powered at any point.
            robot.logger.info("Powering on robot... This may take a several seconds.")
            robot.power_on(timeout_sec=20)
            assert robot.is_powered_on(), "Robot power on failed."
            robot.logger.info("Robot powered on.")

            # Tell the robot to stand up. The command service is used to issue commands to a robot.
            # The set of valid commands for a robot depends on hardware configuration. See
            # SpotCommandHelper for more detailed examples on command building. The robot
            # command service requires timesync between the robot and the client.
            robot.logger.info("Commanding robot to stand...")
            command_client = robot.ensure_client(RobotCommandClient.default_service_name)
            blocking_stand(command_client, timeout_sec=10)
            robot.logger.info("Robot standing.")

            robot_state_client = robot.ensure_client(RobotStateClient.default_service_name)
            # Update state
            robot_state = robot_state_client.get_robot_state()

            gcode = GcodeReader(gcode_file, scale, robot.logger, below_z_is_admittance, travel_z,
                                draw_on_wall, gcode_start_x, gcode_start_y)

            # Prep arm

            # Build a position to move the arm to (in meters, relative to the body frame's origin)
            x = 0.75
            y = 0

            if not draw_on_wall:
                z = -0.35

                qw = .707
                qx = 0
                qy = .707
                qz = 0
            else:
                z = -0.25

                qw = 1
                qx = 0
                qy = 0
                qz = 0

            flat_body_T_hand = math_helpers.SE3Pose(x, y, z,
                                                    math_helpers.Quat(w=qw, x=qx, y=qy, z=qz))
            odom_T_flat_body = get_a_tform_b(robot_state.kinematic_state.transforms_snapshot,
                                             ODOM_FRAME_NAME, GRAV_ALIGNED_BODY_FRAME_NAME)
            odom_T_hand = odom_T_flat_body * flat_body_T_hand

            robot.logger.info('Moving arm to starting position.')

            # Send the request
            odom_T_hand_obj = odom_T_hand.to_proto()

            move_time = 0.000001  # move as fast as possible because we will use (default) velocity/accel limiting.

            arm_command = RobotCommandBuilder.arm_pose_command(
                odom_T_hand_obj.position.x, odom_T_hand_obj.position.y, odom_T_hand_obj.position.z,
                odom_T_hand_obj.rotation.w, odom_T_hand_obj.rotation.x, odom_T_hand_obj.rotation.y,
                odom_T_hand_obj.rotation.z, ODOM_FRAME_NAME, move_time)

            command = RobotCommandBuilder.build_synchro_command(arm_command)

            cmd_id = command_client.robot_command(command)

            # Wait for the move to complete
            block_until_arm_arrives(command_client, cmd_id)

            # Update state and Get the hand position
            robot_state = robot_state_client.get_robot_state()
            (world_T_body, body_T_hand, world_T_hand, odom_T_body) = get_transforms(
                use_vision_frame, robot_state)

            world_T_admittance_frame = geometry_pb2.SE3Pose(
                position=geometry_pb2.Vec3(x=0, y=0, z=0),
                rotation=geometry_pb2.Quaternion(w=1, x=0, y=0, z=0))
            if draw_on_wall:
                # Create an admittance frame that has Z- along the robot's X axis
                xhat_ewrt_robot = [0, 0, 1]
                xhat_ewrt_vo = [0, 0, 0]
                (xhat_ewrt_vo[0],
                 xhat_ewrt_vo[1], xhat_ewrt_vo[2]) = world_T_body.rot.transform_point(
                     xhat_ewrt_robot[0], xhat_ewrt_robot[1], xhat_ewrt_robot[2])
                (z1, z2, z3) = world_T_body.rot.transform_point(-1, 0, 0)
                zhat_temp = [z1, z2, z3]
                zhat = make_orthogonal(xhat_ewrt_vo, zhat_temp)
                yhat = np.cross(zhat, xhat_ewrt_vo)
                mat = np.array([xhat_ewrt_vo, yhat, zhat]).transpose()
                q_wall = Quat.from_matrix(mat)

                zero_vec3 = geometry_pb2.Vec3(x=0, y=0, z=0)
                q_wall_proto = geometry_pb2.Quaternion(w=q_wall.w, x=q_wall.x, y=q_wall.y,
                                                       z=q_wall.z)

                world_T_admittance_frame = geometry_pb2.SE3Pose(position=zero_vec3,
                                                                rotation=q_wall_proto)

            # Touch the ground/wall
            move_arm(robot_state, True, [world_T_hand], arm_surface_contact_client, velocity,
                     allow_walking, world_T_admittance_frame, press_force_percent, api_send_frame,
                     use_xy_to_z_cross_term, bias_force_x)

            time.sleep(4.0)
            last_admittance = True

            # Update state
            robot_state = robot_state_client.get_robot_state()

            # Get the hand position
            (world_T_body, body_T_hand, world_T_hand, odom_T_body) = get_transforms(
                use_vision_frame, robot_state)

            odom_T_ground_plane = get_a_tform_b(robot_state.kinematic_state.transforms_snapshot,
                                                "odom", "gpe")
            world_T_odom = world_T_body * odom_T_body.inverse()

            (gx, gy, gz) = world_T_odom.transform_point(odom_T_ground_plane.x,
                                                        odom_T_ground_plane.y,
                                                        odom_T_ground_plane.z)
            ground_plane_rt_vo = [gx, gy, gz]

            # Compute the robot's position on the ground plane.
            #ground_plane_T_robot = odom_T_ground_plane.inverse() *

            # Compute an origin.
            if not draw_on_wall:
                # For on the ground:
                #   xhat = body x
                #   zhat = (0,0,1)

                # Ensure the origin is gravity aligned, otherwise we get some height drift.
                zhat = [0.0, 0.0, 1.0]
                (x1, x2, x3) = world_T_body.rot.transform_point(1.0, 0.0, 0.0)
                xhat_temp = [x1, x2, x3]
                xhat = make_orthogonal(zhat, xhat_temp)
                yhat = np.cross(zhat, xhat)
                mat = np.array([xhat, yhat, zhat]).transpose()
                vo_Q_origin = Quat.from_matrix(mat)

                world_T_origin = SE3Pose(world_T_hand.x, world_T_hand.y, world_T_hand.z,
                                         vo_Q_origin)
            else:
                # todo should I use the same one?
                world_T_origin = world_T_hand

            gcode.set_origin(world_T_origin, world_T_admittance_frame)
            robot.logger.info('Origin set')

            (is_admittance, world_T_goals,
             is_pause) = gcode.get_next_world_T_goals(ground_plane_rt_vo)

            while is_pause:
                do_pause()
                (is_admittance, world_T_goals,
                 is_pause) = gcode.get_next_world_T_goals(ground_plane_rt_vo)

            if world_T_goals is None:
                # we're done!
                done = True

            move_arm(robot_state, is_admittance, world_T_goals, arm_surface_contact_client,
                     velocity, allow_walking, world_T_admittance_frame, press_force_percent,
                     api_send_frame, use_xy_to_z_cross_term, bias_force_x)
            odom_T_hand_goal = world_T_odom.inverse() * world_T_goals[-1]
            last_admittance = is_admittance

            done = False
            while not done:

                # Update state
                robot_state = robot_state_client.get_robot_state()

                # Determine if we are at the goal point
                (world_T_body, body_T_hand, world_T_hand, odom_T_body) = get_transforms(
                    use_vision_frame, robot_state)

                (gx, gy, gz) = world_T_odom.transform_point(odom_T_ground_plane.x,
                                                            odom_T_ground_plane.y,
                                                            odom_T_ground_plane.z)
                ground_plane_rt_vo = [gx, gy, gz]

                world_T_odom = world_T_body * odom_T_body.inverse()
                odom_T_hand = odom_T_body * body_T_hand

                admittance_frame_T_world = math_helpers.SE3Pose.from_obj(
                    world_T_admittance_frame).inverse()
                admit_frame_T_hand = admittance_frame_T_world * world_T_odom * odom_T_body * body_T_hand
                admit_frame_T_hand_goal = admittance_frame_T_world * world_T_odom * odom_T_hand_goal

                if is_admittance:
                    dist = math.sqrt((admit_frame_T_hand.x - admit_frame_T_hand_goal.x)**2 +
                                     (admit_frame_T_hand.y - admit_frame_T_hand_goal.y)**2)
                    #+ (admit_frame_T_hand.z - admit_frame_T_hand_goal.z)**2 )
                else:
                    dist = math.sqrt((admit_frame_T_hand.x - admit_frame_T_hand_goal.x)**2 +
                                     (admit_frame_T_hand.y - admit_frame_T_hand_goal.y)**2 +
                                     (admit_frame_T_hand.z - admit_frame_T_hand_goal.z)**2)

                arm_near_goal = dist < min_dist_to_goal

                if arm_near_goal:
                    # Compute where to go.
                    (is_admittance, world_T_goals,
                     is_pause) = gcode.get_next_world_T_goals(ground_plane_rt_vo)

                    while is_pause:
                        do_pause()
                        (is_admittance, world_T_goals,
                         is_pause) = gcode.get_next_world_T_goals(ground_plane_rt_vo)

                    if world_T_goals is None:
                        # we're done!
                        done = True
                        robot.logger.info("Gcode program finished.")
                        break

                    move_arm(robot_state, is_admittance, world_T_goals, arm_surface_contact_client,
                             velocity, allow_walking, world_T_admittance_frame, press_force_percent,
                             api_send_frame, use_xy_to_z_cross_term, bias_force_x)
                    odom_T_hand_goal = world_T_odom.inverse() * world_T_goals[-1]

                    if is_admittance != last_admittance:
                        if is_admittance:
                            print('Waiting for touchdown...')
                            time.sleep(3.0)  # pause to wait for touchdown
                        else:
                            time.sleep(1.0)
                    last_admittance = is_admittance
                elif not is_admittance:
                    # We are in a travel move, so we'll keep updating to account for a changing
                    # ground plane.
                    (is_admittance, world_T_goals, is_pause) = gcode.get_next_world_T_goals(
                        ground_plane_rt_vo, read_new_line=False)

            # At the end, walk back to the start.
            robot.logger.info('Done with gcode, going to stand...')
            blocking_stand(command_client, timeout_sec=10)
            robot.logger.info("Robot standing")

            # Compute walking location
            if walk_to_at_end_rt_gcode_origin_x is not None and walk_to_at_end_rt_gcode_origin_y is not None:
                robot.logger.info("Walking to end position...")
                gcode_origin_T_walk = SE3Pose(walk_to_at_end_rt_gcode_origin_x * scale,
                                              walk_to_at_end_rt_gcode_origin_y * scale, 0,
                                              Quat(1, 0, 0, 0))

                odom_T_walk = world_T_odom.inverse() * gcode.world_T_origin * gcode_origin_T_walk

                odom_T_walk_se2 = SE2Pose.flatten(odom_T_walk)

                # Command the robot to go to the end point.
                walk_cmd = RobotCommandBuilder.trajectory_command(
                    goal_x=odom_T_walk_se2.x, goal_y=odom_T_walk_se2.y,
                    goal_heading=odom_T_walk_se2.angle, frame_name="odom")
                end_time = 15.0
                #Issue the command to the robot
                command_client.robot_command(command=walk_cmd, end_time_secs=time.time() + end_time)
                time.sleep(end_time)

            robot.logger.info('Done.')

            # Power the robot off. By specifying "cut_immediately=False", a safe power off command
            # is issued to the robot. This will attempt to sit the robot before powering off.
            robot.power_off(cut_immediately=False, timeout_sec=20)
            assert not robot.is_powered_on(), "Robot power off failed."
            robot.logger.info("Robot safely powered off.")
    finally:
        # If we successfully acquired a lease, return it.
        lease_client.return_lease(lease)
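
run_gcode_program also depends on a get_transforms helper that is not shown. Given how its four return values are used above, a minimal sketch, assuming the standard frame_helpers names (BODY_FRAME_NAME, HAND_FRAME_NAME) are imported alongside the ones already in use:

def get_transforms(use_vision_frame, robot_state):
    # Hypothetical helper: body and hand poses in the chosen world frame
    # ("vision" or "odom"), pulled from the robot-state transform snapshot.
    snapshot = robot_state.kinematic_state.transforms_snapshot
    world_frame = VISION_FRAME_NAME if use_vision_frame else ODOM_FRAME_NAME
    world_T_body = get_a_tform_b(snapshot, world_frame, BODY_FRAME_NAME)
    body_T_hand = get_a_tform_b(snapshot, BODY_FRAME_NAME, HAND_FRAME_NAME)
    odom_T_body = get_a_tform_b(snapshot, ODOM_FRAME_NAME, BODY_FRAME_NAME)
    world_T_hand = world_T_body * body_T_hand
    return world_T_body, body_T_hand, world_T_hand, odom_T_body
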
    def move_relative(robot_command_client,
                      robot_state_client,
                      x,
                      y,
                      yaw,
                      start_frame=None,
                      timeout=10.0,
                      block=False,
                      verbose=False):
        if start_frame is not None:
            vision_tform_body = start_frame
        else:
            # Get robot state information. Specifically, we are getting the vision_tform_body transform to understand
            # the robot's current position in the vision frame.
            vision_tform_body = get_vision_tform_body(
                robot_state_client.get_robot_state(
                ).kinematic_state.transforms_snapshot)

        # We define the goal relative to the body frame: translate by (x, y) meters and rotate by
        # yaw radians from the robot's current pose. The rotation of the goal is built directly
        # from the requested yaw, so no extra transform of the rotation is needed.
        rot_quat = math_helpers.Quat.from_yaw(yaw)

        body_tform_goal = math_helpers.SE3Pose(x=x, y=y, z=0, rot=rot_quat)
        # We can then transform this transform to get the goal position relative to the vision frame.
        vision_tform_goal = vision_tform_body * body_tform_goal

        # print ((vision_tform_body.x, vision_tform_body.y), (vision_tform_goal.x, vision_tform_goal.y), )
        # print (np.rad2deg(vision_tform_body.rotation.to_yaw()), np.rad2deg(vision_tform_goal.rotation.to_yaw()))

        # Command the robot to go to the goal point in the vision frame. The command will stop at the new
        # position in the vision frame.
        robot_cmd = RobotCommandBuilder.trajectory_command(  # or synchro_se2_trajectory_command
            goal_x=vision_tform_goal.x,
            goal_y=vision_tform_goal.y,
            goal_heading=vision_tform_goal.rot.to_yaw(),
            frame_name=VISION_FRAME_NAME)
        # robot_command_client.robot_command(lease=None, command=robot_cmd, end_time_secs=time.time() + timeout)
        command_id = robot_command_client.robot_command(
            lease=None, command=robot_cmd, end_time_secs=time.time() + timeout)

        # This will only issue the command, but it is not blocking. So we wait and check status manually
        now = time.time()
        start_time = time.time()
        end_time = time.time() + timeout
        while now < end_time:
            time_until_timeout = end_time - now
            rpc_timeout = max(time_until_timeout, 1)
            start_call_time = time.time()
            try:
                response = robot_command_client.robot_command_feedback(
                    command_id, timeout=rpc_timeout)
            except TimedOutError:
                # Excuse the TimedOutError and let the while check bail us out if we're out of time.
                print("Response timeout error")
                pass
            else:
                # print (response.status, robot_command_pb2.RobotCommandFeedbackResponse.STATUS_PROCESSING)
                # pdb.set_trace()
                if response.status != robot_command_pb2.RobotCommandFeedbackResponse.STATUS_PROCESSING:
                    raise ValueError(
                        'Trajectory command (ID {}) no longer processing (now {})'.format(
                            command_id, response.Status.Name(response.status)))

                if verbose:
                    print(
                        response.feedback.mobility_feedback.
                        se2_trajectory_feedback.status, basic_command_pb2.
                        SE2TrajectoryCommand.Feedback.STATUS_AT_GOAL)
                if (response.feedback.mobility_feedback.
                        se2_trajectory_feedback.status == basic_command_pb2.
                        SE2TrajectoryCommand.Feedback.STATUS_AT_GOAL):
                    # basic_command_pb2.StandCommand.Feedback.STATUS_IS_STANDING):
                    if verbose: print(response)
                    break
            # delta_t = time.time() - start_call_time
            # time.sleep(max(min(delta_t, update_time), 0.0))
            time.sleep(0.1)
            now = time.time()
        if verbose: print("Took %f sec" % (time.time() - start_time))
Example #12
    def go_to_tag(self, tvec, source_name):
        """Transform the fiducial position to the world frame (kinematic odometry frame)
           Command the robot to move to this position."""
        #Transform the tag position from camera coordinates to world coordinates
        tag_pose_in_camera = np.array([
            float(tvec[0][0]) / 1000.0,
            float(tvec[1][0]) / 1000.0,
            float(tvec[2][0]) / 1000.0
        ])
        tag_pose_in_body = self.transform_to_frame(self._camera_T_body,
                                                   tag_pose_in_camera)
        tag_pose_body_offset = self.offset_tag_pose(tag_pose_in_body)
        self._current_tag_world_pose = self.transform_to_frame(
            self._body_T_world, tag_pose_body_offset)

        #Get the robot's current position in the world
        robot_state = self.robot_state.kinematic_state.ko_tform_body
        robot_angle = self.quat_to_euler(
            (robot_state.rotation.x, robot_state.rotation.y,
             robot_state.rotation.z, robot_state.rotation.w))[2]

        #Compute the heading angle to turn the robot to face the tag
        self._angle_desired = self.get_desired_angle(robot_angle,
                                                     robot_state.position)

        if self._debug:
            print("Camera: " + str(source_name))
            print("Tag pose in camera", tag_pose_in_camera)
            print("Tag pose in body", tag_pose_in_body)
            print("Tag pose in body offsetted", tag_pose_body_offset)
            print("Tag pose in ko", self._current_tag_world_pose)
            print("Robot Pose in ko", robot_state.position)
            print("Robot heading Angle", robot_angle)
            print("Desired heading angle", self._angle_desired)

        #Command the robot to go to the tag in kinematic odometry frame
        frame_name = geometry_pb2.Frame(base_frame=geometry_pb2.FRAME_KO)
        mobility_params = self.set_mobility_params()
        tag_cmd = RobotCommandBuilder.trajectory_command(
            goal_x=self._current_tag_world_pose[0],
            goal_y=self._current_tag_world_pose[1],
            goal_heading=self._angle_desired,
            frame=frame_name,
            params=mobility_params,
            body_height=0.0,
            locomotion_hint=spot_command_pb2.HINT_AUTO)
        end_time = 5.0
        if self._movement_on:
            #Issue the command to the robot
            self._robot_command_client.robot_command(
                lease=None,
                command=tag_cmd,
                end_time_secs=time.time() + end_time)
            # Feedback: check and wait until the robot is in the desired position or timeout
            start_time = time.time()
            current_time = time.time()
            while (not self.final_state()
                   and current_time - start_time < end_time):
                time.sleep(.25)
                current_time = time.time()
        return