Example #1
    def test_operations(self):
        """
        Check consistency & correctness of transformation operations:
            inv, apply, __mul__, identity
        """
        r1_scipy = Rs.random()
        q1_scipy = standardize_quat(r1_scipy.as_quat())
        r1 = R.from_quat(torch.Tensor(q1_scipy))
        p1 = torch.rand(3)
        t1 = T.from_rot_xyz(rotation=r1, translation=p1)

        r2_scipy = Rs.random()
        q2_scipy = standardize_quat(r2_scipy.as_quat())
        r2 = R.from_quat(torch.Tensor(q2_scipy))
        p2 = torch.rand(3)
        t2 = T.from_rot_xyz(rotation=r2, translation=p2)

        v = torch.rand(3)

        # apply()
        assert np.allclose(
            t1.apply(v), r1_scipy.apply(v.numpy()) + p1.numpy(), rtol=1e-3
        )

        # apply + __mul__
        assert np.allclose((t1 * t2).apply(v), t1.apply(t2.apply(v)), rtol=1e-3)

        # inv + __mul__ + identity
        assert np.allclose(
            (t1 * t1.inv()).as_matrix(), T.identity().as_matrix(), atol=1e-3
        )
Example #2
    def grasp_pose_to_pos_quat(self, grasp_pose, z):
        x, y, rz = grasp_pose
        pos = torch.Tensor([x, y, z])
        quat = (R.from_rotvec(torch.Tensor([0, 0, rz])) *
                R.from_quat(self.rest_quat)).as_quat()

        return pos, quat
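A usage sketch for the helper above (hypothetical: `policy` stands for an instance of the surrounding, unshown class, with `rest_quat` already set to the gripper's rest orientation):

# Hypothetical usage; `policy` and the target values are illustrative only.
pos, quat = policy.grasp_pose_to_pos_quat((0.45, -0.10, 1.57), z=0.20)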
Example #3
def test_cartesian_planner(num_steps):
    pose_start = T.from_rot_xyz(
        translation=torch.rand(3),
        rotation=R.from_rotvec(torch.rand(3)),
    )
    pose_goal = T.from_rot_xyz(
        translation=torch.rand(3),
        rotation=R.from_rotvec(torch.rand(3)),
    )

    waypoints = toco.planning.generate_cartesian_space_min_jerk(
        start=pose_start,
        goal=pose_goal,
        time_to_go=TIME_TO_GO,
        hz=num_steps / TIME_TO_GO,
    )

    qx_ls = [waypoint["pose"].translation() for waypoint in waypoints]
    qr_ls = [waypoint["pose"].rotation().as_quat() for waypoint in waypoints]
    qd_ls = [waypoint["twist"] for waypoint in waypoints]
    qdd_ls = [waypoint["acceleration"] for waypoint in waypoints]

    assert torch.allclose(qx_ls[0], pose_start.translation())
    assert torch.allclose(qr_ls[0], pose_start.rotation().as_quat())
    assert torch.allclose(qx_ls[-1], pose_goal.translation())
    assert torch.allclose(qr_ls[-1], pose_goal.rotation().as_quat())

    output_dict = {
        "qx_arr": torch.stack(qx_ls),
        "qr_arr": torch.stack(qr_ls),
        "qd_arr": torch.stack(qd_ls),
        "qdd_arr": torch.stack(qdd_ls),
    }
    record_or_compare(f"module_planning_cartesian_{num_steps}", output_dict)
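`record_or_compare` is not defined in these snippets; several tests here use it as a golden-file regression helper. A minimal sketch of the assumed behavior (the storage path and `.pt` format are assumptions):

import os
import torch

def record_or_compare(name, output_dict, atol=1e-6):
    """Record outputs on the first run; compare against the recording afterwards."""
    path = os.path.join("data", f"{name}.pt")  # assumed storage location
    if not os.path.exists(path):
        torch.save(output_dict, path)
    else:
        recorded = torch.load(path)
        for key, value in output_dict.items():
            assert torch.allclose(value, recorded[key], atol=atol), key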
Example #4
def test_new_ee_pose(robot, ee_pos_desired, ee_quat_desired):
    ee_pos, ee_quat = robot.get_ee_pose()
    print(f"Desired ee pose: pos={ee_pos_desired}, quat={ee_quat_desired}")
    print(f"New ee pose: pos={ee_pos}, quat={ee_quat}")

    assert torch.allclose(ee_pos, ee_pos_desired, atol=0.005)
    assert (R.from_quat(ee_quat).inv() *
            R.from_quat(ee_quat_desired)).magnitude() < 0.0174  # 1 degree

    return ee_pos, ee_quat
Example #5
    def move_to(self, pos, quat, time_to_go=2.0):
        """
        Attempts to move to the given position and orientation by
        planning a Cartesian trajectory (a set of min-jerk waypoints)
        and updating the current policy's target to that
        end-effector position & orientation.

        Returns (num successes, num attempts)
        """
        # Plan trajectory
        pos_curr, quat_curr = self.arm.get_ee_pose()
        N = int(time_to_go / PLANNER_DT)

        waypoints = toco.planning.generate_cartesian_space_min_jerk(
            start=T.from_rot_xyz(R.from_quat(quat_curr), pos_curr),
            goal=T.from_rot_xyz(R.from_quat(quat), pos),
            time_to_go=time_to_go,
            hz=1 / PLANNER_DT,
        )

        # Execute trajectory
        t0 = time.time()
        t_target = t0
        successes = 0
        for i in range(N):
            # Update traj
            ee_pos_desired = waypoints[i]["pose"].translation()
            ee_quat_desired = waypoints[i]["pose"].rotation().as_quat()
            # ee_twist_desired = waypoints[i]["twist"]
            self.arm.update_current_policy({
                "ee_pos_desired": ee_pos_desired,
                "ee_quat_desired": ee_quat_desired,
                # "ee_vel_desired": ee_twist_desired[:3],
                # "ee_rvel_desired": ee_twist_desired[3:],
            })

            # Check if policy terminated due to issues
            if self.arm.get_previous_interval().end != -1:
                print("Interrupt detected. Reinstantiating control policy...")
                time.sleep(3)
                self.reset_policy()
                break
            else:
                successes += 1

            # Spin once
            t_target += PLANNER_DT
            t_remaining = t_target - time.time()
            time.sleep(max(t_remaining, 0.0))

        # Wait for robot to stabilize
        time.sleep(0.2)

        return successes, N
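A usage sketch (hypothetical: `controller` is an instance of the surrounding class; `pos` and `quat` are a reachable target, e.g. from `grasp_pose_to_pos_quat`):

# Hypothetical usage; the names are illustrative only.
successes, attempts = controller.move_to(pos, quat, time_to_go=2.0)
print(f"Tracked {successes}/{attempts} waypoints without interruption")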
Example #6
def main(argv):
    if len(argv) > 1:
        try:
            max_iters = int(argv[1])
        except ValueError:
            print("Usage: python 4_free_space.py <max_iterations>")
            return
    else:
        max_iters = DEFAULT_MAX_ITERS

    # Initialize robot interface
    robot = RobotInterface(ip_address="localhost")
    hz = robot.metadata.hz
    time.sleep(0.5)

    # Get reference state
    pos0, quat0 = robot.get_ee_pose()

    # Setup sampling
    pos_range_upper = torch.Tensor(POS_RANGE_UPPER)
    pos_range_lower = torch.Tensor(POS_RANGE_LOWER)
    ori_range = torch.Tensor(ORI_RANGE)

    # Random movements
    i = 0
    try:
        while True:
            robot.go_home()

            # Sample pose
            pos_sampled = uniform_sample(pos_range_lower, pos_range_upper)
            ori_sampled = R.from_quat(quat0) * R.from_rotvec(
                uniform_sample(-ori_range, ori_range))

            # Move to pose
            print(
                f"Moving to random pose ({i + 1}): pos={pos_sampled}, quat={ori_sampled.as_quat()}"
            )
            state_log = robot.move_to_ee_pose(
                position=pos_sampled,
                orientation=ori_sampled.as_quat(),
            )
            print(f"\t Episode length: {len(state_log)}")

            # Loop termination
            i += 1
            if max_iters > 0 and i >= max_iters:
                break

    except KeyboardInterrupt:
        print("Interrupted by user.")
Example #7
def test_cartesian_joint_planner(num_steps):
    joint_start = torch.rand(N_DOFS)
    pose_goal = T.from_rot_xyz(
        translation=torch.rand(3),
        rotation=R.from_quat(torch.Tensor([0, 0, 0, 1])),
    )
    robot_model = FakeRobotModel(N_DOFS)

    waypoints = toco.planning.generate_cartesian_target_joint_min_jerk(
        joint_pos_start=joint_start,
        ee_pose_goal=pose_goal,
        time_to_go=TIME_TO_GO,
        hz=num_steps / TIME_TO_GO,
        robot_model=robot_model,
    )

    q_ls = [waypoint["position"] for waypoint in waypoints]
    qd_ls = [waypoint["velocity"] for waypoint in waypoints]
    qdd_ls = [waypoint["acceleration"] for waypoint in waypoints]

    assert torch.allclose(q_ls[0], joint_start)

    output_dict = {
        "q_arr": torch.stack(q_ls),
        "qd_arr": torch.stack(qd_ls),
        "qdd_arr": torch.stack(qdd_ls),
    }
    record_or_compare(f"module_planning_cartesian_joints_{num_steps}",
                      output_dict)
Example #8
    def test_axis_magnitude(self):
        """
        Tests that axis() returns a unit vector, and that axis & magnitude
        are consistent with as_rotvec.
        """
        r_scipy = Rs.random()
        r = R.from_quat(torch.Tensor(r_scipy.as_quat()))
        assert np.allclose(torch.norm(r.axis()), torch.ones(1), rtol=1e-7)
        assert np.allclose((r.axis() * r.magnitude()), r.as_rotvec(), rtol=1e-7)
Example #9
def test_cartesian_pd():
    Kp = torch.rand(6, 6)
    Kd = torch.rand(6, 6)
    ee_pose_current = T.from_rot_xyz(
        translation=torch.rand(3),
        rotation=R.from_rotvec(torch.rand(3)),
    )
    ee_pose_desired = T.from_rot_xyz(
        translation=torch.rand(3),
        rotation=R.from_rotvec(torch.rand(3)),
    )
    ee_twist_current = torch.rand(6)
    ee_twist_desired = torch.rand(6)

    controller = CartesianSpacePD(Kp=Kp, Kd=Kd)
    output = controller(ee_pose_current, ee_twist_current, ee_pose_desired,
                        ee_twist_desired)

    record_or_compare("module_feedback_cpd", {"output": output})
Example #10
    def test_operations(self):
        """
        Check consistency & correctness of rotation operations:
            inv, apply, __mul__, identity
        """
        r1_scipy = Rs.random()
        q1_scipy = standardize_quat(r1_scipy.as_quat())
        r1 = R.from_quat(torch.Tensor(q1_scipy))
        r2_scipy = Rs.random()
        q2_scipy = standardize_quat(r2_scipy.as_quat())
        r2 = R.from_quat(torch.Tensor(q2_scipy))

        v = torch.rand(3)

        # inv
        assert np.allclose(
            standardize_quat(r1.inv().as_quat()),
            standardize_quat(r1_scipy.inv().as_quat()),
            atol=1e-3,
        )

        # apply
        assert np.allclose(r1.apply(v), r1_scipy.apply(v.numpy()), rtol=1e-3)

        # __mul__
        assert np.allclose(
            standardize_quat((r1 * r2).as_quat()),
            standardize_quat((r1_scipy * r2_scipy).as_quat()),
            atol=1e-3,
        )

        # apply + __mul__
        assert np.allclose((r1 * r2).apply(v), r1.apply(r2.apply(v)), rtol=1e-3)

        # inv + __mul__ + identity
        assert np.allclose(
            standardize_quat((r1 * r1.inv()).as_quat()),
            standardize_quat(R.identity().as_quat()),
            atol=1e-3,
        )
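`standardize_quat` is not shown in these snippets. Since q and -q encode the same rotation, the tests presumably canonicalize the quaternion sign before comparing; a minimal sketch under that assumption (scipy's (x, y, z, w) ordering, non-negative w):

import numpy as np

def standardize_quat(q):
    """Flip the quaternion sign so the scalar (w) component is non-negative.
    The exact convention used by the tests is an assumption."""
    q = np.asarray(q)
    return -q if q[3] < 0 else q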
Example #11
    def test_creation_conversion(self):
        """
        Tests conversions between quaternion, rotation matrix, and rotation vector
        Checks against scipy.spatial.transforms.Rotation
        """
        r_scipy = Rs.random()
        q_scipy = standardize_quat(r_scipy.as_quat())
        m_scipy = r_scipy.as_matrix()
        rv_scipy = r_scipy.as_rotvec()

        rt_q = R.from_quat(torch.Tensor(q_scipy))
        assert np.allclose(standardize_quat(rt_q.as_quat()), q_scipy, atol=1e-3)
        assert np.allclose(rt_q.as_matrix(), m_scipy, rtol=1e-3)
        assert np.allclose(rt_q.as_rotvec(), rv_scipy, rtol=1e-3)

        rt_m = R.from_matrix(torch.Tensor(m_scipy))
        assert np.allclose(standardize_quat(rt_m.as_quat()), q_scipy, atol=1e-3)
        assert np.allclose(rt_m.as_matrix(), m_scipy, rtol=1e-3)
        assert np.allclose(rt_m.as_rotvec(), rv_scipy, rtol=1e-3)

        rt_rv = R.from_rotvec(torch.Tensor(rv_scipy))
        assert np.allclose(standardize_quat(rt_rv.as_quat()), q_scipy, atol=1e-3)
        assert np.allclose(rt_rv.as_matrix(), m_scipy, rtol=1e-3)
        assert np.allclose(rt_rv.as_rotvec(), rv_scipy, rtol=1e-3)
Example #12
    def move_to_ee_pose(
        self,
        position: torch.Tensor,
        orientation: torch.Tensor = None,
        time_to_go: float = None,
        delta: bool = False,
        Kx: torch.Tensor = None,
        Kxd: torch.Tensor = None,
        **kwargs,
    ) -> List[RobotState]:
        """Uses an operational space controller to move to a desired end-effector position (and, optionally orientation).
        Args:
            positions: Desired target end-effector position.
            positions: Desired target end-effector orientation (quaternion).
            time_to_go: Amount of time to execute the motion. Uses an adaptive value if not specified (see `_adaptive_time_to_go` for details).
            delta: Whether the specified `position` and `orientation` are relative to current pose or absolute.
            Kx: P gains for the tracking controller. Uses default values if not specified.
            Kxd: D gains for the tracking controller. Uses default values if not specified.

        Returns:
            Same as `send_torch_policy`
        """
        assert (
            self.robot_model is not None
        ), "Robot model not assigned! Call 'set_robot_model(<path_to_urdf>, <ee_link_name>)' to enable use of dynamics controllers"

        ee_pos_current, ee_quat_current = self.get_ee_pose()

        # Parse parameters
        ee_pos_desired = torch.Tensor(position)
        if delta:
            ee_pos_desired += ee_pos_current

        if orientation is None:
            ee_quat_desired = ee_quat_current
        else:
            assert (len(orientation) == 4
                    ), "Only quaternions are accepted as orientation inputs."
            ee_quat_desired = torch.Tensor(orientation)
            if delta:
                ee_quat_desired = (R.from_quat(ee_quat_desired) *
                                   R.from_quat(ee_quat_current)).as_quat()

        ee_pose_current = T.from_rot_xyz(rotation=R.from_quat(ee_quat_current),
                                         translation=ee_pos_current)
        ee_pose_desired = T.from_rot_xyz(rotation=R.from_quat(ee_quat_desired),
                                         translation=ee_pos_desired)
        # Roughly estimate joint diff by linearizing around current joint pose
        joint_pos_current = self.get_joint_positions()
        jacobian = self.robot_model.compute_jacobian(joint_pos_current)

        ee_pose_diff = ee_pose_desired * ee_pose_current.inv()
        joint_pos_diff = torch.linalg.pinv(jacobian) @ ee_pose_diff.as_twist()
        time_to_go_adaptive = self._adaptive_time_to_go(joint_pos_diff)

        if time_to_go is None:
            time_to_go = time_to_go_adaptive
        elif time_to_go < time_to_go_adaptive:
            log.warning(
                "The specified 'time_to_go' might not be large enough to ensure accurate movement."
            )

        # Plan trajectory
        waypoints = toco.planning.generate_cartesian_space_min_jerk(
            start=ee_pose_current,
            goal=ee_pose_desired,
            time_to_go=time_to_go,
            hz=self.hz,
        )

        # Create & execute policy
        torch_policy = toco.policies.EndEffectorTrajectoryExecutor(
            ee_pose_trajectory=[waypoint["pose"] for waypoint in waypoints],
            ee_twist_trajectory=[waypoint["twist"] for waypoint in waypoints],
            # Explicit None checks: `tensor or default` raises for multi-element tensors
            Kp=Kx if Kx is not None else self.Kx_default,
            Kd=Kxd if Kxd is not None else self.Kxd_default,
            robot_model=self.robot_model,
            ignore_gravity=self.use_grav_comp,
        )

        return self.send_torch_policy(torch_policy=torch_policy, **kwargs)
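A usage sketch (hypothetical values; `robot` is an instance of this interface with a robot model assigned): shift the end effector 5 cm upward while keeping the current orientation.

# Hypothetical usage; requires robot.set_robot_model(<path_to_urdf>, <ee_link_name>) first.
state_log = robot.move_to_ee_pose(
    position=torch.Tensor([0.0, 0.0, 0.05]),  # 5 cm along +z
    orientation=None,  # keep the current orientation
    delta=True,  # interpret `position` as an offset from the current pose
)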
Example #13
def generate_cartesian_space_min_jerk(
    start: T.TransformationObj,
    goal: T.TransformationObj,
    time_to_go: float,
    hz: float,
) -> List[Dict]:
    """Initializes planner object and plans the trajectory

    Args:
        start: Start pose
        goal: Goal pose
        time_to_go: Trajectory duration in seconds
        hz: Frequency of output trajectory

    Returns:
        waypoints: List of waypoint dicts, each with keys
            "time_from_start", "pose", "twist", and "acceleration"
    """
    steps = _compute_num_steps(time_to_go, hz)
    dt = 1.0 / hz

    p_traj, pd_traj, pdd_traj = _min_jerk_spaces(steps, time_to_go)

    # Plan translation
    x_start = start.translation()
    x_goal = goal.translation()

    D = x_goal - x_start
    x_traj = x_start[None, :] + D[None, :] * p_traj[:, None]
    xd_traj = D[None, :] * pd_traj[:, None]
    xdd_traj = D[None, :] * pdd_traj[:, None]

    # Plan rotation
    r_start = start.rotation()
    r_goal = goal.rotation()
    r_delta = r_goal * r_start.inv()
    rv_delta = r_delta.as_rotvec()

    r_traj = torch.empty([steps, 4])
    for i in range(steps):
        r = R.from_rotvec(rv_delta * p_traj[i]) * r_start
        r_traj[i, :] = r.as_quat()
    rd_traj = rv_delta[None, :] * pd_traj[:, None]
    rdd_traj = rv_delta[None, :] * pdd_traj[:, None]

    # Combine results
    ee_twist_traj = torch.cat([xd_traj, rd_traj], dim=-1)
    ee_accel_traj = torch.cat([xdd_traj, rdd_traj], dim=-1)

    waypoints = [
        {
            "time_from_start": i * dt,
            "pose": T.from_rot_xyz(
                rotation=R.from_quat(r_traj[i, :]), translation=x_traj[i, :]
            ),
            "twist": ee_twist_traj[i, :],
            "acceleration": ee_accel_traj[i, :],
        }
        for i in range(steps)
    ]

    return waypoints
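The helpers `_compute_num_steps` and `_min_jerk_spaces` are not shown in these snippets. A sketch consistent with how they are used above, based on the standard quintic minimum-jerk profile (the library's actual implementation may differ):

import torch

def _compute_num_steps(time_to_go: float, hz: float) -> int:
    # Assumed behavior: number of waypoints over the trajectory duration.
    return int(time_to_go * hz)

def _min_jerk_spaces(steps: int, time_to_go: float):
    """Scalar minimum-jerk profiles p, p', p'' sampled at `steps` points.
    Uses the standard quintic 10*s**3 - 15*s**4 + 6*s**5, which has zero
    velocity and acceleration at both endpoints; s is normalized time in [0, 1].
    """
    s = torch.linspace(0, 1, steps)
    p = 10 * s**3 - 15 * s**4 + 6 * s**5
    pd = (30 * s**2 - 60 * s**3 + 30 * s**4) / time_to_go
    pdd = (60 * s - 180 * s**2 + 120 * s**3) / time_to_go**2
    return p, pd, pdd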
Example #14
def generate_cartesian_target_joint_min_jerk(
    joint_pos_start: torch.Tensor,
    ee_pose_goal: T.TransformationObj,
    time_to_go: float,
    hz: float,
    robot_model: torch.nn.Module,
) -> List[Dict]:
    """
    Cartesian space minimum jerk trajectory planner, but outputs plan in joint space.
    Assumes zero velocity & acceleration at start & goal.

    Args:
        joint_pos_start: Start joint positions
        ee_pose_goal: Goal end-effector pose
        time_to_go: Trajectory duration in seconds
        hz: Frequency of output trajectory
        robot_model: A valid robot model module from torchcontrol.models

    Returns:
        waypoints: List of waypoint dicts, each with keys
            "time_from_start", "position", "velocity", and "acceleration"
    """
    steps = _compute_num_steps(time_to_go, hz)
    dt = 1.0 / hz

    # Compute start pose
    ee_pos_start, ee_quat_start = robot_model.forward_kinematics(joint_pos_start)
    ee_pose_start = T.from_rot_xyz(
        rotation=R.from_quat(ee_quat_start), translation=ee_pos_start
    )
    cartesian_waypoints = generate_cartesian_space_min_jerk(
        ee_pose_start, ee_pose_goal, time_to_go, hz
    )

    # Extract plan & convert to joint space
    q_traj = torch.zeros(steps, joint_pos_start.shape[0])
    qd_traj = torch.zeros(steps, joint_pos_start.shape[0])
    qdd_traj = torch.zeros(steps, joint_pos_start.shape[0])

    q_traj[0, :] = joint_pos_start
    for i in range(0, steps - 1):
        # Get current joint state & jacobian
        joint_pos_current = q_traj[i, :]
        jacobian = robot_model.compute_jacobian(joint_pos_current)
        jacobian_pinv = torch.pinverse(jacobian)

        # Query Cartesian plan for next step & compute diff
        ee_pose_desired = cartesian_waypoints[i + 1]["pose"]
        ee_twist_desired = cartesian_waypoints[i + 1]["twist"]
        ee_accel_desired = cartesian_waypoints[i + 1]["acceleration"]

        # Convert next step to joint plan
        qdd_traj[i + 1, :] = jacobian_pinv @ ee_accel_desired
        qd_traj[i + 1, :] = jacobian_pinv @ ee_twist_desired
        q_delta = qd_traj[i + 1, :] * dt
        q_traj[i + 1, :] = joint_pos_current + q_delta

        # Null space correction
        null_space_proj = torch.eye(joint_pos_start.shape[0]) - jacobian_pinv @ jacobian
        q_null_err = -null_space_proj @ q_traj[i + 1, :]
        q_null_err_norm = q_null_err.norm() + 1e-27  # prevent zero division
        q_null_err_clamped = (
            q_null_err / q_null_err_norm * min(q_null_err_norm, q_delta.norm())
        )  # norm of correction clamped to norm of current action
        q_traj[i + 1, :] = q_traj[i + 1, :] + q_null_err_clamped

    waypoints = [
        {
            "time_from_start": i * dt,
            "position": q_traj[i, :],
            "velocity": qd_traj[i, :],
            "acceleration": qdd_traj[i, :],
        }
        for i in range(steps)
    ]

    return waypoints
Example #15
         joint_vel_trajectory=[
             torch.rand(num_dofs) for _ in range(time_horizon)
         ],
         Kp=torch.rand(num_dofs, num_dofs),
         Kd=torch.rand(num_dofs, num_dofs),
         robot_model=robot_model,
         ignore_gravity=True,
     ),
     True,
     None,
 ),
 (
     toco.policies.EndEffectorTrajectoryExecutor,
     dict(
         ee_pose_trajectory=[
             T.from_rot_xyz(rotation=R.from_rotvec(torch.rand(3)),
                            translation=torch.rand(3))
             for _ in range(time_horizon)
         ],
         ee_twist_trajectory=[torch.rand(6) for _ in range(time_horizon)],
         Kp=torch.rand(6, 6),
         Kd=torch.rand(6, 6),
         robot_model=robot_model,
         ignore_gravity=True,
     ),
     True,
     None,
 ),
 (
     toco.policies.iLQR,
     dict(