Example #1

    # Fragment: ManipulatorSystem.__init__ (the full ManipulatorSystem class appears in Example #12 below).
    def __init__(self, robot_kwargs=None, gripper_kwargs=None):
        self.arm = RobotInterface(**(robot_kwargs or {}))
        self.gripper = GripperInterface(**(gripper_kwargs or {}))
        time.sleep(0.5)

        # Set continuous control policy
        self.reset_policy()

        # Reset to rest pose
        self.rest_pos = torch.Tensor(REST_POSE[0])
        self.rest_quat = torch.Tensor(REST_POSE[1])
        self.reset()
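This fragment assumes module-level imports and a REST_POSE constant that the listing does not show. A minimal sketch of that preamble, with an illustrative (not original) rest pose:

import time

import torch

from polymetis import GripperInterface, RobotInterface

# Hypothetical rest pose: (position xyz, orientation quaternion xyzw).
# The original REST_POSE values are not in the listing.
REST_POSE = ([0.5, 0.0, 0.4], [1.0, 0.0, 0.0, 0.0])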
Example #2
import time

import torch

from polymetis import RobotInterface
from utils import check_episode_log


def connect_and_send_policy():
    try:
        # Initialize robot interface
        robot = RobotInterface(
            ip_address="localhost",
        )
        hz = robot.metadata.hz
        robot.go_home()
        time.sleep(0.5)

        # Get joint positions
        joint_pos = robot.get_joint_positions()
        print(f"Initial joint positions: {joint_pos}")

        # Go to joint positions
        print("=== RobotInterface.move_to_joint_positions ===")
        delta_joint_pos_desired = torch.Tensor([0.0, 0.0, 0.0, 0.5, 0.0, -0.5, 0.0])
        joint_pos_desired = joint_pos + delta_joint_pos_desired

        time_to_go = 4.0
        state_log = robot.move_to_joint_positions(
            joint_pos_desired, time_to_go=time_to_go
        )
        check_episode_log(state_log, int(time_to_go * hz))

        joint_pos = robot.get_joint_positions()
        assert torch.allclose(joint_pos, joint_pos_desired, atol=0.01)

        success.append(True)
    except Exception as e:
        exceptions.append(e)
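The function appends to module-level success and exceptions lists and is presumably driven by a small harness that the listing omits. A minimal sketch of that assumed harness:

# Assumed module-level collectors for the test harness (not shown in the listing).
success = []
exceptions = []

if __name__ == "__main__":
    connect_and_send_policy()
    for e in exceptions:
        raise e
    assert success, "connect_and_send_policy() did not complete"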
Example #3
import time

import torch
from scipy.spatial.transform import Rotation as R  # assumed source of R (from_quat/from_rotvec/as_quat)

from polymetis import RobotInterface


def main(argv):
    if len(argv) > 1:
        try:
            max_iters = int(argv[1])
        except ValueError:
            print("Usage: python 4_free_space.py <max_iterations>")
            return
    else:
        max_iters = DEFAULT_MAX_ITERS

    # Initialize robot interface
    robot = RobotInterface(ip_address="localhost", )
    hz = robot.metadata.hz
    time.sleep(0.5)

    # Get reference state
    pos0, quat0 = robot.get_ee_pose()

    # Setup sampling
    pos_range_upper = torch.Tensor(POS_RANGE_UPPER)
    pos_range_lower = torch.Tensor(POS_RANGE_LOWER)
    ori_range = torch.Tensor(ORI_RANGE)

    # Random movements
    i = 0
    try:
        while True:
            robot.go_home()

            # Sample pose
            pos_sampled = uniform_sample(pos_range_lower, pos_range_upper)
            ori_sampled = R.from_quat(quat0) * R.from_rotvec(
                uniform_sample(-ori_range, ori_range))

            # Move to pose
            print(
                f"Moving to random pose ({i + 1}): pos={pos_sampled}, quat={ori_sampled.as_quat()}"
            )
            state_log = robot.move_to_ee_pose(
                position=pos_sampled,
                orientation=ori_sampled.as_quat(),
            )
            print(f"\t Episode length: {len(state_log)}")

            # Loop termination
            i += 1
            if max_iters > 0 and i >= max_iters:
                break

    except KeyboardInterrupt:
        print("Interrupted by user.")
Example #4
import time

import torch
import torchcontrol as toco

from polymetis import RobotInterface

num_dofs = 7
time_to_go = 0.1


def run_unending_policy(robot, policy, time_to_go):
    robot.send_torch_policy(policy, blocking=False)
    time.sleep(time_to_go)
    robot.terminate_current_policy()


if __name__ == "__main__":
    # Initialize robot interface
    robot = RobotInterface(
        ip_address="localhost",
    )
    robot.go_home()
    time.sleep(0.5)

    # Params
    hz = robot.metadata.hz
    robot_model = robot.robot_model
    joint_pos_current = robot.get_joint_positions()
    time_horizon = int(time_to_go * hz)

    # Run torchcontrol policies
    print("=== torchcontrol.policies.JointImpedanceControl ===")
    policy = toco.policies.JointImpedanceControl(
        joint_pos_current=joint_pos_current,
        Kp=torch.zeros(num_dofs, num_dofs),
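The listing cuts this example off mid-constructor. A hedged completion: assuming JointImpedanceControl also takes a Kd gain matrix and the robot model (check the exact signature in your torchcontrol version), the policy can then be run with the run_unending_policy helper defined above:

        Kd=torch.zeros(num_dofs, num_dofs),
        robot_model=robot_model,
    )
    run_unending_policy(robot, policy, time_to_go)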
Example #5

# Copyright (c) Facebook, Inc. and its affiliates.

# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch

from polymetis import RobotInterface

if __name__ == "__main__":
    # Initialize robot interface
    robot = RobotInterface(ip_address="localhost", )

    # Reset
    robot.go_home()

    # Get joint positions
    joint_positions = robot.get_joint_positions()
    print(f"Current joint positions: {joint_positions}")

    # Command robot to pose (move 4th and 6th joint)
    joint_positions_desired = torch.Tensor(
        [-0.14, -0.02, -0.05, -1.57, 0.05, 1.50, -0.91])
    print(f"\nMoving joints to: {joint_positions_desired} ...\n")
    state_log = robot.move_to_joint_positions(joint_positions_desired,
                                              time_to_go=2.0)

    # Get updated joint positions
    joint_positions = robot.get_joint_positions()
    print(f"New joint positions: {joint_positions}")
Example #6
import time

import torch

from polymetis import RobotInterface
from utils import check_episode_log


def test_new_joint_pos(robot, joint_pos_desired):
    joint_pos = robot.get_joint_positions()
    print(f"Desired joint positions: {joint_pos_desired}")
    print(f"New joint positions: {joint_pos}")
    assert torch.allclose(joint_pos, joint_pos_desired, atol=0.01)
    return joint_pos


if __name__ == "__main__":
    # Initialize robot interface
    robot = RobotInterface(ip_address="localhost", )
    hz = robot.metadata.hz
    robot.go_home()
    time.sleep(0.5)

    # Get joint positions
    joint_pos = robot.get_joint_positions()
    print(f"Initial joint positions: {joint_pos}")

    # Go to joint positions
    print("=== RobotInterface.move_to_joint_positions ===")
    delta_joint_pos_desired = torch.Tensor(
        [0.0, 0.0, 0.0, 0.5, 0.0, -0.5, 0.0])
    joint_pos_desired = joint_pos + delta_joint_pos_desired

    state_log = robot.move_to_joint_positions(joint_pos_desired)
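The listing truncates this example right after the motion command; a likely continuation, reusing the helper defined above (the original tail is not shown):

    print(f"Episode length: {len(state_log)}")
    joint_pos = test_new_joint_pos(robot, joint_pos_desired)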
Example #7

import numpy as np

from polymetis import RobotInterface


def output_episode_stats(episode_name, robot_states):
    latency_arr = np.array([
        robot_state.prev_controller_latency_ms for robot_state in robot_states
    ])
    latency_mean = np.mean(latency_arr)
    latency_std = np.std(latency_arr)
    latency_max = np.max(latency_arr)
    latency_min = np.min(latency_arr)

    print(
        f"{episode_name}: {latency_mean:.4f} / {latency_std:.4f} / {latency_max:.4f} / {latency_min:.4f}"
    )


if __name__ == "__main__":
    robot = RobotInterface()

    print(
        "Control loop latency stats in milliseconds (avg / std / max / min): ")

    # Test joint PD
    robot_states = robot.move_to_joint_positions(robot.get_joint_positions())
    output_episode_stats("Joint PD", robot_states)

    # Test cartesian PD
    robot_states = robot.move_to_ee_pose(robot.get_ee_pose()[0])
    output_episode_stats("Cartesian PD", robot_states)
Example #8
# Copyright (c) Facebook, Inc. and its affiliates.

# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch

from polymetis import RobotInterface


if __name__ == "__main__":
    # Initialize robot interface
    robot = RobotInterface(
        ip_address="localhost",
    )

    # Reset
    robot.go_home()

    # Get ee pose
    ee_pos, ee_quat = robot.get_ee_pose()
    print(f"Current ee position: {ee_pos}")
    print(f"Current ee orientation: {ee_quat}  (xyzw)")

    # Command robot to ee xyz position
    ee_pos_desired = torch.Tensor([0.5, 0.0, 0.4])
    print(f"\nMoving ee pos to: {ee_pos_desired} ...\n")
    state_log = robot.move_to_ee_pose(
        position=ee_pos_desired, orientation=None, time_to_go=2.0
    )

    # Get updated ee pose
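The snippet ends here; a likely continuation, mirroring the joint-space version in Example #5:

    ee_pos, ee_quat = robot.get_ee_pose()
    print(f"New ee position: {ee_pos}")
    print(f"New ee orientation: {ee_quat}  (xyzw)")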
Example #9

# Copyright (c) Facebook, Inc. and its affiliates.

# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import time

from polymetis import RobotInterface

if __name__ == "__main__":
    # Initialize robot interface
    robot = RobotInterface(ip_address="localhost", )

    # Reset
    robot.go_home()

    # Joint impedance control
    joint_positions = robot.get_joint_positions()

    print("Performing joint impedance control...")
    robot.start_joint_impedance()

    for i in range(40):
        joint_positions += torch.Tensor([0.0, 0.0, 0.0, 0.0, 0.0, -0.015, 0.0])
        robot.update_desired_joint_positions(joint_positions)
        time.sleep(0.1)

    robot.terminate_current_policy()

    # Cartesian impedance control
    print("Performing Cartesian impedance control...")
Example #10
import time

import torch
from scipy.spatial.transform import Rotation as R  # assumed source of R (from_quat/inv/magnitude)

from polymetis import RobotInterface


def test_new_ee_pose(robot, ee_pos_desired, ee_quat_desired):
    ee_pos, ee_quat = robot.get_ee_pose()
    print(f"Desired ee pose: pos={ee_pos_desired}, quat={ee_quat_desired}")
    print(f"New ee pose: pos={ee_pos}, quat={ee_quat}")

    assert torch.allclose(ee_pos, ee_pos_desired, atol=0.005)
    assert (R.from_quat(ee_quat).inv() *
            R.from_quat(ee_quat_desired)).magnitude() < 0.0174  # 1 degree

    return ee_pos, ee_quat


if __name__ == "__main__":
    # Initialize robot interface
    robot = RobotInterface(ip_address="localhost", )
    hz = robot.metadata.hz
    robot.go_home()
    time.sleep(0.5)

    # Get ee pose
    ee_pos, ee_quat = robot.get_ee_pose()
    print(f"Initial ee pose: pos={ee_pos}, quat={ee_quat}")

    # Go to ee_pose
    print("=== RobotInterface.move_to_ee_pose ===")
    ee_pos_desired = ee_pos + torch.Tensor([0.0, 0.05, -0.05])
    ee_quat_desired = torch.Tensor([1, 0, 0, 0])  # pointing straight down
    time_to_go = 4.0

    state_log = robot.move_to_ee_pose(ee_pos_desired, ee_quat_desired,
                                      time_to_go=time_to_go)
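The original tail of this example is not shown; a plausible follow-up, reusing the test helper defined at the top of the snippet:

    print(f"Episode length: {len(state_log)}")
    test_new_ee_pose(robot, ee_pos_desired, ee_quat_desired)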
Example #11

# Fragment: body of MyPDPolicy.forward(self, state_dict); the surrounding
# class definition is truncated in the listing (a hedged reconstruction
# follows the fragment).
        # Parse states
        q_current = state_dict["joint_positions"]
        qd_current = state_dict["joint_velocities"]

        # Execute PD control
        output = self.feedback(
            q_current, qd_current, self.q_desired, torch.zeros_like(qd_current)
        )

        return {"joint_torques": output}


if __name__ == "__main__":
    # Initialize robot interface
    robot = RobotInterface(
        ip_address="localhost",
    )

    # Reset
    robot.go_home()

    # Create policy instance
    q_initial = robot.get_joint_positions()
    default_kq = torch.Tensor(robot.metadata.default_Kq)
    default_kqd = torch.Tensor(robot.metadata.default_Kqd)
    policy = MyPDPolicy(
        joint_pos_current=q_initial,
        kq=default_kq,
        kqd=default_kqd,
    )
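The snippet ends before the policy is sent to the robot; with the polymetis API the usual next step is a blocking send_torch_policy call (hedged sketch):

    # Run the policy and collect the resulting state log.
    print("\nRunning PD policy...")
    state_log = robot.send_torch_policy(policy)
    print(f"Episode length: {len(state_log)}")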
Example #12

# Note: this class depends on imports and constants shown in the hedged
# preamble sketch after the listing.
class ManipulatorSystem:
    def __init__(self, robot_kwargs=None, gripper_kwargs=None):
        self.arm = RobotInterface(**(robot_kwargs or {}))
        self.gripper = GripperInterface(**(gripper_kwargs or {}))
        time.sleep(0.5)

        # Set continuous control policy
        self.reset_policy()

        # Reset to rest pose
        self.rest_pos = torch.Tensor(REST_POSE[0])
        self.rest_quat = torch.Tensor(REST_POSE[1])
        self.reset()

    def __del__(self):
        self.arm.terminate_current_policy()

    def reset(self, time_to_go=2.0):
        self.move_to(self.rest_pos, self.rest_quat, time_to_go)
        self.open_gripper()

    def reset_policy(self):
        # Go home
        self.arm.go_home()

        # Send PD controller
        joint_pos_current = self.arm.get_joint_positions()
        policy = toco.policies.CartesianImpedanceControl(
            joint_pos_current=joint_pos_current,
            Kp=torch.Tensor(self.arm.metadata.default_Kx),
            Kd=torch.Tensor(self.arm.metadata.default_Kxd),
            robot_model=self.arm.robot_model,
        )
        self.arm.send_torch_policy(policy, blocking=False)

    def move_to(self, pos, quat, time_to_go=2.0):
        """
        Attempts to move to the given position and orientation by
        planning a Cartesian trajectory (a set of min-jerk waypoints)
        and updating the current policy's target to that
        end-effector position & orientation.

        Returns (num successes, num attempts)
        """
        # Plan trajectory
        pos_curr, quat_curr = self.arm.get_ee_pose()
        N = int(time_to_go / PLANNER_DT)

        waypoints = toco.planning.generate_cartesian_space_min_jerk(
            start=T.from_rot_xyz(R.from_quat(quat_curr), pos_curr),
            goal=T.from_rot_xyz(R.from_quat(quat), pos),
            time_to_go=time_to_go,
            hz=1 / PLANNER_DT,
        )

        # Execute trajectory
        t0 = time.time()
        t_target = t0
        successes = 0
        for i in range(N):
            # Update traj
            ee_pos_desired = waypoints[i]["pose"].translation()
            ee_quat_desired = waypoints[i]["pose"].rotation().as_quat()
            # ee_twist_desired = waypoints[i]["twist"]
            self.arm.update_current_policy({
                "ee_pos_desired": ee_pos_desired,
                "ee_quat_desired": ee_quat_desired,
                # "ee_vel_desired": ee_twist_desired[:3],
                # "ee_rvel_desired": ee_twist_desired[3:],
            })

            # Check if policy terminated due to issues
            if self.arm.get_previous_interval().end != -1:
                print("Interrupt detected. Reinstantiating control policy...")
                time.sleep(3)
                self.reset_policy()
                break
            else:
                successes += 1

            # Spin once
            t_target += PLANNER_DT
            t_remaining = t_target - time.time()
            time.sleep(max(t_remaining, 0.0))

        # Wait for robot to stabilize
        time.sleep(0.2)

        return successes, N

    def close_gripper(self):
        self.gripper.grasp(speed=0.1, force=1.0)
        time.sleep(0.5)

        # Check state
        state = self.gripper.get_state()
        assert state.width < state.max_width

    def open_gripper(self):
        max_width = self.gripper.get_state().max_width
        self.gripper.goto(width=max_width, speed=0.1, force=1.0)
        time.sleep(0.5)

        # Check state
        state = self.gripper.get_state()
        assert state.width > 0.0

    def grasp_pose_to_pos_quat(self, grasp_pose, z):
        x, y, rz = grasp_pose
        pos = torch.Tensor([x, y, z])
        quat = (R.from_rotvec(torch.Tensor([0, 0, rz])) *
                R.from_quat(self.rest_quat)).as_quat()

        return pos, quat

    def grasp(self, grasp_pose0, grasp_pose1):
        results = []

        # Move to pregrasp
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, PREGRASP_HEIGHT)
        results.append(self.move_to(pos, quat))

        # Lower (slower than other motions to prevent sudden collisions)
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, GRASP_HEIGHT)
        results.append(self.move_to(pos, quat, time_to_go=4.0))

        # Grasp
        self.close_gripper()

        # Lift to pregrasp
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, PREGRASP_HEIGHT)
        results.append(self.move_to(pos, quat))

        # Move to new pregrasp
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose1, PREGRASP_HEIGHT)
        results.append(self.move_to(pos, quat))

        # Release
        self.open_gripper()

        # Reset
        self.reset()

        total_successes = sum([r[0] for r in results])
        total_tries = sum([r[1] for r in results])
        return total_successes, total_tries

    def continuously_grasp(self, max_iters=1000):
        # Setup sampling
        gp_range_upper = torch.Tensor(GP_RANGE_UPPER)
        gp_range_lower = torch.Tensor(GP_RANGE_LOWER)

        # Perform grasping
        i = 0
        total_successes, total_tries = 0, 0
        try:
            while True:
                # Sample grasp
                grasp_pose0 = uniform_sample(gp_range_lower, gp_range_upper)
                grasp_pose1 = uniform_sample(gp_range_lower, gp_range_upper)

                # Perform grasp
                print(
                    f"Grasp {i + 1}: grasp={grasp_pose0}, release={grasp_pose1}"
                )
                n_successes, n_tries = self.grasp(grasp_pose0, grasp_pose1)
                total_successes += n_successes
                total_tries += n_tries

                # Loop termination
                i += 1
                if max_iters > 0 and i >= max_iters:
                    break

        except KeyboardInterrupt:
            print("Interrupted by user.")

        return total_successes, total_tries
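ManipulatorSystem depends on a module preamble that the listing never shows. A hedged sketch of the assumed imports and constants (all values illustrative; R and T follow torchcontrol's transform conventions and, like uniform_sample, should be checked against the original source), plus a minimal usage example:

import time

import torch
import torchcontrol as toco
from torchcontrol.transform import Rotation as R
from torchcontrol.transform import Transformation as T

from polymetis import GripperInterface, RobotInterface

# Illustrative values only; the original constants are not in the listing.
PLANNER_DT = 0.01  # waypoint spacing of the min-jerk planner (s)
REST_POSE = ([0.5, 0.0, 0.4], [1.0, 0.0, 0.0, 0.0])  # (pos xyz, quat xyzw)
PREGRASP_HEIGHT = 0.2  # m
GRASP_HEIGHT = 0.05  # m
GP_RANGE_LOWER = [0.4, -0.2, -1.57]  # grasp pose (x, y, rz)
GP_RANGE_UPPER = [0.6, 0.2, 1.57]


def uniform_sample(lower, upper):
    """Sample elementwise uniformly from [lower, upper]."""
    return lower + (upper - lower) * torch.rand_like(lower)


if __name__ == "__main__":
    manipulator = ManipulatorSystem()
    successes, tries = manipulator.continuously_grasp(max_iters=10)
    print(f"{successes}/{tries} successful motions")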