Code Example #1
# Assumes `kk` (robot interface), `vv` (Vive tracker interface), and
# `location_list` (a list of (name, joint_angles) pairs) are defined earlier.
import random
import sys
import time

import numpy as np

num_cycles = int(input(
    "Please enter the number of cycles you would like to test: "))

shuff = input("Do you want to shuffle the pose order? [y/n] ")
if shuff != 'y':
    print("Order will not be shuffled")

for i in range(num_cycles):  # iterate over the number of cycles
    if shuff == 'y':
        # shuffle the list so each cycle visits the poses in a different random order
        random.shuffle(location_list)
    print("Cycle #%d" % (i + 1))
    for j in range(len(location_list)):  # iterate over the (shuffled) pose list
        curr_position = np.array(kk.get_joint_positions())
        num_steps = 20
        final = np.array(location_list[j][1])
        increment = (final - curr_position) / num_steps  # constant per-step delta
        for step in range(1, num_steps + 1):  # `step`, not `i`, to avoid shadowing the cycle index
            kk.set_joint_positions(curr_position + step * increment)  # command the next waypoint
            time.sleep(0.05)  # pacing between waypoints (assumed; the original gave none)

        print('position set')
        time.sleep(3)  # wait for any residual motion to subside
        measured_pose = vv.get_vive_tracker_data()  # measure the pose using the Vive system
        if measured_pose == 0:
            print("Don't forget to restart the Vive system on the other computer. Quitting now.")
            sys.exit(1)
Code Example #2
# Assumes gym's `spaces` and a local `StreamVideo` camera helper are available;
# `frame_skip` and `seed` are taken as constructor arguments here.
import numpy as np
from gym import spaces

from blue_interface import BlueInterface


class BlueRobotEnv(object):
    def __init__(self, from_pixels=True, height=100, width=100, camera_id=0,
                 control="torque", frame_skip=1, seed=1):
        side = "right"
        ip = "127.0.0.1"
        self.blue = BlueInterface(side, ip)
        self.blue.calibrate_gripper()
        self._from_pixels = from_pixels
        self.control = control
        self._frame_skip = frame_skip
        self._channels_first = True
        
        self.camera = StreamVideo(height=height, width=width, camera_id=camera_id)
        self.camera.start()

        # true and normalized action spaces
        self._true_action_space = spaces.Box(
            low=np.array([-3.3999, -2.2944, -2.6761, -2.2944, -2.6761, -2.2944, -2.6761, 0.0], dtype=np.float32),
            high=np.array([2.3412, 0, 2.6761, 0, 2.6761, 0, 2.676, 0.0], dtype=np.float32),
            dtype=np.float32
        )  # shape (8,) is inferred from the low/high arrays: 7 joints + gripper
        self._norm_action_space = spaces.Box(
            low=-1.0,
            high=1.0,
            shape=self._true_action_space.shape,
            dtype=np.float32
        )

        # create observation space
        if from_pixels:
            shape = [3, height, width] if self._channels_first else [height, width, 3]
            self._observation_space = spaces.Box(
                low=0, high=255, shape=shape, dtype=np.uint8
            )
        else:
            # joint/gripper state vector: qpos (8) + qvel (8), matching
            # get_robot_joints(); the original referenced an undefined
            # dm_control-style `self._env` here
            self._observation_space = spaces.Box(
                low=-np.inf, high=np.inf, shape=(16,), dtype=np.float32
            )

        # full proprioceptive state, same 16-dim layout as get_robot_joints()
        self._state_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(16,), dtype=np.float32
        )
        
        self.current_state = None

        # seed numpy's global RNG (seed is a constructor argument here)
        np.random.seed(seed)

    def __del__(self):
        self.camera.stop()

    @property
    def observation_space(self):
        return self._observation_space

    @property
    def state_space(self):
        return self._state_space

    @property
    def action_space(self):
        return self._norm_action_space

    def step(self, a):
        for _ in range(self._frame_skip):
            if self.control == "torque":
                self.blue.set_joint_torques(a[:-1])  # last entry drives the gripper
                self.blue.command_gripper(a[-1], 20.0, wait=True)
            else:
                self.blue.set_joint_positions(a[:-1], duration=0.1)
                self.blue.command_gripper(a[-1], 20.0, wait=True)
        obs = self._get_obs()
        reward = 0  # no task reward is defined for this environment yet
        done = False
        return obs, reward, done, {}

    def reset(self):
        init_pos = np.array([0, -2.31, 1.57, -0.75, -1.57, 0, 0])
        self.blue.set_joint_positions(init_pos, duration=0.0)
        self.blue.calibrate_gripper()
        self.blue.command_gripper(0.0, 20.0, wait=False)
        obs = self._get_obs()
        return obs

    def _get_obs(self):
        if self._from_pixels:
            obs = self.render()
            if self._channels_first:
                obs = obs.transpose(2, 0, 1).copy()
        else:
            obs = self.get_robot_joints()
        return obs

    def render(self):
        return self.camera()  # StreamVideo is assumed to return the latest frame when called

    @property
    def get_qpos(self):
        joint = self.blue.get_joint_positions()
        gripper = self.blue.get_gripper_position()  # scalar, wrapped for concatenation
        return np.concatenate((joint, [gripper]), axis=0)

    @property
    def get_qvel(self):
        joint = self.blue.get_joint_velocities()
        gripper = 0.0  # gripper velocity is not exposed by the interface
        return np.concatenate((joint, [gripper]), axis=0)

    def get_robot_joints(self):
        return np.concatenate([
            self.get_qpos,
            self.get_qvel,
        ])
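For context, here is a minimal sketch of how this environment might be driven with a random policy. Note that step() applies actions as-is; mapping normalized actions into _true_action_space is left to the caller. The episode length below is illustrative.

import numpy as np

env = BlueRobotEnv(from_pixels=True, control="position")
obs = env.reset()
for _ in range(100):
    # sample a random 8-dim normalized action (7 joints + gripper)
    a = np.random.uniform(-1.0, 1.0, size=env.action_space.shape)
    obs, reward, done, info = env.step(a)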
Code Example #3
    # `arm`, `address`, `port`, and `frequency` (recording rate in Hz) are
    # assumed to be defined earlier; the fragment starts mid-function.
    blue = BlueInterface(arm, address, port)  # connect to the arm at the given address
    blue.disable_control()  # turn off any other active controller (leaves the robot in gravity comp mode)

    joint_angle_list = []  #Initialize the list to hold our joint positions
    pose_list = []
    gripper_list = []

    input(
        "Press enter to start recording. To finish recording press <ctrl+c>.")

    try:
        last_time = 0.0
        while True:
            position = blue.get_joint_positions()  # record the current joint angles
            joint_angle_list.append(position)
            pose = blue.get_cartesian_pose()
            pose_list.append(pose)
            gripper_pos = blue.get_gripper_position()
            gripper_list.append(gripper_pos)
            print("Position recorded!")
            # sleep just long enough to hold the requested recording frequency
            sleep_time = 1.0 / frequency - (time.time() - last_time)
            if sleep_time > 0:
                time.sleep(sleep_time)
            last_time = time.time()
    except KeyboardInterrupt:
        print(joint_angle_list)
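On exit the recorder above only prints the joint angles; to feed the playback scripts below, the data would also need to be written out. A minimal sketch, using the (gripper, joint_positions) tuple format the playback examples expect; the file name is illustrative.

import pickle

recorded = list(zip(gripper_list, joint_angle_list))
with open("tape_file.pickle", "wb") as handle:
    pickle.dump(recorded, handle, protocol=pickle.HIGHEST_PROTOCOL)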
Code Example #4
    args = parser.parse_args()

    arm = consts.default_arm
    address = args.address
    port = args.port
    filename = args.tape_file

    blue = BlueInterface(arm, address, port)  # connect to the arm at the given address and port

    print("..")
    recorded_positions = []
    error = 0.5

    blue.disable_control()
    blue.disable_gripper()

    # load the recorded (gripper, joint_positions) waypoints from the tape file
    with open(filename, 'rb') as handle:
        recorded_positions = pickle.load(handle)

    input("Press enter to start trajectory.")

    while True:
        for desired_position in recorded_positions:
            current_position = (blue.get_gripper_position(), blue.get_joint_positions())
            blue.set_joint_positions(np.array(desired_position[1]))
            blue.command_gripper(desired_position[0], 2.0, True)
            while np.linalg.norm(np.array(desired_position[1]) - np.array(current_position[1])) > error:
                current_position = (blue.get_gripper_position(), blue.get_joint_positions())
                time.sleep(0.1)
Code Example #5
#!/usr/bin/env python3

# A basic example of reading information about Blue's current state.

import time

import numpy as np

from blue_interface import BlueInterface

side = "right"
ip = "127.0.0.1"
blue = BlueInterface(side, ip)


def print_aligned(left, right):
    print("{:30} {}".format(left, np.round(right, 4)))


while True:
    print_aligned("End Effector Position:", blue.get_cartesian_pose()["position"])
    print_aligned("End Effector Orientation:", blue.get_cartesian_pose()["orientation"])
    print_aligned("Joint Positions:", blue.get_joint_positions())
    print_aligned("Joint Velocities:", blue.get_joint_velocities())
    print_aligned("Joint Torques:", blue.get_joint_torques())
    print_aligned("Gripper Position:", blue.get_gripper_position())
    print_aligned("Gripper Effort:", blue.get_gripper_effort())
    print("=" * 30)
    time.sleep(0.5)
Code Example #6
#!/usr/bin/env python3

# A basic example of using BlueInterface for joint positions control.

from blue_interface import BlueInterface
import numpy as np
import sys
import time

assert len(sys.argv) == 2, "usage: {} <robot_ip>".format(sys.argv[0])

side = "right"
ip = sys.argv[1]
blue = BlueInterface(side, ip)

current_joints = blue.get_joint_positions()
blue.set_joint_positions(current_joints, duration=3.0)

while True:
    time.sleep(1)  # hold position without spinning the CPU; exit with Ctrl+C

# When this script terminates (e.g. via Ctrl+C), the robot will automatically
# stop all controllers and go back to gravity compensation mode
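
A natural variation on holding the current pose is to command a small offset from it. A hypothetical example; the 0.2 rad offset on the last joint is purely illustrative.

target = np.array(current_joints)
target[-1] += 0.2  # illustrative 0.2 rad offset on the last joint
blue.set_joint_positions(target, duration=3.0)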

Code Example #7
    port = args.port
    num_positions = args.num_positions
    filename = args.tape_file

    blue = BlueInterface(arm, address, port)  # connect to the arm at the given address and port

    recorded_positions = []
    error = 0.1

    blue.disable_control()
    blue.disable_gripper()

    # record `num_positions` waypoints (count supplied on the command line)
    for i in range(num_positions):
        input("Press enter to record current joint position " + str(i) + ".")
        recorded_positions.append((blue.get_gripper_position(), blue.get_joint_positions()))

    with open(filename, 'wb') as handle:
        pickle.dump(recorded_positions, handle, protocol=pickle.HIGHEST_PROTOCOL)

    input("Press enter to start trajectory.")

    while True:
        for desired_position in recorded_positions:
            current_position = (blue.get_gripper_position(), blue.get_joint_positions())
            blue.set_joint_positions(np.array(desired_position[1]))
            blue.command_gripper(desired_position[0], 10.0, True)
            while np.linalg.norm(np.array(desired_position[1]) - np.array(current_position[1])) > error:
                current_position = (blue.get_gripper_position(), blue.get_joint_positions())
                time.sleep(0.1)
Code Example #8
import time

import numpy as np

from blue_interface import BlueInterface

side = "right"
ip = "127.0.0.1"
blue = BlueInterface(side, ip)

# Compute IK solution
target_position = [0.4, 0, 0]  # x, y, z
target_orientation = [0.6847088, -0.17378805, -0.69229771,
                      -0.1472938]  # x, y, z, w
target_joint_positions = blue.inverse_kinematics(target_position,
                                                 target_orientation)

# Send command to robot
blue.set_joint_positions(target_joint_positions, duration=5)

# Wait for system to settle
time.sleep(5)

# Print results
joint_positions = blue.get_joint_positions()
pose = blue.get_cartesian_pose()
def print_aligned(left, right):
    print("{:30} {}".format(left, np.round(right, 4)))

print_aligned("Target joint positions: ", target_joint_positions)
print_aligned("End joint positions: ", joint_positions)
print_aligned("Target cartesian position:", target_position)
print_aligned("End cartesian position:", pose["position"])
print_aligned("Target orientation:", target_orientation)
print_aligned("End orientation:", pose["orientation"])