Example #1
def init_pos(pr):
	agent = UR10()
	ee_init_pos = np.array([1, 0.2, 1])
	# Get a path to the target (rotate so z points down)

	path = agent.get_path(
		position=ee_init_pos,
		euler=[-2.2, 3.1415, 0])  # generate a path for the given position and Euler angles. NOTE: sometimes the end-effector knocks the object over. Why?

	done = False
	while not done:
		done = path.step()  # step() advances the arm one increment along the path and returns True once the path is finished
		pr.step()

	target = Shape.create(type=PrimitiveShape.CUBOID,  # the cuboid
						  size=[0.05, 0.05, 0.4],
						  mass=0.1,
						  smooth=False,
						  color=[1.0, 0.1, 0.1],
						  static=False, respondable=True)
	target.set_position(np.array([1.0, 0.2, 1.0]))  # initial position of the target

	time.sleep(0.5)

	return agent, target
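A note on the question in Example #1: get_path raises ConfigurationPathError (imported in Example #6 below) when it cannot find a collision-free path, so the call is usually guarded. A minimal sketch, assuming the same agent and pose as above:

from pyrep.errors import ConfigurationPathError

try:
    path = agent.get_path(position=ee_init_pos, euler=[-2.2, 3.1415, 0])
except ConfigurationPathError:
    # no collision-free path to the requested pose; pick another pose or retry
    path = None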
Example #2
 def __init__(self, ep_length=100):
     """
     Pollos environment for testing purposes
     :param dim: (int) the size of the dimensions you want to learn
     :param ep_length: (int) the length of each episode in timesteps
     """
     # 5 points in 3D space forming the trajectory
     self.action_space = Box(low=-0.3, high=0.3, shape=(5,3), dtype=np.float32)
     self.observation_space = Box(low=np.array([-0.1, -0.2, 0, -1.5, 0.0, 0.0, -0.5, -0.2, -0.2]), 
                                  high=np.array([0.1, 1.7, 2.5, 0, 0.0, 0.0, 0.5, 0.2, 1.0]))
     
     #
     self.pr = PyRep()
     self.pr.launch(SCENE_FILE, headless=False)
     self.pr.start()
     self.agent = UR10()
     self.agent.max_velocity = 1.2
     self.joints = [Joint('UR10_joint1'), Joint('UR10_joint2'), Joint('UR10_joint3'),
                    Joint('UR10_joint4'), Joint('UR10_joint5'), Joint('UR10_joint6')]
     # get_joint_interval() returns (cyclic, [min, range]), so [min, min + range] is [min, max]
     self.joints_limits = [[j.get_joint_interval()[1][0],
                            j.get_joint_interval()[1][0] + j.get_joint_interval()[1][1]]
                           for j in self.joints]
     print(self.joints_limits)
     self.agent_hand = Shape('hand')
     self.agent.set_control_loop_enabled(True)
     self.agent.set_motor_locked_at_zero_velocity(True)
     self.initial_agent_tip_position = self.agent.get_tip().get_position()
     self.initial_agent_tip_quaternion = self.agent.get_tip().get_quaternion()
     self.target = Dummy('UR10_target')
     self.initial_joint_positions = self.agent.get_joint_positions()
     self.pollo_target = Dummy('pollo_target')
     self.pollo = Shape('pollo')
     self.table_target = Dummy('table_target')
     self.initial_pollo_position = self.pollo.get_position()
     self.initial_pollo_orientation = self.pollo.get_quaternion()
     # objects
     self.scene_objects = [Shape('table0'), Shape('Plane'), Shape('Plane0'), Shape('ConcretBlock')]
     #self.agent_tip = self.agent.get_tip()
     self.initial_distance = np.linalg.norm(np.array(self.initial_pollo_position)-np.array(self.initial_agent_tip_position))
     
     # camera 
     self.camera = VisionSensor('kinect_depth')
     self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(self.camera.get_handle(),-1)
     self.np_camera_matrix_extrinsics = np.delete(np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
     width = 640.0
     height = 480.0
     angle = math.radians(57.0)
     focalx_px = (width/2.0) / math.tan(angle/2.0)
     focaly_px = (height/2.0) / math.tan(angle/2.0)
     self.np_camera_matrix_intrinsics = np.array([[-focalx_px, 0.0, 320.0],
                                                  [0.0, -focaly_px, 240.0],
                                                  [0.0, 0.0, 1.0]])
                                     
     self.reset()
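The intrinsics and extrinsics assembled in Example #2 follow the usual pinhole camera model. A minimal sketch (ours, not from the source) of projecting a world point into pixel coordinates with them:

import numpy as np

def world_to_pixel(p_world, K, E):
    # E is the 3x4 world-to-camera matrix, K the 3x3 intrinsics built above
    p_cam = E @ np.append(p_world, 1.0)  # homogeneous world point -> camera frame
    u, v, w = K @ p_cam                  # perspective projection
    return u / w, v / w                  # pixel coordinates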
Example #3
    def __init__(self, visualize=True, mode="train", **params):
        super().__init__(visualize=visualize, mode=mode)

        # Scene selection
        scene_file_path = os.path.join(
            os.getcwd(), 'deep_simulation/scenes/UR10_gripper.ttt')

        # Simulator launch
        self.env = PyRep()
        self.env.launch(scene_file_path, headless=False)
        self.env.start()
        self.env.step()

        # Task related initialisations in Simulator
        self.vision_sensor = objects.vision_sensor.VisionSensor(
            "Vision_sensor")
        self.gripper = objects.dummy.Dummy("UR10_target")
        self.gripper_zero_pose = self.gripper.get_pose()
        self.goal = objects.dummy.Dummy("goal_target")
        self.goal_STL = objects.shape.Shape("goal")
        self.goal_STL_zero_pose = self.goal_STL.get_pose()
        self.grasped_STL = objects.shape.Shape("BaxterGripper")
        self.stacking_area = objects.shape.Shape("Plane")

        self.baxter_gripper = BaxterGripper()
        self.UR10_arm = UR10()
        self.gripper_dummy = objects.dummy.Dummy("gripper_dummy")

        self.step_counter = 0
        self.max_step_count = 100
        self.target_pose = None
        self.initial_distance = None
        self.image_width, self.image_height = 320, 240
        self.vision_sensor.set_resolution(
            (self.image_width, self.image_height))
        self._history_len = 1

        self._observation_space = Dict({
            "cam_image":
            Box(0,
                255, [self.image_height, self.image_width, 1],
                dtype=np.uint8)
        })

        self._action_space = Box(-1, 1, (3, ))
        self._state_space = extend_space(self._observation_space,
                                         self._history_len)
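Example #3 declares a single-channel uint8 image observation. A hedged sketch of filling it from the vision sensor (capture_rgb returns floats in [0, 1]; the grayscale conversion is our assumption):

import numpy as np

def _get_cam_image(self):
    rgb = self.vision_sensor.capture_rgb()              # (H, W, 3) float in [0, 1]
    gray = (255.0 * rgb.mean(axis=2)).astype(np.uint8)  # collapse to one channel
    return {"cam_image": gray[..., np.newaxis]}         # (H, W, 1) uint8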
Example #4
 def test_copy_arm(self):
     arm = UR10()
     new_arm = arm.copy()
     self.assertNotEqual(arm, new_arm)
Example #5
    def __init__(self, ep_length=100):
        """
        Pollos environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episode in timesteps
        """
        self.vrep = vrep
        logging.basicConfig(level=logging.DEBUG)
        # They have to be symmetric; we interpret actions as angular increments.
        #self.action_space = Box(low=np.array([-0.1, -1.7, -2.5, -1.5, 0.0, 0.0]),
        #                        high=np.array([0.1, 1.7, 2.5, 1.5, 0.0, 0.0]))
        inc = 0.1
        self.action_space = Box(low=np.array([-inc, -inc, -inc, -inc, 0,
                                              -inc]),
                                high=np.array([inc, inc, inc, inc, 0, inc]))

        self.observation_space = Box(low=np.array([-0.5, 0.0, -0.2]),
                                     high=np.array([0.5, 1.0, 1.0]))

        #
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.agent = UR10()
        self.agent.max_velocity = 1
        self.agent.set_control_loop_enabled(True)
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.joints = [
            Joint('UR10_joint1'),
            Joint('UR10_joint2'),
            Joint('UR10_joint3'),
            Joint('UR10_joint4'),
            Joint('UR10_joint5'),
            Joint('UR10_joint6')
        ]
        #self.joints_limits = [[j.get_joint_interval()[1][0],j.get_joint_interval()[1][0]+j.get_joint_interval()[1][1]]
        #                      for j in self.joints]
        self.high_joints_limits = [0.1, 1.7, 2.7, 0.0, 0.02, 0.3]
        self.low_joints_limits = [-0.1, -0.2, 0.0, -1.5, -0.02, -0.5]
        #self.agent_hand = Shape('hand')

        self.initial_agent_tip_position = self.agent.get_tip().get_position()
        self.initial_agent_tip_quaternion = self.agent.get_tip().get_quaternion()
        self.initial_agent_tip_euler = self.agent.get_tip().get_orientation()
        self.target = Dummy('UR10_target')
        self.initial_target_orientation = self.target.get_orientation()
        self.initial_joint_positions = self.agent.get_joint_positions()
        self.pollo_target = Dummy('pollo_target')
        self.pollo = Shape('pollo')
        #self.table_target = Dummy('table_target')
        self.initial_pollo_position = self.pollo.get_position()
        self.initial_pollo_orientation = self.pollo.get_quaternion()
        #self.table_target = Dummy('table_target')

        #self.succion = Shape('suctionPad')
        self.waypoints = [Dummy('dummy_gancho%d' % i) for i in range(4)]

        # objects
        #self.scene_objects = [Shape('table0'), Shape('Plane'), Shape('Plane0'), Shape('ConcretBlock')]
        #self.agent_tip = self.agent.get_tip()
        self.initial_distance = np.linalg.norm(
            np.array(self.initial_pollo_position) -
            np.array(self.initial_agent_tip_position))

        # camera
        self.camera = VisionSensor('kinect_depth')
        self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(
            self.camera.get_handle(), -1)
        self.np_camera_matrix_extrinsics = np.delete(
            np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
        width = 640.0
        height = 480.0
        angle = math.radians(57.0)
        focalx_px = (width / 2.0) / math.tan(angle / 2.0)
        focaly_px = (height / 2.0) / math.tan(angle / 2.0)
        self.np_camera_matrix_intrinsics = np.array([[-focalx_px, 0.0, 320.0],
                                                     [0.0, -focaly_px, 240.0],
                                                     [0.0, 0.0, 1.0]])

        self.reset()
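Example #5 interprets actions as angular increments bounded by the hand-tuned joint limits above. A minimal sketch (assumed, not from the source) of applying one action:

import numpy as np

def apply_action(self, action):
    q = np.array(self.agent.get_joint_positions())
    q_target = np.clip(q + action, self.low_joints_limits, self.high_joints_limits)
    self.agent.set_joint_target_positions(q_target)  # the control loop drives the joints there
    self.pr.step()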
Example #6
from os.path import dirname, join, abspath
from pyrep import PyRep
from pyrep.robots.arms.ur10 import UR10
from pyrep.objects.shape import Shape  # modify scene
from pyrep.const import PrimitiveShape
from pyrep.errors import ConfigurationPathError
import numpy as np
import math
import time

LOOPS = 30
SCENE_FILE = join(dirname(abspath(__file__)), 'UR10_reach_003.ttt')
pr = PyRep()
pr.launch(SCENE_FILE, headless=False)  # launch the .ttt scene file
pr.start()
agent = UR10()  # the UR10 arm defined in the scene

# We could have made this target in the UR10_reach.ttt scene, but let's create one dynamically

# position_min, position_max = [0.8, -0.2, 1.0], [1.5, 0.7, 1.0]

starting_joint_positions = [-1.2234, -0.3290, 1.6263, 1.1871, 1.9181, 1.5707]  # starting joint angles for the six UR10 joints, in radians
print('ee_pos_init:', agent.get_tip().get_position())


for i in range(LOOPS):
    target = Shape.create(type=PrimitiveShape.CUBOID,
                          size=[0.05, 0.05, 0.4],
                          color=[1.0, 0.1, 0.1],
                          static=False,
                          position=[1.0, 0.2, 0.95])
Example #7
def main():
    data = deque()
    ground_truth = deque()

    # RTDE Stuff
    #rtd = rtde_helper('conf/record_configuration.xml', 'localhost', 30004, 125)

    # ZMQ stuff
    zmq_port = "5556"
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.bind("tcp://*:%s" % zmq_port)

    # Launch the application with a scene file in headless mode
    pr = PyRep()
    pr.launch('/home/sarthak/PycharmProjects/research_ws/scenes/bill_dummy.ttt', headless=True)
    pr.set_simulation_timestep(0.1)
    expr = Experiment(5, 0)
    ur10 = UR10()
    pr.start()  # Start the simulation
    time.sleep(0.1)
    pr.step()

    velocities = [5, 5, 5, 5, 5, 5]

    ur10.set_joint_target_velocities(velocities)

    sim_step = 0
    STEPS = 150000
    buffer = deque()


    pr.step()
    time.sleep(5)
    data = np.load("/home/sarthak/PycharmProjects/research_ws/data/real-data/dataset14.npy")
    data_store = []
    #ctr = 0
    while True:  # sim_step <= STEPS:

        #q, _ = rtd.get_joint_states()
        q = data[sim_step][51:57]
        q[1], q[3] = q[1] + 1.57079, q[3] + 1.57079  # offset joints 2 and 4 by pi/2

        hum_pose = list(data[sim_step][-3:])
        # ur10.set_joint_target_positions(q)
        #q = np.random.randn(6,)
        #print(q)
        ur10.set_joint_target_positions(q)
        hum_pose[2] = 0
        vrep.simSetObjectPosition(expr.get_object_handle("Bill"), expr.world_handle, hum_pose)
        # raw distance readings
        base = expr.get_proximity_reading_from('Base')
        elbow = expr.get_proximity_reading_from('Elbow')
        tool = expr.get_proximity_reading_from('Tool')

        # 3d point readings with id
        base_pc, base_obj = expr.get_points_from('Base')
        elbow_pc, elbow_obj = expr.get_points_from('Elbow')
        tool_pc, tool_obj = expr.get_points_from('Tool')

        base_obs = np.hstack([base_pc, base_obj])
        elbow_obs = np.hstack([elbow_pc, elbow_obj])
        tool_obs = np.hstack([tool_pc, tool_obj])

        complete_obs = np.vstack([base_obs, elbow_obs, tool_obs])

        # human position
        #hum_position = np.array(expr.get_human_position()).reshape(1, 3)

        if len(buffer) == 1:
            buffer.popleft()
        else:
            buffer.append(complete_obs)

        if len(buffer) > 0:
            buff_stack = np.vstack(buffer)
            socket.send_pyobj([buff_stack, hum_pose, q])

        pr.step()
        sim_step += 1
        time.sleep(0.008)


    # rtd.stop()  # rtd is never created in this example (the rtde_helper above is commented out)
    pr.stop()  # Stop the simulation
    pr.shutdown()  # Close the application
    import sys
    sys.exit()
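Example #7 publishes observations over a ZMQ PUSH socket with send_pyobj. A minimal sketch of the matching consumer (the port and payload layout come from the example; everything else is assumed):

import zmq

context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.connect("tcp://localhost:5556")
while True:
    buff_stack, hum_pose, q = socket.recv_pyobj()  # unpickle [observations, human pose, joint angles]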
Example #8
def main():
    data = deque()
    ground_truth = deque()

    # RTDE Stuff
    rtd = rtde_helper('conf/record_configuration.xml', 'localhost', 30004, 125)

    # ZMQ stuff
    zmq_port = "5556"
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.bind("tcp://*:%s" % zmq_port)

    # Launch the application with a scene file in headless mode
    pr = PyRep()
    pr.launch('/home/sarthak/PycharmProjects/research_ws/scenes/bill_new.ttt',
              headless=True)
    pr.set_simulation_timestep(0.1)
    expr = Experiment(5, 0)
    ur10 = UR10()
    pr.start()  # Start the simulation
    time.sleep(0.1)
    pr.step()

    velocities = [5, 5, 5, 5, 5, 5]

    ur10.set_joint_target_velocities(velocities)

    sim_step = 0
    STEPS = 150000
    buffer = deque()
    depth_cam = vrep.simGetObjectHandle('eyecam')  # Bill head cam
    # js_thread = Thread(target=sim_work, args=(rtd, pr, ur10))
    pr.step()
    time.sleep(5)
    data_store = []

    while True:  #sim_step <= STEPS:

        q, _ = rtd.get_joint_states()
        # ur10.set_joint_target_positions(q)
        #q = np.random.randn(6,)
        #print(q)
        ur10.set_joint_target_positions(q)

        # raw distance readings
        base = expr.get_proximity_reading_from('Base')
        elbow = expr.get_proximity_reading_from('Elbow')
        tool = expr.get_proximity_reading_from('Tool')

        # 3d point readings with id
        base_pc, base_obj = expr.get_points_from('Base')
        elbow_pc, elbow_obj = expr.get_points_from('Elbow')
        tool_pc, tool_obj = expr.get_points_from('Tool')

        base_obs = np.hstack([base_pc, base_obj])
        elbow_obs = np.hstack([elbow_pc, elbow_obj])
        tool_obs = np.hstack([tool_pc, tool_obj])

        complete_obs = np.vstack([base_obs, elbow_obs, tool_obs])

        # human position
        hum_position = np.array(expr.get_human_position()).reshape(1, 3)

        if len(buffer) == 1:
            buffer.popleft()
        else:
            buffer.append(complete_obs)

        if len(buffer) > 0:
            buff_stack = np.vstack(buffer)
            socket.send_pyobj([buff_stack, hum_position, q])

        pr.step()
        sim_step += 1

    # np.save('data.npy', np.vstack(data_store))
    # np.save('gt.npy', np.vstack(ground_truth))
    rtd.stop()
    pr.stop()  # Stop the simulation
    pr.shutdown()  # Close the application
    import sys
    sys.exit()
Example #9
def sample(policy):
	args = arguements.achieve_args()

	# SCENE_FILE = join(dirname(abspath(__file__)), 'UR10_reach_003.ttt')
	SCENE_FILE = join(dirname(abspath(__file__)), 'UR10_reach_002.ttt')
	pr = PyRep()
	pr.launch(SCENE_FILE, headless=False)  # launch the .ttt scene file
	pr.start()

	agent = UR10()

	starting_joint_positions = [-1.5547982454299927, -0.11217942088842392, 2.505795478820801, 0.7483376860618591,
								1.587110161781311, 4.083085536956787]  # these angles correspond to [1.0, 0.2, 1.0]

	agent.set_joint_positions(starting_joint_positions)

	# agent.set_control_loop_enabled(False)
	agent.set_motor_locked_at_zero_velocity(True)

	# ee_pos = np.array([1.0, 0.2, 1.0])
	success_num = 0


	traj_num = 0
	avg_reward = []
	while traj_num < 20:

		agent, target = init_pos(pr)  # init agent and target

		ee = agent.get_tip()
		ee_pos = ee.get_position()
		ee_orient = ee.get_orientation()
		# print('initial_ee_pos:', ee_pos)
		# print('initial_ee_orient:', ee_orient)

		traj_reward = 0
		traj_num += 1
		for i in range(100):  # 100 steps max
			# print('step:', i)

			y = ee_pos[1]

			z = ee_pos[2]
			# print('y:', y)

			# action = policy.select_action(Variable(torch.Tensor([y]).unsqueeze(0)))[0]  # add noise to action
			action = policy(Variable(torch.Tensor([y]).unsqueeze(0)))[0]  # no noise

			action = np.squeeze(action.detach().numpy())
			v = action[0]
			omega = action[1]
			# print('v:', v)
			# print('omega:', omega)

			# v = 0.5  # velocity along the y axis; can be changed to s(t)

			# print('action:', )
			dy = 0.07 * v  # the step length along y axis
			# print('dy:', dy)
			y_ = y + dy  # estimated next y pos
			z_ = y_ ** 2 - 0.4 * y_ + 1.04  # estimated next z pos

			dz = z_ - z
			# print('dz:', dz)
			# print('omega:', omega)
			# print('ee_orient:', ee_orient)

			ee_pos, ee_orient, curr_joint_angles = move(dy, dz, omega, ee_pos, ee_orient, pr,
														agent)  # move the ee for 20 mini steps


			# once ee_orient > 2.6: if the object stays stable it is a success; either way the loop breaks
			if ee_orient[0] > 2.6:  # 2.6 is the tilt threshold for the ee
				for _ in range(5):
					agent.set_joint_target_positions(curr_joint_angles)  # wait for 5 loops to see if it's really stable

				if is_stable(ee, target) is True:
					print('success!')
					success_num += 1
					time.sleep(0.5)  # for observation
					# target.set_position([-10, -10, -10])   #
					r = get_reward(2, target, ee, args)    # success
					traj_reward += r
					target.remove()
					break
				else:
					r = get_reward(0, target, ee, args)  # fall
					traj_reward += r
					target.remove()
					break

			else:
				# before the threshold is reached: if stable, keep going; if not, break
				if is_stable(ee, target) is True:
					r = get_reward(1, target, ee, args)  # going on
					traj_reward += r

				else:
					r = get_reward(0, target, ee, args)  # fall
					traj_reward += r
					target.remove()
					break

		# print('traj length:', i)

		avg_reward.append(traj_reward)

	pr.stop()  # Stop the simulation
	pr.shutdown()  # Close the application

	success_rate = success_num / traj_num
	avg_reward = np.mean(avg_reward)
	print('success_rate:', success_rate)
	print('avg_reward:', avg_reward)

	return success_rate, avg_reward
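Example #9 steers the end-effector along the parabola z = y**2 - 0.4*y + 1.04. A quick check (ours, not from the source) that the cuboid's spawn point (1.0, 0.2, 1.0) lies on that curve:

y = 0.2
z = y**2 - 0.4*y + 1.04     # 0.04 - 0.08 + 1.04 = 1.0
assert abs(z - 1.0) < 1e-9  # matches the target's initial height of 1.0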
Example #10
from pyrep import PyRep
from pyrep.robots.arms.ur10 import UR10
from pyrep.objects.shape import Shape
from pyrep.const import PrimitiveShape
from pyrep.errors import ConfigurationPathError
from pyrep.objects.dummy import Dummy
import numpy as np
import math, time

LOOPS = 20
SCENE_FILE = '/home/pbustos/software/vrep/pollos/ur10-only.ttt'
pr = PyRep()
pr.launch(SCENE_FILE, headless=False)
pr.start()
agent = UR10()

# We could have made this target in the scene, but let's create one dynamically
target = Shape.create(type=PrimitiveShape.SPHERE,
                      size=[0.05, 0.05, 0.05],
                      color=[1.0, 0.1, 0.1],
                      static=True,
                      respondable=False)

position_min, position_max = [-0.1, -0.1, 0], [0.1, 0.1, 0]

starting_joint_positions = agent.get_joint_positions()

for i in range(LOOPS):

    # Reset the arm at the start of each 'episode'
    agent.set_joint_positions(starting_joint_positions)
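Example #10 is cut off after the arm reset. A hedged sketch of how such a reach loop typically continues, built only from names already in the snippet (the relative target placement and the downward-pointing Euler angles are our assumption):

    # assumed continuation: sample a new target position and plan a path to it
    pos = list(np.random.uniform(position_min, position_max))
    target.set_position(pos, relative_to=agent.get_tip())
    try:
        path = agent.get_path(position=target.get_position(),
                              euler=[0, math.radians(180), 0])
    except ConfigurationPathError:
        continue  # no collision-free path this time; resample
    done = False
    while not done:
        done = path.step()
        pr.step()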