Example #1
 def __init__(self):
     super().__init__()
     self.path_point_nums = 50
     self.pose = None
     self.home_joints = [0, -np.pi/4, 0, -3 * np.pi/4, 0, np.pi/2, 0]
     self.position = self.get_position()
     self.kine = FrankaKinematics()
     self.position_min = [0.8, -0.3, 0.83]
     self.position_max = [1.0, 0.3, 1.2]
     self.gripper = PandaGripper()
     self.clear_path = False
Example #2
 def test_run_task_validator(self):
     for task_file in TASKS:
         test_name = task_file.split('.py')[0]
         with self.subTest(task=test_name):
             if test_name in FLAKY_TASKS:
                 self.skipTest('Flaky task.')
             sim = PyRep()
             ttt_file = os.path.join(
                 DIR_PATH, '..', '..', 'rlbench', TTT_FILE)
             sim.launch(ttt_file, headless=True)
             sim.step_ui()
             sim.set_simulation_timestep(50.0)
             sim.step_ui()
             sim.start()
             robot = Robot(Panda(), PandaGripper())
             obs = ObservationConfig()
             obs.set_all(False)
             scene = Scene(sim, robot, obs)
             sim.start()
             task_class = task_file_to_task_class(task_file)
             active_task = task_class(sim, robot)
             try:
                 task_smoke(active_task, scene, variation=-1,
                            max_variations=2, success=0.25)
             except Exception as e:
                 sim.stop()
                 sim.shutdown()
                 raise e
             sim.stop()
             sim.shutdown()
Example #3
    def launch(self):
        if self._pyrep is not None:
            raise RuntimeError('Already called launch!')
        self._pyrep = PyRep()
        self._pyrep.launch(join(DIR_PATH, TTT_FILE), headless=self._headless)
        self._pyrep.set_simulation_timestep(0.005)

        self._robot = Robot(Panda(), PandaGripper())
        self._scene = Scene(self._pyrep, self._robot, self._obs_config)
        self._set_arm_control_action()
Example #4
 def __init__(self, headless_mode: bool, variation="1container"):
     # Task initialization: launch PyRep, load the scene into the simulator, load the robot and the
     # scene objects, and initialize the lists.
     self.pr = PyRep()
     self.variation = variation
     self.ttt_file = 'pick_and_place_' + self.variation + '.ttt'
     self.pr.launch(join(DIR_PATH, self.ttt_file), headless=headless_mode)
     self.robot = Robot(Panda(), PandaGripper(), Dummy('Panda_tip'))
     self.task = InitTask(self.variation)
     self.lists = Lists()
Example #5
 def __init__(self, headless_mode: bool, variation: str):
     # When the class is initialized, PyRep is launched, the scene is loaded into the simulator,
     # the robot and the scene objects are loaded, and the lists are initialized.
     self.pyrep = PyRep()
     self.variation = variation
     self.ttt_file = 'slide_block_' + self.variation + ".ttt"
     self.pyrep.launch(join(DIR_PATH, self.ttt_file),
                       headless=headless_mode)
     self.robot = Robot(Panda(), PandaGripper(), Dummy('Panda_tip'))
     self.task = InitTask(variation)
     self.lists = Lists()
Example #6
 def __init__(self, headless_mode: bool, variation='1button'):
     self.pyrep = PyRep()
     self.variation = variation
     self.ttt_file = 'push_button_' + self.variation + '.ttt'
     self.pyrep.launch(join(DIR_PATH, self.ttt_file), headless=headless_mode)
     self.robot = Robot(Panda(), PandaGripper(), Dummy('Panda_tip'))
     self.task = InitTask(self.variation)
     self.param = Parameters(self.variation)
     self.param.original_pos0 = self.task.joint0.get_joint_position()
     self.param.original_or = self.task.wp0.get_orientation()
     if self.variation == '2button':
         self.param.original_pos1 = self.task.joint1.get_joint_position()
     self.lists = Lists()
Example #7
    def launch(self):
        if self._pyrep is not None:
            raise RuntimeError('Already called launch!')
        self._pyrep = PyRep()
        self._pyrep.launch(join(DIR_PATH, TTT_FILE), headless=self._headless)
        self._pyrep.set_simulation_timestep(0.005)

        self._robot = Robot(Panda(), PandaGripper())
        self._scene = DomainRandomizationScene(self._pyrep, self._robot,
                                               self._obs_config,
                                               self._randomize_every,
                                               self._frequency,
                                               self._visual_rand_config,
                                               self._dynamics_rand_config)
        self._set_arm_control_action()
        # Raise the domain randomized floor.
        Shape('Floor').set_position(Dummy('FloorAnchor').get_position())
Example #8
    def init(self, display=False):
        if self._pyrep is not None:
            self.finalize()

        with suppress_std_out_and_err():
            self._pyrep = PyRep()
            # TODO: TTT_FILE should be defined by robot, but now robot depends on launched pyrep
            self._pyrep.launch(join(DIR_PATH, TTT_FILE), headless=not display)
            self._pyrep.set_simulation_timestep(0.005)

            # TODO: Load arm and gripper from name
            self._robot = Robot(Panda(), PandaGripper())
            self._scene = Scene(self._pyrep, self._robot, self._obs_config)
            self._set_arm_control_action()

            # Str comparison because sometimes class comparison doesn't work.
            if self._task is not None:
                self._task.unload()
            self._task = self._get_class_by_name(self._task_name, tasks)(self._pyrep, self._robot)
            self._scene.load(self._task)
            self._pyrep.start()
Example #9
    args = parser.parse_args()

    python_file = os.path.join(TASKS_PATH, args.task)
    if not os.path.isfile(python_file):
        raise RuntimeError('Could not find the task file: %s' % python_file)

    task_class = task_file_to_task_class(args.task)

    DIR_PATH = os.path.dirname(os.path.abspath(__file__))
    sim = PyRep()
    ttt_file = os.path.join(DIR_PATH, '..', 'rlbench', TTT_FILE)
    sim.launch(ttt_file, headless=True)
    sim.step_ui()
    sim.set_simulation_timestep(0.005)
    sim.step_ui()
    sim.start()

    robot = Robot(Panda(), PandaGripper())

    active_task = task_class(sim, robot)
    obs = ObservationConfig()
    obs.set_all(False)
    scene = Scene(sim, robot, obs)
    try:
        task_smoke(active_task, scene, variation=2)
    except TaskValidationError as e:
        sim.shutdown()
        raise e
    sim.shutdown()
    print('Validation successful!')
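If exceptions other than TaskValidationError can escape task_smoke, a try/finally keeps the simulator shutdown in one place instead of duplicating it on both paths. A minimal sketch, assuming the same sim, scene and active_task objects as above:

try:
    task_smoke(active_task, scene, variation=2)
    print('Validation successful!')
finally:
    # Shut the simulator down whether or not validation passed.
    sim.shutdown()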
Example #10
 def __init__(self, headless_mode: bool):
     self.pyrep = PyRep()
     self.pyrep.launch(join(DIR_PATH, TTT_FILE), headless=headless_mode)
     self.robot = Robot(Panda(), PandaGripper(), Dummy('Panda_tip'))
     self.task = InitTask()
     self.lists = Lists()
Example #11
 def setUp(self):
     self.pyrep = PyRep()
     self.pyrep.launch(join(environment.DIR_PATH, TTT_FILE), headless=True)
     self.pyrep.set_simulation_timestep(0.005)
     self.robot = Robot(Panda(), PandaGripper())
Example #12
class Franka(Panda):

    def __init__(self):
        super().__init__()
        self.path_point_nums = 50
        self.pose = None
        self.home_joints = [0, -np.pi/4, 0, -3 * np.pi/4, 0, np.pi/2, 0]
        self.position = self.get_position()
        self.kine = FrankaKinematics()
        self.position_min = [0.8, -0.3, 0.83]
        self.position_max = [1.0, 0.3, 1.2]
        self.gripper = PandaGripper()
        self.clear_path = False
        
    def grasp(self, env, obj=None, force_mode=False):
        '''
        Close the gripper and grasp the given object.
        '''
        while not self.gripper.actuate(0.0,0.1):
            env.step()
        self.grasped_obj = obj
        if force_mode:
            self.gripper._grasped_objects.append(self.grasped_obj)
            self.gripper._old_parents.append(self.grasped_obj.get_parent())  # type: ignore
            self.grasped_obj.set_parent(self.gripper._attach_point, keep_in_place=True)
        else:
            self.gripper.grasp(self.grasped_obj)

    def release(self,env):
        '''
        Open the gripper and release any grasped object.
        '''
        while not self.gripper.actuate(1.0,0.1):
            env.step()
        if self.grasped_obj is not None:
            self.gripper.release()
            self.grasped_obj = None

    def _rot_value(self,euler: Union[List[float], np.ndarray] = None,
                    quaternion: Union[List[float], np.ndarray] = None):

        if euler is not None:
            return R.from_euler('xyz',euler)
        elif quaternion is not None:
            return R.from_quat(quaternion)
        else:
            raise ValueError('input euler or quaternion')

    def _get_linear_path(self, position: Union[List[float], np.ndarray],
                        euler: Union[List[float], np.ndarray] = None,
                        quaternion: Union[List[float], np.ndarray] = None
                        ) -> ArmConfigurationPath:
        # start
        joints = self.get_joint_positions()
        H_start = self.kine.fk(joints)
        # rot ~
        rots = [get_rotation_part(H_start),self._rot_value(euler,quaternion)]
        slerp = Slerp([0,1], rots)
        times = [x/self.path_point_nums for x in range(self.path_point_nums+1)]
        interp_rots = slerp(times)
        # trans ~
        d_position = (position - self.pose)/self.path_point_nums
        # ik
        ret_floats = []
        q_guess = self.home_joints
        start_position = get_transition_part(H_start)
        for i in range(self.path_point_nums+1):
            H_target = set_rotation_part(np.eye(4),interp_rots[i])
            H_target = set_position_part(H_target,start_position)
            q = self.kine.ik(H_target, q_guess) # solve_ik
            ret_floats.append(q)
            q_guess = q
        return ArmConfigurationPath(self, ret_floats)


    def _get_nonlinear_path(self, position: Union[List[float], np.ndarray],
                            euler: Union[List[float], np.ndarray] = None,
                            quaternion: Union[List[float], np.ndarray] = None) -> ArmConfigurationPath:
        r = self._rot_value(euler,quaternion)
        H_target = set_position_part(set_rotation_part(np.eye(4),r),position)
        q_target = self.kine.ik(H_target,self.home_joints)
        #self.move_j(q_target)

    def move_j(self,q_target,env):
        _q_target = np.array(copy.copy(q_target), dtype=float)  # as an array so the vector arithmetic below works
        _q_target[6] += np.pi/4
        q_start = np.array(self.get_joint_positions())
        dq = (_q_target - q_start)/self.path_point_nums
        res = []
        for i in range(self.path_point_nums):
            res.append(q_start + dq * i)
        res = np.array(res)
        res = res.reshape((1,-1))
        path =  ArmConfigurationPath(self, res.tolist()[0])
        done = False
        while not done:
            done = path.step()
            env.step()

    def home(self,env):
        self.move_j(self.home_joints,env)
    
    def move(self,env,
            position: Union[List[float], np.ndarray],
            euler: Union[List[float], np.ndarray] = None,
            quaternion: Union[List[float], np.ndarray] = None):
        path = self.get_path(
            position=position, euler=euler, quaternion = quaternion)
        if path is None:
            raise RuntimeError('no path found')
        path.visualize()
        env.step()
        
        # Step the simulation and advance the agent along the path
        done = False
        while not done:
            done = path.step()
            env.step()
        if self.clear_path:
            path.clear_visualization()

    def go_to_position(self,position: Union[List[float], np.ndarray],
                            euler: Union[List[float], np.ndarray] = None,
                            quaternion: Union[List[float], np.ndarray] = None) -> ArmConfigurationPath:
        r = self._rot_value(euler,quaternion)
        H_target = set_position_part(set_rotation_part(np.eye(4),r),np.array(position))
        q = self.kine.ik(H_target,self.home_joints)
        self.set_joint_positions(q)

    def get_path(self, position: Union[List[float], np.ndarray],
                 euler: Union[List[float], np.ndarray] = None,
                 quaternion: Union[List[float], np.ndarray] = None,
                 ignore_collisions=False,
                 trials=100, max_configs=60, trials_per_goal=6,
                 algorithm=Algos.SBL
                 ) -> ArmConfigurationPath:
        '''
        Parameters
        ----------
        position : target position, expressed in the Franka base frame
        euler / quaternion : target orientation (provide one of the two)
        '''
        #position = np.array(position) + np.array(self.position)
        position = np.array(position)
        try:
            p = self.get_linear_path(position, euler, quaternion,
                                     ignore_collisions=ignore_collisions)
            return p
        except ConfigurationPathError:
            print('failed to get linear path\n')
            pass  # Allowed. Try again, but with non-linear.
        
        try: 
            # TODO: _get_linear_path
            #p = self._get_linear_path(position,euler,quaternion)
            #return p
            pass
        except ConfigurationError:
            pass

        try:
            p = self.get_nonlinear_path(
                position, euler, quaternion, ignore_collisions, trials, max_configs,
                trials_per_goal, algorithm)
            return p
        except ConfigurationPathError:
            print('failed to get nonlinear path\n')
            #p = self._get_nonlinear_path(position,euler,quaternion)
            #return p
            pass
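The Franka helper above is only usable once a PyRep scene containing a Panda arm has been launched, and it expects an env object exposing step() as well as the custom FrankaKinematics and transform helpers it imports. A minimal usage sketch under those assumptions (the scene file and object name below are placeholders, not part of the original code):

import numpy as np
from pyrep import PyRep
from pyrep.objects.shape import Shape

pr = PyRep()
pr.launch('scene_with_franka.ttt', headless=True)  # placeholder scene file
pr.start()

arm = Franka()                              # the class defined above
arm.home(pr)                                # drive to the home joint configuration
arm.move(pr, position=[0.9, 0.0, 1.0],      # target inside position_min/position_max
         euler=[np.pi, 0.0, 0.0])
arm.grasp(pr, obj=Shape('target_cube'))     # placeholder object name
arm.release(pr)

pr.stop()
pr.shutdown()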
Example #13
        Q_object_target.eval()

    G_li = []
    loss_li = []
    all_times_per_steps = []
    all_times_per_updates = []

    max_episode_steps = 200

    max_episode_steps_eval = 200

    for episode in range(params['max_episode']):
        epsilon = 1.0 / numpy.power(episode + 1,
                                    1.0 / params['policy_parameter'])
        print("episode {}".format(episode), "epsilon {}".format(epsilon))
        gripper = PandaGripper()
        arm = Panda()
        tip = arm.get_tip()
        target = Shape('target')

        s, done, t = env.reset(), False, 0
        # new s is [arm.get_joint_positions(), tip.get_pose(), target.get_position()]
        s = [*s[8:15], *s[22:29], *s[-3:]]

        #override s to make observation space smaller
        # new s is [arm.get_joint_positions(), tip.get_pose(), target.get_position()]

        #you can use the following function to get extra information of the scene

        temp_buffer = []
        #indicate whether the arm tip has reached the target
Example #14
SCENE_FILE = join(dirname(abspath(__file__)), 'petanque.ttt')
pr = PyRep()
pr.launch(SCENE_FILE, headless=False)  # Launch the application with a scene file that contains a robot


class MyRobot(object):  # Define the robot structure
    def __init__(self, my_robot_arm, my_robot_gripper, my_robot_tip):
        self.arm = my_robot_arm
        self.gripper = my_robot_gripper
        self.tip = my_robot_tip
        self.pos = self.arm.get_position()


arm = Panda()  # Get the panda from the scene
gripper = PandaGripper()  # Get the gripper
tip = Dummy('Panda_tip')
my_panda = MyRobot(arm, gripper, tip)  # Create the robot structure


# Declaration and definition of the task elements
class InitTask(object):
    def __init__(self):
        self.sphere = Shape('Sphere')
        self.wp0 = Dummy('waypoint0')
        self.wp1 = Dummy('waypoint1')
        self.target = Shape('target')
        self.success = ProximitySensor('success')


task = InitTask()
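With the task elements in place, the waypoint dummies can drive a simple motion using the Panda's built-in path planner. A minimal sketch, assuming the simulation is started first (error handling and grasping omitted):

pr.start()

for waypoint in [task.wp0, task.wp1]:
    # Plan a path from the current configuration to the waypoint pose.
    path = arm.get_path(position=waypoint.get_position(),
                        quaternion=waypoint.get_quaternion())
    done = False
    while not done:
        done = path.step()   # advance the arm along the planned path
        pr.step()            # advance the simulation

# The proximity sensor reports whether the sphere ended up at the target.
print('Task solved:', task.success.is_detected(task.sphere))

pr.stop()
pr.shutdown()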
Example #15
sim_result = []
inputs = []
for i in range(max_sims):
    pr.launch("scenes/emptyVortex.ttt", headless=False, responsive_ui=True)

    scene_view = Camera('DefaultCamera')
    scene_view.set_position([3.45, 0.18, 2.0])
    scene_view.set_orientation(np.array([180, -70, 90]) * np.pi / 180.0)

    ex_world, used_its = scenario.generate()
    inputs.append(ex_world)

    # Import Robots
    c_objs = cop_from_prs(pr, ex_world)

    panda_1, gripper_1 = Panda(0), PandaGripper(0)
    panda_2, gripper_2 = Panda(1), PandaGripper(1)

    pr.start()
    pr.step()

    ## Neatness setup
    d_cam: VisionSensor = c_objs["Camera"][0]
    cube = c_objs["CUBOID"][0]
    tr1, tr2 = c_objs["Tray"]
    d_cam.set_entity_to_render(cube.get_handle())
    pr.step()

    d_im = np.array(d_cam.capture_depth())

    ## Robot movement code
Example #16
from pyrep.robots.arms.ur3 import UR3
from pyrep.robots.arms.panda import Panda
from pyrep.robots.end_effectors.baxter_gripper import BaxterGripper
from pyrep.robots.end_effectors.panda_gripper import PandaGripper

# SCENE_FILE = join(dirname(abspath(__file__)), 'scene_ur3_reinforcement_learning_env.ttt')
SCENE_FILE = join(dirname(abspath(__file__)),
                  'scene_reinforcement_learning_env.ttt')

pr = PyRep()
# Launch the application with a scene file that contains a robot
pr.launch(SCENE_FILE, headless=False)
pr.start()  # Start the simulation

arm = Panda()
gripper = PandaGripper()

arm.set_control_loop_enabled(False)
# arm.set_motor_locked_at_zero_velocity(True)

for i in range(300):
    arm.set_joint_target_velocities(
        list(np.random.uniform(-0.5, 0.5, size=(7, ))))
    pr.step()

for i in range(100):
    gripper_state = random.random()
    done = False
    # Open the gripper to gripper_state (1 is open 0 is closed) at a velocity of 0.04.
    while not done:
        done = gripper.actuate(gripper_state, velocity=0.04)
        pr.step()  # step the simulation so the actuation can progress
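The excerpt ends inside the actuation loop; a complete script would normally stop the simulation and close the application afterwards:

pr.stop()      # Stop the simulation
pr.shutdown()  # Close the application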