示例#1
0
 def test_get_object(self):
     """Object.get_object resolves both a name and a handle to the same object."""
     cases = {
         'dynamic_cube': self.dynamic_cube,
         'dummy': self.dummy,
     }
     # Resolve every fixture first by string name, then by integer handle.
     for name, expected in cases.items():
         self.assertEqual(Object.get_object(name), expected)
     for expected in cases.values():
         self.assertEqual(Object.get_object(expected.get_handle()), expected)
示例#2
0
    def _get_objects_in_tree(root_object=None,
                             object_type=ObjectType.ALL,
                             exclude_base=True,
                             first_generation_only=False) -> List['Object']:
        """Retrieve the objects in the scene tree rooted at *root_object*.

        :param root_object: Root of the tree to walk. ``None`` means the whole
            scene, an ``Object`` is converted to its handle, and an ``int`` is
            used directly as a handle.
        :param object_type: The :class:`ObjectType` to include in the walk.
        :param exclude_base: Whether the root object itself is excluded.
        :param first_generation_only: Whether only direct children of the
            root are considered.
        :return: A list of wrapped objects found in the tree. Handles that
            cannot be wrapped (e.g. unsupported types) are skipped with a
            warning.
        :raises ValueError: If *root_object* is not None, an int or an Object.
        """
        if root_object is None:
            root_object = sim.sim_handle_scene
        elif isinstance(root_object, Object):
            root_object = root_object.get_handle()
        elif not isinstance(root_object, int):
            raise ValueError('root_object must be None, int or Object')

        # simGetObjectsInTree bitmask: bit 0 -> exclude the tree base,
        # bit 1 -> restrict the walk to the first generation of children.
        options = 0
        if exclude_base:
            options |= 1
        if first_generation_only:
            options |= 2
        handles = sim.simGetObjectsInTree(root_object, object_type.value,
                                          options)
        objects = []
        for handle in handles:
            try:
                objects.append(Object.get_object(handle))
            except KeyError:
                # e.g., CAMERA and LIGHT are not officially supported.
                # Renamed from `type` to avoid shadowing the builtin.
                obj_type = Object.get_object_type(
                    Object.get_object_name(handle))
                name = Object.get_object_name(handle)
                warnings.warn("Object ({}, '{}') has {}, "
                              'which is not supported'.format(
                                  handle, name, obj_type))
        return objects
示例#3
0
    def __init__(self, proxy_map):
        """Set up the worker: shared simulation state, the SODA scene wrapper,
        an initial room layout, and the loop bookkeeping fields.

        :param proxy_map: Proxy mapping forwarded to the base worker and SODA.
        """
        super(SpecificWorker, self).__init__(proxy_map)

        # Shared mutable state for the simulation; guarded by the RLock below
        # when accessed from multiple threads.
        self.data = {
            'walls': [],
            'goal': [],
            'humans': None,
            'robot_position': None,
            'robot_orientation': None,
            'simulator_mutex': threading.RLock()
        }

        # SODA owns the simulator scene and mutates self.data in place.
        self.soda = SODA(proxy_map,
                         self.data,
                         scene_file='dataset_lowres_top.ttt')
        # Bounds used to randomize the room contents on each setup.
        self.min_max_data = {
            "min_humans": 0,
            "max_humans": 4,
            "min_wandering_humans": 0,
            "max_wandering_humans": 4,
            "min_tables": 0,
            "max_tables": 4,
            "min_plants": 0,
            "max_plants": 4,
            "min_relations": 0,
            "max_relations": 4
        }

        # Build the initial room; room_setup takes the five minima followed by
        # the five maxima (positional order matters here).
        self.data, self.wandering_humans = self.soda.room_setup(
            self.min_max_data['min_humans'],
            self.min_max_data['min_wandering_humans'],
            self.min_max_data['min_plants'],
            self.min_max_data['min_tables'],
            self.min_max_data['min_relations'],
            self.min_max_data['max_humans'],
            self.min_max_data['max_wandering_humans'],
            self.min_max_data['max_plants'],
            self.min_max_data['max_tables'],
            self.min_max_data['max_relations'],
            robot_random_pose=False,
            show_goal=False,
            show_relations=False)

        # Loop/control state: commanded velocities, timers primed 10s in the
        # past so the first periodic actions fire immediately.
        self.adv = self.rot = 0.
        self.last_ten = time.time() - 10
        self.last_point_one = time.time() - 10
        self.end_simulation = False
        self.vision_sensor = Object.get_object('Vision_sensor')
示例#4
0
 def get_object(self, name: str):
     """Convenience wrapper: resolve *name* through the Object registry."""
     resolved = Object.get_object(name)
     return resolved
def resetTask(task):
    """Manually reset the PutGroceriesInCupboard task by picking every item
    out of the cupboard and dropping it at a random free spot on the work
    surface, then returning the arm to its start pose.

    Relies on module-level globals: `agent2`, `obj_pose_sensor`,
    `check_if_in_cupboard`, `sample_reset_pos` and `np`.

    :param task: The running RLBench task instance being reset.
    :return: ``(descriptions, obs)`` — descriptions is always ``None`` here
        (the built-in ``task.reset()`` is deliberately bypassed, see the
        commented-out call at the bottom).
    """
    obs = task.get_observation()
    obj_poses = obj_pose_sensor.get_poses()
    surface = Object.get_object('worksurface')
    #get items in cupboard

    in_cupboard = []
    print('started reseting')
    for k in [
            'crackers', 'mustard', 'coffee', 'sugar', 'spam', 'tuna', 'soup',
            'strawberry_jello', 'chocolate_jello'
    ]:
        if check_if_in_cupboard(k, obj_poses):
            in_cupboard.append(k)

    #drop anything in hand
    actions = agent2.ungrasp_object(obs)
    obs, reward, terminal = task.step(actions)

    #move to start position
    actions = agent2.move_to_pos([0.25, 0, 1], False)
    obs, reward, terminal = task.step(actions)
    print('moved to start')

    # Keep emptying the cupboard until a rescan finds nothing left inside.
    while len(in_cupboard) > 0:
        #move to above object location
        # NOTE(review): np.random.randint(high) excludes `high`, so
        # `len(in_cupboard) - 1` can never select the last list entry —
        # looks like an off-by-one; confirm intent. Also `random` shadows
        # the stdlib module name if it is imported elsewhere in this file.
        if (len(in_cupboard) > 1):
            random = np.random.randint(len(in_cupboard) - 1)
            print(random)
            obj = in_cupboard[random]
        else:
            obj = in_cupboard[0]
        actions = agent2.move_above_cabinet(obj_poses, obj, False)
        obs, reward, terminal = task.step(actions)
        print('move above cabinet')
        target_obj = Object.get_object(obj)
        #attempt straight grasp
        # Step toward the grasp pose until close enough (< 0.01), grasped,
        # or the joint-force jump exceeds 50 (treated as a collision stop).
        grasped = False
        actions = agent2.move_to_cabinet_object(obj_poses, obj, False)
        prev_forces = obs.joint_forces
        while np.linalg.norm(obs.gripper_pose -
                             actions[:-1]) > 0.01 and not grasped and np.sum(
                                 np.abs(obs.joint_forces - prev_forces)) <= 50:
            prev_forces = obs.joint_forces
            print('stepping to target')
            obs, reward, terminate = task.step(actions)

            grasped = task._robot.gripper.grasp(target_obj)
            print(obj, grasped)

        #if failed kick the object to the back of the line and try another
        #if (not grasped):
        #    in_cupboard.append(obj)
        #    print('kicking')
        #    continue

        #remove from cabinet
        actions = agent2.move_above_cabinet_num(obs, obj_poses, 5)
        obs, reward, terminal = task.step(actions)
        print('moved above cabinet_num')

        #place on table
        print('place on table')
        # Go to post-grasp location

        # Fixed staging pose: [x, y, z, qx, qy, qz, qw, gripper].
        actions = [0.25, 0, 0.99, 0, 1, 0, 0, 0]
        prev_forces = obs.joint_forces
        while np.linalg.norm(obs.gripper_pose - actions[:-1]) > 0.01 and (
                np.sum(np.abs(obs.joint_forces - prev_forces)) <= 50):
            prev_forces = obs.joint_forces
            print('stepping to post-grasp staging')
            obs, reward, terminate = task.step(actions)
        print('moved to post-grasp location', actions, obs.gripper_pose)

        # Retry random drop locations until one is reachable, then release.
        while grasped:
            reset_x, reset_y, reset_z = sample_reset_pos(surface)

            print('Randomly chosen reset location: ', reset_x, ', ', reset_y)

            # Lift the drop height by the object's bounding-box height so it
            # is released just above the surface rather than inside it.
            _, _, _, _, target_zmin, target_zmax = target_obj.get_bounding_box(
            )
            actions = [
                reset_x, reset_y, target_zmax - target_zmin + reset_z, 0, 1, 0,
                0, 0
            ]
            print('Reset location actions: ', actions)
            try:
                prev_forces = obs.joint_forces
                while np.linalg.norm(
                        obs.gripper_pose - actions[:-1]) > 0.01 and (np.sum(
                            np.abs(obs.joint_forces - prev_forces)) <= 50):
                    prev_forces = obs.joint_forces
                    print('stepping to reset location')
                    obs, reward, terminate = task.step(actions)
            except ConfigurationPathError:
                # Unreachable pose: sample a different drop location.
                print('Bad choice! Pick again.')
                continue
            print('moved to reset location', actions, obs.gripper_pose)
            task._robot.gripper.release()
            grasped = False

        print('nextobject')
        # Rescan the cupboard from scratch for the loop condition.
        in_cupboard.clear()
        obj_poses = obj_pose_sensor.get_poses()
        for k in [
                'crackers', 'mustard', 'coffee', 'sugar', 'spam', 'tuna',
                'soup', 'strawberry_jello', 'chocolate_jello'
        ]:
            if check_if_in_cupboard(k, obj_poses):
                in_cupboard.append(k)

    #open hand
    actions = agent2.ungrasp_object(obs)
    obs, reward, terminal = task.step(actions)

    #move to start position
    actions = agent2.move_to_pos([0.25, 0, 1])
    obs, reward, terminal = task.step(actions)
    print('finished resetting')

    descriptions = None
    #original reset task
    #descriptions, obs = task.reset()
    return descriptions, obs
            # NOTE(review): orphaned fragment — this code sits after the
            # `return` on the preceding function at a deeper indent and has
            # no visible enclosing `def`/loop; `item`, `item_number` and
            # `agent2` are not defined in this view. It appears to be
            # truncation residue and is unreachable as written — confirm
            # whether it should be deleted or restored to its original home.
            print('moved above object')

            actions = agent2.grasp_object(obs)
            obs, reward, terminal = task.step(actions)
            print('grasp object')

            actions = agent2.move_above_cabinet_num(obs, obj_poses,
                                                    1 + item_number)
            obs, reward, terminal = task.step(actions)
            print('moved above cabinet')

            actions = agent2.move_into_cabinet(obs, obj_poses, 1 + item_number)
            obs, reward, terminal = task.step(actions)
            print('moved into cabinet')

            target_obj = Object.get_object(item)
            while (not task._robot.gripper._grasped_objects == []):
                task._robot.gripper.release()
                actions = agent2.ungrasp_object(obs)
                obs, reward, terminal = task.step(actions)
                print('ungrasp object')

            actions = agent2.move_above_cabinet_num(obs, obj_poses,
                                                    1 + item_number)
            obs, reward, terminal = task.step(actions)
            print('moved above cabinet num')

        resetTask(task)

        condition = False
        if (condition):
示例#7
0
        # NOTE(review): the enclosing `def` (and where `objs`/`obj_poses`
        # come from) is outside this view — presumably a get_poses()-style
        # method; confirm against the full file.
        # Build a name -> pose mapping for every object in `objs`.
        for obj in objs:
            name = obj.get_name()
            pose = obj.get_pose()

            obj_poses[name] = pose
        return obj_poses


if __name__ == "__main__":
    # Entry point: drive a PutGroceriesInCupboard episode with an absolute
    # end-effector pose agent. NOTE(review): the script likely continues
    # past this excerpt; the for-loop body below may be truncated here.
    action_mode = ActionMode(ArmActionMode.ABS_EE_POSE_PLAN)  # See rlbench/action_modes.py for other action modes

    env = Environment(action_mode, '', ObservationConfig(), False)
    # available tasks: EmptyContainer, PlayJenga, PutGroceriesInCupboard, SetTheTable
    task = env.get_task(PutGroceriesInCupboard)

    surface = Object.get_object('worksurface')
    print('Surface bounding box: ', surface.get_bounding_box())

    agent = AutonAgentAbsolute_Mode()

    # Ground-truth pose sensor ("honest" — reads poses straight from the sim).
    obj_pose_sensor = HonestObjectPoseSensor(env)

    descriptions, obs = task.reset()
    print(descriptions)

    for _ in range(5):
        # Go to staging location
        # Fixed staging pose: [x, y, z, qx, qy, qz, qw, gripper]; step until
        # the gripper is within 0.01 of the target position.
        actions = [0.25, 0, 0.99, 0, 1, 0, 0, 0]
        while np.linalg.norm(obs.gripper_pose - actions[:-1]) > 0.01:
            print('stepping to staging')
            obs, reward, terminate = task.step(actions)