def __init__(self, abstract_state, abstract_action):
     # Concrete node state whose pick-phase collision vector is derived
     # from the abstract state's currently-colliding configurations.
     ConcreteNodeState.__init__(self, abstract_state, abstract_action)
     # Key configurations are the PRM roadmap vertices of the abstract state.
     self.key_configs = self.abstract_state.prm_vertices
     self.pick_collision_vector = self.get_konf_obstacles(abstract_state.current_collides)
     # Place-phase collisions are not computed here; filled in later.
     self.place_collision_vector = None
     self.abs_robot_pose = utils.clean_pose_data(utils.get_body_xytheta(self.problem_env.robot))
     self.abs_obj_pose = utils.clean_pose_data(utils.get_body_xytheta(self.problem_env.env.GetKinBody(self.obj)))
     # NOTE(review): goal object poses are hard-coded to the origin here —
     # presumably a placeholder; confirm against consumers of this field.
     self.abs_goal_obj_poses = [np.array([0, 0, 0])]
def make_konfs_relative_to_pose(obj_pose, key_configs):
    """Express every key configuration relative to *obj_pose*.

    Each configuration is cleaned, converted to a pose relative to the
    object's body pose, and then angle-encoded as (sin, cos).

    Returns a numpy array with one encoded relative pose per key config.
    """
    # clean_pose_data is applied to obj_pose up front, as in the
    # element-wise cleaning below.
    utils.clean_pose_data(obj_pose)
    return np.array([
        utils.encode_pose_with_sin_and_cos_angle(
            utils.get_relative_robot_pose_wrt_body_pose(
                utils.clean_pose_data(konf), obj_pose))
        for konf in key_configs
    ])
Пример #3
0
def generate_policy_smpl_batch(smpler_state, policy, noise_batch):
    """Predict a batch of policy samples, one per noise vector.

    State inputs (goal flags, relative key configs, collisions, poses)
    are tiled so every noise sample sees an identical copy.
    """
    goal_flags, rel_konfs, collisions, poses = prepare_input(smpler_state)
    obj = smpler_state.obj
    obj_pose = utils.clean_pose_data(smpler_state.abs_obj_pose)

    smpler_state.abs_obj_pose = obj_pose
    # Relative key configs are cached on the state after the first call.
    if smpler_state.rel_konfs is None:
        cached = data_processing_utils.make_konfs_relative_to_pose(
            obj_pose, smpler_state.key_configs)
        cached = np.array(cached).reshape((1, 615, 4, 1))
        smpler_state.rel_konfs = cached
        rel_konfs = cached
    else:
        rel_konfs = smpler_state.rel_konfs
    goal_flags = smpler_state.goal_flags
    collisions = smpler_state.collision_vector

    # Replicate the shared state tensors once per noise sample.
    n_smpls = len(noise_batch)
    tile4 = (n_smpls, 1, 1, 1)
    goal_flags = np.tile(goal_flags, tile4)
    rel_konfs = np.tile(rel_konfs, tile4)
    collisions = np.tile(collisions, tile4)
    poses = np.tile(poses[:, :8], (n_smpls, 1))
    noise_batch = np.array(noise_batch).squeeze()
    return policy.policy_model.predict(
        [goal_flags, rel_konfs, collisions, poses, noise_batch])
Пример #4
0
def prepare_input(smpler_state,
                  noise_batch,
                  delete=False,
                  region=None,
                  filter_konfs=False):
    """Assemble the network input list for a batch of noise samples.

    Args:
        smpler_state: sampler state providing processed poses, goal
            flags and the pick-phase collision vector.
        noise_batch: sequence of noise vectors; its length determines
            how many times the state inputs are tiled.
        delete, region, filter_konfs: currently unused; kept for
            interface compatibility.

    Returns:
        [goal_flags, key_configs, collisions, poses, noise_batch].
    """
    poses = data_processing_utils.get_processed_poses_from_state(
        smpler_state, None)[None, :]
    obj_pose = utils.clean_pose_data(smpler_state.abs_obj_pose)

    smpler_state.abs_obj_pose = obj_pose
    goal_flags = smpler_state.goal_flags
    collisions = smpler_state.pick_collision_vector

    # BUG FIX: the original `pickle.load(open('prm.pkl', 'r'))` leaked the
    # file handle and used text mode; use a context manager and binary
    # mode, which pickle expects on every platform.
    with open('prm.pkl', 'rb') as f:
        key_configs = pickle.load(f)[0]
    key_configs = np.array(
        [utils.encode_pose_with_sin_and_cos_angle(p) for p in key_configs])
    key_configs = key_configs.reshape((1, len(key_configs), 4, 1))
    key_configs = key_configs.repeat(len(poses), axis=0)

    # Tile state inputs once per noise sample.
    n_smpls = len(noise_batch)
    goal_flags = np.tile(goal_flags, (n_smpls, 1, 1, 1))
    key_configs = np.tile(key_configs, (n_smpls, 1, 1, 1))
    collisions = np.tile(collisions, (n_smpls, 1, 1, 1))
    poses = np.tile(poses, (n_smpls, 1))
    if len(noise_batch) > 1:
        noise_batch = np.array(noise_batch).squeeze()

    inp = [goal_flags, key_configs, collisions, poses, noise_batch]
    return inp
Пример #5
0
def visualize_samples(policy, pidx):
    """Visualize ten place samples from *policy* on problem *pidx*.

    Loads the problem, highlights the target object in red, draws one
    sample per noise vector of increasing magnitude, and renders the
    resulting placements in the viewer.
    """
    problem_env = load_problem(pidx)
    utils.viewer()

    obj = problem_env.object_names[1]
    utils.set_color(obj, [1, 0, 0])  # highlight the target object in red
    smpler_state = get_smpler_state(pidx, obj, problem_env)
    smpler_state.abs_obj_pose = utils.clean_pose_data(
        smpler_state.abs_obj_pose)

    place_smpls = []
    for i in range(10):
        # Scale the noise magnitude from 0 up to ~0.9 across samples.
        noise = i / 10.0 * np.random.normal(size=(1, 4)).astype('float32')
        smpl = generate_smpls_using_noise(smpler_state, policy, noise)[0]
        place_smpls.append(smpl)
    # BUG FIX: removed leftover `import pdb; pdb.set_trace()` debugging
    # breakpoint that halted every run before visualization.
    utils.visualize_path(place_smpls)
def get_processed_poses_from_state(state):
    """Encode (object, goal-object, robot) poses per *state_data_mode*.

    Modes:
        'absolute': sin/cos-encode each absolute pose.
        'robot_rel_to_obj': robot and goal poses are expressed relative
            to the object pose before encoding; the goal encoding is
            verified to round-trip back to the absolute pose.

    Returns:
        1-D numpy array: hstack of [obj_pose, goal_obj_pose, robot_pose].

    Raises:
        NotImplementedError: for unrecognized state_data_mode values.
    """
    if state_data_mode == 'absolute':
        obj_pose = utils.encode_pose_with_sin_and_cos_angle(state.abs_obj_pose)
        # NOTE(review): this branch reads state.robot_pose while the
        # relative branch reads state.abs_robot_pose — confirm intended.
        robot_pose = utils.encode_pose_with_sin_and_cos_angle(state.robot_pose)
        goal_obj_pose = utils.encode_pose_with_sin_and_cos_angle(
            state.abs_goal_obj_pose)
    elif state_data_mode == 'robot_rel_to_obj':
        obj_pose = utils.encode_pose_with_sin_and_cos_angle(state.abs_obj_pose)

        robot_pose = utils.get_relative_robot_pose_wrt_body_pose(
            state.abs_robot_pose, state.abs_obj_pose)
        robot_pose = utils.encode_pose_with_sin_and_cos_angle(robot_pose)

        goal_obj_pose = utils.get_relative_robot_pose_wrt_body_pose(
            state.abs_goal_obj_pose, state.abs_obj_pose)
        # Sanity check: the relative goal pose must round-trip to the
        # absolute goal pose.
        recovered = utils.clean_pose_data(
            utils.get_absolute_pose_from_relative_pose(
                goal_obj_pose, state.abs_obj_pose.squeeze()))
        assert np.all(np.isclose(recovered, state.abs_goal_obj_pose.squeeze()))
        goal_obj_pose = utils.encode_pose_with_sin_and_cos_angle(goal_obj_pose)
    else:
        # BUG FIX: was `raise not NotImplementedError`, which evaluates
        # `not NotImplementedError` to False and raises a TypeError
        # ("exceptions must derive from BaseException") instead.
        raise NotImplementedError(state_data_mode)

    pose = np.hstack([obj_pose, goal_obj_pose, robot_pose])
    return pose
Пример #7
0
def generate_transformed_key_configs(smpler_state, policy):
    """Run the value model on random noise and decode its outputs into
    absolute placements around the sampler state's object."""
    obj_pose = utils.clean_pose_data(smpler_state.abs_obj_pose)
    goal_flags, rel_konfs, collisions, poses = prepare_input(smpler_state)

    # One 4-dimensional noise vector per data row.
    noise_smpls = np.random.normal(size=(len(goal_flags), 4)).astype('float32')
    raw = policy.value_model.predict(
        [goal_flags, rel_konfs, collisions, poses, noise_smpls]).squeeze()
    return np.array([
        data_processing_utils.get_unprocessed_placement(s, obj_pose)
        for s in raw
    ])
Пример #8
0
def generate_smpls_using_noise(smpler_state, policy, noises):
    """Generate one decoded placement from the given noise vectors.

    Returns a single-element list containing the placement, decoded
    relative to the sampler state's object pose.
    """
    goal_flags, rel_konfs, collisions, poses = prepare_input(smpler_state)
    utils.set_color(smpler_state.obj, [1, 0, 0])  # mark target red
    obj_pose = utils.clean_pose_data(smpler_state.abs_obj_pose)

    raw = policy.generate_given_noise(
        goal_flags, rel_konfs, collisions, poses, noises)
    placement = data_processing_utils.get_unprocessed_placement(
        raw.squeeze(), obj_pose)
    return [placement]
Пример #9
0
def visualize_samples_at_noises(problem_env, policy, pidx, noises):
    """Visualize one place sample per supplied noise vector."""
    utils.viewer()

    target_obj = problem_env.object_names[1]
    utils.set_color(target_obj, [1, 0, 0])  # mark the target object red
    smpler_state = get_smpler_state(pidx, target_obj, problem_env)
    smpler_state.abs_obj_pose = utils.clean_pose_data(
        smpler_state.abs_obj_pose)

    samples = [
        generate_smpls_using_noise(smpler_state, policy, noise)[0]
        for noise in noises
    ]
    utils.visualize_path(samples)
    def __init__(self,
                 problem_env,
                 obj,
                 region,
                 goal_entities,
                 key_configs=None,
                 collision_vector=None):
        # Sampler-state constructor: caches key configs, the collision
        # vector, and cleaned absolute poses later consumed by
        # get_processed_poses_from_state.
        self.obj = obj
        self.region = region
        self.goal_entities = goal_entities

        self.key_configs = self.get_key_configs(key_configs)
        self.collision_vector = self.get_collison_vector(collision_vector)

        # Python 2: obj may arrive as str or unicode; resolve to a body.
        if type(obj) == str or type(obj) == unicode:
            obj = problem_env.env.GetKinBody(obj)

        self.abs_robot_pose = utils.clean_pose_data(
            utils.get_body_xytheta(problem_env.robot))
        self.abs_obj_pose = utils.clean_pose_data(utils.get_body_xytheta(obj))
        # NOTE(review): goal object is hard-coded to 'square_packing_box1'
        # rather than derived from goal_entities — confirm intended.
        self.abs_goal_obj_pose = utils.clean_pose_data(
            utils.get_body_xytheta('square_packing_box1'))
        self.goal_flags = self.get_goal_flags()
        self.rel_konfs = None  # lazily computed and cached by prepare_input
    def __init__(self, abstract_state, abstract_action, key_configs, parent_state=None):
        # Concrete node state that computes pick collisions from explicit
        # key configs, restoring the robot's arm/torso config afterwards.
        ConcreteNodeState.__init__(self, abstract_state, abstract_action)
        self.parent_state = parent_state
        self.key_configs = key_configs
        # Save the current right-arm/torso configuration so the collision
        # computation below does not permanently move the robot.
        init_config = utils.get_rightarm_torso_config(abstract_state.problem_env.robot)
        self.pick_collision_vector = self.get_konf_obstacles_from_key_configs()
        #self.konf_collisions_with_obj_poses = self.get_object_pose_collisions()
        #self.pick_collision_vector = self.get_konf_obstacles(self.konf_collisions_with_obj_poses)
        utils.set_rightarm_torso(init_config, abstract_state.problem_env.robot)
        # Place-phase collisions are filled in later.
        self.place_collision_vector = None

        # following three variables are used in get_processed_poses_from_state
        self.abs_robot_pose = utils.get_rightarm_torso_config(self.problem_env.robot)
        self.abs_obj_pose = utils.clean_pose_data(utils.get_body_xytheta(self.problem_env.env.GetKinBody(self.obj)))
        # NOTE(review): goal object poses hard-coded to origin — presumably
        # a placeholder; confirm against consumers of this field.
        self.abs_goal_obj_poses = [np.array([0, 0, 0])]
Пример #12
0
def generate_smpls(smpler_state, policy, n_data, noise_smpls_tried=None):
    """Generate *n_data* decoded placement samples from *policy*.

    If noise_smpls_tried is not None, the noises accumulated by
    policy.generate are returned alongside the placements.
    """
    goal_flags, rel_konfs, collisions, poses = prepare_input(smpler_state)
    obj = smpler_state.obj
    utils.set_color(obj, [1, 0, 0])  # mark the target object red
    obj_pose = utils.clean_pose_data(smpler_state.abs_obj_pose)

    places = []
    noises_used = []
    for _ in range(n_data):
        # policy.generate consumes the noises tried so far and returns the
        # raw samples plus the updated noise collection.
        smpls, noises_used = policy.generate(
            goal_flags, rel_konfs, collisions, poses, noises_used)
        places.append(data_processing_utils.get_unprocessed_placement(
            smpls.squeeze(), obj_pose))
    # noise_smpls_tried serves only as a flag selecting the richer return.
    if noise_smpls_tried is not None:
        return places, noises_used
    return places
Пример #13
0
def prepare_input(smpler_state):
    """Build (goal_flags, rel_konfs, collisions, poses) network inputs,
    caching the relative key configurations on the sampler state."""
    poses = data_processing_utils.get_processed_poses_from_state(smpler_state)[None, :]
    obj_pose = utils.clean_pose_data(smpler_state.abs_obj_pose)

    # Relative key configs are expensive: compute once, then reuse.
    if smpler_state.rel_konfs is None:
        konfs = data_processing_utils.make_konfs_relative_to_pose(
            obj_pose, smpler_state.key_configs)
        smpler_state.rel_konfs = np.array(konfs).reshape((1, 615, 4, 1))
    rel_konfs = smpler_state.rel_konfs

    goal_flags = smpler_state.goal_flags
    collisions = smpler_state.collision_vector
    # Only the first 8 pose features are fed to the network.
    return goal_flags, rel_konfs, collisions, poses[:, :8]
Пример #14
0
def prepare_input_except_noise(smpler_state,
                               delete=False,
                               region=None,
                               filter_konfs=False):
    """Build the non-noise network inputs as a dict.

    Args:
        smpler_state: sampler state providing processed poses, goal flags
            and the pick-phase collision vector.
        delete, region, filter_konfs: currently unused (konf deletion /
            filtering is disabled); retained for interface compatibility.

    Returns:
        dict with keys 'goal_flags', 'key_configs', 'collisions', 'poses'.
    """
    poses = data_processing_utils.get_processed_poses_from_state(
        smpler_state, None)[None, :]
    obj_pose = utils.clean_pose_data(smpler_state.abs_obj_pose)

    smpler_state.abs_obj_pose = obj_pose
    goal_flags = smpler_state.goal_flags
    collisions = smpler_state.pick_collision_vector

    # BUG FIX: the original `pickle.load(open('prm.pkl', 'r'))` leaked the
    # file handle and used text mode; use a context manager and binary
    # mode, which pickle expects on every platform.
    with open('prm.pkl', 'rb') as f:
        key_configs = pickle.load(f)[0]

    key_configs = np.array(
        [utils.encode_pose_with_sin_and_cos_angle(p) for p in key_configs])
    key_configs = key_configs.reshape((1, len(key_configs), 4, 1))
    key_configs = key_configs.repeat(len(poses), axis=0)
    inp = {
        'goal_flags': goal_flags,
        'key_configs': key_configs,
        'collisions': collisions,
        'poses': poses
    }
    return inp
Пример #15
0
    def get_action_info(action):
        """Flatten a pick-and-place action into a summary dict.

        One-arm actions take their base poses from the last 3 DOF of
        q_goal and record no base motions; two-arm actions use the full
        q_goal and include the cleaned pick/place motions.
        """
        pick_info = action.continuous_parameters['pick']
        place_info = action.continuous_parameters['place']

        place_obj_abs_pose = utils.clean_pose_data(place_info['object_pose'])
        if 'one_arm' in action.type:
            pick_base_pose = utils.clean_pose_data(pick_info['q_goal'][-3:])
            place_base_pose = utils.clean_pose_data(place_info['q_goal'][-3:])
            pick_motion = None
            place_motion = None
        else:
            pick_base_pose = utils.clean_pose_data(pick_info['q_goal'])
            place_base_pose = utils.clean_pose_data(place_info['q_goal'])
            pick_motion = [utils.clean_pose_data(q)
                           for q in pick_info['motion']]
            place_motion = [utils.clean_pose_data(q)
                            for q in place_info['motion']]

        return {
            'object_name': action.discrete_parameters['object'],
            'region_name': action.discrete_parameters['place_region'],
            'pick_action_parameters': pick_info['action_parameters'],
            'pick_abs_base_pose': pick_base_pose,
            'place_abs_base_pose': place_base_pose,
            'place_obj_abs_pose': place_obj_abs_pose,
            'pick_motion': pick_motion,
            'place_motion': place_motion
        }
def get_processed_poses_from_action(state, action):
    """Encode pick and place base poses according to *action_data_mode*.

    Each mode encodes the pick pose (absolute, relative to the object, or
    as IR parameters) and the place pose (absolute, relative to a region,
    normalized, relative to the pick, or relative to the object).

    Returns:
        1-D numpy array: hstack of [pick_pose, place_pose].

    Raises:
        NotImplementedError: for unrecognized action_data_mode values.
    """
    if action_data_mode == 'absolute':
        pick_pose = utils.encode_pose_with_sin_and_cos_angle(
            action['pick_abs_base_pose'])
        place_pose = utils.encode_pose_with_sin_and_cos_angle(
            action['place_abs_base_pose'])
    elif action_data_mode == 'pick_relative':
        pick_pose = action['pick_abs_base_pose']
        pick_pose = utils.get_relative_robot_pose_wrt_body_pose(
            pick_pose, state.abs_obj_pose)
        pick_pose = utils.encode_pose_with_sin_and_cos_angle(pick_pose)
        place_pose = utils.encode_pose_with_sin_and_cos_angle(
            action['place_abs_base_pose'])
    elif action_data_mode == 'pick_relative_place_relative_to_region':
        pick_pose = action['pick_abs_base_pose']
        pick_pose = utils.get_relative_robot_pose_wrt_body_pose(
            pick_pose, state.abs_obj_pose)
        pick_pose = utils.encode_pose_with_sin_and_cos_angle(pick_pose)
        place_pose = get_place_pose_wrt_region(action['place_abs_base_pose'],
                                               action['region_name'])
    elif action_data_mode == 'pick_parameters_place_relative_to_region':
        pick_pose = action['pick_abs_base_pose']
        portion, base_angle, facing_angle_offset \
            = utils.get_ir_parameters_from_robot_obj_poses(pick_pose, state.abs_obj_pose)
        base_angle = utils.encode_angle_in_sin_and_cos(base_angle)
        pick_pose = np.hstack([portion, base_angle, facing_angle_offset])
        place_pose = get_place_pose_wrt_region(action['place_abs_base_pose'],
                                               action['region_name'])
        place_pose = utils.encode_pose_with_sin_and_cos_angle(place_pose)
    elif action_data_mode == 'pick_parameters_place_normalized_relative_to_region':
        pick_pose = action['pick_abs_base_pose']
        portion, base_angle, facing_angle_offset \
            = utils.get_ir_parameters_from_robot_obj_poses(pick_pose, state.abs_obj_pose)
        base_angle = utils.encode_angle_in_sin_and_cos(base_angle)
        pick_pose = np.hstack([portion, base_angle, facing_angle_offset])
        place_pose = normalize_place_pose_wrt_region(
            action['place_abs_base_pose'], action['region_name'])
        place_pose = utils.encode_pose_with_sin_and_cos_angle(place_pose)
    elif action_data_mode == 'pick_parameters_place_relative_to_pick':
        pick_pose = action['pick_abs_base_pose']
        portion, base_angle, facing_angle_offset \
            = utils.get_ir_parameters_from_robot_obj_poses(pick_pose, state.abs_obj_pose)
        base_angle = utils.encode_angle_in_sin_and_cos(base_angle)
        pick_params = np.hstack([portion, base_angle, facing_angle_offset])
        place_pose = action['place_abs_base_pose']
        place_pose = utils.get_relative_robot_pose_wrt_body_pose(
            place_pose, pick_pose)
        pick_pose = pick_params
        place_pose = utils.encode_pose_with_sin_and_cos_angle(place_pose)
    elif action_data_mode == 'pick_parameters_place_relative_to_object':
        pick_pose = action['pick_abs_base_pose']
        portion, base_angle, facing_angle_offset \
            = utils.get_ir_parameters_from_robot_obj_poses(pick_pose, state.abs_obj_pose)
        base_angle = utils.encode_angle_in_sin_and_cos(base_angle)
        pick_params = np.hstack([portion, base_angle, facing_angle_offset])
        pick_pose = pick_params

        place_pose = action['place_abs_base_pose']
        obj_pose = state.abs_obj_pose
        rel_place_pose = utils.get_relative_robot_pose_wrt_body_pose(
            place_pose, obj_pose)
        place_pose = utils.encode_pose_with_sin_and_cos_angle(rel_place_pose)

        # BUG FIX: this round-trip check used to run for *every* mode,
        # crashing with a NameError on obj_pose outside this branch, and
        # dropped into pdb on failure. It is only meaningful for the
        # object-relative encoding, so scope it here and assert directly.
        unprocessed_place = utils.clean_pose_data(
            get_unprocessed_placement(place_pose, obj_pose))
        target = utils.clean_pose_data(action['place_abs_base_pose'])
        assert np.all(np.isclose(unprocessed_place, target)), \
            'relative place pose failed to round-trip to absolute pose'
    else:
        raise NotImplementedError(action_data_mode)

    return np.hstack([pick_pose, place_pose])
Пример #17
0
def get_placements(state, poses, admon, smpler_state):
    """Grid-search placements and visualize their predicted values.

    Evaluates admon's Q model on a 10x10 (x, y) grid of candidate
    placements, tracks the argmax placement, then renders one marker per
    cell whose height reflects exp(value); the argmax cell is drawn red.
    Python 2 code (print statements, text-mode pickle).
    """
    stime = time.time()
    # placement, value = admon.get_max_x(state, poses)
    max_x = None
    max_val = -np.inf
    exp_val = {}

    # The state tensor packs collisions in channels 0:2 and goal flags in
    # the remaining channels.
    cols = state[:, :, 0:2, :]
    goal_flags = state[:, :, 2:, :]

    # NOTE(review): file handle is never closed here. 615 key configs
    # remain after deleting these five indices (matches reshape below).
    key_configs = pickle.load(open('prm.pkl', 'r'))[0]
    key_configs = np.delete(key_configs, [415, 586, 615, 618, 619], axis=0)
    rel_konfs = []
    for k in key_configs:
        konf = utils.clean_pose_data(k)
        obj_pose = utils.clean_pose_data(smpler_state.obj_pose)
        rel_konf = utils.subtract_pose2_from_pose1(konf, obj_pose)
        rel_konfs.append(rel_konf)
    rel_konfs = np.array(rel_konfs).reshape((1, 615, 3, 1))

    # Candidate (x, y) grid over the placement region; theta stays 0.
    x_range = np.linspace(0., 3.5, 10)
    y_range = np.linspace(-8., -6., 10)
    placement = np.array([0., 0., 0.])
    for x in x_range:
        for y in y_range:
            # The same array is mutated in place for each grid cell.
            placement = placement.squeeze()
            placement[0] = x
            placement[1] = y
            placement = utils.clean_pose_data(placement)
            obj_pose = utils.clean_pose_data(smpler_state.obj_pose)
            rel_placement = utils.subtract_pose2_from_pose1(
                placement, obj_pose)
            obj_pose = utils.encode_pose_with_sin_and_cos_angle(obj_pose)
            val = admon.q_mse_model.predict([
                rel_placement[None, :], goal_flags, obj_pose[None, :],
                rel_konfs, cols
            ])
            #print np.mean(admon.relevance_model.predict([rel_placement[None, :], goal_flags, rel_konfs, cols]).squeeze())
            if val > max_val:
                # deepcopy: `placement` is mutated on the next iteration.
                max_x = copy.deepcopy(placement)
                max_val = val
            exp_val[(x, y)] = np.exp(val) * 100
            print rel_placement, x, y, val, exp_val[(x, y)]
    # Normalization is effectively disabled: total is overwritten with 1.
    total = np.sum(exp_val.values())
    total = 1
    i = 0
    utils.viewer()

    # Second pass: render one marker per cell; the argmax cell is red.
    for x in x_range:
        for y in y_range:
            height = exp_val[(x, y)]  # / total + 1
            # print x, y, height
            placement = placement.squeeze()
            placement[0] = x
            placement[1] = y
            # absx, absy = unnormalize_pose_wrt_region(placement, 'loading_region')[0:2]
            # make_body(height, i, absx, absy)

            if height == np.exp(max_val) * 100:
                make_body(height, i, x, y, color=[1, 0, 0])
            else:
                make_body(height, i, x, y)
            i += 1
    placement = max_x
    print placement, max_val, np.exp(max_val)
    print 'maximizing x time', time.time() - stime
Пример #18
0
    def add_trajectory(self, plan):
        """Replay *plan* in a fresh environment, recording (s, a, r) tuples.

        Each pick action computes the state and pick parameters; the
        following place action completes the pair and stores it via
        add_sar_tuples. Python 2 code (print statements).
        """
        print "Problem idx", self.problem_idx
        self.set_seed(self.problem_idx)
        problem_env, openrave_env = self.create_environment()
        self.problem_env = problem_env

        #admon = create_imle_model(1)

        state = None
        utils.viewer()
        for action_idx, action in enumerate(plan):
            if 'pick' in action.type:
                # The region for this pick's state comes from the place
                # action that immediately follows it in the plan.
                associated_place = plan[action_idx + 1]
                state = self.compute_state(
                    action.discrete_parameters['object'],
                    associated_place.discrete_parameters['region'])

                ## Visualization purpose
                """
                obj = action.discrete_parameters['object']
                region = associated_place.discrete_parameters['region']
                collision_vec = np.delete(state.state_vec, [415, 586, 615, 618, 619], axis=1)
                smpls = generate_smpls(obj, collision_vec, state, admon)
                utils.visualize_path(smpls)
                utils.visualize_path([associated_place.continuous_parameters['q_goal']])
                import pdb; pdb.set_trace()
                """
                ##############################################################################

                action.execute()
                obj_pose = utils.get_body_xytheta(
                    action.discrete_parameters['object'])
                robot_pose = utils.get_body_xytheta(self.problem_env.robot)
                # Recover IR parameters from the achieved poses and verify
                # they reproduce the planned pick base pose.
                pick_parameters = utils.get_ir_parameters_from_robot_obj_poses(
                    robot_pose, obj_pose)
                recovered = utils.get_absolute_pick_base_pose_from_ir_parameters(
                    pick_parameters, obj_pose)
                pick_base_pose = action.continuous_parameters['q_goal']
                pick_base_pose = utils.clean_pose_data(pick_base_pose)
                recovered = utils.clean_pose_data(recovered)
                # NOTE(review): portion > 1 aborts the whole process —
                # confirm this hard exit is intended rather than a skip.
                if pick_parameters[0] > 1:
                    sys.exit(-1)
                assert np.all(np.isclose(pick_base_pose, recovered))
            else:
                # Reward 0 on the final (plan-completing) action, -1 otherwise.
                if action == plan[-1]:
                    reward = 0
                else:
                    reward = -1
                action.execute()
                place_base_pose = action.continuous_parameters['q_goal']

                # pick_parameters / pick_base_pose carry over from the
                # preceding pick iteration of this loop.
                action_info = {
                    'object_name': action.discrete_parameters['object'],
                    'region_name': action.discrete_parameters['region'],
                    'pick_base_ir_parameters': pick_parameters,
                    'place_abs_base_pose': place_base_pose,
                    'pick_abs_base_pose': pick_base_pose,
                }
                self.add_sar_tuples(state, action_info, reward)

        self.add_state_prime()
        print "Done!"
        openrave_env.Destroy()