Example #1
def visualize_samples(policy, pidx):
    problem_env = load_problem(pidx)
    utils.viewer()

    obj = problem_env.object_names[1]
    utils.set_color(obj, [1, 0, 0])
    smpler_state = get_smpler_state(pidx, obj, problem_env)
    smpler_state.abs_obj_pose = utils.clean_pose_data(
        smpler_state.abs_obj_pose)
    """
    w_values = generate_w_values(smpler_state, policy)
    transformed_konfs = generate_transformed_key_configs(smpler_state, policy)
    print "Visualizing top-k transformed konfs..."
    visualize_key_configs_with_top_k_w_vals(w_values, transformed_konfs, k=5)
    print "Visualizing top-k konfs..."
    visualize_key_configs_with_top_k_w_vals(w_values, smpler_state.key_configs, k=10)
    """

    place_smpls = []
    noises_used = []
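    # Sweep the noise magnitude from 0.0 to 0.9 to see how the sample spread
    # grows with noise.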
    for i in range(10):
        noise = i / 10.0 * np.random.normal(size=(1, 4)).astype('float32')
        smpl = generate_smpls_using_noise(smpler_state, policy, noise)[0]
        place_smpls.append(smpl)
        noises_used.append(noise)
    import pdb
    pdb.set_trace()
    utils.visualize_path(place_smpls[0:10])

def main():
    plans, pidxs = load_plan()
    plan = plans[2]
    problem_env = make_environment(59)
    utils.set_body_transparency('top_wall_1', 0.5)
    #utils.set_robot_config(np.array([[ 3.6153236 ,  0.93829982,  5.63509206]]))
    utils.viewer()
    [utils.set_color(obj, [0.0, 0.0, 0.7]) for obj in problem_env.objects]
    utils.set_color('c_obst1', [1.0, 1.0, 0])
    viewer = problem_env.env.GetViewer()
    cam_transform = np.array(
        [[0.58774001, -0.61021391, 0.53122562, 0.93478185],
         [-0.80888257, -0.45655478, 0.37049525, -1.98781455],
         [0.01645225, -0.64745402, -0.76192691, 4.26729631], [0., 0., 0., 1.]])
    viewer.SetCamera(cam_transform)

    robot = problem_env.robot
    utils.open_gripper(robot)
    manip = robot.GetManipulator('rightarm_torso')
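    # Plan over the arm + torso joints together with the planar base
    # (x, y, and rotation about the z-axis).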
    robot.SetActiveDOFs(manip.GetArmIndices(),
                        DOFAffine.X | DOFAffine.Y | DOFAffine.RotationAxis,
                        [0, 0, 1])

    motion_plan_file = './test_scripts/jobtalk_figures/one_arm_domain_motion_plans_for_job_talk.pkl'
    mp_plans = get_motion_plans(plan, problem_env, motion_plan_file)

    import pdb
    pdb.set_trace()

    play_plan(plan, mp_plans, robot)
    import pdb
    pdb.set_trace()
    problem_env.init_saver.Restore()

def visualize(plan, problem_idx, algo):
    np.random.seed(problem_idx)
    random.seed(problem_idx)

    problem_env, openrave_env = create_environment(problem_idx)
    key_configs = pickle.load(open('prm.pkl', 'r'))[0]

    state = None
    learned_smpler = get_learned_smpler(algo)
    utils.viewer()
    for action_idx, action in enumerate(plan):
        if 'pick' in action.type:
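            # The plan pairs each pick with the place that immediately follows
            # it; the pair defines the state fed to the learned sampler.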
            associated_place = plan[action_idx + 1]
            state = compute_state(
                action.discrete_parameters['object'],
                associated_place.discrete_parameters['region'], problem_env,
                key_configs)
            smpler = LearnedGenerator(action, problem_env, learned_smpler,
                                      state)
            smples = np.vstack([smpler.generate() for _ in range(10)])
            action.discrete_parameters[
                'region'] = associated_place.discrete_parameters['region']
            pick_base_poses = get_pick_base_poses(action, smples)
            place_base_poses = get_place_base_poses(action, smples,
                                                    problem_env)
            utils.visualize_path(place_base_poses)
            import pdb
            pdb.set_trace()

        action.execute()
Example #4
def main():
    problem_idx = 0

    problem_env = Mover(problem_idx, problem_type='jobtalk')
    problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'rrt'))
    utils.viewer()

    seed = 1
    np.random.seed(seed)
    random.seed(seed)

    pick_smpler, place_smpler = create_sampler(problem_env)

    # Set camera view - to get camera transform: viewer.GetCameraTransform()
    cam_transform = np.array(
        [[-0.54866337, -0.70682829, 0.44650004, -1.45953619],
         [-0.83599448, 0.45806221, -0.30214604, 2.02016926],
         [0.00904058, -0.53904803, -0.8422265, 4.88620949], [0., 0., 0., 1.]])
    cam_transform = np.array(
        [[0.76808539, 0.51022899, -0.38692533, 2.7075901],
         [0.63937823, -0.57785198, 0.50723029, -2.0267117],
         [0.03521803, -0.6369878, -0.77006898, 4.52542162], [0., 0., 0., 1.]])

    init_goal_cam_transform = np.array(
        [[0.99941927, -0.00186311, 0.03402425, 1.837726],
         [-0.02526303, -0.71058334, 0.70315937, -5.78141165],
         [0.022867, -0.70361058, -0.71021775, 6.03373909], [0., 0., 0., 1.]])
    goal_obj_poses = pickle.load(
        open('./test_scripts/jobtalk_figure_cache_files/goal_obj_poses.pkl',
             'r'))
    for o in problem_env.objects:
        utils.set_obj_xytheta(goal_obj_poses[o.GetName()], o)
    viewer = problem_env.env.GetViewer()
    viewer.SetCamera(init_goal_cam_transform)
    """
    # how do you go from intrinsic params to [fx, fy, cx, cy]? What are these anyways?
    # cam_intrinsic_params = viewer.GetCameraIntrinsics()
    I = problem_env.env.GetViewer().GetCameraImage(1920, 1080, cam_transform, cam_intrinsic_params)
    scipy.misc.imsave('test.png', I)
    """

    utils.set_obj_xytheta(np.array([0.01585576, 1.06516767, 5.77099297]),
                          target_obj_name)
    pick_smpls = visualize_pick(pick_smpler, problem_env)
    feasible_pick_op = get_feasible_pick(problem_env, pick_smpls)
    feasible_pick_op.execute()
    utils.visualize_placements(place_smpler(100),
                               problem_env.robot.GetGrabbed()[0])
    import pdb
    pdb.set_trace()

    # Visualize path
    """
Example #5
def main():
    config = parse_arguments()
    problem_env = make_environment(config)
    plan = load_plan(config)
    utils.viewer()
    T_viewer = np.array([[-0.02675345, 0.86098854, -0.50792025, 6.43201399],
                         [0.99806971, -0.00548069, -0.06186132, -2.36350584],
                         [-0.05604564, -0.50859482, -0.85917995, 8.97449112],
                         [0., 0., 0., 1.]])
    viewer = problem_env.env.GetViewer()
    viewer.SetCamera(T_viewer)
    import pdb; pdb.set_trace()
    play_plan(plan)
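
The hard-coded 4x4 camera transforms in these examples are typically captured interactively from the viewer (see the comment in Example #4). A minimal sketch of that round trip, assuming an OpenRAVE environment handle `env` with a viewer already attached via `utils.viewer()`:

viewer = env.GetViewer()
T = viewer.GetCameraTransform()  # 4x4 camera pose in the world frame
# ... adjust the view by hand in the GUI, then restore it later with:
viewer.SetCamera(T)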
Example #6
def visualize_samples_at_noises(problem_env, policy, pidx, noises):
    utils.viewer()

    obj = problem_env.object_names[1]
    utils.set_color(obj, [1, 0, 0])
    smpler_state = get_smpler_state(pidx, obj, problem_env)
    smpler_state.abs_obj_pose = utils.clean_pose_data(
        smpler_state.abs_obj_pose)

    place_smpls = []
    for noise in noises:
        smpl = generate_smpls_using_noise(smpler_state, policy, noise)[0]
        place_smpls.append(smpl)
    utils.visualize_path(place_smpls)
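
A minimal usage sketch for the helper above, assuming `problem_env`, `policy`, and `pidx` come from the surrounding script; the noise shape and the 0.0-0.9 scaling mirror Example #1:

import numpy as np

# Hypothetical driver: evaluate the same policy at a fixed grid of noise scales.
noises = [scale * np.random.normal(size=(1, 4)).astype('float32')
          for scale in np.linspace(0.0, 0.9, 10)]
visualize_samples_at_noises(problem_env, policy, pidx, noises)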
Example #7
    def __init__(self, sampler, abstract_state, abstract_action):
        Sampler.__init__(self, sampler)
        self.key_configs = abstract_state.prm_vertices
        self.abstract_state = abstract_state
        self.obj = abstract_action.discrete_parameters['object']
        self.region = abstract_action.discrete_parameters['place_region']

        goal_entities = self.abstract_state.goal_entities
        stime = time.time()
        self.smpler_state = ConcreteNodeState(abstract_state.problem_env, self.obj, self.region, goal_entities, key_configs=self.key_configs)
        print "Concre node creation time", time.time()-stime

        self.samples = self.sample_new_points(100)
        utils.viewer()

        self.curr_smpl_idx = 0
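
Given the `samples` buffer and `curr_smpl_idx` cursor initialized above, a natural companion accessor (hypothetical; not part of the source) would be:

    def sample(self):
        # Return the pre-generated samples in order, refilling once exhausted.
        if self.curr_smpl_idx >= len(self.samples):
            self.samples = self.sample_new_points(100)
            self.curr_smpl_idx = 0
        smpl = self.samples[self.curr_smpl_idx]
        self.curr_smpl_idx += 1
        return smpl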
Example #8
def main():
    seed = int(sys.argv[1])
    epoch = sys.argv[2]
    algo = str(sys.argv[3])

    atype = 'place'
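    # Build an argparse-style config object from a namedtuple so the policy
    # factories below can be driven directly from sys.argv.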
    placeholder_config_definition = collections.namedtuple(
        'config',
        'algo dtype tau seed atype epoch region pick_seed place_seed filtered')
    placeholder_config = placeholder_config_definition(algo=algo,
                                                       tau=1.0,
                                                       dtype='n_objs_pack_1',
                                                       seed=seed,
                                                       atype=atype,
                                                       epoch=epoch,
                                                       region='home_region',
                                                       pick_seed=0,
                                                       place_seed=seed,
                                                       filtered=True)

    problem_seed = 0
    np.random.seed(problem_seed)
    random.seed(problem_seed)
    problem_env, openrave_env = create_environment(problem_seed)

    sampler = create_policy(placeholder_config)
    load_sampler_weights(sampler, placeholder_config)

    #check_feasibility_rate(problem_env, atype, sampler, placeholder_config)
    use_uniform = False
    utils.viewer()
    # obj_to_visualize = 'square_packing_box3'
    obj_to_visualize = 'rectangular_packing_box2'

    obj_to_visualize = 'rectangular_packing_box1'
    obj_to_visualize = 'square_packing_box4'
    smpls = get_smpls(problem_env, atype, sampler, obj_to_visualize,
                      placeholder_config, use_uniform)
    if atype == 'place':
        visualize_samples(smpls[1], problem_env, obj_to_visualize, atype)
    else:
        visualize_samples(smpls, problem_env, obj_to_visualize, atype)
Example #9
def visualize_in_training_env(problem_env, learned_sampler, plan):
    key_configs = pickle.load(open('prm.pkl', 'r'))[0]
    state = None
    utils.viewer()
    for action_idx, action in enumerate(plan):
        if 'pick' in action.type:
            associated_place = plan[action_idx + 1]
            state = compute_state(
                action.discrete_parameters['object'],
                associated_place.discrete_parameters['region'], problem_env)
            smpler = LearnedGenerator(action, problem_env, learned_sampler,
                                      state)
            smples = np.vstack([smpler.generate() for _ in range(10)])
            action.discrete_parameters[
                'region'] = associated_place.discrete_parameters['region']
            pick_base_poses = get_pick_base_poses(action, smples)
            place_base_poses = get_place_base_poses(action, smples,
                                                    problem_env)
            utils.visualize_path(place_base_poses)
        action.execute()

def main():
    data_dir = './planning_experience/processed/domain_two_arm_mover/n_objs_pack_1/sahs/uses_rrt/sampler_trajectory_data/includes_n_in_way/'
    #data_dir = 'planning_experience/raw/uses_rrt/two_arm_mover/n_objs_pack_1/qlearned_hcount_old_number_in_goal/q_config_num_train_5000_mse_weight_1.0_use_region_agnostic_False_mix_rate_1.0/n_mp_limit_10_n_iter_limit_200/'
    traj_files = [f for f in os.listdir(data_dir) if 'pidx' in f]
    placements = []
    for traj_file in traj_files:
        print traj_file
        data = pickle.load(open(data_dir + traj_file, 'r'))
        placement_pose = get_first_placement_in_home(data)
        if placement_pose is not None:
            placements.append(placement_pose)
        if len(placements) > 100:
            break
    print (np.array(placements) == 4).mean()
    import pdb
    pdb.set_trace()
    problem_env = PaPMoverEnv(0)
    utils.viewer()
    utils.visualize_path(placements)

    import pdb
    pdb.set_trace()

def main():
    config = parse_arguments()

    plan, problem_env = load_planning_experience_data(config)
    goal_objs = problem_env.goal_objects
    goal_region = problem_env.goal_region

    if config.v:
        utils.viewer()

    set_seeds(config.seed)

    total_ik_checks, total_pick_mp_checks, total_pick_mp_infeasible, total_place_mp_checks, \
    total_place_mp_infeasible, total_mp_checks, total_infeasible_mp, n_total_actions, goal_reached = \
        execute_policy(plan, problem_env, goal_objs + [goal_region], config)

    logfile = get_logfile_name(config)
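    # One CSV row per run: pidx, seed, IK checks, pick MP checks/infeasible,
    # place MP checks/infeasible, MP totals, number of actions, goal_reached.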
    result_log = "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n" % (
        config.pidx, config.seed, total_ik_checks, total_pick_mp_checks,
        total_pick_mp_infeasible, total_place_mp_checks,
        total_place_mp_infeasible, total_mp_checks, total_infeasible_mp,
        n_total_actions, goal_reached)
    logfile.write(result_log)

def main():
    problem_seed = 1
    states, konf_relevance, poses, rel_konfs, goal_flags, actions, sum_rewards = \
        get_data('n_objs_pack_4', 'place', 'loading_region')
    actions = actions[:, -4:]
    actions = [utils.decode_pose_with_sin_and_cos_angle(a) for a in actions]

    problem_env, openrave_env = create_environment(problem_seed)

    key_configs = pickle.load(open('prm.pkl', 'r'))[0]
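    # The same five PRM vertex indices (415, 586, 615, 618, 619) are filtered
    # out in several of these examples before the region-specific pruning.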
    key_configs = np.delete(key_configs, [415, 586, 615, 618, 619], axis=0)
    indices_to_delete = sampler_utils.get_indices_to_delete(
        'loading_region', key_configs)
    key_configs = np.delete(key_configs, indices_to_delete, axis=0)

    filtered_konfs = filter_configs_that_are_too_close(actions)
    import pdb
    pdb.set_trace()

    utils.viewer()
    utils.visualize_placements(filtered_konfs, 'square_packing_box1')
    import pdb
    pdb.set_trace()

def main():
    problem_seed = 6
    raw_dir = './planning_experience/raw/uses_rrt/two_arm_mover/n_objs_pack_1/qlearned_hcount_old_number_in_goal/q_config_num_train_5000_mse_weight_1.0_use_region_agnostic_False_mix_rate_1.0/n_mp_limit_10_n_iter_limit_200/'
    fname = 'pidx_%d_planner_seed_0_gnn_seed_0.pkl' % problem_seed
    plan_data = pickle.load(open(raw_dir + fname, 'r'))
    plan = plan_data['plan']
    if len(plan) == 4:
        sys.exit(-1)

    np.random.seed(problem_seed)
    random.seed(problem_seed)
    problem_env, openrave_env = create_environment(problem_seed)
    # load up the plan data

    config = parse_args()
    n_key_configs = 618
    n_collisions = 618
    sampler = create_policy(config, n_collisions, n_key_configs)
    sampler.load_weights(additional_name='epoch_' + str(600))

    utils.viewer()
    smpls = generate_smpls(problem_env, sampler, plan)
    import pdb
    pdb.set_trace()
Example #14
    def add_trajectory(self, plan):
        print "Problem idx", self.problem_idx
        self.set_seed(self.problem_idx)
        problem_env, openrave_env = self.create_environment()
        self.problem_env = problem_env

        #admon = create_imle_model(1)

        state = None
        utils.viewer()
        for action_idx, action in enumerate(plan):
            if 'pick' in action.type:
                associated_place = plan[action_idx + 1]
                state = self.compute_state(
                    action.discrete_parameters['object'],
                    associated_place.discrete_parameters['region'])

                ## Visualization purpose
                """
                obj = action.discrete_parameters['object']
                region = associated_place.discrete_parameters['region']
                collision_vec = np.delete(state.state_vec, [415, 586, 615, 618, 619], axis=1)
                smpls = generate_smpls(obj, collision_vec, state, admon)
                utils.visualize_path(smpls)
                utils.visualize_path([associated_place.continuous_parameters['q_goal']])
                import pdb; pdb.set_trace()
                """
                ##############################################################################

                action.execute()
                obj_pose = utils.get_body_xytheta(
                    action.discrete_parameters['object'])
                robot_pose = utils.get_body_xytheta(self.problem_env.robot)
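                # Round-trip check: extract pick parameters from the object and
                # robot poses, reconstruct the absolute pick base pose from them,
                # and assert below that it matches the executed q_goal.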
                pick_parameters = utils.get_ir_parameters_from_robot_obj_poses(
                    robot_pose, obj_pose)
                recovered = utils.get_absolute_pick_base_pose_from_ir_parameters(
                    pick_parameters, obj_pose)
                pick_base_pose = action.continuous_parameters['q_goal']
                pick_base_pose = utils.clean_pose_data(pick_base_pose)
                recovered = utils.clean_pose_data(recovered)
                if pick_parameters[0] > 1:
                    sys.exit(-1)
                assert np.all(np.isclose(pick_base_pose, recovered))
            else:
                if action == plan[-1]:
                    reward = 0
                else:
                    reward = -1
                action.execute()
                place_base_pose = action.continuous_parameters['q_goal']

                action_info = {
                    'object_name': action.discrete_parameters['object'],
                    'region_name': action.discrete_parameters['region'],
                    'pick_base_ir_parameters': pick_parameters,
                    'place_abs_base_pose': place_base_pose,
                    'pick_abs_base_pose': pick_base_pose,
                }
                self.add_sar_tuples(state, action_info, reward)

        self.add_state_prime()
        print "Done!"
        openrave_env.Destroy()
Example #15
def get_placements(state, poses, admon, smpler_state):
    stime = time.time()
    # placement, value = admon.get_max_x(state, poses)
    max_x = None
    max_val = -np.inf
    exp_val = {}

    cols = state[:, :, 0:2, :]
    goal_flags = state[:, :, 2:, :]

    key_configs = pickle.load(open('prm.pkl', 'r'))[0]
    key_configs = np.delete(key_configs, [415, 586, 615, 618, 619], axis=0)
    rel_konfs = []
    for k in key_configs:
        konf = utils.clean_pose_data(k)
        obj_pose = utils.clean_pose_data(smpler_state.obj_pose)
        rel_konf = utils.subtract_pose2_from_pose1(konf, obj_pose)
        rel_konfs.append(rel_konf)
    rel_konfs = np.array(rel_konfs).reshape((1, 615, 3, 1))

    x_range = np.linspace(0., 3.5, 10)
    y_range = np.linspace(-8., -6., 10)
    placement = np.array([0., 0., 0.])
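    # Exhaustively score a 10 x 10 grid of candidate (x, y) placements with
    # admon's q_mse_model and keep track of the argmax.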
    for x in x_range:
        for y in y_range:
            placement = placement.squeeze()
            placement[0] = x
            placement[1] = y
            placement = utils.clean_pose_data(placement)
            obj_pose = utils.clean_pose_data(smpler_state.obj_pose)
            rel_placement = utils.subtract_pose2_from_pose1(
                placement, obj_pose)
            obj_pose = utils.encode_pose_with_sin_and_cos_angle(obj_pose)
            val = admon.q_mse_model.predict([
                rel_placement[None, :], goal_flags, obj_pose[None, :],
                rel_konfs, cols
            ])
            #print np.mean(admon.relevance_model.predict([rel_placement[None, :], goal_flags, rel_konfs, cols]).squeeze())
            if val > max_val:
                max_x = copy.deepcopy(placement)
                max_val = val
            exp_val[(x, y)] = np.exp(val) * 100
            print rel_placement, x, y, val, exp_val[(x, y)]
    total = np.sum(exp_val.values())
    total = 1
    i = 0
    utils.viewer()

    for x in x_range:
        for y in y_range:
            height = exp_val[(x, y)]  # / total + 1
            # print x, y, height
            placement = placement.squeeze()
            placement[0] = x
            placement[1] = y
            # absx, absy = unnormalize_pose_wrt_region(placement, 'loading_region')[0:2]
            # make_body(height, i, absx, absy)

            if height == np.exp(max_val) * 100:
                make_body(height, i, x, y, color=[1, 0, 0])
            else:
                make_body(height, i, x, y)
            i += 1
    placement = max_x
    print placement, max_val, np.exp(max_val)
    print 'maximizing x time', time.time() - stime
Example #16
def main():
    problem_idx = 0

    env_name = 'two_arm_mover'
    if env_name == 'one_arm_mover':
        problem_env = PaPOneArmMoverEnv(problem_idx)
        goal = ['rectangular_packing_box1_region'] + \
               [obj.GetName() for obj in problem_env.objects[:3]]
        state_fname = 'one_arm_state_gpredicate.pkl'
    else:
        problem_env = PaPMoverEnv(problem_idx)
        goal_objs = [
            'square_packing_box1', 'square_packing_box2',
            'rectangular_packing_box3', 'rectangular_packing_box4'
        ]
        goal_region = 'home_region'
        goal = [goal_region] + goal_objs
        state_fname = 'two_arm_state_gpredicate.pkl'

    problem_env.set_goal(goal)
    if os.path.isfile(state_fname):
        state = pickle.load(open(state_fname, 'r'))
    else:
        statecls = helper.get_state_class(env_name)
        state = statecls(problem_env, problem_env.goal)

    utils.viewer()

    if env_name == 'one_arm_mover':
        obj_name = 'c_obst9'
        pick_op = Operator(
            operator_type='one_arm_pick',
            discrete_parameters={
                'object': problem_env.env.GetKinBody(obj_name)
            },
            continuous_parameters=state.pick_params[obj_name][0])
        pick_op.execute()
        problem_env.env.Remove(problem_env.env.GetKinBody('computer_table'))
        utils.set_color(obj_name, [0.9, 0.8, 0.0])
        utils.set_color('c_obst0', [0, 0, 0.8])
        utils.set_obj_xytheta(
            np.array([[4.47789478, -0.01477591, 4.76236795]]), 'c_obst0')
        T_viewer = np.array(
            [[-0.69618481, -0.41674492, 0.58450867, 3.62774134],
             [-0.71319601, 0.30884202, -0.62925993, 0.39102399],
             [0.08172004, -0.85495045, -0.51223194, 1.70261502],
             [0., 0., 0., 1.]])

        viewer = problem_env.env.GetViewer()
        viewer.SetCamera(T_viewer)

        import pdb
        pdb.set_trace()
        utils.release_obj()
    else:
        T_viewer = np.array(
            [[0.99964468, -0.00577897, 0.02602139, 1.66357124],
             [-0.01521307, -0.92529857, 0.37893419, -7.65383244],
             [0.02188771, -0.37919541, -0.92505771, 6.7393589],
             [0., 0., 0., 1.]])
        viewer = problem_env.env.GetViewer()
        viewer.SetCamera(T_viewer)

        import pdb
        pdb.set_trace()
        # prefree and occludespre
        target = 'rectangular_packing_box4'
        utils.set_obj_xytheta(np.array([[0.1098148, -6.33305931, 0.22135689]]),
                              target)
        utils.get_body_xytheta(target)
        utils.visualize_path(
            state.cached_pick_paths['rectangular_packing_box4'])
        import pdb
        pdb.set_trace()

        # manipfree and occludesmanip
        pick_obj = 'square_packing_box2'
        pick_used = state.pick_used[pick_obj]
        utils.two_arm_pick_object(pick_obj, pick_used.continuous_parameters)
        utils.visualize_path(state.cached_place_paths[(u'square_packing_box2',
                                                       'home_region')])

    import pdb
    pdb.set_trace()
Example #17
def main():
    config = parse_arguments()
    solution_file_name = get_solution_file_name(config)
    is_problem_solved_before = os.path.isfile(solution_file_name)
    if is_problem_solved_before and not config.f:
        print "***************Already solved********************"
        with open(solution_file_name, 'rb') as f:
            trajectory = pickle.load(f)
            success = trajectory['success']
            tottime = trajectory['tottime']
            num_nodes = trajectory['num_nodes']
            plan_length = len(trajectory['plan']) if success else 0
            print 'Time: %.2f Success: %d Plan length: %d Num nodes: %d' % (tottime, success, plan_length, num_nodes)
        sys.exit(-1)

    if config.gather_planning_exp:
        config.timelimit = np.inf
    np.random.seed(config.pidx)
    random.seed(config.pidx)

    if config.domain == 'two_arm_mover':
        goal_objs = ['square_packing_box1', 'square_packing_box2', 'rectangular_packing_box3', 'rectangular_packing_box4']
        goal_region = 'home_region'
    elif config.domain == 'one_arm_mover':
        goal_objs = ['c_obst0', 'c_obst1', 'c_obst2', 'c_obst3']
        goal_region = 'rectangular_packing_box1_region'
    else:
        raise NotImplementedError
    problem_env = get_problem_env(config, goal_region, goal_objs)
    set_problem_env_config(problem_env, config)
    if config.v:
        utils.viewer()

    is_use_gnn = 'qlearned' in config.h_option
    if is_use_gnn:
        pap_model = get_pap_gnn_model(problem_env, config)
    else:
        pap_model = None

    if config.integrated or config.integrated_unregularized_sampler:
        raise NotImplementedError
        # smpler = get_learned_smpler(config.sampler_seed, config.sampler_epoch, config.sampler_algo)
    else:
        smpler = None

    [utils.set_color(o, [1, 0, 0]) for o in goal_objs]
    t = time.time()
    np.random.seed(config.planner_seed)
    random.seed(config.planner_seed)
    nodes_to_goal, plan, (num_ik_checks, num_mp_checks), nodes = search(problem_env, config, pap_model, goal_objs,
                                                   goal_region, smpler, None)
    num_nodes = 0
    tottime = time.time() - t
    n_feasibility_checks = get_total_n_feasibility_checks(nodes)

    success = plan is not None
    plan_length = len(plan) if success else 0
    if success and config.domain == 'one_arm_mover':
        make_pklable(plan)

    if config.gather_planning_exp:
        [make_node_pklable(nd) for nd in nodes]
    else:
        nodes = None

    h_for_sampler_training = []
    if success:
        h_for_sampler_training = []
        for n in nodes_to_goal:
            h_for_sampler_training.append(n.h_for_sampler_training)

    data = {
        'n_objs_pack': config.n_objs_pack,
        'tottime': tottime,
        'success': success,
        'plan_length': plan_length,
        'num_nodes': num_nodes,
        'plan': plan,
        'nodes': nodes,
        'hvalues': h_for_sampler_training,
        'n_feasibility_checks': {'mp': num_mp_checks, 'ik': num_ik_checks}
    }
    with open(solution_file_name, 'wb') as f:
        pickle.dump(data, f)
    print 'Time: %.2f Success: %d Plan length: %d Num nodes: %d' % (tottime, success, plan_length, num_nodes)
Example #18
def main():
    parameters = parse_parameters()

    save_dir = make_and_get_save_dir(parameters)
    file_path = save_dir + '/seed_' + str(parameters.planner_seed) + '_pidx_' + str(parameters.pidx) + '.pkl'
    quit_if_already_tested(file_path, parameters.f)

    # for creating problem
    np.random.seed(parameters.pidx)
    random.seed(parameters.pidx)
    is_two_arm_env = parameters.domain.find('two_arm') != -1
    if is_two_arm_env:
        environment = Mover(parameters.pidx)
    else:
        environment = OneArmMover(parameters.pidx)
    environment.initial_robot_base_pose = get_body_xytheta(environment.robot)

    if parameters.domain == 'two_arm_mover':
        goal_region = 'home_region'
        if parameters.n_objs_pack == 4:
            goal_object_names = ['square_packing_box1', 'square_packing_box2', 'rectangular_packing_box3',
                                 'rectangular_packing_box4']
        else:
            goal_object_names = ['square_packing_box1']
        environment.set_goal(goal_object_names, goal_region)
    elif parameters.domain == 'one_arm_mover':
        goal_region = 'rectangular_packing_box1_region'
        assert parameters.n_objs_pack == 1
        goal_object_names = ['c_obst1']
        environment.set_goal(goal_object_names, goal_region)
    goal_entities = goal_object_names + [goal_region]

    # for randomized algorithms
    np.random.seed(parameters.planner_seed)
    random.seed(parameters.planner_seed)

    if parameters.v:
        utils.viewer()

    environment.set_motion_planner(BaseMotionPlanner(environment, 'rrt'))
    # from manipulation.bodies.bodies import set_color
    # set_color(environment.env.GetKinBody(goal_object_names[0]), [1, 0, 0])
    start_time = time.time()

    n_mp = n_ik = 0

    goal_object_names, high_level_plan, (mp, ik) = find_plan_without_reachability(environment, goal_object_names,
                                                                                  start_time,
                                                                                  parameters)  # finds the plan
    total_time_taken = time.time() - start_time
    n_mp += mp
    n_ik += ik

    total_n_nodes = 0
    total_plan = []
    idx = 0
    found_solution = False
    timelimit = parameters.timelimit
    while total_time_taken < timelimit:
        goal_obj_name = goal_object_names[idx]
        plan, n_nodes, status, (mp, ik) = find_plan_for_obj(goal_obj_name, high_level_plan[idx], environment,
                                                            start_time,
                                                            timelimit, parameters)
        total_n_nodes += n_nodes
        total_time_taken = time.time() - start_time
        print goal_obj_name, goal_object_names, total_n_nodes
        print "Time taken: %.2f" % total_time_taken
        if total_time_taken > timelimit:
            break
        if status == 'HasSolution':
            execute_plan(plan)
            environment.initial_robot_base_pose = utils.get_body_xytheta(environment.robot)
            total_plan += plan
            save_plan(total_plan, total_n_nodes, len(goal_object_names) - idx, found_solution, file_path, goal_entities,
                      total_time_taken, {'mp': n_mp, 'ik': n_ik}, parameters)
            idx += 1
        else:
            # Note that HPN does not have any recourse if this happens. We re-plan at the higher level.
            goal_object_names, plan, (mp, ik) = find_plan_without_reachability(environment, goal_object_names,
                                                                               start_time, parameters)  # finds the plan
            n_mp += mp
            n_ik += ik
            total_plan = []
            idx = 0
        if idx == len(goal_object_names):
            found_solution = True
            break
        else:
            idx %= len(goal_object_names)
    total_time_taken = time.time() - start_time
    save_plan(total_plan, total_n_nodes, len(goal_object_names) - idx, found_solution, file_path, goal_entities,
              total_time_taken, {'mp': n_mp, 'ik': n_ik}, parameters)
    print 'plan saved'
Example #19
def main():
    config = parse_arguments()
    solution_file_name = get_solution_file_name(config)
    is_problem_solved_before = os.path.isfile(solution_file_name)
    if is_problem_solved_before and not config.f:
        print "***************Already solved********************"
        with open(solution_file_name, 'rb') as f:
            trajectory = pickle.load(f)
            success = trajectory['success']
            tottime = trajectory['tottime']
            num_nodes = trajectory['num_nodes']
            plan_length = len(trajectory['plan']) if success else 0
            print 'Time: %.2f Success: %d Plan length: %d Num nodes: %d N_feasible: %d' % (
                tottime, success, plan_length, num_nodes, trajectory['n_feasibility_checks']['ik'])
        sys.exit(-1)

    if config.gather_planning_exp:
        assert config.h_option == 'hcount_old_number_in_goal'
        #config.timelimit =

    goal_objs, goal_region = get_goal_obj_and_region(config)
    print "Goal:", goal_objs, goal_region
    problem_env = get_problem_env(config, goal_region, goal_objs)
    set_problem_env_config(problem_env, config)
    if config.v:
        utils.viewer()

    is_use_gnn = 'qlearned' in config.h_option
    if is_use_gnn:
        pap_model = get_pap_gnn_model(problem_env, config)
    else:
        pap_model = None

    t = time.time()
    np.random.seed(config.planner_seed)
    random.seed(config.planner_seed)
    learned_sampler_model = get_learned_sampler_models(config)
    nodes_to_goal, plan, num_nodes, nodes = search(problem_env, config, pap_model, goal_objs,
                                                   goal_region, learned_sampler_model)
    tottime = time.time() - t
    n_feasibility_checks = get_total_n_feasibility_checks(nodes)

    success = plan is not None
    plan_length = len(plan) if success else 0
    if success and config.domain == 'one_arm_mover':
        make_pklable(plan)

    if config.gather_planning_exp:
        [make_node_pklable(nd) for nd in nodes]
    else:
        nodes = None

    h_for_sampler_training = []
    if success:
        h_for_sampler_training = []
        for n in nodes_to_goal:
            h_for_sampler_training.append(n.h_for_sampler_training)

    data = {
        'n_objs_pack': config.n_objs_pack,
        'tottime': tottime,
        'success': success,
        'plan_length': plan_length,
        'num_nodes': num_nodes,
        'plan': plan,
        'nodes': nodes,
        'hvalues': h_for_sampler_training,
        'n_feasibility_checks': n_feasibility_checks
    }
    with open(solution_file_name, 'wb') as f:
        pickle.dump(data, f)
    print 'Time: %.2f Success: %d Plan length: %d Num nodes: %d' % (tottime, success, plan_length, num_nodes)
    print(data['n_feasibility_checks'])

def main():
    device = torch.device("cpu")
    edges = pickle.load(open('prm_edges_for_reachability_gnn.pkl', 'r'))
    n_msg_passing = 0
    net = ReachabilityNet(edges,
                          n_key_configs=618,
                          device=device,
                          n_msg_passing=n_msg_passing)
    action_type = 'place'
    load_weights(net, 35, action_type, 1, n_msg_passing, device)

    get_data_test_acc = False
    if get_data_test_acc:
        seed = 0
        np.random.seed(seed)
        torch.cuda.manual_seed_all(seed)
        dataset = GNNReachabilityDataset(action_type)
        n_train = int(len(dataset) * 0.9)
        trainset, testset = torch.utils.data.random_split(
            dataset, [n_train, len(dataset) - n_train])
        testloader = torch.utils.data.DataLoader(testset,
                                                 batch_size=32,
                                                 shuffle=False,
                                                 num_workers=20,
                                                 pin_memory=True)
        print "Getting test accuracy for n_test %d..." % len(testset)
        test_acc = get_test_acc(testloader, net, device, len(testset))
        print test_acc
        import pdb
        pdb.set_trace()

    problem_seed = 1
    problem_env, openrave_env = create_environment(problem_seed)

    if 'Encoded' in net.__class__.__name__:
        tmp, _ = pickle.load(open('prm.pkl', 'r'))
        key_configs = np.zeros((618, 4))
        for pidx, p in enumerate(tmp):
            key_configs[pidx] = utils.encode_pose_with_sin_and_cos_angle(p)
    else:
        key_configs, _ = pickle.load(open('prm.pkl', 'r'))

    compute_clf_acc = False
    if compute_clf_acc:
        compute_clf_rates_from_diff_pinstances(problem_env, key_configs, net,
                                               action_type)

    utils.viewer()
    if action_type == 'place':
        status = "NoSolution"
        while status != "HasSolution":
            sampler = UniformSampler(problem_env.regions['home_region'])
            checker = TwoArmPaPFeasibilityCheckerWithoutSavingFeasiblePick(
                problem_env)
            pick_param = sampler.sample()
            target_obj = problem_env.objects[np.random.randint(8)]
            abstract_action = Operator(
                operator_type='two_arm_pick_two_arm_place',
                discrete_parameters={
                    'object': target_obj,
                    'place_region': 'home_region'
                })
            op_parameters, status = checker.check_feasibility(
                abstract_action, pick_param)
        utils.two_arm_pick_object(target_obj, op_parameters['pick'])

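    # Occupancy over the key configs, converted to one-hot collision features
    # for the reachability net's vertex inputs.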
    collisions = utils.compute_occ_vec(key_configs)
    col = utils.convert_binary_vec_to_one_hot(collisions.squeeze())
    qg = sample_qg(problem_env, 'home_region')
    utils.visualize_path([qg])

    vertex = make_vertices(qg, key_configs, col, net)
    vertex_outputs = net.get_vertex_activations(vertex).data.numpy().squeeze()
    """
    top_activations = np.max(vertex_outputs, axis=0)
    top_k_args = np.argsort(abs(top_activations))[-10:]
    """
    top_k_args = np.argsort(abs(vertex_outputs))[-30:]
    top_k_key_configs = key_configs[top_k_args, :]

    import pdb
    pdb.set_trace()
    utils.visualize_path(top_k_key_configs)
    # todo visualize the activations

    import pdb
    pdb.set_trace()