def __init__(self, problem_idx):
        Mover.__init__(self, problem_idx)
        self.operator_names = ['one_arm_pick', 'one_arm_place']
        # hard-coded initial base configuration for the robot and pose for the second object
        set_robot_config([4.19855789, 2.3236321, 5.2933337], self.robot)
        set_obj_xytheta([3.35744004, 2.19644156, 3.52741118], self.objects[1])
        self.boxes = self.objects
        # the movable objects are the ones on the shelves; flatten the per-shelf lists
        self.objects = self.problem_config['shelf_objects']
        self.objects = [k for v in self.objects.values() for k in v]
        # swap the first two shelf objects
        self.objects[0], self.objects[1] = self.objects[1], self.objects[0]

        self.target_box = self.env.GetKinBody('rectangular_packing_box1')
        utils.randomly_place_region(self.target_box, self.regions['home_region'])
        self.regions['rectangular_packing_box1_region'] = self.compute_box_region(self.target_box)
        self.shelf_regions = self.problem_config['shelf_regions']
        self.target_box_region = self.regions['rectangular_packing_box1_region']
        self.regions.update(self.shelf_regions)
        self.entity_names = [obj.GetName() for obj in self.objects] + ['rectangular_packing_box1_region',
                                                                       'center_shelf_region']
        self.name = 'one_arm_mover'
        self.init_saver = utils.CustomStateSaver(self.env)

        self.object_names = self.entity_names

        # fix incorrectly named regions
        self.regions = {
            region.name: region
            for region in self.regions.values()
        }
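
A minimal usage sketch of the constructor above, assuming the class is the OneArmMover referenced in the planner scripts below and that OneArmMover, BaseMotionPlanner and utils are importable (module paths omitted here):

# Usage sketch (assumptions noted above; not part of the original class).
problem_env = OneArmMover(0)  # problem index 0
problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'rrt'))
# the one-arm goal used by the planning scripts below
problem_env.set_goal(['c_obst1'], 'rectangular_packing_box1_region')
utils.viewer()  # optional: open the OpenRAVE viewer
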
Example #2
def main():
    problem_idx = 0

    problem_env = Mover(problem_idx, problem_type='jobtalk')
    problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'rrt'))
    utils.viewer()

    seed = 1
    np.random.seed(seed)
    random.seed(seed)

    pick_smpler, place_smpler = create_sampler(problem_env)

    # Set camera view - to get camera transform: viewer.GetCameraTransform()
    cam_transform = np.array(
        [[-0.54866337, -0.70682829, 0.44650004, -1.45953619],
         [-0.83599448, 0.45806221, -0.30214604, 2.02016926],
         [0.00904058, -0.53904803, -0.8422265, 4.88620949], [0., 0., 0., 1.]])
    # This second assignment overrides the transform above.
    cam_transform = np.array(
        [[0.76808539, 0.51022899, -0.38692533, 2.7075901],
         [0.63937823, -0.57785198, 0.50723029, -2.0267117],
         [0.03521803, -0.6369878, -0.77006898, 4.52542162], [0., 0., 0., 1.]])

    init_goal_cam_transform = np.array(
        [[0.99941927, -0.00186311, 0.03402425, 1.837726],
         [-0.02526303, -0.71058334, 0.70315937, -5.78141165],
         [0.022867, -0.70361058, -0.71021775, 6.03373909], [0., 0., 0., 1.]])
    goal_obj_poses = pickle.load(
        open('./test_scripts/jobtalk_figure_cache_files/goal_obj_poses.pkl',
             'rb'))
    for o in problem_env.objects:
        utils.set_obj_xytheta(goal_obj_poses[o.GetName()], o)
    viewer = problem_env.env.GetViewer()
    viewer.SetCamera(init_goal_cam_transform)
    """
    # how do you go from intrinsic params to [fx, fy, cx, cy]? What are these anyways?
    # cam_intrinsic_params = viewer.GetCameraIntrinsics()
    I = problem_env.env.GetViewer().GetCameraImage(1920, 1080, cam_transform, cam_intrinsic_params)
    scipy.misc.imsave('test.png', I)
    """

    # NOTE: target_obj_name is assumed to be defined elsewhere in the script;
    # it is not set in this excerpt.
    utils.set_obj_xytheta(np.array([0.01585576, 1.06516767, 5.77099297]),
                          target_obj_name)
    pick_smpls = visualize_pick(pick_smpler, problem_env)
    feasible_pick_op = get_feasible_pick(problem_env, pick_smpls)
    feasible_pick_op.execute()
    utils.visualize_placements(place_smpler(100),
                               problem_env.robot.GetGrabbed()[0])
    import pdb
    pdb.set_trace()

    # Visualize path
Example #3
    def create_environment(self):
        problem_env = Mover(self.problem_idx)
        openrave_env = problem_env.env
        target_object = openrave_env.GetKinBody('square_packing_box1')
        set_color(target_object, [1, 0, 0])

        return problem_env, openrave_env
Example #4
    def create_environment(self):
        problem_env = Mover(self.problem_idx)
        openrave_env = problem_env.env
        return problem_env, openrave_env
Example #5
def main():
    parameters = parse_parameters()

    save_dir = make_and_get_save_dir(parameters)
    file_path = save_dir + '/seed_' + str(parameters.planner_seed) + '_pidx_' + str(parameters.pidx) + '.pkl'
    quit_if_already_tested(file_path, parameters.f)

    # for creating problem
    np.random.seed(parameters.pidx)
    random.seed(parameters.pidx)
    is_two_arm_env = parameters.domain.find('two_arm') != -1
    if is_two_arm_env:
        environment = Mover(parameters.pidx)
    else:
        environment = OneArmMover(parameters.pidx)
    environment.initial_robot_base_pose = get_body_xytheta(environment.robot)

    if parameters.domain == 'two_arm_mover':
        goal_region = 'home_region'
        if parameters.n_objs_pack == 4:
            goal_object_names = ['square_packing_box1', 'square_packing_box2', 'rectangular_packing_box3',
                                 'rectangular_packing_box4']
        else:
            goal_object_names = ['square_packing_box1']
        environment.set_goal(goal_object_names, goal_region)
    elif parameters.domain == 'one_arm_mover':
        goal_region = 'rectangular_packing_box1_region'
        assert parameters.n_objs_pack == 1
        goal_object_names = ['c_obst1']
        environment.set_goal(goal_object_names, goal_region)
    goal_entities = goal_object_names + [goal_region]

    # for randomized algorithms
    np.random.seed(parameters.planner_seed)
    random.seed(parameters.planner_seed)

    if parameters.v:
        utils.viewer()

    environment.set_motion_planner(BaseMotionPlanner(environment, 'rrt'))
    # from manipulation.bodies.bodies import set_color
    # set_color(environment.env.GetKinBody(goal_object_names[0]), [1, 0, 0])
    start_time = time.time()

    n_mp = n_ik = 0

    goal_object_names, high_level_plan, (mp, ik) = find_plan_without_reachability(environment, goal_object_names,
                                                                                  start_time,
                                                                                  parameters)  # finds the plan
    total_time_taken = time.time() - start_time
    n_mp += mp
    n_ik += ik

    total_n_nodes = 0
    total_plan = []
    idx = 0
    found_solution = False
    timelimit = parameters.timelimit
    while total_time_taken < timelimit:
        goal_obj_name = goal_object_names[idx]
        plan, n_nodes, status, (mp, ik) = find_plan_for_obj(goal_obj_name, high_level_plan[idx], environment,
                                                            start_time,
                                                            timelimit, parameters)
        total_n_nodes += n_nodes
        total_time_taken = time.time() - start_time
        print goal_obj_name, goal_object_names, total_n_nodes
        print "Time taken: %.2f" % total_time_taken
        if total_time_taken > timelimit:
            break
        if status == 'HasSolution':
            execute_plan(plan)
            environment.initial_robot_base_pose = utils.get_body_xytheta(environment.robot)
            total_plan += plan
            save_plan(total_plan, total_n_nodes, len(goal_object_names) - idx, found_solution, file_path, goal_entities,
                      total_time_taken, {'mp': n_mp, 'ik': n_ik}, parameters)
            idx += 1
        else:
            # Note that HPN does not have any recourse if this happens. We re-plan at the higher level.
            goal_object_names, high_level_plan, (mp, ik) = find_plan_without_reachability(environment, goal_object_names,
                                                                                           start_time, parameters)  # finds the plan
            n_mp += mp
            n_ik += ik
            total_plan = []
            idx = 0
        if idx == len(goal_object_names):
            found_solution = True
            break
        else:
            idx %= len(goal_object_names)
    total_time_taken = time.time() - start_time
    save_plan(total_plan, total_n_nodes, len(goal_object_names) - idx, found_solution, file_path, goal_entities,
              total_time_taken, {'mp': n_mp, 'ik': n_ik}, parameters)
    print 'plan saved'
Example #6
def main():
    parameters = parse_parameters()

    save_dir = make_and_get_save_dir(parameters)
    file_path = save_dir + '/seed_' + str(
        parameters.planner_seed) + '_pidx_' + str(parameters.pidx) + '.pkl'
    quit_if_already_tested(file_path, parameters.f)

    # for creating problem
    np.random.seed(parameters.pidx)
    random.seed(parameters.pidx)
    is_two_arm_env = parameters.domain.find('two_arm') != -1
    if is_two_arm_env:
        environment = Mover(parameters.pidx)
        goal_region = ['home_region']
    else:
        environment = OneArmMover(parameters.pidx)
        goal_region = ['rectangular_packing_box1_region']

    goal_object_names = [
        obj.GetName() for obj in environment.objects[:parameters.n_objs_pack]
    ]
    goal_entities = goal_object_names + goal_region

    # for randomized algorithms
    np.random.seed(parameters.planner_seed)
    random.seed(parameters.planner_seed)

    if parameters.v:
        environment.env.SetViewer('qtcoin')

    # from manipulation.bodies.bodies import set_color
    # set_color(environment.env.GetKinBody(goal_object_names[0]), [1, 0, 0])
    stime = time.time()

    goal_object_names, high_level_plan = find_plan_without_reachability(
        environment, goal_object_names)  # finds the plan

    total_n_nodes = 0
    total_plan = []
    idx = 0
    total_time_taken = 0
    found_solution = False
    timelimit = parameters.timelimit
    timelimit = np.inf  # override the parameter; terminate on the node budget below instead
    while total_n_nodes < 1000 and total_time_taken < timelimit:
        goal_obj_name = goal_object_names[idx]
        plan, n_nodes, status = find_plan_for_obj(goal_obj_name,
                                                  high_level_plan[idx],
                                                  environment, stime,
                                                  timelimit)
        total_n_nodes += n_nodes
        total_time_taken = time.time() - stime
        print goal_obj_name, goal_object_names, total_n_nodes
        print "Time taken: %.2f" % total_time_taken
        if status == 'HasSolution':
            execute_plan(plan)
            environment.initial_robot_base_pose = utils.get_body_xytheta(
                environment.robot)
            total_plan += plan
            save_plan(total_plan, total_n_nodes,
                      len(goal_object_names) - idx, found_solution, file_path,
                      goal_entities, total_time_taken)
            idx += 1
        else:
            # Note that HPN does not have any recourse if this happens. We re-plan at the higher level.
            goal_object_names, high_level_plan = find_plan_without_reachability(
                environment, goal_object_names)  # finds the plan
            total_plan = []
            idx = 0

        if idx == len(goal_object_names):
            found_solution = True
            break
        else:
            idx %= len(goal_object_names)

    save_plan(total_plan, total_n_nodes,
              len(goal_object_names) - idx, found_solution, file_path,
              goal_entities, total_time_taken)
    print 'plan saved'
Example #7
def create_environment(problem_idx):
    problem_env = Mover(problem_idx)
    openrave_env = problem_env.env
    problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'prm'))
    return problem_env, openrave_env
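
A hedged usage sketch for the helper above, mirroring the seeding and coloring pattern of the other examples (set_color is the commented import from manipulation.bodies.bodies; the pidx value and object name are illustrative):

# Usage sketch (illustrative values; set_color as in the commented imports above).
pidx = 0
np.random.seed(pidx)    # seed the problem instance, as in the planner scripts
random.seed(pidx)
problem_env, openrave_env = create_environment(pidx)
target_object = openrave_env.GetKinBody('square_packing_box1')
set_color(target_object, [1, 0, 0])   # highlight the goal object in red
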
Example #8
def generate_training_data_single():
    np.random.seed(config.pidx)
    random.seed(config.pidx)
    if config.domain == 'two_arm_mover':
        mover = Mover(config.pidx, config.problem_type)
    elif config.domain == 'one_arm_mover':
        mover = OneArmMover(config.pidx)
    else:
        raise NotImplementedError
    np.random.seed(config.planner_seed)
    random.seed(config.planner_seed)
    mover.set_motion_planner(BaseMotionPlanner(mover, 'prm'))
    mover.seed = config.pidx

    # todo change the root node
    mover.init_saver = DynamicEnvironmentStateSaver(mover.env)

    hostname = socket.gethostname()
    if hostname in {'dell-XPS-15-9560', 'phaedra', 'shakey', 'lab', 'glaucus', 'luke-laptop-1'}:
        root_dir = './'
        #root_dir = '/home/beomjoon/Dropbox (MIT)/cloud_results/'
    else:
        root_dir = '/data/public/rw/pass.port/tamp_q_results/'

    solution_file_dir = root_dir + '/test_results/greedy_results_on_mover_domain/domain_%s/n_objs_pack_%d'\
                        % (config.domain, config.n_objs_pack)

    if config.dont_use_gnn:
        solution_file_dir += '/no_gnn/'
    elif config.dont_use_h:
        solution_file_dir += '/gnn_no_h/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'
    elif config.hcount:
        solution_file_dir += '/hcount/'
    elif config.hadd:
        solution_file_dir += '/gnn_hadd/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'
    else:
        solution_file_dir += '/gnn/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'

    solution_file_name = 'pidx_' + str(config.pidx) + \
                         '_planner_seed_' + str(config.planner_seed) + \
                         '_train_seed_' + str(config.train_seed) + \
                         '_domain_' + str(config.domain) + '.pkl'
    if not os.path.isdir(solution_file_dir):
        os.makedirs(solution_file_dir)

    solution_file_name = solution_file_dir + solution_file_name

    is_problem_solved_before = os.path.isfile(solution_file_name)
    if is_problem_solved_before and not config.plan:
        with open(solution_file_name, 'rb') as f:
            trajectory = pickle.load(f)
            success = trajectory.metrics['success']
            tottime = trajectory.metrics['tottime']
    else:
        t = time.time()
        trajectory, num_nodes = get_problem(mover)
        tottime = time.time() - t
        success = trajectory is not None
        plan_length = len(trajectory.actions) if success else 0
        if not success:
            trajectory = Trajectory(mover.seed, mover.seed)
        trajectory.states = [s.get_predicate_evaluations() for s in trajectory.states]
        trajectory.state_prime = None
        trajectory.metrics = {
            'n_objs_pack': config.n_objs_pack,
            'tottime': tottime,
            'success': success,
            'plan_length': plan_length,
            'num_nodes': num_nodes,
        }

        #with open(solution_file_name, 'wb') as f:
        #    pickle.dump(trajectory, f)
    print 'Time: %.2f Success: %d Plan length: %d Num nodes: %d' % (tottime, success, trajectory.metrics['plan_length'],
                                                                    trajectory.metrics['num_nodes'])
    import pdb; pdb.set_trace()

    """
    print("time: {}".format(','.join(str(trajectory.metrics[m]) for m in [
        'n_objs_pack',
        'tottime',
        'success',
        'plan_length',
        'num_nodes',
    ])))
    """

    print('\n'.join(str(a.discrete_parameters.values()) for a in trajectory.actions))

    def draw_robot_line(env, q1, q2):
        return draw_line(env, list(q1)[:2] + [.5], list(q2)[:2] + [.5], color=(0,0,0,1), width=3.)

    mover.reset_to_init_state_stripstream()
    if not config.dontsimulate:
        if config.visualize_sim:
            mover.env.SetViewer('qtcoin')
            set_viewer_options(mover.env)

            raw_input('Start?')
        handles = []
        for step_idx, action in enumerate(trajectory.actions):
            if action.type == 'two_arm_pick_two_arm_place':
                def check_collisions(q=None):
                    if q is not None:
                        set_robot_config(q, mover.robot)
                    collision = False
                    if mover.env.CheckCollision(mover.robot):
                        collision = True
                    for obj in mover.objects:
                        if mover.env.CheckCollision(obj):
                            collision = True
                    if collision:
                        print('collision')
                        if config.visualize_sim:
                            raw_input('Continue after collision?')

                check_collisions()
                o = action.discrete_parameters['two_arm_place_object']
                pick_params = action.continuous_parameters['pick']
                place_params = action.continuous_parameters['place']

                full_path = [get_body_xytheta(mover.robot)[0]] + pick_params['motion'] + [pick_params['q_goal']] + \
                            place_params['motion'] + [place_params['q_goal']]
                for i, (q1, q2) in enumerate(zip(full_path[:-1], full_path[1:])):
                    if np.linalg.norm(np.squeeze(q1)[:2] - np.squeeze(q2)[:2]) > 1:
                        print(i, q1, q2)
                        import pdb;
                        pdb.set_trace()

                pickq = pick_params['q_goal']
                pickt = pick_params['motion']
                check_collisions(pickq)
                for q in pickt:
                    check_collisions(q)
                obj = mover.env.GetKinBody(o)
                if len(pickt) > 0:
                    for q1, q2 in zip(pickt[:-1], pickt[1:]):
                        handles.append(
                            draw_robot_line(mover.env, q1.squeeze(), q2.squeeze()))
                # set_robot_config(pickq, mover.robot)
                # if config.visualize_sim:
                #   raw_input('Continue?')
                # set_obj_xytheta([1000,1000,0], obj)
                two_arm_pick_object(obj, pick_params)
                if config.visualize_sim:
                    raw_input('Place?')

                o = action.discrete_parameters['two_arm_place_object']
                r = action.discrete_parameters['two_arm_place_region']
                placeq = place_params['q_goal']
                placep = place_params['object_pose']
                placet = place_params['motion']
                check_collisions(placeq)
                for q in placet:
                    check_collisions(q)
                obj = mover.env.GetKinBody(o)
                if len(placet) > 0:
                    for q1, q2 in zip(placet[:-1], placet[1:]):
                        handles.append(
                            draw_robot_line(mover.env, q1.squeeze(), q2.squeeze()))
                # set_robot_config(placeq, mover.robot)
                # if config.visualize_sim:
                #   raw_input('Continue?')
                # set_obj_xytheta(placep, obj)
                two_arm_place_object(place_params)
                check_collisions()

                if config.visualize_sim:
                    raw_input('Continue?')

            elif action.type == 'one_arm_pick_one_arm_place':
                def check_collisions(q=None):
                    if q is not None:
                        set_robot_config(q, mover.robot)
                    collision = False
                    if mover.env.CheckCollision(mover.robot):
                        collision = True
                    for obj in mover.objects:
                        if mover.env.CheckCollision(obj):
                            collision = True
                    if collision:
                        print('collision')
                        if config.visualize_sim:
                            raw_input('Continue after collision?')

                check_collisions()
                pick_params = action.continuous_parameters['pick']
                place_params = action.continuous_parameters['place']

                one_arm_pick_object(mover.env.GetKinBody(action.discrete_parameters['object']), pick_params)

                check_collisions()

                if config.visualize_sim:
                    raw_input('Place?')

                one_arm_place_object(place_params)

                check_collisions()

                if config.visualize_sim:
                    raw_input('Continue?')
            else:
                raise NotImplementedError

        n_objs_pack = config.n_objs_pack
        if config.domain == 'two_arm_mover':
            goal_region = 'home_region'
        elif config.domain == 'one_arm_mover':
            goal_region = 'rectangular_packing_box1_region'
        else:
            raise NotImplementedError

        if all(mover.regions[goal_region].contains(o.ComputeAABB()) for o in
               mover.objects[:n_objs_pack]):
            print("successful plan length: {}".format(len(trajectory.actions)))
        else:
            print('failed to find plan')
        if config.visualize_sim:
            raw_input('Finish?')

    return
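
The check_collisions closure above is defined twice, once per action type; below is a sketch of the same check factored into a module-level helper (same mover interface; only the visualize flag and the return value are additions):

def check_collisions(mover, q=None, visualize=False):
    # Same logic as the inline closures above, factored out for reuse.
    if q is not None:
        set_robot_config(q, mover.robot)
    collision = mover.env.CheckCollision(mover.robot) or \
        any(mover.env.CheckCollision(obj) for obj in mover.objects)
    if collision:
        print('collision')
        if visualize:
            raw_input('Continue after collision?')
    return collision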