Example #1
0
    def check_obj_reachable(self, obj):
        """Return True iff the (empty-handed) robot can motion-plan to a pick
        configuration for *obj*; caches the pick operator, the motion, and the
        evaluation result on success."""
        # Cannot evaluate pick reachability while already holding something.
        if len(self.problem_env.robot.GetGrabbed()) > 0:
            return False

        body = self.problem_env.env.GetKinBody(obj)
        name = str(body.GetName())
        # One-arm domains use a different pick operator type.
        op_type = 'one_arm_pick' if self.problem_env.name.find('one_arm') != -1 else 'two_arm_pick'
        op = Operator(op_type, {'object': body})

        # Reuse previously found goal configs when available.
        if name in self.pick_used:
            goals = self.pick_used[name].continuous_parameters['q_goal']
        else:
            goals = self.generate_potential_pick_configs(op, n_pick_configs=10)

        motion = None
        feasible = False
        if goals is not None:
            planner = BaseMotionPlanner(self.problem_env, 'prm')
            motion, status = planner.get_motion_plan(goals)
            feasible = status == 'HasSolution'

        if not feasible:
            self.evaluations[name] = False
            return False

        op.make_pklable()
        op.continuous_parameters = {'q_goal': motion[-1]}
        self.motion_plans[name] = motion
        if name not in self.pick_used:
            self.pick_used[name] = op
        self.evaluations[name] = True
        return True
Example #2
0
 def set_cached_pick_paths(self, parent_state, moved_obj):
     """Cache a collision-ignoring pick path for every pick operator in self.pick_used.

     Reuses the parent state's cached path when the object has not moved;
     otherwise plans with an empty cached-collision set and updates the
     operator's 'q_goal' / 'potential_q_goals' continuous parameters.
     NOTE(review): contains leftover debugging scaffolding (`assert False`,
     bare except + pdb.set_trace) — confirm these branches before reuse.
     """
     motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
     for obj, op_instance in self.pick_used.items():
         motion_plan_goals = op_instance.continuous_parameters['q_goal']
         # Default to None so every object has an entry even when skipped.
         self.cached_pick_paths[obj] = None
         if not self.use_shortest_path:
             continue
         if parent_state is not None and obj in parent_state.cached_pick_paths and obj != moved_obj:
             # Object did not move since the parent state: reuse its path.
             self.cached_pick_paths[obj] = parent_state.cached_pick_paths[
                 obj]
         else:
             # Plan ignoring cached collisions (empty cached-collision set).
             self.cached_pick_paths[obj] = motion_planner.get_motion_plan(
                 motion_plan_goals, cached_collisions={})[0]
             if len(motion_plan_goals) == 0:
                 # This pick path is used for checking the pickinway predicate and to pickup the object in place in way predicate.
                 #
                 # An empty goal set is treated as a hard error (debug sentinel).
                 assert False
                 self.cached_pick_paths[obj] = None
             else:
                 try:
                     # how can this be, since we disable all the collisions?
                     assert self.cached_pick_paths[obj] is not None
                 except:
                     import pdb
                     pdb.set_trace()
                 op_instance.continuous_parameters[
                     'potential_q_goals'] = motion_plan_goals
                 op_instance.continuous_parameters[
                     'q_goal'] = self.cached_pick_paths[obj][-1]
    def get_minimum_constraint_path_to(self, goal_config, target_obj):
        print "Planning to goal config:", goal_config
        if self.use_shortest_path:
            motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
        else:
            motion_planner = MinimumConstraintPlanner(self.problem_env, target_obj, 'prm')
        motion, status = motion_planner.get_motion_plan(goal_config, cached_collisions=self.collides)

        if motion is None:
            return None, 'NoSolution'
        return motion, status
Example #4
0
 def two_arm_domain_region_reachability_check(self, region):
     motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
     print "Motion planning to ", region
     motion, status = motion_planner.get_motion_plan(self.problem_env.regions[region],
                                                     cached_collisions=self.collides)
     if status == 'HasSolution':
         self.motion_plans[region] = motion
         self.evaluations[region] = True
         return True
     else:
         self.evaluations[region] = False
         return False
Example #5
0
    def set_cached_place_paths(self, parent_state, moved_obj):
        """Cache a place path for every (object, region) pair except 'entire_region'.

        For each object with a cached pick path: execute the pick under a
        state saver, then
          * if the object already lies in the region, use the current robot
            pose as a trivial one-point path;
          * otherwise plan against the cached collision set
            (holding_collides when available);
          * on failure, fall back to the parent state's cached path (only if
            the object has not moved), else re-plan ignoring collisions.
        The saver is restored before caching, so the environment is unchanged.
        NOTE(review): the cached path may still be None if planning fails.
        """
        motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
        for region_name, region in self.problem_env.regions.items():
            if region.name == 'entire_region':
                continue
            for obj, pick_path in self.cached_pick_paths.items():
                self.cached_place_paths[(obj, region_name)] = None

                saver = CustomStateSaver(self.problem_env.env)
                pick_used = self.pick_used[obj]
                pick_used.execute()
                if region.contains(self.problem_env.env.GetKinBody(obj).ComputeAABB()):
                    # Object already in the region: a single-pose "path" suffices.
                    path = [get_body_xytheta(self.problem_env.robot).squeeze()]
                    #self.reachable_regions_while_holding.append((obj, region_name))
                else:
                    if self.holding_collides is not None:
                        path, status = motion_planner.get_motion_plan(region, cached_collisions=self.holding_collides)
                    else:
                        # I think the trouble here is that we do not hold the object when checking collisions
                        # So, the best way to solve this problem is to not keep reachable_regions_while_holding
                        # and just use the cached path. But I am wondering how we got a colliding-path in
                        # the first place. It must be from place_in_way? No, we execute this function first,
                        # and then using the cached paths, compute the place_in_way.
                        # Also, there is something wrong with the collision checking too.
                        # I think this has to do with the fact that the collisions are computed using
                        # the robot only, not with the object in hand.
                        # So, here is what I propose:
                        #   Plan motions here, but do not add to reachable regions while holding.
                        # This way, it will plan motions as if it is not holding the object,
                        # but check collisions inside place_in_way
                        path, status = motion_planner.get_motion_plan(region,
                                                                      cached_collisions=self.collisions_at_all_obj_pose_pairs)
                    if status == 'HasSolution':
                        pass
                    else:
                        obj_not_moved = obj != moved_obj
                        parent_state_has_cached_path_for_obj = parent_state is not None \
                                                               and obj in parent_state.cached_place_paths
                        # todo: What is this logic here...?
                        #  it is about re-using the parent place path;
                        #  but this assumes that the object has not moved?
                        if parent_state_has_cached_path_for_obj and obj_not_moved:
                            path = parent_state.cached_place_paths[(obj, region_name)]
                        else:
                            # Last resort: plan with collisions ignored.
                            path, _ = motion_planner.get_motion_plan(region, cached_collisions={})
                saver.Restore()
                # assert path is not None
                self.cached_place_paths[(obj, region_name)] = path
    def approximate_minimal_collision_path(self, goal_configuration, path_ignoring_all_objects,
                                           collisions_in_path_ignoring_all_objects, cached_collisions):
        """Greedily approximate a minimum-constraint path to *goal_configuration*.

        Starting from *path_ignoring_all_objects* (planned with all obstacles
        disabled), re-enable the colliding objects one at a time. If planning
        still succeeds with an object enabled, that object can be avoided and
        the new path is kept; otherwise the object is recorded as unavoidable
        and stays disabled for subsequent attempts.

        Returns the best path found; falls back to *path_ignoring_all_objects*
        if no replanning attempt succeeds. Re-enables all objects on exit.
        """
        # Removed dead bookkeeping: the original maintained an `enabled_objects`
        # set that was written to but never read.
        # Disable everything that collided with the unconstrained path.
        for o in collisions_in_path_ignoring_all_objects:
            o.Enable(False)

        minimal_objects_in_way = []  # objects we could not plan around
        minimal_collision_path = path_ignoring_all_objects
        for obj in collisions_in_path_ignoring_all_objects:
            obj.Enable(True)
            # Keep known-unavoidable objects disabled while testing this one.
            for o in minimal_objects_in_way:
                o.Enable(False)
            if self.problem_env.name.find('one_arm') != -1:
                path, status = ArmBaseMotionPlanner.get_motion_plan(self, goal_configuration,
                                                                    cached_collisions=cached_collisions)
            else:
                path, status = BaseMotionPlanner.get_motion_plan(self,
                                                                 goal_configuration,
                                                                 cached_collisions=cached_collisions,
                                                                 n_iterations=[20, 50, 100])
            if status != 'HasSolution':
                minimal_objects_in_way.append(obj)
            else:
                minimal_collision_path = path
        self.problem_env.enable_objects_in_region('entire_region')
        return minimal_collision_path
    def set_cached_place_paths(self, parent_state, moved_obj):
        """Cache, for every (object, region) pair, a place path and its
        start/PRM index information, computed as if holding the object.

        For each cached pick: execute the pick under a state saver, then
          * if the object already lies in the region, use the current robot
            pose as a trivial one-point path;
          * otherwise plan to the region (or to a fixed entrance config for
            'home_region') against the cached collisions (holding_collides
            when available, else collides);
          * on failure, reuse the parent state's cached path when the object
            has not moved and that path was a shortest path, else re-plan
            with collisions ignored.
        Directly reachable pairs are appended to
        self.reachable_regions_while_holding.
        """
        motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
        for region_name, region in self.problem_env.regions.items():
            if region.name == 'entire_region':
                continue
            for obj, pick_path in self.cached_pick_paths.items():
                self.cached_place_paths[(obj, region_name)] = None
                parent_state_has_cached_path_for_obj \
                    = parent_state is not None and obj in parent_state.cached_place_paths and obj != moved_obj
                cached_path_is_shortest_path = parent_state is not None and \
                                               not (obj, region_name) in parent_state.reachable_regions_while_holding

                saver = CustomStateSaver(self.problem_env.env)
                pick_used = self.pick_used[obj]
                pick_used.execute()
                if region.contains(self.problem_env.env.GetKinBody(obj).ComputeAABB()):
                    path = [get_body_xytheta(self.problem_env.robot).squeeze()]
                    # Bug fix: start_and_prm_idxs was never assigned on this
                    # branch, so the cache write below stored a stale value
                    # from a previous loop iteration (or raised NameError on
                    # the very first iteration).
                    start_and_prm_idxs = None
                    self.reachable_regions_while_holding.append((obj, region_name))
                else:
                    if region.name == 'home_region':
                        # a location right at the entrance of home
                        goal = [np.array([0.73064842, -2.85306871, 4.87927762])]
                    else:
                        goal = region
                    if self.holding_collides is not None:
                        path, status, start_and_prm_idxs = motion_planner.get_motion_plan(goal,
                                                                                          cached_collisions=self.holding_collides,
                                                                                          return_start_set_and_path_idxs=True)
                    else:
                        # note: self.collides is computed without holding the object.
                        path, status, start_and_prm_idxs = motion_planner.get_motion_plan(goal,
                                                                                          cached_collisions=self.collides,
                                                                                          return_start_set_and_path_idxs=True)
                    if status == 'HasSolution':
                        self.reachable_regions_while_holding.append((obj, region_name))
                    else:
                        if parent_state_has_cached_path_for_obj and cached_path_is_shortest_path:
                            path = parent_state.cached_place_paths[(obj, region_name)]
                            start_and_prm_idxs = parent_state.cached_place_start_and_prm_idxs[(obj, region_name)]
                        else:
                            path, _, start_and_prm_idxs = motion_planner.get_motion_plan(goal, cached_collisions={},
                                                                                         return_start_set_and_path_idxs=True)
                saver.Restore()
                # assert path is not None

                self.cached_place_paths[(obj, region_name)] = path
                self.cached_place_start_and_prm_idxs[(obj, region_name)] = start_and_prm_idxs
    def add_trajectory(self, plan, goal_entities):
        print "Problem idx", self.problem_idx
        self.set_seed(self.problem_idx)
        problem_env, openrave_env = self.create_environment()
        motion_planner = BaseMotionPlanner(problem_env, 'prm')
        problem_env.set_motion_planner(motion_planner)

        idx = 0
        parent_state = None
        parent_action = None

        moved_objs = {p.discrete_parameters['object'] for p in plan}
        moved_obj_regions = {(p.discrete_parameters['object'],
                              p.discrete_parameters['region'])
                             for p in plan}
        n_times_objs_moved = {obj_name: 0 for obj_name in moved_objs}
        n_times_obj_region_moved = {
            (obj_name, region_name): 0
            for obj_name, region_name in moved_obj_regions
        }

        self.paps_used = self.get_pap_used_in_plan(plan)
        curr_paps_used = self.account_for_used_picks_and_places(
            n_times_objs_moved, n_times_obj_region_moved)
        state = self.compute_state(parent_state, parent_action, goal_entities,
                                   problem_env, curr_paps_used, 0)

        for action_idx, action in enumerate(plan):
            action.execute()

            # mark that a pick or place in the plan has been used
            target_obj_name = action.discrete_parameters['object']
            target_region_name = action.discrete_parameters['region']
            n_times_objs_moved[target_obj_name] += 1
            n_times_obj_region_moved[(target_obj_name,
                                      target_region_name)] += 1

            curr_paps_used = self.account_for_used_picks_and_places(
                n_times_objs_moved, n_times_obj_region_moved)
            parent_state = state
            parent_action = action
            state = self.compute_state(parent_state, parent_action,
                                       goal_entities, problem_env,
                                       curr_paps_used, action_idx)

            # execute the pap action
            if action == plan[-1]:
                reward = 0
            else:
                reward = -1

            print "The reward is ", reward

            self.add_sar_tuples(parent_state, action, reward)
            print "Executed", action.discrete_parameters

        self.add_state_prime()
        openrave_env.Destroy()
        openravepy.RaveDestroy()
Example #9
0
    def set_cached_pick_paths(self, parent_state, moved_obj):
        """Compute and cache a pick path for every pick operator in
        self.pick_used; objects reachable under the cached collision set are
        added to self.reachable_entities."""
        planner = BaseMotionPlanner(self.problem_env, 'prm')
        for obj_name, pick_op in self.pick_used.items():
            goals = pick_op.continuous_parameters['q_goal']
            assert len(goals) > 0
            self.cached_pick_paths[obj_name] = None

            # First attempt honours the cached collisions; on failure we
            # re-plan with collisions ignored so a path always exists.
            path, status = planner.get_motion_plan(
                goals, cached_collisions=self.collides)
            if status == 'HasSolution':
                self.reachable_entities.append(obj_name)
            else:
                path, _ = planner.get_motion_plan(goals,
                                                  cached_collisions={})
            assert path is not None
            self.cached_pick_paths[obj_name] = path
            pick_op.low_level_motion = path
    def add_trajectory(self, plan, goal_entities):
        print "Problem idx", self.problem_idx
        self.set_seed(self.problem_idx)
        problem_env, openrave_env = self.create_environment()
        motion_planner = BaseMotionPlanner(problem_env, 'prm')
        problem_env.set_motion_planner(motion_planner)

        idx = 0
        parent_state = None
        parent_action = None

        self.plan_sanity_check(problem_env, plan)
        paps_used = self.get_pap_used_in_plan(plan)
        pick_used = paps_used[0]
        place_used = paps_used[1]
        reward_function = ShapedRewardFunction(problem_env,
                                               ['square_packing_box1'],
                                               'home_region', 3 * 8)
        # utils.viewer()
        state = self.compute_state(parent_state, parent_action, goal_entities,
                                   problem_env, paps_used, 0)
        for action_idx, action in enumerate(plan):
            if 'place' in action.type:
                continue

            target_obj = openrave_env.GetKinBody(
                action.discrete_parameters['object'])
            color_before = get_color(target_obj)
            set_color(target_obj, [1, 1, 1])

            pick_used, place_used = self.delete_moved_objects_from_pap_data(
                pick_used, place_used, target_obj)
            paps_used = [pick_used, place_used]

            action.is_skeleton = False
            pap_action = copy.deepcopy(action)
            pap_action = pap_action.merge_operators(plan[action_idx + 1])
            pap_action.is_skeleton = False
            pap_action.execute()
            # set_color(target_obj, color_before)

            parent_state = state
            parent_action = pap_action
            state = self.compute_state(parent_state, parent_action,
                                       goal_entities, problem_env, paps_used,
                                       action_idx)

            # execute the pap action
            reward = reward_function(parent_state, state, parent_action,
                                     action_idx)
            print "The reward is ", reward

            self.add_sar_tuples(parent_state, pap_action, reward)
            print "Executed", action.discrete_parameters

        self.add_state_prime()
        openrave_env.Destroy()
        openravepy.RaveDestroy()
    def set_cached_pick_paths(self, parent_state, moved_obj):
        """Cache a pick path for every pick operator in self.pick_used.

        First plans with PRM against the cached collision set (recording
        reachable objects), then — NOTE(review): unconditionally — overwrites
        the path with an RRT plan computed with all objects disabled; confirm
        the PRM result being discarded is intentional.
        """
        motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
        # making it consistent with the feasibility checker
        self.problem_env.set_robot_to_default_dof_values()

        for obj, op_instance in self.pick_used.items():
            # print self.problem_env.robot.GetDOFValues()
            motion_plan_goals = op_instance.continuous_parameters['q_goal']
            assert len(motion_plan_goals) > 0
            self.cached_pick_paths[obj] = None

            path, status = motion_planner.get_motion_plan(motion_plan_goals, cached_collisions=self.collides)
            if status == 'HasSolution':
                self.reachable_entities.append(obj)
            else:
                path, _ = motion_planner.get_motion_plan(motion_plan_goals, cached_collisions={})
            # Disable all objects so the RRT only has to avoid static geometry.
            [t.Enable(False) for t in self.problem_env.objects]
            rrt_motion_planner = BaseMotionPlanner(self.problem_env, 'rrt')
            # motion_plan_goals[0] = np.array([-0.11255534, -0.26290062,  1.64126379])
            # utils.set_robot_config(motion_plan_goals[0])
            # RRT is randomized: retry up to 100 times for a solution.
            for _ in range(100):
                path, status = rrt_motion_planner.get_motion_plan(motion_plan_goals[0])
                if status == 'HasSolution':
                    break
            [t.Enable(True) for t in self.problem_env.objects]
            assert path is not None, 'Even RRT failed!'

            self.cached_pick_paths[obj] = path
            op_instance.low_level_motion = path
Example #12
0
    def get_binary_edge_features(self, a, b):
        """Return the binary edge features for the entity pair (a, b):
        [in_region(a, b), is_a_in_pick_path_of_b,
         is_place_in_b_reachable_while_holding_a].

        Results cached in parent_binary_predicates are reused verbatim.
        In shortest-path mode the place-reachability predicate is computed by
        actually executing the cached pick (under a state saver) and planning
        to region b; otherwise it is looked up from place_in_way.
        """
        if (a, b) in self.parent_binary_predicates:
            return self.parent_binary_predicates[(a, b)]
        else:
            # todo rename cached_pick_path and cached_place_path as shortest paths
            if self.use_shortest_path:
                # todo this needs to be re-computed too when we are using shortest paths, because
                #   this is true if for all b in (a,b,r), b is not in the way of shortest path to r while holidng a
                #   Since the shortest path plans a path without collision-checking, this is not an accurate computation
                if a in self.problem_env.object_names and b in self.problem_env.region_names and b != 'entire_region':
                    if a not in self.reachable_entities:
                        # Cannot even reach a, so it cannot be placed anywhere.
                        is_place_in_b_reachable_while_holding_a = False
                    else:
                        saver = CustomStateSaver(self.problem_env.env)
                        self.pick_used[a].execute(
                        )  # it should not be in collision
                        motion_planner = BaseMotionPlanner(
                            self.problem_env, 'prm')
                        # note that it doesn't check the collision with the object held
                        plan, status = motion_planner.get_motion_plan(
                            self.problem_env.regions[b],
                            cached_collisions=self.collides)
                        saver.Restore()
                        is_place_in_b_reachable_while_holding_a = status == 'HasSolution'
                else:
                    is_place_in_b_reachable_while_holding_a = False

            else:
                is_place_in_b_reachable_while_holding_a = (
                    a, b) in self.place_in_way.reachable_obj_region_pairs
        
            if self.use_shortest_path:
                # Regions have no pick path; only objects do.
                if b.find('region') != -1:
                    cached_path = None
                else:
                    cached_path = self.cached_pick_paths[b]
                is_a_in_pick_path_of_b = self.pick_in_way(
                    a, b, cached_path=cached_path)
            else:
                is_a_in_pick_path_of_b = self.pick_in_way(a, b)

            return [
                self.in_region(a, b), is_a_in_pick_path_of_b,
                is_place_in_b_reachable_while_holding_a
            ]
Example #13
0
    def set_cached_place_paths(self, parent_state, moved_obj):
        """Cache a place path for every (object, region) pair (shortest-path
        mode only; pairs are skipped when use_shortest_path is False or the
        object has no cached pick path).

        Reuses the parent state's cached place path for unmoved objects;
        otherwise executes the cached pick under a state saver and either
        records the current pose (object already in region) or plans a path
        with collisions ignored.
        NOTE(review): contains pdb.set_trace debugging scaffolding on
        planning failure — remove before production use.
        """
        motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
        for region_name, region in self.problem_env.regions.items():
            if region.name == 'entire_region':
                continue
            for obj, pick_path in self.cached_pick_paths.items():
                self.cached_place_paths[(obj, region_name)] = None
                if not self.use_shortest_path:
                    continue
                if pick_path is None:
                    continue
                if parent_state is not None and obj in parent_state.cached_pick_paths and obj != moved_obj:
                    # Object did not move: the parent's cached path still applies.
                    self.cached_place_paths[(
                        obj, region_name)] = parent_state.cached_place_paths[(
                            obj, region_name)]
                else:
                    saver = CustomStateSaver(self.problem_env.env)
                    pick_used = self.pick_used[obj]
                    pick_used.execute()
                    if region.contains(
                            self.problem_env.env.GetKinBody(
                                obj).ComputeAABB()):
                        # Already in the region: single-pose path.
                        self.cached_place_paths[(obj, region_name)] = [
                            get_body_xytheta(self.problem_env.robot).squeeze()
                        ]
                    else:
                        # Plan with cached collisions ignored.
                        self.cached_place_paths[(
                            obj,
                            region_name)] = motion_planner.get_motion_plan(
                                region, cached_collisions={})[0]
                        if self.cached_place_paths[(obj, region_name)] is None:
                            import pdb
                            pdb.set_trace()

                    saver.Restore()
                try:
                    assert self.cached_place_paths[(obj,
                                                    region_name)] is not None
                except:
                    import pdb
                    pdb.set_trace()
def main():
    """Visualization script for the 'jobtalk' Mover problem: sets up the
    environment and viewer, poses objects at their goal poses, visualizes
    pick samples, executes a feasible pick, and visualizes placements.

    NOTE(review): this definition appears truncated by the source extraction
    (it ends with an unterminated triple-quoted block).
    """
    problem_idx = 0

    problem_env = Mover(problem_idx, problem_type='jobtalk')
    problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'rrt'))
    utils.viewer()

    # Fix the RNG seeds for reproducible sampling.
    seed = 1
    np.random.seed(seed)
    random.seed(seed)

    pick_smpler, place_smpler = create_sampler(problem_env)

    # Set camera view - to get camera transform: viewer.GetCameraTransform()
    # NOTE(review): this first cam_transform is immediately overwritten below
    # and never used — presumably a leftover alternative viewpoint.
    cam_transform = np.array(
        [[-0.54866337, -0.70682829, 0.44650004, -1.45953619],
         [-0.83599448, 0.45806221, -0.30214604, 2.02016926],
         [0.00904058, -0.53904803, -0.8422265, 4.88620949], [0., 0., 0., 1.]])
    cam_transform = np.array(
        [[0.76808539, 0.51022899, -0.38692533, 2.7075901],
         [0.63937823, -0.57785198, 0.50723029, -2.0267117],
         [0.03521803, -0.6369878, -0.77006898, 4.52542162], [0., 0., 0., 1.]])

    init_goal_cam_transform = np.array(
        [[0.99941927, -0.00186311, 0.03402425, 1.837726],
         [-0.02526303, -0.71058334, 0.70315937, -5.78141165],
         [0.022867, -0.70361058, -0.71021775, 6.03373909], [0., 0., 0., 1.]])
    goal_obj_poses = pickle.load(
        open('./test_scripts/jobtalk_figure_cache_files/goal_obj_poses.pkl',
             'r'))
    # Pose every object at its cached goal pose for the figure.
    for o in problem_env.objects:
        utils.set_obj_xytheta(goal_obj_poses[o.GetName()], o)
    viewer = problem_env.env.GetViewer()
    viewer.SetCamera(init_goal_cam_transform)
    """
    # how do you go from intrinsic params to [fx, fy, cx, cy]? What are these anyways?
    # cam_intrinsic_params = viewer.GetCameraIntrinsics()
    I = problem_env.env.GetViewer().GetCameraImage(1920, 1080, cam_transform, cam_intrinsic_params)
    scipy.misc.imsave('test.png', I)
    """

    # NOTE(review): `target_obj_name` is not defined anywhere in the visible
    # scope — this would raise NameError at runtime; confirm against the
    # full module.
    utils.set_obj_xytheta(np.array([0.01585576, 1.06516767, 5.77099297]),
                          target_obj_name)
    pick_smpls = visualize_pick(pick_smpler, problem_env)
    feasible_pick_op = get_feasible_pick(problem_env, pick_smpls)
    feasible_pick_op.execute()
    utils.visualize_placements(place_smpler(100),
                               problem_env.robot.GetGrabbed()[0])
    import pdb
    pdb.set_trace()

    # Visualize path
    """
Example #15
0
    def set_cached_place_paths(self, parent_state, moved_obj):
        """Cache a place path for every (object, region) pair except
        'entire_region', computed as if holding the object.

        For each cached pick: execute the pick under a state saver, then
          * use the current robot pose if the object already lies in the
            region (marking the pair reachable while holding);
          * otherwise plan against the cached collision set
            (holding_collides when available, else collides);
          * on failure, reuse the parent state's cached path when the object
            has not moved and that path was a shortest path, else re-plan
            with collisions ignored.
        """
        motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
        for region_name, region in self.problem_env.regions.items():
            if region.name == 'entire_region':
                continue
            for obj, pick_path in self.cached_pick_paths.items():
                self.cached_place_paths[(obj, region_name)] = None
                parent_state_has_cached_path_for_obj \
                    = parent_state is not None and obj in parent_state.cached_place_paths and obj != moved_obj
                cached_path_is_shortest_path = parent_state is not None and \
                                               not (obj, region_name) in parent_state.reachable_regions_while_holding

                saver = CustomStateSaver(self.problem_env.env)
                pick_used = self.pick_used[obj]
                pick_used.execute()
                if region.contains(
                        self.problem_env.env.GetKinBody(obj).ComputeAABB()):
                    # Already inside the region: a single-pose path suffices.
                    path = [get_body_xytheta(self.problem_env.robot).squeeze()]
                    self.reachable_regions_while_holding.append(
                        (obj, region_name))
                else:
                    if self.holding_collides is not None:
                        path, status = motion_planner.get_motion_plan(
                            region, cached_collisions=self.holding_collides)
                    else:
                        path, status = motion_planner.get_motion_plan(
                            region, cached_collisions=self.collides)
                    if status == 'HasSolution':
                        self.reachable_regions_while_holding.append(
                            (obj, region_name))
                    else:
                        if parent_state_has_cached_path_for_obj and cached_path_is_shortest_path:
                            path = parent_state.cached_place_paths[(
                                obj, region_name)]
                        else:
                            # Last resort: plan ignoring cached collisions.
                            path, _ = motion_planner.get_motion_plan(
                                region, cached_collisions={})
                saver.Restore()
                # assert path is not None
                self.cached_place_paths[(obj, region_name)] = path
 def compute_path_ignoring_obstacles(self, goal_configuration):
     self.problem_env.disable_objects_in_region('entire_region')
     if self.target_object is not None:
         self.target_object.Enable(True)
     if self.problem_env.name.find('one_arm') != -1:
         path, status = ArmBaseMotionPlanner.get_motion_plan(self, goal_configuration)
     else:
         stime = time.time()
         path, status = BaseMotionPlanner.get_motion_plan(self, goal_configuration)
         print "Motion plan time", time.time()-stime
     self.problem_env.enable_objects_in_region('entire_region')
     if path is None:
         import pdb; pdb.set_trace()
     return path
Example #17
0
    def get_node_features(self, entity, goal_entities):
        """Return the node feature vector for *entity* (an object name or a
        region name): placeholder size fields, the object pose, object/region
        indicator bits, goal membership, reachability, and whether a goal
        entity is currently held.

        In shortest-path mode, reachability of an object is re-computed here
        by motion planning to its potential pick goals, and the pick
        operator's q_goal / low_level_motion are updated as a side effect.
        Regions are always considered reachable.
        """
        isobj = entity not in self.problem_env.regions
        obj = self.problem_env.env.GetKinBody(entity) if isobj else None
        pose = get_body_xytheta(obj)[0] if isobj else None

        if isobj:
            if self.use_shortest_path:
                motion_planner = BaseMotionPlanner(self.problem_env, 'prm')
                pick_for_obj = self.pick_used[obj.GetName()]
                plan, status = motion_planner.get_motion_plan(
                    pick_for_obj.continuous_parameters['potential_q_goals'],
                    cached_collisions=self.collides)
                # Side effect: remember the planned motion on the pick op.
                pick_for_obj.low_level_motion = plan
                if status == 'HasSolution':
                    pick_for_obj.continuous_parameters['q_goal'] = plan[-1]
                    self.reachable_entities.append(entity)
                    is_entity_reachable = True
                else:
                    is_entity_reachable = False
            else:
                is_entity_reachable = obj.GetName() in self.reachable_entities
        else:
            is_entity_reachable = True

        return [
            0,  # l
            0,  # w
            0,  # h
            pose[0] if isobj else 0,  # x
            pose[1] if isobj else 0,  # y
            pose[2] if isobj else 0,  # theta
            entity not in self.problem_env.regions,  # IsObj
            entity in self.problem_env.regions,  # IsRoom
            entity in self.goal_entities,  # IsGoal
            is_entity_reachable,
            self.is_holding_goal_entity(),
        ]
Example #18
0
    def approximate_minimal_collision_path(
            self, goal_configuration, path_ignoring_all_objects,
            collisions_in_path_ignoring_all_objects, cached_collisions):
        # enabled objects = all the objects except the ones that are in collision
        enabled_objects = {obj.GetName() for obj in self.problem_env.objects}
        enabled_objects -= {
            obj.GetName()
            for obj in collisions_in_path_ignoring_all_objects
        }

        [o.Enable(False) for o in collisions_in_path_ignoring_all_objects]

        # enable object one by one. If we can find a path with it turned off, then it is not actually in the way.
        # minimal objects in way = a set of objects that we cannot ignore
        # for other objects in collisions_in_path_ignoring_all_objects, we can ignore
        minimal_objects_in_way = []
        minimal_collision_path = path_ignoring_all_objects
        print "Approximating MCR path..."
        for obj in collisions_in_path_ignoring_all_objects:
            obj.Enable(True)
            [o.Enable(False) for o in minimal_objects_in_way]
            enabled_objects.add(obj.GetName())
            enabled_objects -= {
                obj.GetName()
                for obj in minimal_objects_in_way
            }
            if self.problem_env.name.find('one_arm') != -1:
                path, status = ArmBaseMotionPlanner.get_motion_plan(
                    self,
                    goal_configuration,
                    cached_collisions=cached_collisions)
            else:
                path, status = BaseMotionPlanner.get_motion_plan(
                    self,
                    goal_configuration,
                    cached_collisions=cached_collisions,
                    n_iterations=[20, 50, 100, 500, 1000])
            if status != 'HasSolution':
                minimal_objects_in_way.append(obj)
            else:
                minimal_collision_path = path

        self.problem_env.enable_objects_in_region('entire_region')
        return minimal_collision_path
def execute_policy(plan, problem_env, goal_entities, config):
    try:
        init_abstract_state = pickle.load(open('temp111.pkl', 'r'))
    except:
        init_abstract_state = make_abstract_state(problem_env, goal_entities)
        init_abstract_state.make_pklable()
        pickle.dump(init_abstract_state, open('temp111.pkl', 'wb'))

    init_abstract_state.make_plannable(problem_env)

    abstract_state = init_abstract_state
    total_ik_checks = 0
    total_mp_checks = 0
    total_pick_mp_checks = 0
    total_place_mp_checks = 0
    total_pick_mp_infeasible = 0
    total_place_mp_infeasible = 0

    total_infeasible_mp = 0
    plan_idx = 0
    n_total_actions = 0
    goal_reached = False
    stime = time.time()
    samples_tried = {i: [] for i in range(len(plan))}
    sample_values = {i: [] for i in range(len(plan))}

    learned_sampler_model = get_learned_sampler_models(config)
    problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'rrt'))
    while plan_idx < len(plan):
        goal_reached = problem_env.is_goal_reached()
        if goal_reached:
            break
        if n_total_actions >= 100:
            break

        action = plan[plan_idx]
        generator = get_generator(abstract_state, action,
                                  learned_sampler_model, config)
        if 'two_arm' in config.domain:
            cont_smpl = generator.sample_next_point(samples_tried[plan_idx],
                                                    sample_values[plan_idx])
        else:
            pick_smpl, place_smpl, status = generator.sample_next_point(
                samples_tried[plan_idx], sample_values[plan_idx])
            cont_smpl = {
                'pick': pick_smpl,
                'place': place_smpl,
                'is_feasible': status == "HasSolution"
            }

        total_ik_checks += generator.n_ik_checks
        total_mp_checks += generator.n_mp_checks
        total_infeasible_mp += generator.n_mp_infeasible
        total_pick_mp_checks += generator.n_pick_mp_checks
        total_place_mp_checks += generator.n_place_mp_checks
        total_pick_mp_infeasible += generator.n_pick_mp_infeasible
        total_place_mp_infeasible += generator.n_place_mp_infeasible

        n_total_actions += 1

        if cont_smpl['is_feasible']:
            print "Action executed"
            action.continuous_parameters = cont_smpl
            action.execute()
            import pdb
            pdb.set_trace()
            plan_idx += 1
            abstract_state = make_abstract_state(problem_env,
                                                 goal_entities,
                                                 parent_state=abstract_state,
                                                 parent_action=action)
        else:
            print "No feasible action"
            problem_env.init_saver.Restore()
            plan_idx = 0
            """
            for s in cont_smpl['samples']:
                samples_tried[plan_idx].append(s)
                sample_values[plan_idx].append(generator.sampler.infeasible_action_value)
            """
            abstract_state = init_abstract_state
        goal_reached = plan_idx == len(plan)
        print "Total IK checks {} Total actions {}".format(
            total_ik_checks, n_total_actions)
    import pdb
    pdb.set_trace()
    print time.time() - stime
    return total_ik_checks, total_pick_mp_checks, total_pick_mp_infeasible, total_place_mp_checks, \
           total_place_mp_infeasible, total_mp_checks, total_infeasible_mp, n_total_actions, goal_reached
def create_environment(problem_idx):
    """Build a Mover problem for the given index, attach a PRM-based base
    motion planner, and return both the problem wrapper and its OpenRAVE
    environment."""
    mover_problem = Mover(problem_idx)
    mover_problem.set_motion_planner(BaseMotionPlanner(mover_problem, 'prm'))
    return mover_problem, mover_problem.env
def main():
    parameters = parse_parameters()

    save_dir = make_and_get_save_dir(parameters)
    file_path = save_dir + '/seed_' + str(parameters.planner_seed) + '_pidx_' + str(parameters.pidx) + '.pkl'
    quit_if_already_tested(file_path, parameters.f)

    # for creating problem
    np.random.seed(parameters.pidx)
    random.seed(parameters.pidx)
    is_two_arm_env = parameters.domain.find('two_arm') != -1
    if is_two_arm_env:
        environment = Mover(parameters.pidx)
    else:
        environment = OneArmMover(parameters.pidx)
    environment.initial_robot_base_pose = get_body_xytheta(environment.robot)

    if parameters.domain == 'two_arm_mover':
        goal_region = 'home_region'
        if parameters.n_objs_pack == 4:
            goal_object_names = ['square_packing_box1', 'square_packing_box2', 'rectangular_packing_box3',
                                 'rectangular_packing_box4']
        else:
            goal_object_names = ['square_packing_box1']
        environment.set_goal(goal_object_names, goal_region)
    elif parameters.domain == 'one_arm_mover':
        goal_region = 'rectangular_packing_box1_region'
        assert parameters.n_objs_pack == 1
        goal_object_names = ['c_obst1']
        environment.set_goal(goal_object_names, goal_region)
    goal_entities = goal_object_names + [goal_region]

    # for randomized algorithms
    np.random.seed(parameters.planner_seed)
    random.seed(parameters.planner_seed)

    if parameters.v:
        utils.viewer()

    environment.set_motion_planner(BaseMotionPlanner(environment, 'rrt'))
    # from manipulation.bodies.bodies import set_color
    # set_color(environment.env.GetKinBody(goal_object_names[0]), [1, 0, 0])
    start_time = time.time()

    n_mp = n_ik = 0

    goal_object_names, high_level_plan, (mp, ik) = find_plan_without_reachability(environment, goal_object_names,
                                                                                  start_time,
                                                                                  parameters)  # finds the plan
    total_time_taken = time.time() - start_time
    n_mp += mp
    n_ik += ik

    total_n_nodes = 0
    total_plan = []
    idx = 0
    found_solution = False
    timelimit = parameters.timelimit
    while total_time_taken < timelimit:
        goal_obj_name = goal_object_names[idx]
        plan, n_nodes, status, (mp, ik) = find_plan_for_obj(goal_obj_name, high_level_plan[idx], environment,
                                                            start_time,
                                                            timelimit, parameters)
        total_n_nodes += n_nodes
        total_time_taken = time.time() - start_time
        print goal_obj_name, goal_object_names, total_n_nodes
        print "Time taken: %.2f" % total_time_taken
        if total_time_taken > timelimit:
            break
        if status == 'HasSolution':
            execute_plan(plan)
            environment.initial_robot_base_pose = utils.get_body_xytheta(environment.robot)
            total_plan += plan
            save_plan(total_plan, total_n_nodes, len(goal_object_names) - idx, found_solution, file_path, goal_entities,
                      total_time_taken, {'mp': n_mp, 'ik': n_ik}, parameters)
            idx += 1
        else:
            # Note that HPN does not have any recourse if this happens. We re-plan at the higher level.
            goal_object_names, plan, (mp, ik) = find_plan_without_reachability(environment, goal_object_names,
                                                                               start_time, parameters)  # finds the plan
            n_mp += mp
            n_ik += ik
            total_plan = []
            idx = 0
        if idx == len(goal_object_names):
            found_solution = True
            break
        else:
            idx %= len(goal_object_names)
    total_time_taken = time.time() - start_time
    save_plan(total_plan, total_n_nodes, len(goal_object_names) - idx, found_solution, file_path, goal_entities,
              total_time_taken, {'mp': n_mp, 'ik': n_ik}, parameters)
    print 'plan saved'
Exemple #22
0
def set_problem_env_config(problem_env, config):
    """Seed the RNGs from the planner seed, install a PRM base motion
    planner, and record the problem index and initial-state saver on the
    problem environment."""
    planner_seed = config.planner_seed
    np.random.seed(planner_seed)
    random.seed(planner_seed)
    problem_env.set_motion_planner(BaseMotionPlanner(problem_env, 'prm'))
    problem_env.seed = config.pidx
    problem_env.init_saver = DynamicEnvironmentStateSaver(problem_env.env)
Exemple #23
0
def generate_training_data_single():
    np.random.seed(config.pidx)
    random.seed(config.pidx)
    if config.domain == 'two_arm_mover':
        mover = Mover(config.pidx, config.problem_type)
    elif config.domain == 'one_arm_mover':
        mover = OneArmMover(config.pidx)
    else:
        raise NotImplementedError
    np.random.seed(config.planner_seed)
    random.seed(config.planner_seed)
    mover.set_motion_planner(BaseMotionPlanner(mover, 'prm'))
    mover.seed = config.pidx

    # todo change the root node
    mover.init_saver = DynamicEnvironmentStateSaver(mover.env)

    hostname = socket.gethostname()
    if hostname in {'dell-XPS-15-9560', 'phaedra', 'shakey', 'lab', 'glaucus', 'luke-laptop-1'}:
        root_dir = './'
        #root_dir = '/home/beomjoon/Dropbox (MIT)/cloud_results/'
    else:
        root_dir = '/data/public/rw/pass.port/tamp_q_results/'

    solution_file_dir = root_dir + '/test_results/greedy_results_on_mover_domain/domain_%s/n_objs_pack_%d'\
                        % (config.domain, config.n_objs_pack)

    if config.dont_use_gnn:
        solution_file_dir += '/no_gnn/'
    elif config.dont_use_h:
        solution_file_dir += '/gnn_no_h/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'
    elif config.hcount:
        solution_file_dir += '/hcount/'
    elif config.hadd:
        solution_file_dir += '/gnn_hadd/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'
    else:
        solution_file_dir += '/gnn/loss_' + str(config.loss) + '/num_train_' + str(config.num_train) + '/'

    solution_file_name = 'pidx_' + str(config.pidx) + \
                         '_planner_seed_' + str(config.planner_seed) + \
                         '_train_seed_' + str(config.train_seed) + \
                         '_domain_' + str(config.domain) + '.pkl'
    if not os.path.isdir(solution_file_dir):
        os.makedirs(solution_file_dir)

    solution_file_name = solution_file_dir + solution_file_name

    is_problem_solved_before = os.path.isfile(solution_file_name)
    if is_problem_solved_before and not config.plan:
        with open(solution_file_name, 'rb') as f:
            trajectory = pickle.load(f)
            success = trajectory.metrics['success']
            tottime = trajectory.metrics['tottime']
    else:
        t = time.time()
        trajectory, num_nodes = get_problem(mover)
        tottime = time.time() - t
        success = trajectory is not None
        plan_length = len(trajectory.actions) if success else 0
        if not success:
            trajectory = Trajectory(mover.seed, mover.seed)
        trajectory.states = [s.get_predicate_evaluations() for s in trajectory.states]
        trajectory.state_prime = None
        trajectory.metrics = {
            'n_objs_pack': config.n_objs_pack,
            'tottime': tottime,
            'success': success,
            'plan_length': plan_length,
            'num_nodes': num_nodes,
        }

        #with open(solution_file_name, 'wb') as f:
        #    pickle.dump(trajectory, f)
    print 'Time: %.2f Success: %d Plan length: %d Num nodes: %d' % (tottime, success, trajectory.metrics['plan_length'],
                                                                    trajectory.metrics['num_nodes'])
    import pdb;pdb.set_trace()

    """
    print("time: {}".format(','.join(str(trajectory.metrics[m]) for m in [
        'n_objs_pack',
        'tottime',
        'success',
        'plan_length',
        'num_nodes',
    ])))
    """

    print('\n'.join(str(a.discrete_parameters.values()) for a in trajectory.actions))

    def draw_robot_line(env, q1, q2):
        return draw_line(env, list(q1)[:2] + [.5], list(q2)[:2] + [.5], color=(0,0,0,1), width=3.)

    mover.reset_to_init_state_stripstream()
    if not config.dontsimulate:
        if config.visualize_sim:
            mover.env.SetViewer('qtcoin')
            set_viewer_options(mover.env)

            raw_input('Start?')
        handles = []
        for step_idx, action in enumerate(trajectory.actions):
            if action.type == 'two_arm_pick_two_arm_place':
                def check_collisions(q=None):
                    if q is not None:
                        set_robot_config(q, mover.robot)
                    collision = False
                    if mover.env.CheckCollision(mover.robot):
                        collision = True
                    for obj in mover.objects:
                        if mover.env.CheckCollision(obj):
                            collision = True
                    if collision:
                        print('collision')
                        if config.visualize_sim:
                            raw_input('Continue after collision?')

                check_collisions()
                o = action.discrete_parameters['two_arm_place_object']
                pick_params = action.continuous_parameters['pick']
                place_params = action.continuous_parameters['place']

                full_path = [get_body_xytheta(mover.robot)[0]] + pick_params['motion'] + [pick_params['q_goal']] + \
                            place_params['motion'] + [place_params['q_goal']]
                for i, (q1, q2) in enumerate(zip(full_path[:-1], full_path[1:])):
                    if np.linalg.norm(np.squeeze(q1)[:2] - np.squeeze(q2)[:2]) > 1:
                        print(i, q1, q2)
                        import pdb;
                        pdb.set_trace()

                pickq = pick_params['q_goal']
                pickt = pick_params['motion']
                check_collisions(pickq)
                for q in pickt:
                    check_collisions(q)
                obj = mover.env.GetKinBody(o)
                if len(pickt) > 0:
                    for q1, q2 in zip(pickt[:-1], pickt[1:]):
                        handles.append(
                            draw_robot_line(mover.env, q1.squeeze(), q2.squeeze()))
                # set_robot_config(pickq, mover.robot)
                # if config.visualize_sim:
                #   raw_input('Continue?')
                # set_obj_xytheta([1000,1000,0], obj)
                two_arm_pick_object(obj, pick_params)
                if config.visualize_sim:
                    raw_input('Place?')

                o = action.discrete_parameters['two_arm_place_object']
                r = action.discrete_parameters['two_arm_place_region']
                placeq = place_params['q_goal']
                placep = place_params['object_pose']
                placet = place_params['motion']
                check_collisions(placeq)
                for q in placet:
                    check_collisions(q)
                obj = mover.env.GetKinBody(o)
                if len(placet) > 0:
                    for q1, q2 in zip(placet[:-1], placet[1:]):
                        handles.append(
                            draw_robot_line(mover.env, q1.squeeze(), q2.squeeze()))
                # set_robot_config(placeq, mover.robot)
                # if config.visualize_sim:
                #   raw_input('Continue?')
                # set_obj_xytheta(placep, obj)
                two_arm_place_object(place_params)
                check_collisions()

                if config.visualize_sim:
                    raw_input('Continue?')

            elif action.type == 'one_arm_pick_one_arm_place':
                def check_collisions(q=None):
                    if q is not None:
                        set_robot_config(q, mover.robot)
                    collision = False
                    if mover.env.CheckCollision(mover.robot):
                        collision = True
                    for obj in mover.objects:
                        if mover.env.CheckCollision(obj):
                            collision = True
                    if collision:
                        print('collision')
                        if config.visualize_sim:
                            raw_input('Continue after collision?')

                check_collisions()
                pick_params = action.continuous_parameters['pick']
                place_params = action.continuous_parameters['place']

                one_arm_pick_object(mover.env.GetKinBody(action.discrete_parameters['object']), pick_params)

                check_collisions()

                if config.visualize_sim:
                    raw_input('Place?')

                one_arm_place_object(place_params)

                check_collisions()

                if config.visualize_sim:
                    raw_input('Continue?')
            else:
                raise NotImplementedError

        n_objs_pack = config.n_objs_pack
        if config.domain == 'two_arm_mover':
            goal_region = 'home_region'
        elif config.domain == 'one_arm_mover':
            goal_region = 'rectangular_packing_box1_region'
        else:
            raise NotImplementedError

        if all(mover.regions[goal_region].contains(o.ComputeAABB()) for o in
               mover.objects[:n_objs_pack]):
            print("successful plan length: {}".format(len(trajectory.actions)))
        else:
            print('failed to find plan')
        if config.visualize_sim:
            raw_input('Finish?')

    return
 def __init__(self, problem_env, target_object, planning_algorithm):
     """Initialize the planner and resolve the target object.

     ``target_object`` may be given either as a kinbody name (string) or as
     a body instance; a string is looked up in the OpenRAVE environment.
     NOTE(review): assumes BaseMotionPlanner.__init__ sets self.problem_env.
     """
     BaseMotionPlanner.__init__(self, problem_env, planning_algorithm)
     # isinstance instead of type(...) == str: also accepts str subclasses.
     if isinstance(target_object, str):
         self.target_object = self.problem_env.env.GetKinBody(target_object)
     else:
         self.target_object = target_object