def allocatePlanner(si, plannerType):
    if plannerType.lower() == "bfmtstar":
        return og.BFMT(si)
    elif plannerType.lower() == "bitstar":
        return og.BITstar(si)
    elif plannerType.lower() == "fmtstar":
        return og.FMT(si)
    elif plannerType.lower() == "informedrrtstar":
        return og.InformedRRTstar(si)
    elif plannerType.lower() == "prmstar":
        return og.PRMstar(si)
    elif plannerType.lower() == "rrtstar":
        return og.RRTstar(si)
    elif plannerType.lower() == "sorrtstar":
        return og.SORRTstar(si)
    elif plannerType.lower() == "rrtxstatic":
        return og.RRTXstatic(si)
    elif plannerType.lower() == "rrtsharp":
        return og.RRTsharp(si)

    # multi-query
    elif plannerType.lower() == "lazyprmstar":
        return og.LazyPRMstar(si)

    # single-query
    elif plannerType.lower() == "rrtconnect":
        return og.RRTConnect(si)
    elif plannerType.lower() == "lbtrrt":
        return og.LBTRRT(si)
    elif plannerType.lower() == "lazylbtrrt":
        return og.LazyLBTRRT(si)

    else:
        ou.OMPL_ERROR(
            "Planner-type is not implemented in allocation function.")
Example #2
    def __init__(self, shape, calc_time, resolution, distance):
        ou.setLogLevel(ou.LOG_WARN)
        self._calc_time = calc_time
        self._space = ob.RealVectorStateSpace(2)
        bounds = ob.RealVectorBounds(2)
        bounds.setLow(0.0)
        bounds.setHigh(0, shape[0] - 1)
        bounds.setHigh(1, shape[1] - 1)
        self._space.setBounds(bounds)
        self._si = ob.SpaceInformation(self._space)
        self._validity_checker = OmplPlannerImpl.ValidityChecker(self._si)
        self._si.setStateValidityChecker(self._validity_checker)
        self._si.setStateValidityCheckingResolution(resolution)
        self._si.setup()
        self._start = ob.State(self._space)
        self._goal = ob.State(self._space)
        self._pdef = ob.ProblemDefinition(self._si)
        objective = ob.MultiOptimizationObjective(self._si)
        objective.addObjective(OmplPlannerImpl.MapCostObjective(self._si), 0.5)
        # set the path-length objective
        length_objective = ob.PathLengthOptimizationObjective(self._si)
        objective.addObjective(length_objective, 0.1)
        # objective.setCostThreshold(1000.0)
        self._pdef.setOptimizationObjective(objective)
        self._planner = og.RRTstar(self._si)
        self._planner.setRange(distance)
        self._planner.setup()
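MapCostObjective is defined elsewhere in that source. A plausible sketch of such a map-based objective, assuming the cost comes from a 2-D array indexed by integer cell coordinates (the class name and map layout here are assumptions, not the original implementation):

class GridCostObjective(ob.StateCostIntegralObjective):
    """Hypothetical stand-in for MapCostObjective: state cost read from a 2-D array."""

    def __init__(self, si, cost_map):
        super(GridCostObjective, self).__init__(si, True)  # True: interpolate motion cost
        self._map = cost_map

    def stateCost(self, state):
        # state lives in a 2-D RealVectorStateSpace; round to the nearest cell
        x, y = int(round(state[0])), int(round(state[1]))
        return ob.Cost(float(self._map[y][x]))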
Example #3
def test_optimal(calc_time=0.01):
    space = ob.RealVectorStateSpace(2)
    space.setBounds(0, WIDTH)
    si = ob.SpaceInformation(space)
    si.setStateValidityChecker(ValidityChecker(si))
    si.setup()
    start = ob.State(space)
    goal = ob.State(space)
    start[0] = 0.0
    start[1] = 0.0
    goal[0] = WIDTH - 1
    goal[1] = WIDTH - 1
    pdef = ob.ProblemDefinition(si)
    pdef.setStartAndGoalStates(start, goal)
    objective = ob.MultiOptimizationObjective(si)
    objective.addObjective(MapCostObjective(si), 0.9)
    objective.addObjective(ob.PathLengthOptimizationObjective(si), 0.1)
    pdef.setOptimizationObjective(objective)
    planner = og.RRTstar(si)
    planner.setProblemDefinition(pdef)
    planner.setup()
    status = planner.solve(calc_time)
    if pdef.hasExactSolution():
        states = pdef.getSolutionPath().getStates()
        cost = pdef.getSolutionPath().cost(objective).value()
        states_np = np.array([(state[0], state[1]) for state in states])
        print(states)
        print(cost)
        print(states_np)
Example #4
    def __init__(self, lower_bound, upper_bound):
        """Initialize the object."""
        # Create an R2 state space
        self.space = ob.RealVectorStateSpace(2)

        # Set lower and upper bounds
        bounds = ob.RealVectorBounds(2)
        bounds.setLow(lower_bound)
        bounds.setHigh(upper_bound)
        self.space.setBounds(bounds)

        # Create a new space information
        self.si = ob.SpaceInformation(self.space)

        # Set the state validity checker
        self.si.setStateValidityChecker(
            ob.StateValidityCheckerFn(self.is_state_valid))
        self.si.setup()

        # Set the problem definition
        self.pdef = ob.ProblemDefinition(self.si)

        # Set up an optimizing planner with RRT*
        self.optimizingPlanner = og.RRTstar(self.si)

        # Empty obstacles list
        self.obstacles = []
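The checker registered here delegates to self.is_state_valid, which is not part of this excerpt. A plausible sketch, assuming the obstacles list holds (x, y, radius) circles; the storage format is an assumption:

    def is_state_valid(self, state):
        """Hypothetical checker: a state is valid if it lies outside every circular obstacle."""
        for ox, oy, radius in self.obstacles:
            if (state[0] - ox) ** 2 + (state[1] - oy) ** 2 <= radius ** 2:
                return False
        return True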
Example #5
def plan():
    # construct the state space we are planning in
    space = ob.SE2StateSpace()

    # set the bounds for the R^2 portion of SE(2)
    bounds = ob.RealVectorBounds(2)
    bounds.setLow(0, -40)
    bounds.setHigh(0, 40)
    bounds.setLow(1, 0)
    bounds.setHigh(1, 90)
    space.setBounds(bounds)

    # define a simple setup class
    ss = og.SimpleSetup(space)

    # create a start state
    start = ob.State(space)
    start().setX(0)
    start().setY(0)
    start().setYaw(0)
    # start().setZ(-9)
    # start().rotation().setIdentity()

    # create a goal state
    goal = ob.State(space)
    goal().setX(-25)
    goal().setY(60)
    goal().setYaw(0)
    # goal().setZ(-9)
    # goal().rotation().setIdentity()

    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    # set the start and goal states
    ss.setStartAndGoalStates(start, goal, 0.05)

    # Let's use RRT*. It will have interesting PlannerData
    planner = og.RRTstar(ss.getSpaceInformation())
    ss.setPlanner(planner)
    ss.setup()

    # attempt to solve the problem
    # To change the planning time, change the value given to ss.solve
    solved = ss.solve(1.0)

    if solved:
        # print the path to screen
        print("Found solution:\n%s" % ss.getSolutionPath())

        # Extracting planner data from most recent solve attempt
        pd = ob.PlannerData(ss.getSpaceInformation())
        ss.getPlannerData(pd)

        # Computing weights of all edges based on state space distance
        pd.computeEdgeWeights()

        path = get_path(ss)
        draw_graph(path)
        plt.show()
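isStateValid is assumed to exist in the surrounding module. A minimal placeholder of the kind this SE(2) example expects (illustrative only; a real checker would test the vehicle footprint against the environment):

def isStateValid(state):
    # SE(2) states expose x, y and yaw accessors
    return -40.0 <= state.getX() <= 40.0 and 0.0 <= state.getY() <= 90.0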
Example #6
    def setPlanner_3d(self):
        self.si = self.ss.getSpaceInformation()
        if self.plannerType.lower() == "bitstar":
            planner = og.BITstar(self.si)
        elif self.plannerType.lower() == "fmtstar":
            planner = og.FMT(self.si)
        elif self.plannerType.lower() == "informedrrtstar":
            planner = og.InformedRRTstar(self.si)
        elif self.plannerType.lower() == "prmstar":
            planner = og.PRMstar(self.si)
        elif self.plannerType.lower() == "rrtstar":
            planner = og.RRTstar(self.si)
        elif self.plannerType.lower() == "sorrtstar":
            planner = og.SORRTstar(self.si)
        else:
            print("Planner-type is not implemented in allocation function.")
            planner = og.RRTstar(self.si)
        self.ss.setPlanner(planner)
Example #7
def plan():
    # create a real vector state space
    space = ob.RealVectorStateSpace(SPACE_DIM)

    # set lower and upper bounds: the first three dimensions are positions,
    # the last three are angles (this assumes SPACE_DIM == 6)
    bounds = ob.RealVectorBounds(SPACE_DIM)
    for i in range(SPACE_DIM - 3):
        bounds.setLow(i, min_bound)
        bounds.setHigh(i, max_bound)
    for i in range(SPACE_DIM - 3):
        bounds.setLow(i + 3, -np.pi)
        bounds.setHigh(i + 3, np.pi)
    space.setBounds(bounds)

    # create a simple setup object
    ss = og.SimpleSetup(space)

    # set planner
    planner = og.RRTstar(ss.getSpaceInformation())
    ss.setPlanner(planner)

    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    start = ob.State(space)
    # start.random()
    start_state = start.get()
    for i in range(SPACE_DIM):
        start_state[i] = start_s[i]
    # print(start_state[i])

    goal = ob.State(space)
    # goal.random()
    goal_state = goal.get()
    for i in range(SPACE_DIM):
        goal_state[i] = goal_s[i]
    # print(goal_state[i])

    ss.setStartAndGoalStates(start, goal)

    # default parameters
    solved = ss.solve(4.0)

    if solved:
        # simplify the solution path
        ss.simplifySolution()
        # retrieve the result
        path_result = ss.getSolutionPath()
        print(path_result)

        si = ss.getSpaceInformation()
        pdata = ob.PlannerData(si)
        ss.getPlannerData(pdata)

        space = path_result.getSpaceInformation().getStateSpace()
        plot_result(space, path_result, pdata)
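plot_result is defined elsewhere in that source. For reference, the solution states of a RealVectorStateSpace path can be pulled into a NumPy array like this (a short sketch reusing SPACE_DIM and np from the surrounding example):

states = path_result.getStates()
waypoints = np.array([[s[i] for i in range(SPACE_DIM)] for s in states])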
Example #8
def allocatePlanner(si, plannerType):
    if plannerType.lower() == "bitstar":
        return og.BITstar(si)
    elif plannerType.lower() == "fmtstar":
        return og.FMT(si)
    elif plannerType.lower() == "prmstar":
        return og.PRMstar(si)
    elif plannerType.lower() == "rrtstar":
        return og.RRTstar(si)
    else:
        OMPL_ERROR("Planner-type is not implemented in allocation function.")
Example #9
    def allocatePlanner(self, si, plannerType):
        if plannerType.lower() == "bfmtstar":
            return og.BFMT(si)
        elif plannerType.lower() == "bitstar":
            return og.BITstar(si)
        elif plannerType.lower() == "fmtstar":
            return og.FMT(si)
        elif plannerType.lower() == "informedrrtstar":
            return og.InformedRRTstar(si)
        elif plannerType.lower() == "prmstar":
            return og.PRMstar(si)
        elif plannerType.lower() == "rrtstar":
            return og.RRTstar(si)
        elif plannerType.lower() == "sorrtstar":
            return og.SORRTstar(si)
        else:
            ou.OMPL_ERROR(
                "Planner-type is not implemented in allocation function.")
Example #10
def test_rigid(calc_time=0.01):
    space = ob.RealVectorStateSpace(2)
    space.setBounds(0, WIDTH)
    si = ob.SpaceInformation(space)
    si.setStateValidityChecker(ValidityChecker(si))
    si.setup()
    start = ob.State(space)
    goal = ob.State(space)
    start[0] = 0.0
    start[1] = 0.0
    goal[0] = WIDTH - 1
    goal[1] = WIDTH - 1
    pdef = ob.ProblemDefinition(si)
    pdef.setStartAndGoalStates(start, goal)
    planner = og.RRTstar(si)
    planner.setProblemDefinition(pdef)
    planner.setup()
    status = planner.solve(calc_time)
    if pdef.hasExactSolution():
        cost = pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()
        print(cost)
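test_rigid never sets an objective on the ProblemDefinition; pdef.getOptimizationObjective() still returns one because RRT* installs a default path-length objective during setup() when none is given. Setting it explicitly avoids relying on that default (a one-line sketch):

pdef.setOptimizationObjective(ob.PathLengthOptimizationObjective(si))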
Example #11
    def plan_rrt(self, name, init, goal):

        init = self.createState(*tuple(init))
        goal = self.createState(*tuple(goal))

        self.name_target = name

        planner = og.RRTstar(self.si)
        ss = og.SimpleSetup(self.si)
        ss.setStateValidityChecker(ob.StateValidityCheckerFn(self.isStateValid))    
        ss.setPlanner(planner)
        ss.setStartAndGoalStates(init, goal)        
        solved = ss.solve(1.0)
        if solved:
            ss.simplifySolution()
            path = ss.getSolutionPath()

            path_res = []
            for state in path.getStates():
                path_res.append((state.getX(),state.getY(),state.getYaw()))
            return path_res
        else:
            return None
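createState belongs to the surrounding class and is not shown. A plausible sketch, assuming self.si wraps an SE(2) state space; both the space and the helper's signature are assumptions:

    def createState(self, x, y, yaw=0.0):
        # hypothetical helper: build an SE(2) state from (x, y, yaw)
        state = ob.State(self.si.getStateSpace())
        state().setX(x)
        state().setY(y)
        state().setYaw(yaw)
        return state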
Example #12
    def __init__(self, ndof, iksolver, step_size=0.05):
        self.ndof = ndof
        self.iksolver = iksolver
        self.state_space = MyStateSpace(ndof)
        self.control_space = MyControlSpace(self.state_space, ndof)
        self.simple_setup = oc.SimpleSetup(self.control_space)
        si = self.simple_setup.getSpaceInformation()
        si.setPropagationStepSize(step_size)
        si.setMinMaxControlDuration(1, 1)
        si.setDirectedControlSamplerAllocator(
            oc.DirectedControlSamplerAllocator(
                directedControlSamplerAllocator))

        vc = MyStateValidityChecker(self.simple_setup.getSpaceInformation(),
                                    self.iksolver, ndof)
        self.simple_setup.setStateValidityChecker(vc)

        # use a distinct name so the ompl.base alias "ob" is not shadowed
        objective = MyOptimizationObjective(self.simple_setup.getSpaceInformation())
        self.simple_setup.setOptimizationObjective(objective)

        propagator = MyStatePropagator(self.simple_setup.getSpaceInformation(),
                                       ndof)
        self.simple_setup.setStatePropagator(propagator)

        # self.planner = oc.KPIECE1(self.simple_setup.getSpaceInformation())
        # self.planner.setup()
        # ========= RRT planner ============
        self.planner = og.RRTstar(self.simple_setup.getSpaceInformation())
        p_goal = 0.0
        self.planner.setGoalBias(p_goal)
        self.planner.setRange(step_size)

        IPython.embed()
        # =========== PRM planner =============
        # self.planner = og.PRM(self.simple_setup.getSpaceInformation())

        self.simple_setup.setPlanner(self.planner)
Example #13
def allocatePlanner(si, plannerType):
    if plannerType.lower() == "bfmtstar":
        return og.BFMT(si)
    elif plannerType.lower() == "bitstar":
        planner = og.BITstar(si)
        planner.setPruning(False)
        planner.setSamplesPerBatch(200)
        planner.setRewireFactor(20.)
        return planner
    elif plannerType.lower() == "fmtstar":
        return og.FMT(si)
    elif plannerType.lower() == "informedrrtstar":
        return og.InformedRRTstar(si)
    elif plannerType.lower() == "prmstar":
        return og.PRMstar(si)
    elif plannerType.lower() == "rrtstar":
        return og.RRTstar(si)
    elif plannerType.lower() == "sorrtstar":
        return og.SORRTstar(si)
    elif plannerType.lower() == 'rrtconnect':
        return og.RRTConnect(si)
    else:
        ou.OMPL_ERROR(
            "Planner-type is not implemented in allocation function.")
Example #14
    def plan(self, controlled_object, x1, y1, v1, a1):
        a1 = (a1 + 2 * np.pi) % (2 * np.pi)

        car = deepcopy(controlled_object)

        space = ob.RealVectorStateSpace(3)
        bounds = ob.RealVectorBounds(3)
        bounds.setLow(0, 0)
        bounds.setLow(1, 0)
        bounds.setLow(2, 0)
        bounds.setHigh(0, 1000)
        bounds.setHigh(1, 1000)
        bounds.setHigh(2, 2 * np.pi)
        space.setBounds(bounds)
        si = ob.SpaceInformation(space)

        validityChecker = ValidityChecker(si, self.state, car)
        si.setStateValidityChecker(validityChecker)
        #si.setMotionValidator(MotionValidityChecker(si, state, agent_num))
        si.setup()

        start = ob.State(space)
        start[0] = car.x
        start[1] = car.y
        start[2] = car.angle

        goal = ob.State(space)
        goal[0] = x1
        goal[1] = y1
        goal[2] = a1

        pdef = ob.ProblemDefinition(si)
        pdef.setStartAndGoalStates(start, goal)
        pdef.setOptimizationObjective(ob.PathLengthOptimizationObjective(si))

        def objStateCost(state):
            return ob.Cost(0)

        def objMotionCost(state1, state2):
            x, y, a = state1[0], state1[1], state1[2]
            xt, yt, at = state2[0], state2[1], state2[2]
            yd, xd = yt - y, xt - x
            ax = np.arctan(yd / (xd + 0.001))
            ax = ax % (np.pi)
            if yd < 0:
                ax = ax + np.pi
            ad = min(np.abs(at - ax), np.abs((ax - at)))
            return 10 * ad

        pathObj = ob.OptimizationObjective(si)
        pathObj.stateCost = objStateCost
        pathObj.motionCost = objMotionCost
        pathObj.setCostThreshold(1)

        #pdef.setOptimizationObjective(pathObj)

        optimizingPlanner = og.RRTstar(si)
        optimizingPlanner.setRange(self.inter_point_d)
        optimizingPlanner.setProblemDefinition(pdef)
        optimizingPlanner.setup()

        solved = optimizingPlanner.solve(self.planning_time)
        sol = pdef.getSolutionPath()

        if not sol:
            return []

        sol = sol.printAsMatrix()

        s = [[float(j) for j in i.split(" ")[:-1]]
             for i in sol.splitlines()][:-1]

        v0 = car.vel
        num_points = len(s)
        for i in range(num_points):
            s[i].append(v0 * (1 - float(i) / float(num_points)) +
                        v1 * (float(i) / float(num_points)))
        return s
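The heading term in objMotionCost reconstructs the bearing's quadrant by hand and does not wrap the angular difference. A standalone variant using np.arctan2 (a sketch, not part of the original source) handles both directly:

def heading_cost(state1, state2):
    # bearing from state1 to state2, and the wrapped difference to state2's heading
    bearing = np.arctan2(state2[1] - state1[1], state2[0] - state1[0]) % (2 * np.pi)
    diff = abs(state2[2] - bearing) % (2 * np.pi)
    return ob.Cost(10 * min(diff, 2 * np.pi - diff))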
Example #15
    start = ob.State(space)
    start()[0] = 0
    start()[1] = 0

    goal = ob.State(space)
    goal()[0] = 2
    goal()[1] = 1.5

    ss.setStartAndGoalStates(start, goal)

    si = ss.getSpaceInformation()
    si.setValidStateSamplerAllocator(
        ob.ValidStateSamplerAllocator(allocMyValidStateSampler))

    planner = og.RRTstar(si)
    planner = og.PRM(si)  # note: this overrides the RRT* instance created on the previous line
    ss.setPlanner(planner)

    solved = ss.solve(0.05)

    c.draw()

    if solved:
        ss.simplifySolution()
        pd = ob.PlannerData(ss.getSpaceInformation())
        ss.getPlannerData(pd)
        pd.computeEdgeWeights()

        Vn = pd.numVertices()
        for n in range(0, Vn):
Example #16
def plan(grid):

    # agent and goal are represented by a point (x, y) and a radius
    global x
    global time
    x = grid
    agent: Agent = grid.agent
    goal: Goal = grid.goal

    # Construct the robot state space in which we're planning. R2
    space = ob.RealVectorStateSpace(2)

    # Set the bounds of the space to lie inside the map
    bounds = ob.RealVectorBounds(2)

    # projection
    pj = ProjectionEvaluator(space)
    print('pj=', pj)

    pj.setCellSizes(list2vec([1.0, 1.0]))
    space.registerDefaultProjection(pj)

    bounds.setLow(0, 0)  # set min x to 0
    bounds.setHigh(0, grid.size.width)  # set max x to the grid width
    bounds.setLow(1, 0)  # set min y to 0
    bounds.setHigh(1, grid.size.height)  # set max y to the grid height

    space.setBounds(bounds)

    print("bounds=", bounds.getVolume())

    # Construct a space information instance for this state space
    si = ob.SpaceInformation(space)

    # Set the object used to check which states in the space are valid
    si.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    # Set robot's starting state to agent's position (x,y) -> e.g. (0,0)
    start = ob.State(space)
    start[0] = float(agent.position.x)
    start[1] = float(agent.position.y)

    print(start[0], start[1])
    # Set robot's goal state (x,y) -> e.g. (1.0,0.0)
    goal = ob.State(space)
    goal[0] = float(grid.goal.position.x)
    goal[1] = float(grid.goal.position.y)

    # Create a problem instance
    pdef = ob.ProblemDefinition(si)

    # Set the start and goal states
    pdef.setStartAndGoalStates(start, goal)

    # Create the optimization objective specified by our command-line argument.
    # This helper function is simply a switch statement.
    #pdef.setOptimizationObjective(allocateObjective(si, objectiveType))

    # ******create a planner for the defined space
    planner = og.RRTstar(si)

    # set the problem we are trying to solve for the planner
    planner.setProblemDefinition(pdef)

    print(planner)
    #print('checking projection',planner.getProjectionEvaluator())

    print("__________________________________________________________")
    # perform setup steps for the planner
    planner.setup()

    # print the settings for this space
    print("space settings\n")
    print(si.settings())

    print("****************************************************************")
    print("problem settings\n")
    # print the problem settings
    print(pdef)

    # attempt to solve the problem within the allotted planning time (the global "time" value)
    solved = planner.solve(time)

    # For troubleshooting
    if solved:
        # get the goal representation from the problem definition (not the same as the goal state)
        # and inquire about the found path
        path = pdef.getSolutionPath()
        print("Found solution:\n%s" % path)
        #return trace for _find_path_internal method
        print(
            "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
        )

        if path is None:
            return None

        x = pdef.getSolutionPath().printAsMatrix()
        lst = x.split()
        lst = [int(round(float(x), 0)) for x in lst]

        print(x)
        print(lst)

        trace = []
        for i in range(0, len(lst), 2):
            trace.append(Point(lst[i], lst[i + 1]))
            print(trace)
        return trace
    else:
        print("No solution found")
Example #17
def plan(fname=None):
    # Construct the robot state space in which we're planning. We're
    # planning in [0,1]x[0,1], a subset of R^2.
    space = ob.RealVectorStateSpace(2)

    # Set the bounds of space to be in [0,1].
    space.setBounds(0.0, 1.0)

    # Construct a space information instance for this state space
    si = ob.SpaceInformation(space)

    # Set the object used to check which states in the space are valid
    validityChecker = ValidityChecker(si)
    si.setStateValidityChecker(validityChecker)

    si.setup()

    # Set our robot's starting state to be the bottom-left corner of
    # the environment, or (0,0).
    start = ob.State(space)
    start[0] = 0.0
    start[1] = 0.0

    # Set our robot's goal state to be the top-right corner of the
    # environment, or (1,1).
    goal = ob.State(space)
    goal[0] = 1.0
    goal[1] = 1.0

    # Create a problem instance
    pdef = ob.ProblemDefinition(si)

    # Set the start and goal states
    pdef.setStartAndGoalStates(start, goal)

    # Since we want to find an optimal plan, we need to define what
    # is optimal with an OptimizationObjective structure. Exactly one
    # of the following 6 lines should be left un-commented; each shows
    # a different example of an optimization objective.
    pdef.setOptimizationObjective(getPathLengthObjective(si))
    # pdef.setOptimizationObjective(getThresholdPathLengthObj(si))
    # pdef.setOptimizationObjective(getClearanceObjective(si))
    # pdef.setOptimizationObjective(getBalancedObjective1(si))
    # pdef.setOptimizationObjective(getBalancedObjective2(si))
    # pdef.setOptimizationObjective(getPathLengthObjWithCostToGo(si))

    # Construct our optimal planner using the RRTstar algorithm.
    optimizingPlanner = og.RRTstar(si)

    # Set the problem instance for our planner to solve
    optimizingPlanner.setProblemDefinition(pdef)
    optimizingPlanner.setup()

    # attempt to solve the planning problem within ten seconds of
    # planning time
    solved = optimizingPlanner.solve(10.0)

    if solved:
        # Output the length of the path found
        print("Found solution of path length %g" %
              pdef.getSolutionPath().length())

        # If a filename was specified, output the path as a matrix to
        # that file for visualization
        if fname:
            with open(fname, 'w') as outFile:
                outFile.write(pdef.getSolutionPath().printAsMatrix())
    else:
        print("No solution found.")