Example #1
def plan():
    # create an R^3 state space
    space = ob.RealVectorStateSpace(3)
    # set lower and upper bounds
    bounds = ob.RealVectorBounds(3)
    bounds.setLow(-1)
    bounds.setHigh(1)
    space.setBounds(bounds)
    # create a simple setup object
    ss = og.SimpleSetup(space)
    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))
    start = ob.State(space)
    start()[0] = -1.
    start()[1] = -1.
    start()[2] = -1.
    goal = ob.State(space)
    goal()[0] = 1.
    goal()[1] = 1.
    goal()[2] = 1.
    ss.setStartAndGoalStates(start, goal, .05)
    # set the planner
    planner = RandomWalkPlanner(ss.getSpaceInformation())
    ss.setPlanner(planner)

    result = ss.solve(10.0)
    if result:
        if result.getStatus() == ob.PlannerStatus.APPROXIMATE_SOLUTION:
            print("Solution is approximate")
        # try to shorten the path
        ss.simplifySolution()
        # print the simplified path
        print(ss.getSolutionPath())
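These snippets assume the OMPL Python bindings are imported as ob/og (plus oc/ou where used) and that helpers such as isStateValid and the custom RandomWalkPlanner are defined elsewhere. A minimal sketch of the assumed imports and a placeholder validity checker for Example #1 (the spherical obstacle is purely illustrative):

from ompl import base as ob
from ompl import geometric as og

def isStateValid(state):
    # Illustrative check only: treat everything outside a ball of radius 0.5
    # around the origin as free space; replace with real collision checking.
    return state[0] * state[0] + state[1] * state[1] + state[2] * state[2] > 0.25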
Example #2
def planBITstar(q_st, q_g, res):
    # create an R^3 state space
    space = ob.RealVectorStateSpace(3)

    # set lower and upper bounds
    bounds = ob.RealVectorBounds(3)
    bounds.setLow(0)
    bounds.setHigh(res)
    space.setBounds(bounds)

    # construct an instance of space information from this state space
    si = ob.SpaceInformation(space)

    # set state validity checking for this space
    si.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    # create start state
    start = ob.State(space)
    start[0] = q_st[0]
    start[1] = q_st[1]

    # create goal state
    goal = ob.State(space)
    goal[0] = q_g[0]
    goal[1] = q_g[1]

    # create a problem instance
    pdef = ob.ProblemDefinition(si)

    # set the start and goal states
    pdef.setStartAndGoalStates(start, goal)

    # create a planner for the defined space
    planner = og.BITstar(si)

    # set the problem we are trying to solve for the planner
    planner.setProblemDefinition(pdef)

    # perform setup steps for the planner
    planner.setup()

    # attempt to solve the problem within one second of planning time
    solved = planner.solve(1.0)

    if solved:
        # get the goal representation from the problem definition (not the same as the goal state)
        # and inquire about the found path
        path = pdef.getSolutionPath()
        print("Found solution:\n")
        path_arr = []
        print(path.getStateCount())
        for i in range(path.getStateCount()):
            path_arr.append([])
            path_arr[i].append(path.getState(i)[0])
            path_arr[i].append(path.getState(i)[1])
        return path_arr
    else:
        print("No solution found")
        return []
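For context, a call to the function above might look like the following; the start/goal coordinates and workspace size are made-up values:

# Hypothetical usage of planBITstar: plan from (1, 1) to (9, 9) in a
# square workspace whose upper bound is res = 10.
waypoints = planBITstar((1.0, 1.0), (9.0, 9.0), 10.0)
for x, y in waypoints:
    print(x, y)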
Example #3
    def _get_path(
        self,
        is_state_valid: Callable[[np.ndarray], bool],
        start_js: np.ndarray,
        robot_targ: RobotTarget,
        use_sim: MpSim,
        mp_space: MpSpace,
        timeout: int,
    ):
        """
        Does the low-level path planning with OMPL.
        """
        if not self._should_render:
            ou.setLogLevel(ou.LOG_ERROR)
        dim = mp_space.get_state_dim()
        space = ob.RealVectorStateSpace(dim)
        bounds = ob.RealVectorBounds(dim)

        lims = mp_space.get_state_lims()
        for i, lim in enumerate(lims):
            bounds.setLow(i, lim[0])
            bounds.setHigh(i, lim[1])
        space.setBounds(bounds)

        si = ob.SpaceInformation(space)
        si.setStateValidityChecker(ob.StateValidityCheckerFn(is_state_valid))
        si.setup()

        pdef = ob.ProblemDefinition(si)
        mp_space.set_problem(pdef, space, si, start_js, robot_targ)

        planner = mp_space.get_planner(si)
        planner.setProblemDefinition(pdef)
        planner.setup()
        if mp_space.get_range() is not None:
            planner.setRange(mp_space.get_range())

        solved = planner.solve(timeout)

        if not solved:
            self._log("Could not find plan")
            return None
        objective = pdef.getOptimizationObjective()
        if objective is not None:
            cost = (pdef.getSolutionPath().cost(
                pdef.getOptimizationObjective()).value())
        else:
            cost = np.inf

        self._log("Got a path of length %.2f and cost %.2f" %
                  (pdef.getSolutionPath().length(), cost))

        path = pdef.getSolutionPath()
        joint_plan = mp_space.convert_sol(path)
        self._is_approx_sol = pdef.hasApproximateSolution()

        return joint_plan
Example #4
def plan(runTime, plannerType, objectiveType, plotData, fname, pdfname):
    space = ob.RealVectorStateSpace(2)
    space.setBounds(0.0, 1.0)
    ss = og.SimpleSetup(space)

    start = ob.State(space)
    start[0] = 0.0
    start[1] = 0.0
    goal = ob.State(space)
    goal[0] = 1.0
    goal[1] = 1.0

    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))
    ss.setStartAndGoalStates(start, goal)

    si = ss.getSpaceInformation()
    # Create the optimization objective specified by our command-line argument.
    # This helper function is simply a switch statement.
    ss.setOptimizationObjective(allocateObjective(si, objectiveType))
    ss.setPlanner(allocatePlanner(si, plannerType))
    ss.setup()

    # attempt to solve the problem
    solved = ss.solve(runTime)

    if solved:
        # Output the length of the path found
        print('{0} found solution of path length {1:.4f} with an optimization ' \
            'objective value of {2:.4f}'.format( \
            ss.getSolutionPlannerName(), \
            ss.getSolutionPath().length(), \
            ss.getSolutionPath().cost(ss.getOptimizationObjective()).value()))

        # print the path to screen
        print("Found solution:\n%s" % ss.getSolutionPath())

        # Extracting planner data from most recent solve attempt
        pd = ob.PlannerData(si)
        ss.getPlannerData(pd)

        # Computing weights of all edges based on state space distance
        pd.computeEdgeWeights()

        if graphtool and plotData:
            useGraphTool(pd)

        # If a filename was specified, output the path as a matrix to
        # that file for visualization
        if fname:
            with open(fname, 'w') as outFile:
                outFile.write(ss.getSolutionPath().printAsMatrix())
        if pdfname:
            pds = ob.PlannerDataStorage()
            pds.store(pd, pdfname)
    else:
        print("No solution found.")
Example #5
def plan():
    # create a real-vector state space
    space = ob.RealVectorStateSpace(SPACE_DIM)

    # set lower and upper bounds
    bounds = ob.RealVectorBounds(SPACE_DIM)
    for i in range(SPACE_DIM - 3):
        bounds.setLow(i, min_bound)
        bounds.setHigh(i, max_bound)
    for i in range(SPACE_DIM - 3):
        bounds.setLow(i + 3, -np.pi)
        bounds.setHigh(i + 3, np.pi)
    space.setBounds(bounds)

    # create a simple setup object
    ss = og.SimpleSetup(space)

    # set planner
    planner = og.RRTstar(ss.getSpaceInformation())
    ss.setPlanner(planner)

    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    start = ob.State(space)
    # start.random()
    start_state = start.get()
    for i in range(SPACE_DIM):
        start_state[i] = start_s[i]
    # print(start_state[i])

    goal = ob.State(space)
    # goal.random()
    goal_state = goal.get()
    for i in range(SPACE_DIM):
        goal_state[i] = goal_s[i]
    # print(goal_state[i])

    ss.setStartAndGoalStates(start, goal)

    # default parameters
    solved = ss.solve(4.0)

    if solved:
        # simplify the path
        ss.simplifySolution()
        # retrieve the result
        path_result = ss.getSolutionPath()
        print(path_result)

        si = ss.getSpaceInformation()
        pdata = ob.PlannerData(si)
        ss.getPlannerData(pdata)

        space = path_result.getSpaceInformation().getStateSpace()
        plot_result(space, path_result, pdata)
Example #6
 def __init__(self):
     ## add OMPL setting for different environments
     self.env_name = rospy.get_param('env_name')
     self.planner = rospy.get_param('planner_name')
     if self.env_name == 's2d':
         #data_loader = data_loader_2d
         IsInCollision = plan_s2d.IsInCollision
         # create an R^2 state space
         space = ob.RealVectorStateSpace(2)
         bounds = ob.RealVectorBounds(2)
         bounds.setLow(-20)
         bounds.setHigh(20)
         space.setBounds(bounds)
     elif self.env_name == 'c2d':
         #data_loader = data_loader_2d
         IsInCollision = plan_c2d.IsInCollision
         # create an R^2 state space
         space = ob.RealVectorStateSpace(2)
         bounds = ob.RealVectorBounds(2)
         bounds.setLow(-20)
         bounds.setHigh(20)
         space.setBounds(bounds)
     elif self.env_name == 'r2d':
         #data_loader = data_loader_r2d
         IsInCollision = plan_r2d.IsInCollision
         # create an SE2 state space
         space = ob.SE2StateSpace()
         bounds = ob.RealVectorBounds(2)
         bounds.setLow(-20)
         bounds.setHigh(20)
         space.setBounds(bounds)
     elif self.env_name == 'r3d':
         #data_loader = data_loader_r3d
         IsInCollision = plan_r3d.IsInCollision
         # create an R^3 state space
         space = ob.RealVectorStateSpace(3)
         bounds = ob.RealVectorBounds(3)
         bounds.setLow(-20)
         bounds.setHigh(20)
         space.setBounds(bounds)
     self.IsInCollision = IsInCollision
     self.space = space
Example #7
def plan(samplerIndex):
    # construct the state space we are planning in
    space = ob.RealVectorStateSpace(3)

    # set the bounds
    bounds = ob.RealVectorBounds(3)
    bounds.setLow(-1)
    bounds.setHigh(1)
    space.setBounds(bounds)

    # define a simple setup class
    ss = og.SimpleSetup(space)

    # set state validity checking for this space
    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    # create a start state
    start = ob.State(space)
    start[0] = 0
    start[1] = 0
    start[2] = 0

    # create a goal state
    goal = ob.State(space)
    goal[0] = 0
    goal[1] = 0
    goal[2] = 1

    # set the start and goal states;
    ss.setStartAndGoalStates(start, goal)

    # set sampler (optional; the default is uniform sampling)
    si = ss.getSpaceInformation()
    if samplerIndex == 1:
        # use obstacle-based sampling
        si.setValidStateSamplerAllocator(
            ob.ValidStateSamplerAllocator(allocOBValidStateSampler))
    elif samplerIndex == 2:
        # use my sampler
        si.setValidStateSamplerAllocator(
            ob.ValidStateSamplerAllocator(allocMyValidStateSampler))

    # create a planner for the defined space
    planner = og.PRM(si)
    ss.setPlanner(planner)

    # attempt to solve the problem within ten seconds of planning time
    solved = ss.solve(10.0)
    if solved:
        print("Found solution:")
        # print the path to screen
        print(ss.getSolutionPath())
    else:
        print("No solution found")
Example #8
def rrt_star_traj(start_conf, goal_conf, env_params, env, robot,
                  planner_params, obs_params):
    #RRTstar setup
    space = ob.RealVectorStateSpace(2)
    bounds = ob.RealVectorBounds(2)
    bounds.setLow(env_params['x_lims'][0])
    bounds.setHigh(env_params['x_lims'][1])
    space.setBounds(bounds)
    init_planner = RRTStar(space, bounds, env, robot, planner_params, 0.0)
    init_path = init_planner.plan(start_conf, goal_conf, 4.0)
    th_init = path_to_traj_avg_vel(init_path, planner_params['total_time_sec'],
                                   planner_params['dof'])
    return th_init
Example #9
def plan(runTime, plannerType, objectiveType, fname, bound, start_pt, goal_pt):
    print "Run Time: ", runTime
    # Construct the robot state space in which we're planning. We're
    # planning in [0,1]x[0,1], a subset of R^2.
    space = ob.RealVectorStateSpace(2)
    # Set the bounds of space to be in [0,1].
    space.setBounds(bound[0], bound[1])
    # Construct a space information instance for this state space
    si = ob.SpaceInformation(space)
    # Set the object used to check which states in the space are valid
    validityChecker = ValidityChecker(si)
    si.setStateValidityChecker(validityChecker)
    si.setup()
    # Set our robot's starting state to be the bottom-left corner of
    # the environment, or (0,0).
    start = ob.State(space)
    start[0] = start_pt[0]
    start[1] = start_pt[1]
    # Set our robot's goal state to be the top-right corner of the
    # environment, or (1,1).
    goal = ob.State(space)
    goal[0] = goal_pt[0]
    goal[1] = goal_pt[1]

    # Create a problem instance
    pdef = ob.ProblemDefinition(si)
    # Set the start and goal states
    pdef.setStartAndGoalStates(start, goal)
    # Create the optimization objective specified by our command-line argument.
    # This helper function is simply a switch statement.
    pdef.setOptimizationObjective(allocateObjective(si, objectiveType))
    # Construct the optimal planner specified by our command line argument.
    # This helper function is simply a switch statement.
    optimizingPlanner = allocatePlanner(si, plannerType)
    # Set the problem instance for our planner to solve
    optimizingPlanner.setProblemDefinition(pdef)
    optimizingPlanner.setup()
    # attempt to solve the planning problem in the given runtime
    solved = optimizingPlanner.solve(runTime)
    if solved:
        # Output the length of the path found
        print("{0} found solution of path length {1:.4f} with an optimization objective value of {2:.4f}".format(optimizingPlanner.getName(), pdef.getSolutionPath().length(), pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()))
        # If a filename was specified, output the path as a matrix to
        # that file for visualization
        if fname:
            with open(fname,'w') as outFile:
                outFile.write(pdef.getSolutionPath().printAsMatrix())
        return True, pdef.getSolutionPath().printAsMatrix()
    else:
        print("No solution found.")
        return False, "No solution found"
Example #10
def initialize_space():
    dof = 7
    space = ob.RealVectorStateSpace(dof)

    # set the bounds
    bounds = ob.RealVectorBounds(dof)
    for key, value in JOINT_LIMITS.items():
        i = JOINT_INDEX[key]
        bounds.setLow(i, value[0])
        bounds.setHigh(i, value[1])
        print("Setting bound for the %sth joint: %s, bound: %s" %
              (i, key, value))
    space.setBounds(bounds)
    return space
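initialize_space relies on JOINT_LIMITS and JOINT_INDEX globals that are not included; plausible placeholder definitions for a 7-DOF arm (names and limits are made up):

# Illustrative globals assumed by initialize_space(); values are placeholders.
JOINT_INDEX = {'joint_%d' % i: i for i in range(7)}
JOINT_LIMITS = {name: (-3.14, 3.14) for name in JOINT_INDEX}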
Example #11
 def setup_ompl(self):
     self.space = ob.RealVectorStateSpace(dim=self.space_dimension)
     bounds = ob.RealVectorBounds(self.space_dimension)
     for i in range(self.space_dimension):
         bounds.setLow(i, self.joint_constraints[0])
         bounds.setHigh(i, self.joint_constraints[1])
     self.space.setBounds(bounds)
     self.si = ob.SpaceInformation(self.space)
     self.motion_validator = MotionValidator(self.si)
     self.motion_validator.set_link_dimensions(self.link_dimensions)
     self.motion_validator.set_workspace_dimension(self.workspace_dimension)
     self.motion_validator.set_max_distance(self.max_velocity, self.delta_t)
     #self.si.setStateValidityChecker(self.motion_validator.isValid)
     self.si.setMotionValidator(self.motion_validator)
     self.si.setup()
     self.problem_definition = ob.ProblemDefinition(self.si)
Example #12
    def __init__(self, ppm_file):
        self.ppm_ = ou.PPM()
        self.ppm_.loadFile(ppm_file)
        space = ob.RealVectorStateSpace()
        space.addDimension(0.0, self.ppm_.getWidth())
        space.addDimension(0.0, self.ppm_.getHeight())
        self.maxWidth_ = self.ppm_.getWidth() - 1
        self.maxHeight_ = self.ppm_.getHeight() - 1
        self.ss_ = og.SimpleSetup(space)

        # set state validity checking for this space
        self.ss_.setStateValidityChecker(ob.StateValidityCheckerFn(
            partial(Plane2DEnvironment.isStateValid, self)))
        space.setup()
        self.ss_.getSpaceInformation().setStateValidityCheckingResolution( \
            1.0 / space.getMaximumExtent())
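The constructor above registers Plane2DEnvironment.isStateValid, which is not shown here. In OMPL's Point2DPlanning demo a checker of this sort treats near-white pixels of the loaded PPM image as free space; a sketch under that assumption:

    def isStateValid(self, state):
        # A state is valid when the corresponding image pixel is (near-)white.
        w = min(int(state[0]), self.maxWidth_)
        h = min(int(state[1]), self.maxHeight_)
        c = self.ppm_.getPixel(h, w)
        return c.red > 127 and c.green > 127 and c.blue > 127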
Example #13
def plan():
    N = 10
    # create an R^2 state space
    space = ob.RealVectorStateSpace(2)
    # set lower and upper bounds
    bounds = ob.RealVectorBounds(2)
    bounds.setLow(0)
    bounds.setHigh(N)
    space.setBounds(bounds)
    # create a simple setup object
    ss = og.SimpleSetup(space)
    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))
    start = ob.State(space)
    start[0] = 0  # random.randint(0, int(N / 2))
    start[1] = 0  # random.randint(0, int(N / 2))
    goal = ob.State(space)
    goal[0] = N  # random.randint(int(N / 2), N)
    goal[1] = N  # random.randint(int(N / 2), N)
    ss.setStartAndGoalStates(start, goal)
    planner = Astar(ss.getSpaceInformation())
    ss.setPlanner(planner)

    result = ss.solve(.01)
    if result:
        if result.getStatus() == ob.PlannerStatus.APPROXIMATE_SOLUTION:
            print("Solution is approximate")
        matrix = ss.getSolutionPath().printAsMatrix()
        print(matrix)
        verts = []
        for line in matrix.split("\n"):
            x = []
            for item in line.split():
                x.append(float(item))
            if len(x) != 0:
                verts.append(list(x))
        # print(verts)
        plt.axis([0, N, 0, N])
        x = []
        y = []
        for i in range(0, len(verts)):
            x.append(verts[i][0])
            y.append(verts[i][1])
        # plt.plot(verts[i][0], verts[i][1], 'r*-')
        plt.plot(x, y, 'ro-')
        plt.show()
Example #14
def plan():
    # create a real-vector state space
    space = ob.RealVectorStateSpace(SPACE_DIM)
    # print(space)

    # set lower and upper bounds
    bounds = ob.RealVectorBounds(SPACE_DIM)
    for i in range(SPACE_DIM):
        bounds.setLow(i, -1)
        bounds.setHigh(i, 1)
    space.setBounds(bounds)

    # create a simple setup object
    ss = og.SimpleSetup(space)

    # set planner
    planner = og.RRTConnect(ss.getSpaceInformation())
    ss.setPlanner(planner)

    ss.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    start = ob.State(space)
    # we can pick a random start state...
    start.random()
    start_state = start.get()
    for i in range(SPACE_DIM):
        # start_state[i] = 0.1
        print(start_state[i])

    goal = ob.State(space)
    # we can pick a random goal state...
    goal.random()

    ss.setStartAndGoalStates(start, goal)

    # default parameters
    solved = ss.solve(1.0)

    if solved:
        # try to shorten the path
        ss.simplifySolution()
        # print the simplified path
        print(ss.getSolutionPath())
Example #15
def rrt_star_traj(start_conf, goal_conf, env_params, env, robot,
                  planner_params, obs_params):
    #RRTstar setup
    space = ob.RealVectorStateSpace(2)
    bounds = ob.RealVectorBounds(2)
    bounds.setLow(env_params['x_lims'][0])
    bounds.setHigh(env_params['x_lims'][1])
    space.setBounds(bounds)

    init_planner = RRTStar(space, bounds, env, robot, planner_params,
                           obs_params)
    init_path = init_planner.plan(start_conf, goal_conf, 4.0)
    th_init = path_to_traj_avg_vel(init_path, planner_params['total_time_sec'],
                                   planner_params['dof'], device)
    start_vel = torch.tensor([[0., 0.]], device=device)
    goal_vel = torch.tensor([[0., 0.]], device=device)
    start = torch.cat((start_conf, start_vel), dim=1)
    goal = torch.cat((goal_conf, goal_vel), dim=1)
    return start, goal, th_init
Example #16
def test_rigid(calc_time=0.01):
    space = ob.RealVectorStateSpace(2)
    space.setBounds(0, WIDTH)
    si = ob.SpaceInformation(space)
    si.setStateValidityChecker(ValidityChecker(si))
    si.setup()
    start = ob.State(space)
    goal = ob.State(space)
    start[0] = 0.0
    start[1] = 0.0
    goal[0] = WIDTH - 1
    goal[1] = WIDTH - 1
    pdef = ob.ProblemDefinition(si)
    pdef.setStartAndGoalStates(start, goal)
    planner = og.RRTstar(si)
    planner.setProblemDefinition(pdef)
    planner.setup()
    status = planner.solve(calc_time)
    if pdef.hasExactSolution():
        cost = pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()
        print(cost)
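Several snippets (including the one above) construct a ValidityChecker object instead of wrapping a plain function. A minimal sketch of such a class; the circular obstacle is a made-up placeholder:

class ValidityChecker(ob.StateValidityChecker):
    def isValid(self, state):
        # Reject states inside a hypothetical disk of radius 0.25 centered at
        # (0.5, 0.5); real code would query the actual environment instead.
        return ((state[0] - 0.5) ** 2 + (state[1] - 0.5) ** 2) ** 0.5 > 0.25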
Example #17
    def __init__(self, robot_env: DifferentialDriveEnv):
        self.robot_env = robot_env
        bounds = self.robot_env.get_bounds()
        self.state_bounds = bounds['state_bounds']
        self.control_bounds = bounds['control_bounds']

        # add the state space
        space = ob.RealVectorStateSpace(len(self.state_bounds))
        bounds = ob.RealVectorBounds(len(self.state_bounds))
        # add state bounds
        for k, v in enumerate(self.state_bounds):
            bounds.setLow(k, float(v[0]))
            bounds.setHigh(k, float(v[1]))
        space.setBounds(bounds)
        self.space = space
        # add the control space
        cspace = oc.RealVectorControlSpace(space, len(self.control_bounds))
        # set the bounds for the control space
        cbounds = ob.RealVectorBounds(len(self.control_bounds))
        for k, v in enumerate(self.control_bounds):
            cbounds.setLow(k, float(v[0]))
            cbounds.setHigh(k, float(v[1]))
        cspace.setBounds(cbounds)
        self.cspace = cspace
        # define a simple setup class
        self.ss = oc.SimpleSetup(cspace)
        self.ss.setStateValidityChecker(ob.StateValidityCheckerFn(
            partial(self.isStateValid, self.ss.getSpaceInformation())))
        self.ss.setStatePropagator(oc.StatePropagatorFn(self.propagate))

        # set the planner
        si = self.ss.getSpaceInformation()
        planner = oc.SST(si)
        self.ss.setPlanner(planner)
        si.setPropagationStepSize(.1)

        self.start = None
        self.goal = None
        self.planning_time = 0.0
        self.path = None
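The SimpleSetup above registers self.propagate as the state propagator, but its body is not included. A generic sketch of such a callback, assuming numpy is imported as np and that DifferentialDriveEnv exposes a hypothetical motion_step(q, u, dt) method:

    def propagate(self, start, control, duration, state):
        # Integrate the robot dynamics for `duration` and write the result
        # into `state`; a real implementation may use smaller substeps.
        q = np.array([start[i] for i in range(len(self.state_bounds))])
        u = np.array([control[j] for j in range(len(self.control_bounds))])
        q_next = self.robot_env.motion_step(q, u, duration)  # assumed API
        for i, v in enumerate(q_next):
            state[i] = float(v)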
Example #18
def plan():
  # create an R^2 state space
  space = ob.RealVectorStateSpace(2)

  # set lower and upper bounds
  bounds = ob.RealVectorBounds(2)
  bounds.setLow(-2.5)
  bounds.setHigh(2.5)
  space.setBounds(bounds)

  # create a simple setup object
  ss = og.SimpleSetup(space)
  ss.setStateValidityChecker(ValidityChecker(ss.getSpaceInformation()))
  start = ob.State(space)
  start()[0] = 0.0
  start()[1] = -2.0
  goal = ob.State(space)
  goal()[0] = 0.0
  goal()[1] = 2.0
  ss.setStartAndGoalStates(start, goal, .05)

  # set the planner
  planner = DPMPPlanner(ss.getSpaceInformation(), 15, 50)
  ss.setPlanner(planner)

  result = ss.solve(10.0)
  if result:
    if result.getStatus() == ob.PlannerStatus.APPROXIMATE_SOLUTION:
      print("Solution is approximate")

    # try to shorten the path
    # ss.simplifySolution()

    path = ss.getSolutionPath()
    print(path)
    plot_solution(path)
Example #19
def test_convert_state_to_robot_state():
    space = ob.RealVectorStateSpace(7)
    state = ob.State(space)
    state[0] = 0
    state[1] = -0.55
    state[2] = 0
    state[3] = 0.75
    state[4] = 0
    state[5] = 1.26
    state[6] = 0
    # print("The list of states is: %s" % state)

    robot_state = convertStateToRobotState(state)
    expected_joint_names = [
        'right_s0', 'right_s1', 'right_e0', 'right_e1', 'right_w0', 'right_w1',
        'right_w2'
    ]
    expected_joint_values = [0.0, -0.55, 0.0, 0.75, 0.0, 1.26, 0.0]
    assert robot_state.joint_state.name == expected_joint_names, \
        "joint names do not match! Expected: %s, Actual %s" % (expected_joint_names, robot_state.joint_state.name)
    assert robot_state.joint_state.position == expected_joint_values, \
        "joint values do not match! Expected: %s, Actual %s" % (expected_joint_values, robot_state.joint_state.position)

    print("Test for converting AbstractState to RobotState passed!")
Example #20
    def __init__(self,
                 map,
                 pose,
                 target,
                 max_time,
                 objective='duration',
                 planner=og.RRTstar,
                 threshold=0.9,
                 tolerance=0.3,
                 planner_options={'range': 0.45}):
        space = ob.RealVectorStateSpace(2)
        bounds = ob.RealVectorBounds(2)
        bounds.setLow(0)
        bounds.setHigh(map.size)
        space.setBounds(bounds)
        s = ob.State(space)
        t = ob.State(space)

        self.theta = pose[-1]

        for arg, state in zip([pose[:2], target], [s, t]):
            for i, x in enumerate(arg):
                state[i] = x

        ss = og.SimpleSetup(space)
        ss.setStartAndGoalStates(s, t, tolerance)

        si = ss.getSpaceInformation()
        ss.setStateValidityChecker(
            ob.StateValidityCheckerFn(partial(valid, si)))

        self.motion_val = EstimatedMotionValidator(si,
                                                   map,
                                                   threshold,
                                                   theta=self.theta)
        si.setMotionValidator(self.motion_val)

        if objective == 'duration':
            self.do = DurationObjective(si, map, self.theta)
        elif objective == 'survival':
            self.do = SurvivalObjective(si, map, self.theta)
        elif objective == 'balanced':
            self.do = getBalancedObjective1(si, map, self.theta)
        else:  # the objective is the euclidean path length
            self.do = ob.PathLengthOptimizationObjective(si)
        ss.setOptimizationObjective(self.do)

        # RRTstar BITstar BFMT RRTsharp
        p = planner(si)
        if 'range' in planner_options:
            try:
                p.setRange(planner_options.get('range'))
            except AttributeError:
                pass

        ss.setPlanner(p)
        ss.setup()

        self.ss = ss
        self.max_time = max_time
        self.map = map
        self.s = pose
        self.t = list(target) + [pose[-1]]
        self.planner_options = planner_options
        if planner == og.RRTstar:
            self.planner_name = 'RRT*'
        else:
            self.planner_name = ''
        self.threshold = threshold
        self.tolerance = tolerance
        self.comp_duration = 0
        self.objective = objective
Example #21
def isStateValid_R2(state):
    return boxConstraint(state[0], state[1])


if __name__ == "__main__":
    # Setup SE2
    SE2 = ob.SE2StateSpace()
    bounds = ob.RealVectorBounds(2)
    bounds.setLow(0)
    bounds.setHigh(1)
    SE2.setBounds(bounds)
    si_SE2 = ob.SpaceInformation(SE2)
    si_SE2.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid_SE2))

    # Setup Quotient-Space R2
    R2 = ob.RealVectorStateSpace(2)
    R2.setBounds(0, 1)
    si_R2 = ob.SpaceInformation(R2)
    si_R2.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid_R2))

    # Create vector of spaceinformationptr
    si_vec = og.vectorSpaceInformation()
    si_vec.append(si_R2)
    si_vec.append(si_SE2)

    # Define Planning Problem
    start_SE2 = ob.State(SE2)
    goal_SE2 = ob.State(SE2)
    start_SE2().setXY(0, 0)
    start_SE2().setYaw(0)
    goal_SE2().setXY(1, 1)
Example #22
            pcp.set_alpha(0.7)
            pcc.set_linestyle('dashed')
            pcp.set_linestyle('dashed')
            pcc.set_edgecolor('#73cdc9')
            pcp.set_edgecolor('#73cdc9')
            pcc.set_linewidth(0.8)
            pcp.set_linewidth(0.8)
            pcc.set_joinstyle('round')
            pcp.set_joinstyle('round')
        else:
            pcc.set_color('gray')
            pcp.set_color('gray')
        ax.add_collection(pcc)
        ax.add_collection(pcp)
    if args.plannerdata:
        pd = ob.PlannerData(ob.SpaceInformation(ob.RealVectorStateSpace(2)))
        pds = ob.PlannerDataStorage()
        pds.load(args.plannerdata, pd)
        pd.computeEdgeWeights()

        # Extract the graphml representation of the planner data
        graphml = pd.printGraphML()
        f = open("graph.graphml", 'w')
        f.write(graphml)
        f.close()

        # Load the graphml data using graph-tool
        graph = gt.load_graph("graph.graphml", fmt="xml")
        os.remove("graph.graphml")

        edgeweights = graph.edge_properties["weight"]
Example #23
def plan(mapfile,start_node, goal_node,runTime, plannerType, objectiveType, d, fname):

	boundary, blocks = load_map(mapfile)
	boundary[:,:3] = boundary[:,:3] +0.0005
	boundary[:,3:] = boundary[:,3:] -0.0005
	alpha = 1.05
	blocks = blocks*alpha
# Construct the robot state space in which we're planning.
	space = ob.RealVectorStateSpace(3)
	sbounds = ob.RealVectorBounds(3)
	sbounds.low[0] =float(boundary[0,0])
	sbounds.high[0] =float(boundary[0,3])

	sbounds.low[1] = float(boundary[0,1])
	sbounds.high[1] = float(boundary[0,4])

	sbounds.low[2] = float(boundary[0,2])
	sbounds.high[2] = float(boundary[0,5])

	space.setBounds(sbounds)
	# Construct a space information instance for this state space
	si = ob.SpaceInformation(space)

	# Set the object used to check which states in the space are valid
	validityChecker = ValidityChecker(si, boundary, blocks)
	si.setStateValidityChecker(validityChecker)

	mv = MyMotionValidator(si, boundary, blocks)
	si.setMotionValidator(mv)

	si.setup()

	# Set our robot's starting state
	start = ob.State(space)
	start[0] = start_node[0]
	start[1] = start_node[1]
	start[2] = start_node[2]
	# Set our robot's goal state 
	goal = ob.State(space)
	goal[0] = goal_node[0]
	goal[1] = goal_node[1]
	goal[2] = goal_node[2]

	# Create a problem instance
	pdef = ob.ProblemDefinition(si)

	# Set the start and goal states
	pdef.setStartAndGoalStates(start, goal)

	# Create the optimization objective specified by our command-line argument.
	# This helper function is simply a switch statement.
	pdef.setOptimizationObjective(allocateObjective(si, objectiveType))

	# Construct the optimal planner specified by our command line argument.
	# This helper function is simply a switch statement.
	optimizingPlanner = allocatePlanner(si,d, plannerType)

	# Set the problem instance for our planner to solve
	optimizingPlanner.setProblemDefinition(pdef)
	optimizingPlanner.setup()

	solved = "Approximate solution"
	# attempt to solve the planning problem in the given runtime
	t1 = tic()
	while(str(solved) == 'Approximate solution'):

		solved = optimizingPlanner.solve(runTime)
		# print(pdef.getSolutionPath().printAsMatrix())
	t2 = toc(t1)
	p = pdef.getSolutionPath()
	ps = og.PathSimplifier(si)
	ps.simplifyMax(p)

	if solved:
	 # Output the length of the path found
		print('{0} found solution of path length {1:.4f} with an optimization ' \
		 'objective value of {2:.4f}'.format( \
		 optimizingPlanner.getName(), \
		 pdef.getSolutionPath().length(), \
		 pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()))

		# If a filename was specified, output the path as a matrix to
		# that file for visualization
	env = mapfile.split(".")[1].split("/")[-1]
	fname = fname + "{}_{}_{}_{}.txt".format(env, d,np.round(pdef.getSolutionPath().length(),2),np.round(t2,2))

	if fname:
		with open(fname, 'w') as outFile:
		 outFile.write(pdef.getSolutionPath().printAsMatrix())

	path = np.genfromtxt(fname)
	print("The found path : ")
	print(path)
	pathlength = np.sum(np.sqrt(np.sum(np.diff(path,axis=0)**2,axis=1)))
	print("pathlength : {}".format(pathlength))
	fig, ax, hb, hs, hg = draw_map(boundary, blocks, start_node, goal_node)  
	ax.plot(path[:,0],path[:,1],path[:,2],'r-')
	plt.show()
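The tic()/toc() timing helpers used above are not defined in the snippet; a trivial sketch of what they presumably do:

import time

def tic():
    # Return a start timestamp in seconds.
    return time.time()

def toc(t_start):
    # Return the elapsed time since t_start, in seconds.
    return time.time() - t_start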
Example #24
        # dis = abs(st1[0] - st0[0]) + abs(st1[1]-st0[1])
#         print(dis)
#         dis = 1
        return float(dis)

# fmt_star
success = 0
total = 0
total_length = 0
total_collision_detection = 0
total_collision = 0

for i in range(0,2):
    print("here")
#     state space
    space = ob.RealVectorStateSpace(4)
    space = myStateSpace()
#     space.distance = myDistance
    # set lower and upper bounds
    bounds = ob.RealVectorBounds(4)
    bounds.setLow(0, 0)
    bounds.setHigh(0, 1)
    bounds.setLow(1, 0)
    bounds.setHigh(1, 1)
    bounds.setLow(2, -1)
    bounds.setHigh(2, 1)
    bounds.setLow(3, -1)
    bounds.setHigh(3, 1)
    space.setBounds(bounds)
    # construct an instance of space information from this state space
    si = ob.SpaceInformation(space)
Example #25
    def plan(self, runTime, plannerType, objectiveType, fname):
        # Construct the robot state space in which we're planning. We're
        # planning in [0,1]x[0,1], a subset of R^2.
        space = ob.RealVectorStateSpace(2)

        # Set the bounds of space to be in [0,1].
        space.setBounds(0.0, 1.0)

        # Construct a space information instance for this state space
        si = ob.SpaceInformation(space)

        # Set the object used to check which states in the space are valid
        validityChecker = ValidityChecker(si)
        si.setStateValidityChecker(validityChecker)

        si.setup()

        # Set our robot's starting state to be the bottom-left corner of
        # the environment, or (0,0).
        start = ob.State(space)
        start[0] = self.start_x
        start[1] = self.start_y

        # Set our robot's goal state to be the top-right corner of the
        # environment, or (1,1).
        goal = ob.State(space)
        goal[0] = self.goal_x
        goal[1] = self.goal_y

        print("goal")
        print(self.goal_x)
        print(self.goal_y)

        # Create a problem instance
        pdef = ob.ProblemDefinition(si)

        # Set the start and goal states
        pdef.setStartAndGoalStates(start, goal)

        # Create the optimization objective specified by our command-line argument.
        # This helper function is simply a switch statement.
        pdef.setOptimizationObjective(allocateObjective(si, objectiveType))

        # Construct the optimal planner specified by our command line argument.
        # This helper function is simply a switch statement.
        optimizingPlanner = allocatePlanner(si, plannerType)

        # Set the problem instance for our planner to solve
        optimizingPlanner.setProblemDefinition(pdef)
        optimizingPlanner.setup()

        # attempt to solve the planning problem in the given runtime
        solved = optimizingPlanner.solve(runTime)

        if solved:
            # Output the length of the path found
            #print("{0} found solution of path length {1:.4f} with an optimization objective value of {2:.4f}".format(optimizingPlanner.getName(), pdef.getSolutionPath().length(), pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()))

            path_str = pdef.getSolutionPath().printAsMatrix()
            #print(type(path_str))
            x = path_str.split()
            self.path = [float(v) for v in x if v]
            #print(self.path)

            #talker()
        else:
            print("No solution found.")

        return self.path
Example #26
def plan(fname=None):
    # Construct the robot state space in which we're planning. We're
    # planning in [0,1]x[0,1], a subset of R^2.
    space = ob.RealVectorStateSpace(2)

    # Set the bounds of space to be in [0,1].
    space.setBounds(0.0, 1.0)

    # Construct a space information instance for this state space
    si = ob.SpaceInformation(space)

    # Set the object used to check which states in the space are valid
    validityChecker = ValidityChecker(si)
    si.setStateValidityChecker(validityChecker)

    si.setup()

    # Set our robot's starting state to be the bottom-left corner of
    # the environment, or (0,0).
    start = ob.State(space)
    start[0] = 0.0
    start[1] = 0.0

    # Set our robot's goal state to be the top-right corner of the
    # environment, or (1,1).
    goal = ob.State(space)
    goal[0] = 1.0
    goal[1] = 1.0

    # Create a problem instance
    pdef = ob.ProblemDefinition(si)

    # Set the start and goal states
    pdef.setStartAndGoalStates(start, goal)

    # Since we want to find an optimal plan, we need to define what
    # is optimal with an OptimizationObjective structure. Un-comment
    # exactly one of the following 6 lines to see some examples of
    # optimization objectives.
    pdef.setOptimizationObjective(getPathLengthObjective(si))
    # pdef.setOptimizationObjective(getThresholdPathLengthObj(si))
    # pdef.setOptimizationObjective(getClearanceObjective(si))
    # pdef.setOptimizationObjective(getBalancedObjective1(si))
    # pdef.setOptimizationObjective(getBalancedObjective2(si))
    # pdef.setOptimizationObjective(getPathLengthObjWithCostToGo(si))

    # Construct our optimal planner using the RRTstar algorithm.
    optimizingPlanner = og.RRTstar(si)

    # Set the problem instance for our planner to solve
    optimizingPlanner.setProblemDefinition(pdef)
    optimizingPlanner.setup()

    # attempt to solve the planning problem within ten seconds of
    # planning time
    solved = optimizingPlanner.solve(10.0)

    if solved:
        # Output the length of the path found
        print("Found solution of path length %g" %
              pdef.getSolutionPath().length())

        # If a filename was specified, output the path as a matrix to
        # that file for visualization
        if fname:
            with open(fname, 'w') as outFile:
                outFile.write(pdef.getSolutionPath().printAsMatrix())
    else:
        print("No solution found.")
Example #27
    def solve(self, ptc):
        pdef = self.getProblemDefinition()
        goal = pdef.getGoal()
        si = self.getSpaceInformation()
        pi = self.getPlannerInputStates()
        st = pi.nextStart()
        while st:
            self.states_.append(st)
            st = pi.nextStart()
        solution = None
        approxsol = 0
        approxdif = 1e6
        start_state = pdef.getStartState(0)
        goal_state = goal.getState()
        start_node = Node(None, start_state)
        start_node.g = start_node.h = start_node.f = 0
        end_node = Node(None, goal_state)
        end_node.g = end_node.h = end_node.f = 0

        open_list = []
        closed_list = []
        heapq.heapify(open_list)
        adjacent_squares = ((0, -1, 3 * math.pi / 2), (0, 1, math.pi / 2),
                            (-1, 0, math.pi), (1, 0, 0), (-1, -1,
                                                          5 * math.pi / 4),
                            (-1, 1, 3 * math.pi / 2), (1, -1, 7 * math.pi / 4),
                            (1, 1, math.pi / 4))

        heapq.heappush(open_list, start_node)
        while len(open_list) > 0 and not ptc():
            current_node = heapq.heappop(open_list)

            if current_node == end_node:  # if we hit the goal
                current = current_node
                path = []
                while current is not None:
                    path.append(current.position)
                    current = current.parent
                for i in range(1, len(path)):
                    self.states_.append(path[len(path) - i - 1])
                solution = len(self.states_)
                break
            closed_list.append(current_node)

            children = []
            for new_position in adjacent_squares:
                node_position = si.allocState()
                # node_position = ob.State()
                if isinstance(si.getStateSpace(),
                              type(ob.ReedsSheppStateSpace(6))):
                    current_node_x = current_node.position.getX()
                    current_node_y = current_node.position.getY()
                    node_position.setXY(current_node_x + new_position[0],
                                        current_node_y + new_position[1])
                    node_position.setYaw(new_position[2])
                if isinstance(si.getStateSpace(),
                              type(ob.RealVectorStateSpace())):
                    node_position[0], node_position[1] = current_node.position[0] + new_position[0],\
                                                         current_node.position[1] + new_position[1]
                # print(node_position.getX(), node_position.getY(), node_position.getYaw())
                # node_position[0], node_position[1] = current_node_x + new_position[0], current_node_y + new_position[1]

                if not si.checkMotion(current_node.position, node_position):
                    continue

                if not si.satisfiesBounds(node_position):
                    continue

                new_node = Node(current_node, node_position)
                children.append(new_node)

            for child in children:
                if child in closed_list:
                    continue
                child.g = current_node.g + 1  # si.distance(child.position, current_node.position)
                child.h = goal.distanceGoal(child.position)
                child.f = child.g + child.h
                if len([i for i in open_list if child == i and child.g >= i.g
                        ]) > 0:
                    continue
                heapq.heappush(open_list, child)
                # open_list.append(child)
        solved = False
        approximate = False
        if not solution:
            solution = approxsol
            approximate = True
        if solution:
            path = og.PathGeometric(si)
            for s in self.states_[:solution]:
                path.append(s)
            pdef.addSolutionPath(path)
            solved = True
        return ob.PlannerStatus(solved, approximate)
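The custom planner above depends on a Node helper class (parent links, g/h/f costs, ordering for heapq) that is not shown. A minimal sketch consistent with how solve() uses it; comparing raw OMPL state objects for equality is a simplification, and real code would compare through the state space:

class Node:
    # Search-tree node wrapping an OMPL state for the A*-style planner above.
    def __init__(self, parent, position):
        self.parent = parent
        self.position = position
        self.g = self.h = self.f = 0

    def __eq__(self, other):
        # Simplified equality on the wrapped state objects.
        return self.position == other.position

    def __lt__(self, other):
        # heapq orders nodes by total estimated cost f = g + h.
        return self.f < other.f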
Example #28
def plan(grid):

    #agent and goal are represented by a point(x,y) and radius
    global x
    global time
    x = grid
    agent: Agent = grid.agent
    goal: Goal = grid.goal

    # Construct the robot state space in which we're planning. R2
    space = ob.RealVectorStateSpace(2)

    # Set the bounds of space to be inside Map
    bounds = ob.RealVectorBounds(2)

    # projection
    pj = ProjectionEvaluator(space)
    print('pj=', pj)

    pj.setCellSizes(list2vec([1.0, 1.0]))
    space.registerDefaultProjection(pj)

    # Set the lower and upper bounds to cover the grid
    bounds.setLow(0, 0)  #set min x to _    0
    bounds.setHigh(0, grid.size.width)  #set max x to _    x width
    bounds.setLow(1, 0)  #set min y to _    0
    bounds.setHigh(1, grid.size.height)  #set max y to _    y height

    space.setBounds(bounds)

    print("bounds=", bounds.getVolume())

    # Construct a space information instance for this state space
    si = ob.SpaceInformation(space)

    # Set the object used to check which states in the space are valid
    si.setStateValidityChecker(ob.StateValidityCheckerFn(isStateValid))

    # Set robot's starting state to agent's position (x,y) -> e.g. (0,0)
    start = ob.State(space)
    start[0] = float(agent.position.x)
    start[1] = float(agent.position.y)

    print(start[0], start[1])
    # Set robot's goal state (x,y) -> e.g. (1.0,0.0)
    goal = ob.State(space)
    goal[0] = float(grid.goal.position.x)
    goal[1] = float(grid.goal.position.y)

    # Create a problem instance
    pdef = ob.ProblemDefinition(si)

    # Set the start and goal states
    pdef.setStartAndGoalStates(start, goal)

    # Create the optimization objective specified by our command-line argument.
    # This helper function is simply a switch statement.
    #pdef.setOptimizationObjective(allocateObjective(si, objectiveType))

    # ******create a planner for the defined space
    planner = og.RRTXstatic(si)

    # set the problem we are trying to solve for the planner
    planner.setProblemDefinition(pdef)

    print(planner)
    #print('checking projection',planner.getProjectionEvaluator())

    print("__________________________________________________________")
    # perform setup steps for the planner
    planner.setup()

    # print the settings for this space
    print("space settings\n")
    print(si.settings())

    print("****************************************************************")
    print("problem settings\n")
    # print the problem settings
    print(pdef)

    # attempt to solve the problem within the allotted planning time
    solved = planner.solve(time)

    # For troubleshooting
    if solved:
        # get the goal representation from the problem definition (not the same as the goal state)
        # and inquire about the found path
        path = pdef.getSolutionPath()
        print("Found solution:\n%s" % path)
        #return trace for _find_path_internal method
        print(
            "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
        )

        if path is None:
            return None

        x = pdef.getSolutionPath().printAsMatrix()
        lst = x.split()
        lst = [int(round(float(x), 0)) for x in lst]

        print(x)
        print(lst)

        trace = []
        for i in range(0, len(lst), 2):
            trace.append(Point(lst[i], lst[i + 1]))
            print(trace)
        return trace
    else:
        print("No solution found")
        return None
Example #29
def plan(runTime, plannerType, objectiveType, fname):
    # Construct the robot state space in which we're planning. We're
    # planning in [0,500]x[0,500], a subset of R^2.
    space = ob.RealVectorStateSpace(2)

    # Set the bounds of space to be in [0,500].
    space.setBounds(0.0, 500.0)

    # Construct a space information instance for this state space
    si = ob.SpaceInformation(space)

    # Set the object used to check which states in the space are valid
    validityChecker = ValidityChecker(si)
    si.setStateValidityChecker(validityChecker)

    si.setup()
 
    # Set our robot's starting state to be random
    start = ob.State(space)
    start[0] = random.randint(0,500)
    start[1] = random.randint(0,500)
 
    while (sqrt((start[0]-center[0])*(start[0]-center[0]) + (start[1]-center[1])*(start[1]-center[1])) - radius < 0):
        start[0] = random.randint(0,500)
        start[1] = random.randint(0,500)

    # Set our robot's goal state to be random 
    goal = ob.State(space)
    goal[0] = random.randint(0,500)
    goal[1] = random.randint(0,500)
    while (sqrt((goal[0]-center[0])*(goal[0]-center[0]) + (goal[1]-center[1])*(goal[1]-center[1])) - radius < 0):
        goal[0] = random.randint(0,500)
        goal[1] = random.randint(0,500)

    # Create a problem instance
    pdef = ob.ProblemDefinition(si)
 
    # Set the start and goal states
    pdef.setStartAndGoalStates(start, goal)
 
    # Create the optimization objective specified by our command-line argument.
    # This helper function is simply a switch statement.
    pdef.setOptimizationObjective(allocateObjective(si, objectiveType))
 
    # Construct the optimal planner specified by our command line argument.
    # This helper function is simply a switch statement.
    optimizingPlanner = allocatePlanner(si, plannerType)
 
    # Set the problem instance for our planner to solve
    optimizingPlanner.setProblemDefinition(pdef)
    optimizingPlanner.setup()
 
    # attempt to solve the planning problem in the given runtime
    solved = optimizingPlanner.solve(runTime)
 
    if solved:
        # Output the length of the path found
        print('{0} found solution of path length {1:.4f} with an optimization ' \
            'objective value of {2:.4f}'.format( \
            optimizingPlanner.getName(), \
            pdef.getSolutionPath().length(), \
            pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()))
 
        # If a filename was specified, output the path as a matrix to
        # that file for visualization
        if fname:
            with open(fname, 'w') as outFile:
                outFile.write(pdef.getSolutionPath().printAsMatrix())
    else:
        print("No solution found.")
Example #30
    def OMPL_plan(self, runTime, plannerType, objectiveType, mult):
        # Construct the robot state space in which we're planning. We're
        # planning in [0,1]x[0,1], a subset of R^2.
        if self.dimension == '2D':
            space = ob.RealVectorStateSpace(2)
            # Set the bounds of space to be in [0,1].
            bounds = ob.RealVectorBounds(2)
            x_bound = []
            y_bound = []
            for idx in range(len(self.region)):
                x_bound.append(self.region[idx][0])
                y_bound.append(self.region[idx][1])

            bounds.setLow(0, min(x_bound))
            bounds.setHigh(0, max(x_bound))
            bounds.setLow(1, min(y_bound))
            bounds.setHigh(1, max(y_bound))

            self.x_bound = (min(x_bound), max(x_bound))
            self.y_bound = (min(y_bound), max(y_bound))
            #test = bounds.getDifference()
            #test = bounds.check()

            space.setBounds(bounds)
            # Set our robot's starting state to be the bottom-left corner of
            # the environment, or (0,0).
            start = ob.State(space)
            start[0] = self.start[0]
            start[1] = self.start[1]
            #start[2] = 0.0

            # Set our robot's goal state to be the top-right corner of the
            # environment, or (1,1).
            goal = ob.State(space)
            goal[0] = self.goal[0]
            goal[1] = self.goal[1]
            #goal[2] = 1.0
        else:
            pass

        # Construct a space information instance for this state space
        si = ob.SpaceInformation(space)

        # Set the object used to check which states in the space are valid

        validityChecker = self.ValidityChecker(si)
        validityChecker.setInteractive()
        validityChecker.obstacle(self.start, self.goal, self.region,
                                 self.obstacle, self.dimension)
        si.setStateValidityChecker(validityChecker)

        si.setup()

        # Create a problem instance
        pdef = ob.ProblemDefinition(si)

        # Set the start and goal states
        pdef.setStartAndGoalStates(start, goal)

        # Create the optimization objective specified by our command-line argument.
        # This helper function is simply a switch statement.
        pdef.setOptimizationObjective(self.allocateObjective(
            si, objectiveType))

        # Construct the optimal planner specified by our command line argument.
        # This helper function is simply a switch statement.
        optimizingPlanner = self.allocatePlanner(si, plannerType)

        # Set the problem instance for our planner to solve
        optimizingPlanner.setProblemDefinition(pdef)

        optimizingPlanner.setRewireFactor(1.1)

        optimizingPlanner.setup()

        # attempt to solve the planning problem in the given runtime
        solved = optimizingPlanner.solve(runTime)

        self.PlannerStates.append(
            (validityChecker.getInteractive(), plannerType))

        if solved:
            # Output the length of the path found
            try:
                print('{0} found solution of path length {1:.4f} with an optimization ' \
                    'objective value of {2:.4f}'.format( \
                    optimizingPlanner.getName(), \
                    pdef.getSolutionPath().length(), \
                    pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()))

                return self.decodeSolutionPath(
                    pdef.getSolutionPath().printAsMatrix(), plannerType,
                    pdef.getSolutionPath().cost(
                        pdef.getOptimizationObjective()).value(), mult)
            except:
                print("No solution found.")
                pass

        else:
            print("No solution found.")