def solve(self, ptc):
    """Random-walk planner.

    Repeatedly draws uniform samples and keeps those reachable from the
    tip of the walk (via checkMotion) until the goal is satisfied or the
    termination condition ``ptc`` fires.  The accepted-state prefix that
    ends at the goal (or, failing that, at the closest state seen) is
    registered with the problem definition as the solution path.

    Returns an ob.PlannerStatus(solved, approximate).
    """
    problem = self.getProblemDefinition()
    space_info = self.getSpaceInformation()
    goal = problem.getGoal()

    # Pull every start state supplied through the planner input states.
    input_states = self.getPlannerInputStates()
    start = input_states.nextStart()
    while start:
        self.states_.append(start)
        start = input_states.nextStart()

    exact_len = None   # prefix length of an exact solution, if found
    best_len = 0       # prefix length of the best approximate path
    best_dist = 1e6    # goal distance of that approximation

    while not ptc():
        candidate = space_info.allocState()
        # Draw a uniform random sample from the state space.
        self.sampler_.sampleUniform(candidate)
        # Only accept samples reachable from the last accepted state.
        if not space_info.checkMotion(self.states_[-1], candidate):
            continue
        self.states_.append(candidate)
        distance = goal.distanceGoal(candidate)
        if goal.isSatisfied(candidate):
            best_dist = distance
            exact_len = len(self.states_)
            break
        if distance < best_dist:
            best_dist = distance
            best_len = len(self.states_)

    # Fall back to the closest-so-far prefix when no exact solution
    # was found; a zero-length prefix means total failure.
    approximate = exact_len is None
    prefix = best_len if approximate else exact_len
    solved = bool(prefix)
    if prefix:
        path = og.PathGeometric(space_info)
        for state in self.states_[:prefix]:
            path.append(state)
        problem.addSolutionPath(path)
    return ob.PlannerStatus(solved, approximate)
# ---- Example #2 ----
 def solve(self, ptc):
     """Single-tree (RRT-style) solve.

     Grows ``self.tree`` toward the concrete goal state via
     ``self.expand`` until it reports GrowState.Reached or ``ptc``
     fires, then reconstructs the path by walking parent links and
     registers it with the problem definition.
     Returns an ob.PlannerStatus(solved, approximate).
     """
     pdef = self.getProblemDefinition()
     si = self.getSpaceInformation()
     pi = self.getPlannerInputStates()
     goal = pdef.getGoal()
     # Drain all provided start states into self.states_.
     st = pi.nextStart()
     while st:
         self.states_.append(st)
         st = pi.nextStart()
     start_state = pdef.getStartState(0)
     goal_state = goal.getState()
     # Root the tree at the (first) start state.
     self.tree.append(Node(None, start_state))
     solution = None
     # Approximate-solution bookkeeping; never updated in this planner,
     # so a timeout yields approxsol == 0 (i.e. no path at all).
     approxsol = 0
     approxdif = 1e6
     while not ptc():
         # expand() grows the tree toward goal_state; Reached means the
         # newest node (self.tree[-1]) connects to the goal.
         if self.expand(self.tree, goal_state) == GrowState.Reached:
             current = self.tree[-1]
             path = []
             # Collect positions from the reached node back to the root.
             while current is not None:
                 path.append(current.position)
                 current = current.parent
             # Append the path in root-to-goal order.  Indices run
             # len-2 .. 0, so the root (the start state, already in
             # self.states_) is deliberately skipped.
             for i in range(1, len(path)):
                 self.states_.append(path[len(path) - i - 1])
             solution = len(self.states_)
             break
     solved = False
     approximate = False
     # No exact solution: fall back to the approximate prefix length
     # (always 0 here, which leaves solved == False below).
     if not solution:
         solution = approxsol
         approximate = True
     if solution:
         path = og.PathGeometric(si)
         for s in self.states_[:solution]:
             path.append(s)
         pdef.addSolutionPath(path)
         solved = True
     return ob.PlannerStatus(solved, approximate)
# ---- Example #3 ----
    def solve(self, ptc):
        """A*-style solve over an 8-connected SE(2) grid.

        Pops the lowest-f node from a binary heap, expands its eight
        grid neighbours (yaw aligned with the move direction), and
        reconstructs the path by walking parent links once the goal
        node is popped.  The path is registered with the problem
        definition.  Returns an ob.PlannerStatus(solved, approximate).
        """
        pdef = self.getProblemDefinition()
        goal = pdef.getGoal()
        si = self.getSpaceInformation()
        pi = self.getPlannerInputStates()
        # Drain all provided start states into self.states_.
        st = pi.nextStart()
        while st:
            self.states_.append(st)
            st = pi.nextStart()
        solution = None
        # Approximate-solution bookkeeping; never updated in this
        # planner, so a timeout yields approxsol == 0 (no path).
        approxsol = 0
        approxdif = 1e6
        start_state = pdef.getStartState(0)
        goal_state = goal.getState()
        start_node = Node(None, start_state)
        # BUG FIX: the original assigned the heuristic to the raw state
        # object ("start_state.h") instead of the node ("start_node.h"),
        # leaving start_node.h unset.
        start_node.g = start_node.h = start_node.f = 0
        end_node = Node(None, goal_state)
        end_node.g = end_node.h = end_node.f = 0

        open_list = []
        closed_list = []
        # (dx, dy, heading in degrees) for the eight grid moves.
        # NOTE(review): the (-1, 0) move reuses heading 0 — presumably
        # it should be 180; confirm against the intended kinematics.
        adjacent_squares = ((1, 0, 0), (1, 1, 45), (0, 1, 90), (-1, 1, 135),
                            (-1, 0, 0), (-1, -1, -135), (0, -1, -90), (1, -1,
                                                                       -45))

        heapq.heappush(open_list, start_node)
        while open_list and not ptc():
            current_node = heapq.heappop(open_list)
            if current_node == end_node:  # goal node popped: done
                # Collect positions from the goal node back to the
                # start, then append them in start-to-goal order.  The
                # indices run len-2 .. 0, so the start node (already in
                # self.states_) is deliberately skipped.
                current = current_node
                path = []
                while current is not None:
                    path.append(current.position)
                    current = current.parent
                for i in range(1, len(path)):
                    self.states_.append(path[len(path) - i - 1])
                solution = len(self.states_)
                break
            closed_list.append(current_node)

            children = []
            for new_position in adjacent_squares:
                # Allocate a neighbouring state one grid cell away with
                # the yaw facing the direction of travel.
                node_position = si.allocState()
                current_node_x = current_node.position.getX()
                current_node_y = current_node.position.getY()
                node_position.setXY(current_node_x + new_position[0],
                                    current_node_y + new_position[1])
                node_position.setYaw(new_position[2] * math.pi / 180)

                # Discard neighbours that are unreachable or out of bounds.
                if not si.checkMotion(current_node.position, node_position):
                    continue
                if not si.satisfiesBounds(node_position):
                    continue
                children.append(Node(current_node, node_position))

            for child in children:
                if child in closed_list:
                    continue
                # Axis-aligned moves cost 1, diagonal moves sqrt(2)
                # (diagonals are the moves whose yaw is not a multiple
                # of 90 degrees).
                if child.position.getYaw() % (math.pi / 2) == 0:
                    child.g = current_node.g + 1
                else:
                    child.g = current_node.g + math.sqrt(2)
                child.h = goal.distanceGoal(child.position)
                child.f = child.g + child.h
                # Skip if an equal-or-cheaper copy is already queued.
                if any(child == i and child.g >= i.g for i in open_list):
                    continue
                heapq.heappush(open_list, child)

        solved = False
        approximate = False
        if not solution:
            solution = approxsol
            approximate = True
        if solution:
            path = og.PathGeometric(si)
            for s in self.states_[:solution]:
                path.append(s)
            pdef.addSolutionPath(path)
            solved = True
        return ob.PlannerStatus(solved, approximate)
# ---- Example #4 ----
  def solve(self, ptc):
    """Plan by MAP inference (D-PMP, Python 2).

    Builds an MRF over T waypoint variables with obstacle-clearance,
    start-attachment, pairwise-smoothness, and goal-attachment
    potentials, then runs particle max-product (DPMP_infer) and turns
    the MAP assignment into a geometric path.

    NOTE(review): ``ptc`` is accepted but never consulted — termination
    is governed solely by the ``max_iters`` parameter; confirm this is
    intended.
    """
    pdef = self.getProblemDefinition()
    si = self.getSpaceInformation()
    space = si.getStateSpace()
    svc = si.getStateValidityChecker()

    goal = pdef.getGoal()
    # Draw one concrete goal state from the goal (project helper).
    self._goal_state = sample_goal(space, goal)

    # This planner only supports a single start state.
    assert pdef.getStartStateCount() == 1
    self._start_state = pdef.getStartState(0)

    print 'Start/goal distance:', si.distance(self._start_state, self._goal_state)

    # One-sided Huber-style penalty on clearance: linear while in
    # collision (clearance < 0), quadratic inside the epsilon safety
    # margin, zero beyond it.  Always <= 0.
    def huber(clearance, epsilon):
      if clearance < 0:
        return clearance - epsilon / 2.0
      elif (0 <= clearance) and (clearance <= epsilon):
        return -1.0 / (2.0 * epsilon) * ((clearance - epsilon) ** 2)
      else:
        return 0.0

    # Planner parameters, exposed through the OMPL params mechanism.
    T = self.params()['n_waypoints'].getValue()
    n_particles = self.params()['n_particles'].getValue()
    max_iters = self.params()['max_iters'].getValue()
    clearance = self.params()['clearance'].getValue()
    avoidance_weight = self.params()['avoidance_weight'].getValue()
    sig_rw = self.params()['sig_rw'].getValue()
    uniform_proposal_weight = self.params()['uniform_proposal_weight'].getValue()

    # One MRF node per waypoint index 0..T-1.
    nodes = range(T)

    # Unary obstacle potentials: penalize low clearance at each waypoint.
    obstacle_pots = {
      'obs_{}'.format(t):
        Factor([t], lambda x_t: avoidance_weight * huber(svc.clearance(x_t), clearance))
      for t in nodes
    }

    # Pairwise smoothness: negative squared distance between waypoints.
    def pw_pot(x_u, x_v):
      return -1 * (si.distance(x_u, x_v) ** 2)

    # Variant that hard-rejects pairs whose connecting motion is invalid.
    # NOTE(review): defined but not wired into any factor below.
    def pw_pot_check_motion(x_u, x_v):
      if si.checkMotion(x_u, x_v):
        return -1 * (si.distance(x_u, x_v) ** 2)
      else:
        return -1e32

    # Anchor waypoint 0 near the start state.
    start_pot = {
      'motion_start':
        Factor([0], lambda x_0: pw_pot(self._start_state, x_0))
    }
    # Chain consecutive waypoints together.
    motion_pots = {
      'motion_t{}'.format(t): Factor([t, t + 1], pw_pot)
      for t in range(T - 1)
    }
    # Anchor the last waypoint near the goal region.
    goal_pot = {
      'motion_goal':
        Factor([T - 1], lambda x_T: -1 * goal.distanceGoal(x_T) ** 2)
    }

    # Create the MRF
    mrf = MRF(nodes, merge_dicts(
      obstacle_pots,
      start_pot,
      motion_pots,
      goal_pot))

    # Set up message passing, scheduled as a tree rooted at the start factor.
    maxsum = MaxSumMP(mrf, sched=tree_sched(mrf, 'motion_start'))

    # Initial particle set: one particle per node, linearly interpolated
    # between start and goal.
    x0 = {t: [interpolate(space,
                          self._start_state,
                          self._goal_state,
                          float(t) / T)]
          for t in mrf.nodes}

    # Proposal 1: fresh uniform samples from the whole space.
    def uniform_proposal(mrf, nAdd, x):
      return {v: [sample_uniform(space, self.sampler_)
                  for _ in range(nAdd[v])]
              for v in mrf.nodes}

    # Proposal 2: Gaussian random-walk around existing particles,
    # with standard deviation sig_rw.
    def rw_proposal(mrf, nAdd, x):
      return {v: [sample_gaussian(space,
                                  self.sampler_,
                                  random.choice(x[v]),
                                  sig_rw)
                  for _ in range(nAdd[v])]
              for v in mrf.nodes}

    # def callback(info):
    #   print info['iter']

    # Run D-PMP: mixture of the two proposals, lazy-greedy particle
    # selection, fixed iteration budget (conv_tol disabled).
    self._xMAP, self._xParticles, self._stats = DPMP_infer(
      mrf,
      x0,
      n_particles,
      mixture_proposal([uniform_proposal, rw_proposal],
                       weights=[uniform_proposal_weight,
                                1.0 - uniform_proposal_weight]),
      SelectLazyGreedy(),
      maxsum,
      conv_tol=None,
      max_iters=max_iters,
      # callback=callback,
      verbose=True)

    # Stitch start + MAP waypoints + goal into a path and validate it.
    path_states = ([self._start_state]
                 + [self._xMAP[t] for t in range(T)]
                 + [self._goal_state])
    path = construct_path(si, path_states)
    path_valid = path.check()
    pdef.addSolutionPath(path)
    print 'DPMP path valid:', path_valid

    # Solved iff the stitched path passed validity checking; never
    # reported as approximate.
    return ob.PlannerStatus(path_valid, False)
# ---- Example #5 ----
    def solve(self, ptc):
        """RRT-Connect style solve.

        Grows two trees — one rooted at the start, one at the goal —
        alternately extending one toward a random sample and trying to
        connect the other to the newly added node.  When the trees
        meet, the path is stitched from the midpoint outward and
        registered with the problem definition.
        Returns an ob.PlannerStatus(solved, approximate).
        """
        pdef = self.getProblemDefinition()
        si = self.getSpaceInformation()
        pi = self.getPlannerInputStates()
        goal = pdef.getGoal()
        # Drain all provided start states into self.states_.
        st = pi.nextStart()
        while st:
            self.states_.append(st)
            st = pi.nextStart()
        start_state = pdef.getStartState(0)
        goal_state = goal.getState()
        self.treeA.append(Node(None, start_state))
        self.treeB.append(Node(None, goal_state))
        solution = None
        # Approximate-solution bookkeeping; never updated here, so a
        # timeout yields approxsol == 0 (no path at all).
        approxsol = 0
        # Single reusable state for the random samples.
        random_state = si.allocState()
        while not ptc():
            # Extend tree A toward a fresh uniform sample.
            self.sampler_.sampleUniform(random_state)
            if self.expand(self.treeA, random_state) != GrowState.Trapped:
                # Try to connect tree B all the way to A's newest node.
                if self.connect(self.treeB,
                                self.treeA[-1].position) == GrowState.Reached:
                    # The trees swap roles every iteration, so work out
                    # which one is currently rooted at the start.
                    if self.treeA[0].position == start_state:
                        start_tree = self.treeA
                        end_tree = self.treeB
                        print('A to start')
                    else:
                        start_tree = self.treeB
                        end_tree = self.treeA
                        print('B to start')
                    # Walk from each tree's meeting node back to its
                    # root, then join: start->mid (reversed) + mid->goal.
                    path_from_mid_to_start = []
                    path_from_mid_to_end = []
                    current = start_tree[-1]
                    while current.position != start_state:
                        path_from_mid_to_start.append(current.position)
                        current = current.parent
                    current = end_tree[-1]
                    while current is not None:
                        path_from_mid_to_end.append(current.position)
                        current = current.parent
                    path = path_from_mid_to_start[::-1] + path_from_mid_to_end
                    self.states_.extend(path)
                    solution = len(self.states_)
                    break
            # Alternate the roles of the two trees.
            self.treeA, self.treeB = self.treeB, self.treeA

        solved = False
        approximate = False
        if not solution:
            solution = approxsol
            approximate = True
        if solution:
            path = og.PathGeometric(si)
            for s in self.states_[:solution]:
                path.append(s)
            pdef.addSolutionPath(path)
            solved = True
        return ob.PlannerStatus(solved, approximate)
# ---- Example #6 ----
  def solve(self, ptc):
    """Plan by MAP inference (D-PMP) with live matplotlib visualization
    (Python 2).

    Builds an MRF over ``self.n_waypoints`` waypoint variables with
    obstacle, start/goal and smoothness potentials, runs particle
    max-product for 25 iterations while plotting the particle set and
    current MAP path each iteration, then registers the stitched path.

    NOTE(review): ``ptc`` is accepted but never consulted — termination
    is governed solely by the hard-coded max_iters=25; confirm intended.
    """
    pdef = self.getProblemDefinition()
    si = self.getSpaceInformation()
    space = si.getStateSpace()
    svc = si.getStateValidityChecker()

    goal = pdef.getGoal()
    # Draw one concrete goal state from the goal (project helper).
    goal_state = sample_goal(space, goal)

    # This planner only supports a single start state.
    assert pdef.getStartStateCount() == 1
    start_state = pdef.getStartState(0)

    # One-sided Huber-style penalty on clearance: linear while in
    # collision (clearance < 0), quadratic inside the epsilon margin,
    # zero beyond it.  Always <= 0.
    def huber(clearance, epsilon):
      if clearance < 0:
        return clearance - epsilon / 2.0
      elif (0 <= clearance) and (clearance <= epsilon):
        return -1.0 / (2.0 * epsilon) * ((clearance - epsilon) ** 2)
      else:
        return 0.0

    # One MRF node per waypoint index 0..T-1.
    T = self.n_waypoints
    nodes = range(T)

    # Unary obstacle potentials with a hard-coded 0.1 safety margin.
    obstacle_pots = {
      'obs_{}'.format(t):
        Factor([t], lambda x_t: huber(svc.clearance(x_t), 0.1))
      for t in nodes
    }

    # Anchor waypoint 0 near the start state.
    start_pot = {
      'motion_start':
        Factor([0], lambda x_0: -1 * si.distance(start_state, x_0) ** 2)
    }
    # Pairwise smoothness: negative squared distance between
    # consecutive waypoints.
    motion_pots = {
      'motion_t{}'.format(t):
        Factor([t, t + 1], lambda x_u, x_v: -1 * si.distance(x_u, x_v) ** 2)
      for t in range(T - 1)
    }
    # Anchor the last waypoint near the goal region.
    goal_pot = {
      'motion_goal':
        Factor([T - 1], lambda x_T: -1 * goal.distanceGoal(x_T) ** 2)
    }

    # Create the MRF
    mrf = MRF(nodes, merge_dicts(
      obstacle_pots,
      start_pot,
      motion_pots,
      goal_pot))

    # Set up message passing, scheduled as a tree rooted at the start factor.
    maxsum = MaxSumMP(mrf, sched=tree_sched(mrf, 'motion_start'))

    # Initial particles: one per node, interpolated start->goal.
    x0 = {t: [interpolate(space, start_state, goal_state, float(t) / T)]
          for t in mrf.nodes}

    # Proposal 1: fresh uniform samples from the whole space.
    # (Weighted 0.0 in the mixture below, so effectively disabled.)
    def uniform_proposal(mrf, nAdd, x):
      return {v: [sample_uniform(space, self.sampler_)
                  for _ in range(nAdd[v])]
              for v in mrf.nodes}

    # Proposal 2: Gaussian random walk around existing particles with a
    # hard-coded standard deviation of 0.25.
    def rw_proposal(mrf, nAdd, x):
      return {v: [sample_gaussian(space, self.sampler_, random.choice(x[v]), 0.25)
                  for _ in range(nAdd[v])]
              for v in mrf.nodes}

    # Turn on matplotlib's interactive mode
    plt.ion()

    plt.figure(figsize=(8, 8))
    # Per-iteration visualization: scatter all particles (colored by
    # waypoint index) and overlay the current MAP path.  The unit
    # circle drawn here presumably represents the obstacle — confirm
    # against the environment setup.
    def callback(info):
      print info['iter']
      x = info['x']
      xMAP = info['xMAP']

      plt.clf()
      plt.axis([-2.5, 2.5, -2.5, 2.5])
      an = np.linspace(0, 2 * np.pi, 100)
      plt.plot(np.cos(an), np.sin(an))

      # All current particles, colored by normalized waypoint index.
      particles = [(x_t[0], x_t[1]) for t in range(T) for x_t in x[t]]
      c = [float(t) / float(T) for t in range(T) for x_t in x[t]]
      plt.scatter(*zip(*particles), s=10, c=c, marker='x')

      # Current MAP path, start to goal.
      MAPparticles = [start_state] + [xMAP[t] for t in range(T)] + [goal_state]
      plt.plot(*zip(*[(s[0], s[1]) for s in MAPparticles]), marker='o')

      plt.xlabel('x_1')
      plt.ylabel('x_2')
      plt.title('Iter. %d' % info['iter'])

      plt.draw()

      # Slow the animation down so each iteration is visible.
      time.sleep(0.25)

    # Run D-PMP: random-walk proposal only (weights [0.0, 1.0]),
    # lazy-greedy particle selection, fixed 25 iterations.
    xMAP, xParticles, stats = DPMP_infer(
      mrf,
      x0,
      self.n_particles,
      mixture_proposal([uniform_proposal, rw_proposal], weights=[0.0, 1.0]),
      SelectLazyGreedy(),
      maxsum,
      conv_tol=None,
      max_iters=25,
      callback=callback,
      verbose=False)

    # Turn off matplotlib's interactive mode
    plt.ioff()

    # Stitch start + MAP waypoints + goal into a path and validate it.
    path_states = [start_state] + [xMAP[t] for t in range(T)] + [goal_state]
    path = construct_path(si, path_states)
    path_valid = path.check()

    pdef.addSolutionPath(path)
    # Solved iff the stitched path passed validity checking; never
    # reported as approximate.
    return ob.PlannerStatus(path_valid, False)