Example #1
 def astar_act(self):
     '''an astar (time-based) agent'''
     path = astar.astar_path(self.world.get_astar_map(), (self.pos[ROW], self.pos[COL]), (self.dest[ROW], self.dest[COL]))
     if len(path) > 0: # a path to the destination exists
         next_pos = path[-1] # path is assumed ordered so the next cell is last
         if next_pos[ROW] - self.pos[ROW] == -1: act = UP
         elif next_pos[ROW] - self.pos[ROW] == 1: act = DOWN
         elif next_pos[COL] - self.pos[COL] == -1: act = LEFT
         elif next_pos[COL] - self.pos[COL] == 1: act = RIGHT
         else: act = STAY # next cell is not adjacent; fail safe so act is always bound
         self.act = act
     else: self.act = STAY
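The snippet assumes grid constants defined elsewhere in the file. A minimal sketch of definitions consistent with how they are used here (the concrete values are hypothetical; only their distinctness matters to the logic above):

    # hypothetical constants matching the snippet's usage
    ROW, COL = 0, 1                      # indices into (row, col) position tuples
    UP, DOWN, LEFT, RIGHT, STAY = 'up', 'down', 'left', 'right', 'stay'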
Example #2
    try:
        heuristic = planning_env.compute_heuristic
        # alternative: wrap compute_heuristic to look up node configs
        #heuristic = lambda n1, n2: planning_env.compute_heuristic(
        #    G.nodes[n1]['config'], G.nodes[n2]['config'])
        start = time.time()
        if args.lazy:
            print("lazy A*")
            weight = planning_env.edge_validity_checker
            # alternative: wrap edge_validity_checker to look up node configs
            #weight = lambda n1, n2: planning_env.edge_validity_checker(
            #    G.nodes[n1]['config'], G.nodes[n2]['config'])
            path, dist = lazy_astar.astar_path(G,
                source=start_id, target=goal_id, weight=weight, heuristic=heuristic)
        else:
            path, dist = astar.astar_path(G,
                source=start_id, target=goal_id, heuristic=heuristic)
        print('plan time: ', time.time() - start)
        # print('path length: {}'.format(dist))
        #print("path we got", path)
        if args.shortcut:
            start = time.time()
            path = planning_env.shortcut(G, path)
            print('shortcut time: ', time.time() - start)
            print(path)
            dist = planning_env.compute_path_length(path)
            print('path length: {}'.format(dist))
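Both astar and lazy_astar here are project-local modules. For comparison, NetworkX's built-in A* has a very similar call shape (a usage sketch; G, start_id, goal_id, and heuristic reuse the names above, and the 'weight' edge attribute is an assumption):

    import networkx as nx

    # nx.astar_path returns just the node list; heuristic(u, v) must
    # return a cost estimate between the two node ids
    path = nx.astar_path(G, source=start_id, target=goal_id,
                         heuristic=heuristic, weight='weight')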
Example #3
if __name__ == '__main__':
    rb = robot()
    rospy.sleep(2)

    model = model_based(rb.grid, rb.start, rb.goal, rb.walls, rb.pits)
    # convert to probability
    for x in model:
        summ = sum(x)
        for y in range(0, len(x)):
            if (summ != 0):
                x[y] = x[y] * 1.0 / summ

    #print "model: \n", model, "\n\n"

    mypath = astar_path(rb.grid, rb.start, rb.goal, rb.walls, rb.pits,
                        rb.move_list)
    policy = mdp_policy(0, model, rb.grid, rb.start, rb.goal, rb.walls,
                        rb.pits, rb.move_list)
    policy1 = mdp_policy(1, model, rb.grid, rb.start, rb.goal, rb.walls,
                         rb.pits, rb.move_list)

    correct = 0
    for i in range(0, len(policy)):
        for j in range(0, len(policy[0])):
            if (policy[i][j] == policy1[i][j]):
                correct += 1

    percentage = correct * 100.0 / (len(policy) * len(policy[0]))
    print "Optimal policy from MDP: \n", policy, "\n\n"
    print "Optimal policy from model based learning: \n", policy1, "\n\n"
    print "Percentage of correctness: \n", percentage, "%", "\n\n"
Example #4
            path = lazy_astar.astar_path(G,
                                         source=start_id,
                                         target=goal_id,
                                         weight=weight,
                                         heuristic=heuristic,
                                         a=a)
            print("Lazy A* planning time: %s seconds" %
                  (time.time() - start_time))
            astar_time = time.time() - start_time
            print("Path Length: " + str(astar.path_length(G, path)))
        else:
            print("")
            start_time = time.time()
            path = astar.astar_path(G,
                                    source=start_id,
                                    target=goal_id,
                                    heuristic=heuristic,
                                    a=a)
            astar_time = time.time() - start_time
            print("A* planning time: ", astar_time)
            print("Path Length: " + str(astar.path_length(G, path)))

        planning_env.visualize_plan(G, path)

        if args.shortcut:
            start_time = time.time()
            shortcut_path = planning_env.shortcut(G, path)
            shortcut_time = time.time() - start_time
            print("")
            print("Shortcut planning time: ", shortcut_time)
            print("Total planning time: ", shortcut_time + astar_time)
Example #5
            G.add_node(u, attr_dict=MG_projected.nodes[u])
        if not G.has_node(v):
            G.add_node(v, attr_dict=MG_projected.nodes[v])
        G.add_edge(u,
                   v,
                   osmid=data['osmid'],
                   highway=data['highway'],
                   length=data['length'])

# find the shortest path from a location east of Lewis
# A location east of Lewis
source_node = 504599581  # 41.590134, -88.115686
# A location in Lewis
destination_node = 5126987679  # 41.605080, -88.080890
path, pushed_nodes = astar_path(G,
                                source_node,
                                destination_node,
                                weight='length')
write_pushed_nodes('east_dijkstra_nodes.js', 'east_dijkstra_nodes',
                   pushed_nodes, 'east_dijkstra_path', path)
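# Note: the call above omits heuristic; if the implementation defaults to a
# zero heuristic, A* expands nodes in the same order as Dijkstra, which is
# presumably why the output files are named 'east_dijkstra_*'. A typical
# heuristic for a projected OSM graph is straight-line distance (a sketch;
# assumes each node stores projected 'x'/'y' in the same units as 'length'):
#
#   def heuristic(u, v):
#       return math.hypot(G.nodes[u]['x'] - G.nodes[v]['x'],
#                         G.nodes[u]['y'] - G.nodes[v]['y'])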
path, pushed_nodes = astar_path(G,
                                source_node,
                                destination_node,
                                heuristic=heuristic,
                                weight='length')
write_pushed_nodes('east_astar_nodes.js', 'east_astar_nodes', pushed_nodes,
                   'east_astar_path', path)

# find the shortest path from a location west of Lewis
# A location west of Lewis
source_node = 237525410  # 41.597274, -88.048803
# A location in Lewis
Example #6
 def move_to(self, state ) :
     """
     moves to state (if possible), outputs some shortest path trajectory (if such exists) 
     """
     options = []
     
     # it *could* be local
     try :
         traj = self.copy().move_to_on_road( state ).trajectory()
         options.append( ( traj.length(), traj ) )
     except OmniDirectional.CannotComply :
         pass
     
     other_sys = self.copy().set_state( state )
     p_road = self.road
     p_edge = self.mmgraph.edge( p_road )
     q_road = other_sys.road
     q_edge = self.mmgraph.edge( q_road )
     
     # but *probably* not
     if self.is_interior() :
         for u, condition in [ (1.,True), (-1., not p_edge.directed ) ] :
             if condition :
                 sys = self.copy()
                 sys.apply_control( u, None, ignore_direction=True ).move_to( state )
                 traj = sys.trajectory()
                 options.append( ( traj.length(), traj ) )
             
     elif other_sys.is_interior() :
         try :
             for u, condition in [ (-1.,True), (1., not q_edge.directed ) ] :
                 if condition :
                     end_state = other_sys.state()
                     mid_state = other_sys.copy().apply_control( u, None, ignore_direction=True ).state()
                     sys = self.copy()
                     sys.move_to( mid_state ).move_to_on_road( end_state )
                     traj = sys.trajectory()
                     options.append( ( traj.length(), traj ) )
         except Exception :
             print self.state(), mid_state, end_state
             print u, condition, self.available_jumps()
             
     else :
         if self.x <= 0. :
             u = p_edge.start
         else :
             u = p_edge.end
             
         if other_sys.x <= 0. :
             v = q_edge.start
         else :
             v = q_edge.end
             
         path = astar.astar_path( self.mmgraph, u, v )
         #print path
         
         # make a copy of self, and having it follow the "path"
         sys = self.copy() 
         for u, edge, v in triplet_slider( path ) :
             if u == edge.start :
                 side = 'start'
                 control = 1.
             elif u == edge.end :
                 side = 'end'
                 control = -1.
             else : raise Exception()
             
             if not sys.road == edge.id :
                 sys.jump( edge.id, side )
             sys.apply_control( control ) 
             
         if not sys.road == other_sys.road :
             if other_sys.x <= 0. :
                 sys.jump( other_sys.road, 'start' )
             else :
                 sys.jump( other_sys.road, 'end' )
         
         # store the trajectory information
         traj = sys.trajectory()
         options.append( ( traj.length(), traj ) )
         
         
     # selection
     _, traj = min( options )
     #print traj
     self.follow( traj )
     return self
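triplet_slider is used above but not defined in the excerpt. Given how it is unpacked (u, edge, v, where edge carries .start, .end, and .id), the path returned by astar.astar_path presumably alternates node, edge, node, and the helper slides a width-3 window two items at a time. A guess at a matching implementation:

    def triplet_slider( path ) :
        # assumed layout: [node, edge, node, edge, ..., node];
        # yield overlapping (node, edge, node) triples, stepping by two
        for i in range( 0, len( path ) - 2, 2 ) :
            yield path[i], path[i+1], path[i+2]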