예제 #1 (Example #1)
0
    def testDijkstra(self):
        """Run Dijkstra on the classic 5-vertex example graph under two
        different weight assignments and check predecessors/distances."""

        def make_graph(weights):
            # Build fresh vertices s,t,x,y,z, the fixed 10-edge topology,
            # and a weight function backed by a dict keyed on (u, v).
            verts = [Vertex(name) for name in 'stxyz']
            s, t, x, y, z = verts
            edge_list = [(s, t), (s, y), (t, x), (t, y), (x, z), (y, t),
                         (y, x), (y, z), (z, s), (z, x)]
            cost = dict(zip(edge_list, weights))
            return Graph(verts, edge_list), verts, lambda u, v: cost[(u, v)]

        # First weight assignment, source s.
        g, (s, t, x, y, z), w = make_graph([10, 5, 1, 2, 4, 3, 9, 2, 7, 6])
        g.Dijkstra(w, s)
        self.assertEqual([v.p for v in (s, t, x, y, z)], [None, y, t, s, y])
        self.assertEqual([v.d for v in (s, t, x, y, z)], [0, 8, 9, 5, 7])

        # Second weight assignment, run from two different sources.
        g, (s, t, x, y, z), w = make_graph([3, 5, 6, 2, 2, 1, 4, 6, 3, 7])
        g.Dijkstra(w, s)
        self.assertEqual([v.p for v in (s, t, x, y, z)], [None, s, t, s, y])
        self.assertEqual([v.d for v in (s, t, x, y, z)], [0, 3, 9, 5, 11])
        g.Dijkstra(w, z)
        self.assertEqual([v.p for v in (s, t, x, y, z)], [z, s, z, s, None])
        self.assertEqual([v.d for v in (s, t, x, y, z)], [3, 6, 7, 8, 0])
예제 #2 (Example #2)
0
def gen_room_trajs(grid_width, grid_height, room_size):
    """Generate N expert trajectories of T steps each in a multi-room grid.

    Each trajectory repeatedly: picks a random goal room, places an "apple"
    at that room's centre, walks the agent along a Dijkstra shortest path
    from its current cell to the apple, and records (state, action-id,
    one-hot goal) triples until T steps are consumed.

    Args:
        grid_width: grid width in cells.
        grid_height: grid height in cells.
        room_size: forwarded to create_obstacles() for room layout.

    Returns:
        (env_data_dict, expert_data_dict, obstacles, set_diff) where
        expert_data_dict maps a trajectory index (as a string) to dicts
        with 'state', 'action', and 'goal' lists.
    """

    N = 300  # number of trajectories to generate
    T = 50   # steps per trajectory

    num_goals = 4  # one goal per room centre

    expert_data_dict = {}
    env_data_dict = {
        # NOTE(review): action count is taken from num_goals; both happen to
        # be 4 here (4 rooms, 4 movement directions) -- confirm intent.
        'num_actions': num_goals,
        'num_goals': 4,
    }

    obstacles, rooms, room_centres = create_obstacles(grid_width,
                                                      grid_height,
                                                      env_name='room',
                                                      room_size=room_size)
    #T = TransitionFunction(grid_width, grid_height, obstacle_movement)
    # Free cells: every grid coordinate that is not an obstacle.
    set_diff = list(set(product(tuple(range(0, grid_width)),tuple(range(0, grid_height)))) \
                    - set(obstacles))

    room_set = set(rooms)
    room_centre_set = set(room_centres)

    # Build an undirected unit-weight graph over free cells by connecting
    # each cell to its in-bounds, non-obstacle 4-neighbours.
    graph = Graph()
    # Movement delta -> discrete action id (up, down, left, right).
    deltas = {(0, 1): 0, (0, -1): 1, (-1, 0): 2, (1, 0): 3}

    for node in set_diff:
        for a in deltas:
            neigh = (node[0] + a[0], node[1] + a[1])
            if neigh[0] >= 0 and neigh[0] < grid_width and \
                    neigh[1] >= 0 and neigh[1] < grid_height:
                if neigh not in obstacles:
                    # Both directions added explicitly; duplicates are
                    # possible since each pair is visited from both ends.
                    graph.add_edge(node, neigh, 1)
                    graph.add_edge(neigh, node, 1)

    for n in range(N):
        states, actions, goals = [], [], []

        rem_len, path_key = T, str(n)
        expert_data_dict[path_key] = {'state': [], 'action': [], 'goal': []}

        #start_state = State(sample_start(set_diff), obstacles)

        # initial start state will never be at centre of any room
        start_state = State(
            sample_start(list(set(set_diff) - room_centre_set)), obstacles)
        while rem_len > 0:

            #apple_state = State(sample_start(
            #    list(room_set-set(start_state.coordinates))), obstacles)

            # randomly select one room (goal) and place apple at its centre;
            # re-draw until the goal differs from the agent's current cell
            goal = random.choice(range(len(room_centres)))
            while room_centres[goal] == start_state.coordinates:
                goal = random.choice(range(len(room_centres)))
            apple_state = State(room_centres[goal], obstacles)
            # randomly spawn agent in a room, but not at same location as apple
            #start_state = State(sample_start(list(set(set_diff) - set(room_centres[goal]))), obstacles)

            source = start_state.coordinates
            destination = apple_state.coordinates
            # p: presumably a predecessor map keyed by node, as returned by
            # this project's Graph.Dijkstra -- TODO confirm its contract.
            p = graph.Dijkstra(source)
            node = destination

            # Walk predecessors back from destination to source.
            # NOTE(review): this loops forever if destination is unreachable
            # from source (no guard on p lookup).
            path = []
            while node != source:
                path.append(node)
                node = p[node]

            path.append(source)
            path.reverse()

            # Truncate the path so the trajectory never exceeds T steps.
            path_len = min(len(path) - 1, rem_len)

            for i in range(path_len):
                s = path[i]
                next_s = path[i + 1]

                #state = np.array(s + destination)
                state = np.array(s)
                # Action id derived from the (dx, dy) step taken.
                action = (next_s[0] - s[0], next_s[1] - s[1])
                action_delta = deltas[action]

                states.append(state)
                actions.append(action_delta)
                #goals.append(destination)
                goal_onehot = np.zeros((num_goals, ))
                goal_onehot[goal] = 1.0
                goals.append(goal_onehot)

            rem_len = rem_len - path_len
            # NOTE(review): even when the path was truncated by rem_len, the
            # agent is moved all the way to the goal here -- confirm intended.
            start_state.coordinates = destination

        expert_data_dict[path_key]['state'] = states
        expert_data_dict[path_key]['action'] = actions
        expert_data_dict[path_key]['goal'] = goals

    return env_data_dict, expert_data_dict, obstacles, set_diff
예제 #3 (Example #3)
0
# Posterior goal probabilities per (state, action), computed on the base MDP.
goal_posteriors = base_MDP.compute_goal_posteriors(prior_goals, init, goals,
                                                   one_step_cost, discount)

# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
'''Approximation algorithm 1'''

# This approximation algorithm performs on the base MDP model.
# It defines the costs for each state-action pair as follows:
#   Let c(s) be the posterior goal probability of the state s.
#   Let T(s) be the minimum time steps to reach the state s from the initial state.
#   Let base_MDP_costs(s,a) be the cost for the state-action pair (s,a), which is used in the linear problem
# We have base_MDP_costs(s,a) = c(s) * discount ** T(s)

# T(s) from the formula above: Dijkstra from the initial state gives the
# minimum number of steps to each state (unit edge weights assumed here --
# TODO confirm against this project's Graph).
graph = Graph(model)
min_times = graph.Dijkstra(init)
base_MDP_costs = {}
for state in base_MDP.states():
    for act in base_MDP.active_actions()[state]:
        if state not in absorb:
            # c(s) * discount ** T(s); goal_posteriors[0] is presumably the
            # posterior for the first goal -- verify indexing convention.
            base_MDP_costs[(state, act)] = goal_posteriors[0][
                (state, act)] * discount**min_times[state]
        else:
            # Absorbing states incur no cost.
            base_MDP_costs[(state, act)] = 0

# Solve for the min-cost policy subject to maximum reachability of true_goal.
[a, policy
 ] = base_MDP.compute_min_cost_subject_to_max_reach(init, [true_goal], absorb,
                                                    base_MDP_costs)
# NOTE(review): file handle is never closed in this chunk; prefer a
# with-statement around the csv writing.
f = open("grid_world_policy_1.csv", "w")
w_1 = csv.writer(f)
for key, val in policy.items():
예제 #4 (Example #4)
0
파일: main.py 프로젝트: riyadh-ouz/Graphs (File: main.py, Project: riyadh-ouz/Graphs)
    # data from dijkstra.txt
    # Weighted directed graph

    print("Enter edges with costs")

    # Build a 5-vertex, 10-edge graph; the two boolean flags presumably mean
    # weighted and directed (matching the comments above) -- TODO confirm
    # against this project's Graph constructor.
    graph = Graph(5, 10, True, True)
    graph.visualise()

    # Depth-first traversal starting from vertex 0 (output side effect only).
    graph.dfs(0)

    src = int(input("\nEnter the source ... "))

    print("\nnode | distance src " + str(src) + " | path\n", end='', sep='')

    # distances[i] appears to be indexable, with distances[i][0] holding the
    # shortest distance from src to i -- verify Dijkstra's return shape.
    distances = graph.Dijkstra(src)

    for i in range(5):

        # Shortest path from src to i, printed as a->b->c below.
        path = graph.path(i, src)
        # path = graph.path_without_cost(i, src)

        print(str(i) + "      " + str(distances[i][0]) + "                 ",
              end='',
              sep='')

        for j in range(len(path)):
            if j != 0: print("->", end='', sep='')
            print(path[j], end='', sep='')

        print("\n", end='', sep='')
예제 #5 (Example #5)
0
# G is a graph object created earlier (outside this chunk). Add the five
# vertices; addVertex apparently returns the vertex object -- TODO confirm.
A = G.addVertex('A')
B = G.addVertex('B')
C = G.addVertex('C')
D = G.addVertex('D')
E = G.addVertex('E')

# Directed, weighted edges: vertex.addEdge(target_label, cost).
A.addEdge('D', 7)
A.addEdge('E', 15)
B.addEdge('A', 5)
B.addEdge('E', 10)
C.addEdge('D', 15)
D.addEdge('E', 5)
E.addEdge('C', 20)
'''SO, THE GRAPHS LOOKS LIKE THIS:

LVertex-|
        | 
        |-> [A] ----> [[D, 7], [E, 15]]
            [B] ----> [[A, 5], [E, 10]]
            [C] ----> [[D, 15]]
            [D] ----> [[E, 5]]
            [E] ----> [[C, 20]]'''

# Query the shortest path from A to C by label.
string1 = 'A'
string2 = 'C'

# Dijkstra returns None when no path exists; otherwise presumably the
# path/cost -- verify against this project's Graph.Dijkstra.
x = G.Dijkstra(string1, string2)
if x is not None:
    print('The Shortest Path from: ' + string1 + ' to ' + string2 + ' is: ' +
          str(x))