Example #1
    def crossover(self, threshold, time):
        # Take the two highest-scoring candidates (the pool is kept
        # sorted in ascending score order).
        list1 = self.parents[-2][0]
        list2 = self.parents[-1][0]

        # Wrap copies of the gene lists in fresh nodes so the originals
        # are left untouched.
        parent1 = Node()
        parent1.server_tab = list(list1)

        parent2 = Node()
        parent2.server_tab = list(list2)

        # Swap genes at random positions until one offspring reaches the
        # threshold, then drop the candidate it replaces from the pool.
        while True:
            index = random.randint(0, len(list2) - 1)
            parent1.server_tab[index], parent2.server_tab[index] = (
                parent2.server_tab[index], parent1.server_tab[index])

            if parent2.calculate_all(time) >= threshold:
                del self.parents[-1]
                break
            elif parent1.calculate_all(time) >= threshold:
                del self.parents[-2]
                break

        # Keep the pool sorted by score (the second tuple element).
        self.parents.sort(key=lambda x: x[1])
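The swap inside the loop is ordinary single-gene crossover. A minimal standalone sketch of just that step, using plain lists instead of the Node/server_tab machinery above:

import random

def swap_gene(tab1, tab2):
    # Exchange one randomly chosen position between two gene lists, in place.
    i = random.randint(0, len(tab2) - 1)
    tab1[i], tab2[i] = tab2[i], tab1[i]

a, b = [0, 0, 0, 0], [1, 1, 1, 1]
swap_gene(a, b)
print(a, b)  # e.g. [0, 1, 0, 0] [1, 0, 1, 1]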
Example #2
 def test_it_can_topo_sort_a_graph(self):
     n5 = Node(105)
     n3 = Node(103, [n5])
     n4 = Node(104)
     n2 = Node(102, [n3, n4, n5])
     n1 = Node(101, [n2, n4])
     g = Graph([n1, n2, n3, n4, n5])
     topoSorted = g.topo_sort()
     r = g.min_path(topoSorted)
     self.assertEqual(r, [1, 2, 3, 4, 5])
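The test assumes a `Node(data, children)` shape and a `Graph.topo_sort()` that orders parents before children; the `min_path` result and its expected value are specific to the original Graph class. A minimal sketch of the topological-sort step itself (Kahn's algorithm over hypothetical Node/Graph stand-ins, not the original classes):

from collections import deque

class Node:
    def __init__(self, data, children=None):
        self.data = data
        self.children = children or []

def topo_sort(nodes):
    # Kahn's algorithm: repeatedly emit nodes whose parents are all emitted.
    indegree = {n: 0 for n in nodes}
    for n in nodes:
        for c in n.children:
            indegree[c] += 1
    queue = deque(n for n in nodes if indegree[n] == 0)
    order = []
    while queue:
        n = queue.popleft()
        order.append(n)
        for c in n.children:
            indegree[c] -= 1
            if indegree[c] == 0:
                queue.append(c)
    return order

n5 = Node(105)
n3 = Node(103, [n5])
n4 = Node(104)
n2 = Node(102, [n3, n4, n5])
n1 = Node(101, [n2, n4])
print([n.data for n in topo_sort([n1, n2, n3, n4, n5])])
# [101, 102, 103, 104, 105]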
Example #3
    def generate(self, threshold, time, tab1):
        """Run crossover until the pool collapses, then mutate the survivors."""

        # Each crossover removes one candidate, so this stops at one survivor.
        while len(self.parents) >= 2:
            self.crossover(threshold, time)

        for nodes in self.parents:
            node = Node()
            node.server_tab = list(nodes[0])
            self.mutate(node, tab1, threshold, time)
Example #4
def build_and_test():
    nodes = []
    for name in range(6):
        nodes.append(Node(str(name)))  # Create 6 nodes
    g = GraphSearch()
    for n in nodes:
        g.addNode(n)
    g.addEdge(Edge(nodes[0], nodes[1]))
    g.addEdge(Edge(nodes[1], nodes[2]))
    g.addEdge(Edge(nodes[2], nodes[3]))
    g.addEdge(Edge(nodes[2], nodes[4]))
    g.addEdge(Edge(nodes[3], nodes[4]))
    g.addEdge(Edge(nodes[3], nodes[5]))
    g.addEdge(Edge(nodes[0], nodes[2]))
    g.addEdge(Edge(nodes[1], nodes[0]))
    g.addEdge(Edge(nodes[3], nodes[1]))
    g.addEdge(Edge(nodes[4], nodes[0]))

    # Print Directed Graph
    print("Graph:\n{}".format(g))

    # Shortest path using BFS.
    bfspath = g.bfsShortest(nodes[0], nodes[5])
    print("BFS Shortest path: {}".format(g.printpath(path=bfspath)))
    print("BFS Shortest path test passed:", bfspath == ["0", "2", "3", "5"])

    # Shortest path using DFS.
    dfspath = g.dfsShortest(nodes[0], nodes[5])
    print("DFS Shortest path: {}".format(g.printpath(path=dfspath)))
    print(
        "DFS Shortest path test passed:",
        [str(n) for n in dfspath] == ["0", "2", "3", "5"],
    )
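On an unweighted digraph like this one, the BFS shortest-path search can be sketched as below. This is a self-contained illustration over a plain adjacency dict built from the same ten edges, not the GraphSearch/Edge API above:

from collections import deque

def bfs_shortest(adj, start, goal):
    # Explore paths in breadth-first order; the first path to reach the
    # goal is a shortest one, since all edges have equal weight.
    queue = deque([[start]])
    visited = {start}
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == goal:
            return path
        for nxt in adj.get(node, []):
            if nxt not in visited:
                visited.add(nxt)
                queue.append(path + [nxt])
    return None

adj = {"0": ["1", "2"], "1": ["2", "0"], "2": ["3", "4"],
       "3": ["4", "5", "1"], "4": ["0"]}
print(bfs_shortest(adj, "0", "5"))  # ['0', '2', '3', '5']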
Example #5
 def makeNode(x, X, y, Y, node=None):
     # Reuse the node passed in, or create a fresh one with a new ID.
     if node is not None:
         u = node
     else:
         u = Node()
         u.ID = self.graph.getNextID()
         u.fill = '#C0804080'
         u.setIDAsLabel()
     # Size the node to the (x, X) x (y, Y) bounding box and centre it.
     u.w, u.h = X - x, Y - y
     u.x, u.y = x + u.w / 2.0, y + u.h / 2.0
     return u
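Note that the node's `x`/`y` end up at the centre of the bounding box, not its corner: `makeNode(0, 10, 0, 6)` yields `w, h = 10, 6` and `x, y = 5.0, 3.0`.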
Example #6
 def setUp(self):
     n6 = Node(6)
     n5 = Node(5, [n6])
     n4 = Node(4, [n6])
     n3 = Node(3, [n4, n5])
     n2 = Node(2, [n4])
     n1 = Node(1, [n2, n3])
     self.g = Graph([n1, n2, n3, n4, n5, n6])
Example #7
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 23:35:38 2016

@author: jpdjere
"""

from graphs import Node, Edge, Digraph, Graph

nodes = []
nodes.append(Node("ABC"))  # nodes[0]
nodes.append(Node("ACB"))  # nodes[1]
nodes.append(Node("BAC"))  # nodes[2]
nodes.append(Node("BCA"))  # nodes[3]
nodes.append(Node("CAB"))  # nodes[4]
nodes.append(Node("CBA"))  # nodes[5]

g = Graph()
for n in nodes:
    g.addNode(n)

g.addEdge(Edge(g.getNode('ABC'), g.getNode('ACB')))
g.addEdge(Edge(g.getNode('ACB'), g.getNode('CAB')))
g.addEdge(Edge(g.getNode('CAB'), g.getNode('CBA')))
g.addEdge(Edge(g.getNode('CBA'), g.getNode('BCA')))
g.addEdge(Edge(g.getNode('BCA'), g.getNode('BAC')))
g.addEdge(Edge(g.getNode('BAC'), g.getNode('ABC')))

print(g)
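The six nodes are the permutations of "ABC", and each edge joins two permutations that differ by swapping one adjacent pair, so the edges form a single 6-cycle. A quick self-contained check of that property, independent of the graphs module:

def adjacent_swap(p, q):
    # True if permutations p and q differ by one adjacent transposition.
    diffs = [i for i in range(len(p)) if p[i] != q[i]]
    return (len(diffs) == 2 and diffs[1] == diffs[0] + 1
            and p[diffs[0]] == q[diffs[1]] and p[diffs[1]] == q[diffs[0]])

cycle = ["ABC", "ACB", "CAB", "CBA", "BCA", "BAC"]
print(all(adjacent_swap(cycle[i], cycle[(i + 1) % 6]) for i in range(6)))  # True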
Example #8
from graphs import Digraph, Node
import gpxpy

# use 'jmt.gpx' for actual JMT coordinate points and waypoints
file = open('test3.gpx')
jmt_gpx = gpxpy.parse(file)
gpxObj = gpxpy.gpx.GPX()

digraph = Digraph()
# Get the starting track point (break out of all three loops at the first point).
from_node = None
for track in jmt_gpx.tracks:
    for segment in track.segments:
        for point in segment.points:
            from_node = Node(
                (point.latitude, point.longitude, point.elevation))
            start_node = from_node
            break
        break
    break
# Add an edge weighted by the elevation difference between consecutive track points.
to_node = None
for track in jmt_gpx.tracks:
    for segment in track.segments:
        for point in segment.points:
            to_node = Node((point.latitude, point.longitude, point.elevation))

            if to_node.id == from_node.id:
                pass
            else:
                if (from_node.id[2] is None) or (to_node.id[2] is None):
                    weight = 0
                else:
Example #9
# Create 7 nodes to manually add to the graph
node_locs = list()
node_locs.append(np.array([1.0, 1.0]))
node_locs.append(np.array([1.4, 4.7]))
node_locs.append(np.array([3.2, 6.7]))
node_locs.append(np.array([3.8, 1.4]))
node_locs.append(np.array([4.4, 4.2]))
node_locs.append(np.array([6.7, 1.1]))
node_locs.append(np.array([7.1, 5.0]))
nodes = []
# Vocab of landmark IDs
vocab_vectors = np.zeros((len(node_locs), dim))
vocab = spa.Vocabulary(dim, max_similarity=0.01)
for i, loc in enumerate(node_locs):
    nodes.append(Node(index=i, data={'location': loc}))

    map_sp += encode_point(loc[0], loc[1], x_axis_sp, y_axis_sp)

    # Note: the landmark IDs don't have to be 'good' unitaries
    # landmark_ids.append(make_good_unitary(dim))
    # landmark_ids.append(spa.SemanticPointer(dim))

    # sp = spa.SemanticPointer(dim)
    # sp.make_unitary()

    sp = vocab.parse("Landmark{}".format(i))
    landmark_ids.append(sp)

    landmark_map_sp += landmark_ids[i] * encode_point(loc[0], loc[1],
                                                      x_axis_sp, y_axis_sp)
Example #10
def postier_chinois(g):
    '''
    Return the optimal route for the Chinese postman problem, or None if
    there is no route. Warning: the algorithm may modify the graph.
    '''
    if not is_connected(g):
        return None

    if g.oriented:
        raise NotImplementedError()

    # Build the partial graph.
    pg = Graph()
    pg.oriented = False

    # Copy the odd-degree nodes.
    for node in g.nodes:
        if len(node.edges_out) % 2 == 1:
            pg.nodes.append(Node(node.data))

    if not pg.nodes:  # Eulerian graph
        return eulerian_path_euler(g)

    # Copy the edges (filter() is not subscriptable in Python 3, so use
    # a generator / list comprehension instead).
    pg_nodes = set(pg.nodes)
    while pg_nodes:
        node_pg = pg_nodes.pop()
        node = next(n for n in g.nodes if n.data == node_pg.data)
        for edge in node.edges_out:
            matches = [n for n in pg_nodes
                       if n.data == edge.other_side(node).data]
            if matches:
                other_side_pg = matches[0]
                edge_pg = Edge(node_pg, other_side_pg, edge.cost)
                node_pg.edges_out.add(edge_pg)
                other_side_pg.edges_out.add(edge_pg)

    # Turn the partial graph into a clique.
    pg_nodes = set(pg.nodes)
    while pg_nodes:
        node_pg = pg_nodes.pop()
        for node in pg_nodes:
            if not node_pg.exists_edge_to(node):
                # Look up the corresponding nodes in the original graph.
                node_pg_g = next(n for n in g.nodes if n.data == node_pg.data)
                node_g = next(n for n in g.nodes if n.data == node.data)

                # Create the edge, weighted by the shortest-path cost.
                edge = Edge(node_pg, node, dijkstra_min_cost(node_pg_g, node_g))
                node_pg.edges_out.add(edge)
                node.edges_out.add(edge)

    # Find a minimum-cost perfect matching over the odd-degree nodes.
    edges = set()  # all edges of the partial graph
    for node_pg in pg.nodes:
        edges.update(node_pg.edges_out)

    def aux(matching, nodes, cost):
        # Brute-force: try every edge that covers two still-unmatched nodes.
        if len(nodes) == len(pg.nodes):
            return matching, cost

        best_matching, best_cost = None, 0

        for edge in edges:
            if edge.origin not in nodes and edge.dest not in nodes:
                matching_copy = matching[:]
                matching_copy.append(edge)
                nodes_copy = set(nodes)
                nodes_copy.add(edge.origin)
                nodes_copy.add(edge.dest)

                result_matching, result_cost = aux(matching_copy, nodes_copy, cost + edge.cost)
                if best_matching is None or best_cost > result_cost:
                    best_matching, best_cost = result_matching, result_cost

        return best_matching, best_cost

    best_matching, best_cost = aux([], set(), 0)

    # Duplicate the edges of best_matching along their shortest paths.
    for edge_pg in best_matching:
        origin = next(n for n in g.nodes if n.data == edge_pg.origin.data)
        dest = next(n for n in g.nodes if n.data == edge_pg.dest.data)
        path = dijkstra_min_cost_path(origin, dest)

        for edge in path:
            new_edge = Edge(edge.origin, edge.dest, edge.cost)
            edge.origin.edges_out.add(new_edge)
            edge.dest.edges_out.add(new_edge)

    return eulerian_path_euler(g)
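The key step above is pairing the odd-degree vertices at minimum total cost before doubling edges. A self-contained illustration of that matching step over a plain dict of pairwise shortest-path costs (hypothetical data, independent of the Graph/Node/Edge classes used above):

from itertools import permutations

def min_cost_pairing(odd, dist):
    # Brute-force minimum-cost perfect matching: consider every ordering of
    # the odd-degree vertices and pair them off two by two.
    best = (float('inf'), None)
    for perm in permutations(odd):
        pairs = [(perm[i], perm[i + 1]) for i in range(0, len(perm), 2)]
        cost = sum(dist[a][b] for a, b in pairs)
        best = min(best, (cost, pairs))
    return best

# Hypothetical pairwise shortest-path costs between four odd-degree vertices.
dist = {'a': {'b': 1, 'c': 4, 'd': 3},
        'b': {'a': 1, 'c': 2, 'd': 5},
        'c': {'a': 4, 'b': 2, 'd': 1},
        'd': {'a': 3, 'b': 5, 'c': 1}}
print(min_cost_pairing(['a', 'b', 'c', 'd'], dist))
# (2, [('a', 'b'), ('c', 'd')])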
Example #11
 def test_it_can_calculate_diameter(self):
     i = Node('i')
     h = Node('h', [i])
     g = Node('g')
     f = Node('f', [h, g])
     e = Node('e', [i])
     d = Node('d', [e])
     c = Node('c')
     b = Node('b', [c, f])
     a = Node('a', [b, d])
     graph = Graph([a, b, c, d, e, f, g, h, i])  # avoid shadowing the node g
     r = graph.diameter()
     print('dia', r)
Example #12
def generate_graph(dim, x_axis_sp, y_axis_sp, normalize=True):
    # TODO: make different graphs and different start/end nodes each time instead of the same one

    # Map
    map_sp = spa.SemanticPointer(data=np.zeros((dim, )))
    # version of the map with landmark IDs bound to each location
    landmark_map_sp = spa.SemanticPointer(data=np.zeros((dim, )))

    # Connectivity
    # contains each connection egocentrically
    con_ego_sp = spa.SemanticPointer(data=np.zeros((dim, )))
    # contains each connection allocentrically
    con_allo_sp = spa.SemanticPointer(data=np.zeros((dim, )))

    # Agent Location
    agent_sp = spa.SemanticPointer(data=np.zeros((dim, )))

    # True values for individual node connections, for debugging
    true_allo_con_sps = list()

    # Semantic Pointers for each landmark/node
    landmark_ids = list()

    # Hardcode a specific graph to work with for prototyping
    graph = Graph()

    # Create 7 nodes to manually add to the graph
    node_locs = list()
    node_locs.append(np.array([1.0, 1.0]))
    node_locs.append(np.array([1.4, 4.7]))
    node_locs.append(np.array([3.2, 6.7]))
    node_locs.append(np.array([3.8, 1.4]))
    node_locs.append(np.array([4.4, 4.2]))
    node_locs.append(np.array([6.7, 1.1]))
    node_locs.append(np.array([7.1, 5.0]))
    nodes = []
    # Vocab of landmark IDs
    vocab_vectors = np.zeros((len(node_locs), dim))
    vocab = spa.Vocabulary(dim, max_similarity=0.01)
    for i, loc in enumerate(node_locs):
        nodes.append(Node(index=i, data={'location': loc}))

        map_sp += encode_point(loc[0], loc[1], x_axis_sp, y_axis_sp)

        # Note: the landmark IDs don't have to be 'good' unitaries
        # landmark_ids.append(make_good_unitary(dim))
        # landmark_ids.append(spa.SemanticPointer(dim))

        # sp = spa.SemanticPointer(dim)
        # sp.make_unitary()

        sp = vocab.parse("Landmark{}".format(i))
        landmark_ids.append(sp)

        landmark_map_sp += landmark_ids[i] * encode_point(
            loc[0], loc[1], x_axis_sp, y_axis_sp)

        vocab_vectors[i, :] = landmark_ids[i].v

    if normalize:
        map_sp.normalize()
        landmark_map_sp.normalize()

    connectivity_list = [
        [1, 3],
        [0, 2],
        [1, 4],
        [0, 4],
        [2, 3, 6],
        [6],
        [4, 5],
    ]

    for i, node in enumerate(nodes):
        links_ego_sp = spa.SemanticPointer(data=np.zeros((dim, )))
        links_allo_sp = spa.SemanticPointer(data=np.zeros((dim, )))
        for j in connectivity_list[i]:
            vec_diff = node_locs[j] - node_locs[i]
            node.add_neighbor(neighbor_index=j,
                              distance=np.linalg.norm(vec_diff))

            # links_sp += encode_point(vec_diff[0], vec_diff[1], x_axis_sp, y_axis_sp)
            links_ego_sp += landmark_ids[j] * encode_point(
                vec_diff[0], vec_diff[1], x_axis_sp, y_axis_sp)
            links_allo_sp += landmark_ids[j] * encode_point(
                node_locs[j][0], node_locs[j][1], x_axis_sp, y_axis_sp)

        if normalize:
            links_ego_sp.normalize()
            links_allo_sp.normalize()
        con_ego_sp += landmark_ids[i] * links_ego_sp
        con_allo_sp += landmark_ids[i] * links_allo_sp

        true_allo_con_sps.append(links_allo_sp)

    if normalize:
        con_ego_sp.normalize()
        con_allo_sp.normalize()

    graph.nodes = nodes
    graph.n_nodes = 7

    return {
        'graph': graph,
        'start_landmark_id': 0,
        'end_landmark_id': 6,
        'landmark_map_sp': landmark_map_sp,
        'con_ego_sp': con_ego_sp,
        'con_allo_sp': con_allo_sp,
        'landmark_vectors': vocab_vectors,
        # params for debugging
        'true_allo_con_sps': true_allo_con_sps,
        'connectivity_list': connectivity_list,
    }
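One property worth noting: the hardcoded connectivity_list is symmetric (if j is a neighbour of i, then i is a neighbour of j), so the graph behaves as undirected even though neighbours are added one direction at a time. A quick standalone check:

connectivity_list = [[1, 3], [0, 2], [1, 4], [0, 4], [2, 3, 6], [6], [4, 5]]
print(all(i in connectivity_list[j]
          for i, neigh in enumerate(connectivity_list)
          for j in neigh))  # True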