Example #1
class TestFlightGraph(TestCase):

    def init_tests(self):
        f = open('data.json', 'r')
        parsed_JSON = json.loads(f.read())
        self.graphs = Graph()
        self.graphs.construct_nodes(parsed_JSON['metros'])
        self.graphs.contstruct_edges(parsed_JSON['routes'])
        self.stats = StatInfo()

    def test_init(self):
        assert (self.graphs.nodes.get('SFO').name == 'San Francisco')

    def test_longest_flight(self):
        assert (self.stats.longest_single_flight(self.graphs) == ('SYD', 'LAX', 12051))

    def test_shortest_flight(self):
        assert(self.stats.shortest_single_flight(self.graphs) == ('NYC', 'WAS', 334))

    def test_average_dist(self):
        assert (self.stats.average_network_distance(self.graphs) == 2300)

    def test_largest_pop(self):
        assert self.stats.largest_population(self.graphs) == ('TYO', 34000000)

    def test_smallest_pop(self):
        assert self.stats.smallest_population(self.graphs) == ('ESS', 589900)

    def test_average_network_pop(self):
        assert self.stats.average_network_population(self.graphs) == 11796143
Example #2
 def Create(self, theGraphEditor):
     theGraphEditor.config(cursor="watch")
     
     dial = Dialog(theGraphEditor, 0, 0, "Create Complete Graph")
     if dial.result is None:
         theGraphEditor.config(cursor="")	    
         return
     
     n=dial.result[0]
     direction=dial.result[2]
     layout=dial.result[3]
     
     G=Graph()
     G.directed=direction
     
     for v in range(0,n):
         G.AddVertex()
         
     Edges=CompleteEdges(G,n,direction)
     
     for e in Edges:
         G.AddEdge(e[0],e[1])
         
     if layout==0:
         if RandomCoords(G):
             DrawNewGraph(theGraphEditor,G,direction)
     else:
         if CircularCoords(G):
             DrawNewGraph(theGraphEditor,G,direction)
             
     theGraphEditor.config(cursor="")
Example #3
    def graph(self):
        '''
        Graphs the current functions and plots the user has entered.
        '''
        #Get x range for y to be calculated off of
        x = Graph.get_x_range(Graph.xmin,Graph.xmax,Graph.x_increment)
        
        #Get all functions to plot
        functions_to_plot = Graph.get_on_functions()
        
        for fx in functions_to_plot:
            fx.update_function(x)

        #Now set up and plot the functions
        fig = self.graph_2d_plot_panel.get_figure()
        axes = fig.gca()
                
        # clear the axes and replot everything
        axes.cla()
        
        for fx in functions_to_plot:
            axes.plot(fx.x_values, fx.y_values,fx.get_color()+fx.get_line_style(), linewidth=1.0, label=fx.function_name)
        #Set the limits for the graph TODO get this working
        #axes.set_xlim((-100,100))
        axes.set_ylim(-1,1)
Example #4
def initGraph(data):
	g = Graph()
	for i in range(len(data)/2):
		g.addNode(i)
		for j in range(i):
			g.addEdge(i, j, distance(float(data[i * 2]) * scale, float(data[i * 2 + 1]) * scale, float(data[j * 2]) * scale, float(data[j * 2 + 1]) * scale))
	return g
Example #5
 def Create(self, theGraphEditor):
     theGraphEditor.config(cursor="watch")
     
     dial = Dialog(theGraphEditor, 0, 1, "Create Random Graph")
     if dial.result is None:
         theGraphEditor.config(cursor="")
         return
         
     n=dial.result[0]
     m=dial.result[1]
     direction=dial.result[2]
     layout=dial.result[3]
     
     G=Graph()
     G.directed=direction
     
     for v in range(0,n):
         G.AddVertex()
         
     Edges=CompleteEdges(G,n,direction)
     
     for i in range(0,m):
         pos=random.randint(0,len(Edges)-1)
         G.AddEdge(Edges[pos][0],Edges[pos][1])
         del Edges[pos]
         
     if layout==0:
         if RandomCoords(G):
             DrawNewGraph(theGraphEditor,G,direction)
     else:
         if CircularCoords(G):
             DrawNewGraph(theGraphEditor,G,direction) 
             
     theGraphEditor.config(cursor="")          
Example #6
def saveFriesFaceFFCC(graph1,graph2,count):

	g1 = graph1.getFaceGraph()
	g2 = graph2.getFaceGraph()

	v1 = makeVertexGraph(g1)
	v2 = makeVertexGraph(g2)

	G1 = Graph(g1,v1)
	G2 = Graph(g2,v2)


	structures1 = assignMatching(G1)
	structures2 = assignMatching(G2)

	Graph.comparison = 'fries'
	structures1.sort()
	structures2.sort()

	h1 = structures1[-1]
	h2 = structures2[-1]

	if not os.path.exists("FFCCConjectureConflicts"):
		os.mkdir("FFCCConjectureConflicts")
	folderName = "FFCCConjectureConflicts/" + str(G1.getNumVertices()) + "_" + str(count)

	#setup folder
	if not os.path.exists(folderName):
		os.mkdir(folderName)
			#print "adding"
	fileName1 = folderName + "/fries1" + ".png"
	fileName2 = folderName + "/fries2" + ".png"
			#print fileName1
	saveSinglePNG(h1,fileName1)
	saveSinglePNG(h2,fileName2)
Example #7
class graph_creationF(object):

    def __init__(self, weighted, wgraph):
        self.fname = wgraph
        self.wg = weighted
        self.g = Graph()
  
    def get_file(self, which_alg):
        self.g.alg = which_alg
        try:
            #self.fname = raw_input("Enter graph file: ")
            #self.wg = raw_input("Do you want to create a weighted Graph? ")

            file = open(self.fname, 'r')

            if self.wg[0] == 'y' or self.wg[0] == 'Y':
                for line in file:
                    s = line.split()
                    self.g.addEdge(s[0], s[1], int(s[2]))

            elif self.wg[0] == 'n' or self.wg[0] == 'N':
                for line in file:
                    s = line.split()
                    self.g.addEdge(s[0], s[1], 1)

            file.close()

            return self.g
        except:
            print "-----Graph File does not exist-----"
Example #8
def main(files):

    cfg   = Config()
    graph = Graph(cfg)

    for file in files:
        print file
        qaida = readFromXmlPrePhrase(file)
        for parta in qaida:
            for matra in parta:
                for i in range(len(matra)-1):
                    graph.createEdge(
                        graph.createNode(matra[i]),
                        graph.createNode(matra[i+1]))

#     for node in graph.nodes: print node
#     for edge in graph.edges: print edge
    
#    dist  = Distort(graph=graph, argo=DistortRandomSimple)
    dist  = Distort(graph=graph, argo=DistortRandomWeight)
    patt  = []
    i     = 0
    while True:
        e = dist.run()
        s = EdgesToStr(e)
        if s not in patt:
            patt.append(s)
            print "%3d %s" % (i, s)
            i += 1
Example #9
def small_example():
    
    graph = g.Graph('Small example graph')
    
    graph.add_node((0,1))
    graph.add_node((0,-1))
    graph.add_node((2,0))
    graph.add_node((4,0))
    graph.add_node((6,0))
    
    graph.add_link(1, 3, 1, delayfunc=g.create_delayfunc('Polynomial',(1.0, 1.0, [0.0])))
    graph.add_link(2, 3, 1, delayfunc=g.create_delayfunc('Polynomial',(2.0, 1.0, [0.0])))
    graph.add_link(3, 4, 1, delayfunc=g.create_delayfunc('Polynomial',(2.0, 1.0, [1.0])))
    graph.add_link(3, 4, 2, delayfunc=g.create_delayfunc('Polynomial',(1.0, 1.0, [2.0])))
    graph.add_link(4, 5, 1, delayfunc=g.create_delayfunc('Polynomial',(1.0, 1.0, [0.0])))
    
    graph.add_od(1, 5, 2.0)
    graph.add_od(2, 5, 3.0)
    
    graph.add_path([(1,3,1), (3,4,1), (4,5,1)])
    graph.add_path([(1,3,1), (3,4,2), (4,5,1)])
    graph.add_path([(2,3,1), (3,4,1), (4,5,1)])
    graph.add_path([(2,3,1), (3,4,2), (4,5,1)])
    
    return graph
Example #10
 def genrate_actualtour(self, StartCity):
     """
     This method generates the graph of the bestTour. 
     
     It calls the move_branch method with a given StartCity. From the bestTour
     stack it filters out only the interestCities by leaving out all the 
     intersection points. It then creates an instance of the Graph class in the same
     order as in bestTour.
     """
     tour = Stack()  # create a new stack
     
     self.currentTour.push(StartCity)    # push the startCity in currentTour stack
     self.mark_visit(StartCity)  # mark it visited
     self.move_branch(StartCity) # call move_branch recursive function, with the given city
     
     # The following block of code removes vertices from bestTour and filters out
     # only the interest points and adds it to tour Stack
     while self.bestTour.size() != 0:    
         city = self.bestTour.pop()
         if city in self.interestCities:
             tour.push(city)
             
     # The following block of code generates a Graph object from the tour Stack
     newGraph = Graph(tour.items)    # adds only the vertices in the graph
     for i in range(len(tour.items)-1):
         newEdge = Edge(tour.items[i], tour.items[i+1])  # create an edge within two consecutive vertices
         newGraph.add_edge(newEdge)  # add the edge to the graph
     return newGraph
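
A brief usage sketch of the flow this docstring describes; the solver class name and the city labels below are assumptions (the constructor in Example #30, with the same currentTour/bestTour stacks and interestCities list, appears to belong to this class).

# Hypothetical usage; TourSolver and the city names are illustrative only.
# solver = TourSolver(cities=['A', 'B', 'C'])     # interest cities, as in the Example #30 constructor
# best = solver.genrate_actualtour('A')           # Graph whose vertices follow the bestTour order
# print(best)                                     # inspect the resulting Graph object
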
Example #11
class Application:

    #. Constructor .#
    def __init__ (self):
        self.interface = Interface(self.ButtonCallBack)    # Create an interface
        self.graph = Graph()                               # Create a graph
        self.graph.formGraph()                             # Form the graph
        
    #. Call back function when button is pressed .#
    def ButtonCallBack(self):
        startName = self.interface.getStartEntry().encode('gbk')
        destName = self.interface.getDestEntry().encode('gbk')
        start = self.graph.findVertex(startName)           # Find the start vertex
        dest = self.graph.findVertex(destName)             # Find the dest vertex
        # If start or dest are not found
        if start == None:
            result = startName.encode('utf8') + ' doesn\'t exist'
        elif dest == None:
            result = destName.encode('utf8') + ' doesn\'t exist'
        else:    # Normal case
            self.graph.BFS(start)
            result = self.graph.shortestPath(dest).encode('utf8')
        self.interface.updateText(result)

    #. Run the program .#
    def run(self):
        mainloop()
Example #12
def create_Graph_from_file(file_name):
    graph = Graph()
    with open(file_name) as inputfile:
        next(inputfile)
        for line in inputfile:
            graph.add_node_to_Graph(line)
    return graph
Example #13
def main():
  if len(sys.argv) != 4:
    print "usage: python test.py numNodes, numWins, sizeWin"
    return
  
  # Call our initialize test function with the command-line arguments
  Graph.testInitialize(int(sys.argv[1]), \
                       int(sys.argv[2]), \
                       int(sys.argv[3]))
Example #14
def tridiagonalGraph(rows):

    g = Graph(rows)
    for i in range(rows):
        g.insertIndex(i, i)
        if i > 0     : g.insertIndex(i, i-1)
        if i < rows-1: g.insertIndex(i, i+1)

    return g
Example #15
def EmotionAnalysis(epsilon, figure):
    global news, CED
    EM.setEpsilon(epsilon)
    # for each news item compute its emotional value and show it
    output = []  # output is a list of tuples with [day,CED of that day]
    for i in range(0, len(dayinterval)):
        EM.computeday(news[i], negativeLex, positiveLex, CED)
        output.append([news[i], CED.copy()])
    Graph.plotallfigure(figure, output, dayinterval)
    # after the execution we need to reset the CED values so they don't interfere with the next execution
    for word in cedwords:
        CED[word] = 0
Example #16
 def parse_topology(generate_json):
     """"generate JSON file for visualization if generate_json == True"""
     
     tree = ET.parse("abilene-TM" + os.sep + "topo" + os.sep + "Abilene-Topo-10-04-2004.xml")
     root = tree.getroot()
     
     topology = root.find("topology")
     
     node_list = []
     link_list = []
     graph = Graph(node_list, link_list)
     
     if generate_json:
         f = open("data.json", "w")
         output = {"nodes":{}, "links":[]}
     
     for node in topology.iter("node"):
         new_node = Node(node.attrib["id"])
         node_list.append(new_node)   
         
         if generate_json:
             location = node.find("location")
             new_node.set_location(float(location.attrib["latitude"]),
                                   float(location.attrib["longitude"]))
             output["nodes"][new_node.node_id] =\
                 (float(location.attrib["latitude"]), 
                  float(location.attrib["longitude"])) 
                     
     for link in topology.iter("link"):
         link_id = link.attrib["id"]
         link_from = graph.find_node(link.find("from").attrib["node"])
         link_to = graph.find_node(link.find("to").attrib["node"])
         bw = int(link.find("bw").text)
         new_link = Link(link_id, link_from, link_to, bw)
         link_list.append(new_link)
         
         if generate_json:
             output["links"].append(\
                 ((link_from.lat, link_from.lng), 
                  (link_to.lat, link_to.lng)))
             
     igp = root.find("igp").find("links")
     for link in igp.iter("link"):
         link_id = link.attrib["id"]
         link_obj = graph.find_link_by_id(link_id)
         if link_obj != None:
             link_obj.metric = float(link.find("static").find("metric").text)
         
     if generate_json:    
         json.dump(output, f)            
         f.close()
         
     return graph
Example #17
    def drawWithGraph(window, audioData):
        global p
        graph = Graph( Point(75, 50), 50)
        graph.draw(window)
        #p = None
        for d in audioData:
            if p:
                p.undraw()

            bx, by = normalize(d[0], d[1], d[2])

            p =  graph.createPoint(bx, by)
            p.draw(window)
Example #18
class Control:
    def __init__(self):
        print "Control __init__"
        self.Access = Access()
        self.Graph = Graph()
        self.Player = Player()
        #self.Xml = Xml()
        self.Access.classAddresses(self.Graph,self.Player,self)
        self.Graph.classAddresses(self.Access,self.Player,self)
        self.Player.classAddresses(self.Graph,self.Access,self)
        #self.Xml.classAddresses(self.Graph,self.Access,self.Player,self)
        self.Access.start()
        self.Graph.getCode()
Example #19
 def parse(self):
     path = raw_input("Bitte Pfad zur csv-Datein angeben: ")
     fobj = open(path, "r")
     for line in fobj:
         line = line.strip()
         stratum = line.split(";")
         later = set(stratum[5].split(", "))
         earlier = set(stratum[6].split(", "))
         equal = set(stratum[7].split(", "))
         partof = set(stratum[8].split(", "))
         nodes[stratum[0]] = Node(stratum[0], stratum[1], stratum[2], stratum[3], stratum[4], later, earlier, equal, partof)
     fobj.close()
     graph = Graph(nodes)
     graph.printGraph() 
Example #20
 def Create(self, theGraphEditor):
     theGraphEditor.config(cursor="watch")
     
     dial = TreeDialog(theGraphEditor, 0, "Create Complete Tree")
     if dial.result is None:
         theGraphEditor.config(cursor="")
         return
         
     degree=dial.result[0]
     height=dial.result[1]
     direction=dial.result[3]
     layout=dial.result[4]
     
     G=Graph()
     G.directed=direction
     
     nodes={}
     nodes[0]=[]
     G.AddVertex()
     nodes[0].append(G.vertices[0])
     for h in range(0,height):
         nodes[h+1]=[]
         for v in nodes[h]:
             for d in range(0,degree):
                 new_v=G.AddVertex()
                 if direction==0: 
                     G.AddEdge(v,new_v)
                 else:
                     if random.randint(0,1):
                         G.AddEdge(v,new_v)
                     else:
                         G.AddEdge(new_v,v)
                 nodes[h+1].append(new_v)
                 
     if layout==0:
         if RandomCoords(G):
             DrawNewGraph(theGraphEditor,G,direction) 
     elif layout==1:
         if CircularCoords(G):
             DrawNewGraph(theGraphEditor,G,direction) 
     elif layout==2:
         if TreeCoords(G,G.vertices[0],"vertical"):
             DrawNewGraph(theGraphEditor,G,direction) 
     else:
         if BFSTreeCoords(G,G.vertices[0],"forward"):
             DrawNewGraph(theGraphEditor,G,direction) 
             
     theGraphEditor.config(cursor="")
Example #21
 def empty_tour(self):
     '''
     Clears all the edges of the current tour
     '''
     self.best_tour = Graph(self.vertices)   # resets self.best_tour to an empty graph containing vertices
     self.best_label = []    # empties self.best_label
     self.best_coordinates = []   # empties self.best_coordinates
Example #22
 def __init__(self):
     self.graph = Graph()
     self.start_node_index = None
     self.goal_node_index = None
     self.obstacles = None
     self.shortest_path = None
     self.wall = None
Example #23
    def __init__(self, filename, k, errorcorrect=False):

        #loads file
        reads = Loader.load(filename)
        #gets graph
        self.graph = Graph(reads, k, errorcorrect)
        self.k = k
Example #24
    def generate_rand_graph(self):
        '''
        Pre: A list of labels and coordinates
        Post: Updates self.tour with a randomly generated graph
            and updates self.costOfTour with the cost of that generated tour
        '''
        copyOfLabels = copy.deepcopy(self.labels)   # creates a copy of labels
        copyofCoordinates = copy.deepcopy(self.coordinates) # creates a copy of coordinates

        self.tour_edges = []    # reset tour edges
        self.tour_vertices = [] # reset tour vertices

        i = random.randrange(0, len(copyOfLabels))  # generate a random first city
        start_city = Vertex(copyOfLabels.pop(i))
        start_city.pos = copyofCoordinates.pop(i)
        previous_city = start_city  # assign start city to previous city

        self.tour_vertices.append(start_city)   # append it to tour vertices

        for x in range(len(copyOfLabels)):  # find the next random number
            i = random.randrange(0, len(copyOfLabels))
            v = Vertex(copyOfLabels.pop(i)) # pop that vertex at that random number
            v.pos = copyofCoordinates.pop(i)    # pop out the coordinate for that vertex and create that new vertex
            self.tour_vertices.append(v)    # append it to the list
            e = Edge(previous_city, v)  # create a new edge between the previous city and new randomly generated city
            self.tour_edges.append(e)   # append the edge to edge list
            self.costOfTour += self.cal_distance_of_edge(e) # update the cost of the tour for travelling to the new city
            previous_city = v   # assign the new city to previous city

        e = Edge(previous_city, start_city)     # join the last edge to go back to the start city
        self.tour_edges.append(e)
        self.tour = Graph(self.tour_vertices, self.tour_edges)
        self.costOfTour += self.cal_distance_of_edge(e) # update the cost of the tour for going back to the start city
Example #25
 def init_tests(self):
     f = open('data.json', 'r')
     parsed_JSON = json.loads(f.read())
     self.graphs = Graph()
     self.graphs.construct_nodes(parsed_JSON['metros'])
     self.graphs.contstruct_edges(parsed_JSON['routes'])
     self.stats = StatInfo()
Example #26
def moyenne50(n,p,algo) :
    if algo == 0:
        print("algoNaif")
    elif algo == 1 :
        print("algoDSATUR")
    elif algo == 2 :
        print("algoWelshPowel")
    temps = []
    couleurs = []
    for i in range(50) :
        graph = g.graphAlea(n,5,p)
        if algo == 0 :
            start_time = time.time()
            graph.algoNaif()
        elif algo == 1 :
            start_time = time.time()
            graph.algoDSATUR()
        elif algo == 2 :
            start_time = time.time()
            graph.algoWelshPowel()
        interval = time.time() - start_time
        temps.append(interval)
        couleurs.append(graph.totalColor())
    sommeT = sum(temps)
    sommeC = sum(couleurs)
    return (float(sommeT)/50.,float(sommeC)/50)
Example #27
def checkConnected(d, g):

	totalFaceCount = getNumFaces(g)
	queue = []

	#make faceGraph
	faceGraph = []
	y = 0
	while y < len(g):
		row = g[y]		
		for x in row:
			faceGraph.append((Face(int(x), y)))
		y += 1

	vg = makeVertexGraph(faceGraph)
	graph = Graph(faceGraph, vg)
		
	queue.append(graph.getFaceGraph()[0])
	visited = set()

	while len(visited) < len(graph.getFaceGraph()):
		face = queue.pop(0)
		while face in visited:
			#print "in while"
			if len(queue) > 0:
				face = queue.pop(0)
			else:
				break
		#this means that the face is visited and the graph is disconnected
		if face in visited:
			break

		nextGroup = face.getNeighbors()
		if len(nextGroup) == 0:
			break
		else:
			queue.extend(nextGroup)

		#print "stats"
		#print len(graph.getFaceGraph()), len(queue), len(visited)
		visited.add(face)

		#print "graph"
		#print graph
		#print "-------------------"
	return len(graph.getFaceGraph()) == len(visited)
Example #28
def readGraph(filename):
    noNodes = 0
    with open(filename) as f:
        for line in f:
            if (noNodes == 0):
                noNodes = int(line.split()[0])
                graph = Graph(noNodes)
            else:
                data = []
                for x in line.split():
                    data.append(int(x))
                if len(data) == 3:
                    edge = WeightedEdge(data[0], data[1], data[2])
                elif len(data) == 4:
                    edge = WeightedDelayedEdge(data[0], data[1], data[2], data[3])
                graph.add(edge)
    return graph
Example #29
  def __call__ (self, reduced_parameter_index):
    """
    The query returns a graph which contains the poset of gene parameter indices 
    corresponding to adjusting the parameter by changing the logic parameter associated 
    with the gene being queried.
    The graph is as follows: 

    * The vertices of the graph are named according to Gene Parameter Index (gpi). 
    * There is a directed edge p -> q iff p < q and the associated logic parameters are adjacent.
    * The graph is labelled with pairs (Parameter index, Morse graph index).

    In addition the following extra structures are provided:

    * `graph.data` is a dictionary from gene parameter index to (hex code, parameter index, morse graph index)
    * `graph.mgi` is a function which accepts a gpi and returns the associated Morse graph index
    * `graph.num_inputs` is the number of network edges which are inputs to the gene associated with the query
    * `graph.num_outputs` is the number of network edges which are outputs of the gene associated with the query
    * `graph.essential` is a boolean-valued function which determines if each vertex corresponds to an essential parameter node
    """
    c = self.database.conn.cursor()
    c.execute("select GeneParameterIndex,ParameterIndex,MorseGraphIndex from " + self.gene + " where ReducedParameterIndex=?" , (str(reduced_parameter_index),))
    gene_index = self.database.network.index(self.gene)
    factorgraph = self.database.parametergraph.factorgraph(gene_index)
    Q = { row[0] : (factorgraph[row[0]],  row[1], row[2]) for row in c }
    vertices = set(Q.keys())
    edges = [ (gpi1, gpi2) for gpi1 in Q.keys() for gpi2 in Q.keys() if isAdjacentHexcode(Q[gpi1][0], Q[gpi2][0]) ]
    graph = Graph(vertices,edges)
    graph.data = Q
    graph.mgi = lambda gpi : graph.data[gpi][2]
    graph.label = lambda gpi : str(graph.data[gpi][1]) + ':' + str(graph.data[gpi][2])
    graph.num_inputs = len(self.database.network.inputs(gene_index))
    graph.num_outputs = len(self.database.network.outputs(gene_index))
    graph.essential = lambda gpi : essential(graph.data[gpi][0],graph.num_inputs,graph.num_outputs)
    return graph
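
A minimal consumption sketch for the graph object described in the docstring above, using only the attributes this example sets (graph.data, graph.label, graph.mgi, graph.essential); how the query object itself is constructed is not shown here, so that part is left as an assumption.

# Hypothetical usage; 'query' stands for an instance of the class this __call__ belongs to.
# graph = query(reduced_parameter_index=0)
# for gpi in sorted(graph.data):
#     hex_code, parameter_index, morse_graph_index = graph.data[gpi]
#     print(gpi, graph.label(gpi), graph.mgi(gpi), graph.essential(gpi))
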
Example #30
 def __init__(self, cities=[]):
     self.currentTour = Stack()
     self.bestTour = Stack()
     self.currentCost = 0
     self.bestCost = 10000000000000000000000000000000
     self.interestCities = [Vertex(city) for city in cities]
     self._map = Graph()
     self._visitedPoints = []
     self._visitedCities = []
Example #31
 def test_getMostGross_k(self):
     data = load_data("data.json")
     movies_data = data["Movie"]
     actors_data = data["Actor"]
     graph = Graph()
     graph.setMovieData(movies_data)
     graph.setActorData(actors_data)
     graph.setMovieNames()
     graph.setActorNames()
     graph.getMostGross_k(2)
Example #32
 def __setattr__(self, name, value):
     Graph.SetActionAttr(self.id, **{name: value})
Example #33
 def __init__(self, caption, event, **keywords):
     object.__setattr__(self, "id", Graph.CreateAction())
     Graph.SetActionAttr(self.id, caption=caption, event=event, **keywords)
Example #34
class World(object):
    def __init__(self):

        self.entities = {}
        self.entity_id = 0
        self.obstacles = []
        self.background = pygame.image.load(
            "assets/grass_bkgrd_1024_768.png").convert_alpha()

        self.graph = Graph(self)
        self.generate_pathfinding_graphs("pathfinding_graph.txt")
        self.scores = [0, 0]

        self.countdown_timer = TIME_LIMIT
        self.game_end = False

    # --- Reads a set of pathfinding graphs from a file ---
    def generate_pathfinding_graphs(self, filename):

        f = open(filename, "r")

        # Create the nodes
        line = f.readline()
        while line != "connections\n":
            data = line.split()
            self.graph.nodes[int(data[0])] = Node(self.graph, int(data[0]),
                                                  int(data[1]), int(data[2]))
            line = f.readline()

        # Create the connections
        line = f.readline()
        while line != "paths\n":
            data = line.split()
            node0 = int(data[0])
            node1 = int(data[1])
            distance = (Vector2(self.graph.nodes[node0].position) -
                        Vector2(self.graph.nodes[node1].position)).length()
            self.graph.nodes[node0].addConnection(self.graph.nodes[node1],
                                                  distance)
            self.graph.nodes[node1].addConnection(self.graph.nodes[node0],
                                                  distance)
            line = f.readline()

        # Create the orc paths, which are also Graphs
        self.paths = []
        line = f.readline()
        while line != "":
            path = Graph(self)
            data = line.split()

            # Create the nodes
            for i in range(0, len(data)):
                node = self.graph.nodes[int(data[i])]
                path.nodes[int(data[i])] = Node(path, int(data[i]),
                                                node.position[0],
                                                node.position[1])

            # Create the connections
            for i in range(0, len(data) - 1):
                node0 = int(data[i])
                node1 = int(data[i + 1])
                distance = (
                    Vector2(self.graph.nodes[node0].position) -
                    Vector2(self.graph.nodes[node1].position)).length()
                path.nodes[node0].addConnection(path.nodes[node1], distance)
                path.nodes[node1].addConnection(path.nodes[node0], distance)

            self.paths.append(path)

            line = f.readline()

        f.close()
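
    # The layout of "pathfinding_graph.txt" expected by generate_pathfinding_graphs,
    # as inferred from the parser above; the concrete values are illustrative only,
    # not taken from the original project.
    #
    #   0 100 200       <- node line: node id, then (presumably) x and y coordinates
    #   1 300 250
    #   connections     <- literal section marker
    #   0 1             <- connection line: a pair of node ids
    #   paths           <- literal section marker
    #   0 1             <- each remaining line is one path, a sequence of node ids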

    def add_entity(self, entity):

        self.entities[self.entity_id] = entity
        entity.id = self.entity_id
        self.entity_id += 1

    def remove_entity(self, entity):

        if entity.name == "base":
            self.game_end = True
            self.game_result = TEAM_NAME[1 - entity.team_id] + " wins!"
            self.final_scores = ("Time left - " +
                                 str(int(self.countdown_timer)) +
                                 " (base destroyed)")

        if entity.id in self.entities.keys():
            del self.entities[entity.id]

    def get(self, entity_id):

        if entity_id in self.entities:
            return self.entities[entity_id]

        else:
            return None

    def process(self, time_passed):

        time_passed_seconds = time_passed / 1000.0
        for entity in list(self.entities.values()):
            entity.process(time_passed_seconds)

        # --- Reduces the overall countdown timer
        self.countdown_timer -= time_passed_seconds

        # --- Checks if game has ended due to running out of time ---
        if self.countdown_timer <= 0:
            self.game_end = True

            if self.scores[0] > self.scores[1]:
                self.game_result = TEAM_NAME[0] + " wins!"
                self.final_scores = str(self.scores[0]) + " - " + str(
                    self.scores[1])
            elif self.scores[1] > self.scores[0]:
                self.game_result = TEAM_NAME[1] + " wins!"
                self.final_scores = str(self.scores[1]) + " - " + str(
                    self.scores[0])
            else:
                self.game_result = "DRAW"
                self.final_scores = str(self.scores[0]) + " - " + str(
                    self.scores[1])

    def render(self, surface):

        # draw background and text
        surface.blit(self.background, (0, 0))

        # draw graph if SHOW_PATHS is true
        if SHOW_PATHS:
            self.graph.render(surface)

        # draw all entities
        for entity in self.entities.values():
            entity.render(surface)

        # draw the scores
        font = pygame.font.SysFont("arial", 24, True)

        blue_score = font.render(
            TEAM_NAME[0] + " score = " + str(self.scores[0]), True,
            (0, 0, 255))
        surface.blit(blue_score, (150, 10))

        red_score = font.render(
            TEAM_NAME[1] + " score = " + str(self.scores[1]), True,
            (255, 0, 0))
        surface.blit(red_score, (870 - red_score.get_size()[0], 730))

        # draw the countdown timer
        timer = font.render(
            str("Time left = " + str(int(self.countdown_timer))), True,
            (255, 255, 255))
        w, h = timer.get_size()
        surface.blit(timer,
                     (SCREEN_WIDTH // 2 - w // 2, SCREEN_HEIGHT // 2 - h // 2))

        # game end
        if self.game_end:
            end_font = pygame.font.SysFont("arial", 60, True)

            msg = end_font.render(self.game_result, True, (255, 255, 255))
            w, h = msg.get_size()
            surface.blit(msg, (SCREEN_WIDTH // 2 - w // 2,
                               SCREEN_HEIGHT // 2 - h // 2 - 200))

            msg = end_font.render(self.final_scores, True, (255, 255, 255))
            w, h = msg.get_size()
            surface.blit(msg, (SCREEN_WIDTH // 2 - w // 2,
                               SCREEN_HEIGHT // 2 - h // 2 - 100))

    def get_entity(self, name):

        for entity in self.entities.values():
            if entity.name == name:
                return entity

        return None

    # --- returns the nearest opponent, which is a non-projectile character from the opposing team that is not ko'd ---
    def get_nearest_opponent(self, char):

        nearest_opponent = None
        distance = 0.0

        for entity in self.entities.values():

            # neutral entity
            if entity.team_id == 2:
                continue

            # same team
            if entity.team_id == char.team_id:
                continue

            if entity.name == "projectile" or entity.name == "explosion":
                continue

            if entity.ko:
                continue

            if nearest_opponent is None:
                nearest_opponent = entity
                distance = (char.position - entity.position).length()
            else:
                if distance > (char.position - entity.position).length():
                    distance = (char.position - entity.position).length()
                    nearest_opponent = entity

        return nearest_opponent
Example #35
			graph.placeObstacle(start, (0, 0, 0))

#################################################################################
# Main Functionality
#################################################################################

pygame.init();

screen = pygame.display.set_mode((Constants.WORLD_WIDTH, Constants.WORLD_HEIGHT))
clock = pygame.time.Clock()
sheepImage = pygame.image.load('sheep.png')
dogImage = pygame.image.load('collie.png')
bounds = Vector(Constants.WORLD_WIDTH, Constants.WORLD_HEIGHT)

# Setup the graph
graph = Graph()

# Setup the dog
dog = Player(dogImage, Vector(Constants.WORLD_WIDTH * .5, Constants.WORLD_HEIGHT * .5), 
			 Vector(Constants.DOG_WIDTH, Constants.DOG_HEIGHT), (0, 255, 0), 
			 Constants.DOG_SPEED, Constants.DOG_ANGULAR_SPEED)
print (dog.center)
# Setup the sheep (only 1 for now...)
herd = []
sheep = Sheep(sheepImage, Vector(randrange(int(bounds.x * .4), int(bounds.x * .6)), randrange(int(bounds.y * .6), int(bounds.y * .8))), Vector(Constants.DOG_WIDTH, Constants.DOG_HEIGHT), (0, 255, 0), Constants.SHEEP_SPEED, Constants.SHEEP_ANGULAR_SPEED)
#sheep = Sheep(sheepImage, Vector(100,200), Vector(16,32), (0,255,0), 5, 5)
herd.append(sheep)

# Setup the gates and obstacles
buildGates(graph)
buildObstacles(graph)
Example #36
        if not start_vertex:
            # choose a vertex from the graph as a starting point
            start_vertex = vertices[0]
        vertices_encountered.add(start_vertex)
        if len(vertices_encountered) != len(vertices):
            for vertex in gdict[start_vertex]:

from graph2 import Graph

g = { "a" : ["d"],
      "b" : ["c"],
      "c" : ["b", "c", "d", "e"],
      "d" : ["a", "c"],
      "e" : ["c"],
      "f" : []
    }

graph = Graph(g)
print(graph)

for node in graph.vertices():
    print(graph.vertex_degree(node))

print("List of isolated vertices:")
print(graph.find_isolated_vertices())

print("""A path from "a" to "e":""")
print(graph.find_path("a", "e"))

print("""All pathes from "a" to "e":""")
print(graph.find_all_paths("a", "e"))

print("The maximum degree of the graph is:")
Example #37
def find(data, i):
    if i != data[i]:
        data[i] = find(data, data[i])
    return data[i]


def union(data, i, j):
    pi, pj = find(data, i), find(data, j)
    if pi != pj:
        data[pi] = pj


def connected(data, i, j):
    return find(data, i) == find(data, j)

if __name__ == "__main__":
    # G = nx.Graph()
    # G.add_edge(0, 1, weight=4)
    # G.add_edge(1, 2, weight=4)
    # G.add_edge(1, 3, weight=2)
    # G.add_edge(3, 2, weight=3)
    # G.add_edge(2, 4, weight=4)
    # G.add_edge(4, 5, weight=10)
    # mst = minimum_spanning_edges(G)
    # edgelist = list(mst)
    # print(edgelist)
    G = Graph.build_G1()
    start = time.clock()
    minimum_spanning_edges(G)
    print(time.clock()-start)
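
The find/union/connected helpers in this example form a standard union-find (disjoint-set) structure with path compression; a small check, reusing those three functions with made-up elements, might look like this:

data = list(range(5))           # five singleton sets: each index starts as its own parent
union(data, 0, 1)               # merge the sets containing 0 and 1
union(data, 1, 2)               # merge the sets containing 1 and 2
print(connected(data, 0, 2))    # True: 0 and 2 now share a root (path compression flattens the chain)
print(connected(data, 3, 4))    # False: 3 and 4 were never unioned
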
Example #38
edge_weight = lambda: random.random()
print "Ratio of largest to smallest edge weight = infinity.\n"

#
# Do the testing for grid graphs.
#

print "GRID GRAPHS:"
for size in range(20, 101, 20):

    # Make the edges for a rectangular grid graph.
    num_vertices, edges =\
    graph_generator.rectangular_grid( size, size, edge_weight )

    # Make a graph from the edges.
    graph = Graph()
    graph.make(num_vertices, edges)

    # Compute the shortest path tree with vertex 0 as the source.
    print "\nFor the", size, "x", size, "rectangular grid graph:",\
          len( graph.vertices ), "vertices,",\
          len( graph.edges ), "edges."
    ideal, actual = graph.determined_vertices(0)
    print "ideal fraction = ", ideal, " actual fraction = ", actual

#
# Do the testing for sparse graphs.
#

print "\n\nSPARSE GRAPHS:"
for size in range(200, 1001, 200):
Example #39
#
# newgraph.addedge('A', 'B', 20)
# newgraph.addedge('A', 'C', 10)
# newgraph.addedge('A', 'E', 30)
# newgraph.addedge('B', 'F', 6)
# newgraph.addedge('B', 'D', 6)
# newgraph.addedge('C', 'F', 5)
# newgraph.addedge('C', 'G', 4)
#
# newgraph.addedge('G', 'E', 12)
# newgraph.addedge('F', 'D', 10)
# newgraph.addedge('E', 'D', 13)
# newgraph.createVisitedNodes()
# newgraph.UCS('A', 'D')

newgraph = g.Graph('A', True)
newgraph.addvertex('B')
newgraph.addvertex('C')
newgraph.addvertex('D')
newgraph.addvertex('E')
newgraph.addvertex('F')
newgraph.addvertex('L')
newgraph.addvertex('O')
newgraph.addvertex('M')
newgraph.addvertex('J')
newgraph.addvertex('H')
newgraph.addvertex('I')
newgraph.addvertex('K1')
newgraph.addvertex('K2')
newgraph.addvertex('K3')
newgraph.addedge('A', 'B', 10)
Example #40
 def setUp(self):
     self.graph = Graph.Graph()
Example #41
    return lS


def draw(G, lColor):
    fig = pylab.figure()
    nx.draw(G,
            node_color=lColor,
            pos=nx.get_node_attributes(G, 'Position'),
            with_labels=True)
    fig.canvas.draw()
    pylab.draw()
    pause(2)
    pylab.close(fig)


mtk = makeMatrix("venv/mat.txt")
lS = makeList("venv/test.txt")
# lG = []
G = Graph.Graph(lS, mtk)
pylab.ion()
print(G.listValue)
start = G.listValue[0]
goal = G.listValue[4]
T, sol = aBintang(G, start, goal)
print(T)
print(sol)
dis = 0
for i in range(0, len(sol) - 1):
    dis += G.getDistance(sol[i], sol[i + 1])
print("Distance from " + start + " to " + goal + " = ", dis)
Example #42
class Robot:

    def __init__(self, sensors, motion, initialPosition, name):
        self.sensors = sensors
        self.motion = motion
        # initialize the graph
        self.graph = Graph()
        self.name = name

        # create a first node for the initial position
        node = Node(initialPosition, "position", 0)
        node.robot = self.name
        self.graph.addNode(node)

        # add a prior
        edge = PriorEdge(initialPosition, node, np.diag([1e-20,1e-20,1e-20]))
        self.graph.addEdge(edge)

        # keep track of the most recent position for dead reckoning
        self.pos = initialPosition
        self.posNode = node # node of the current position
        

    def move(self, cmd):
        """create a new node and edge with dead reakoned initial value"""
        nextPos = self.motion.move(self.pos, cmd)
        # create a new node for the next position
        # the descriptor is the index in time
        nextPosNode = Node(nextPos, "position", self.posNode.descriptor + 1)
        nextPosNode.robot = self.name
        self.graph.addNode(nextPosNode)
        # create a new edge between them
        edge = MotionEdge(self.motion, cmd, self.posNode, nextPosNode, "motion")
        self.graph.addEdge(edge)
        # update internal state
        self.pos = nextPos
        self.posNode = nextPosNode


    def simSense(self, simMap, simPos):
        """
        Simulate sensing landmarks on a map. The simulator gives the
        robot's actual position.
        Create a new node for the landmark if necessary, with a dead
        reckoned initial value with respect to the position node.
        """

        for sensor in self.sensors:
            sensorObsverations = sensor.simSense(simPos, simMap)

            for lmObs in sensorObsverations:
                # check if this landmark has been observed before
                lmNode = self.graph.getNodeOfTypeAndDescriptor(lmObs['sensorType'], lmObs['descriptor'])
                if lmNode == None: 
                    # if this landmark hasn't been observed before
                    # create a new node
                    lmPos = sensor.deadReckon(self.pos,lmObs['obs'])
                    # print self.pos
                    # print lmPos
                    # print lmObs['obs']
                    # wait()
                    lmNode = Node(lmPos, lmObs['sensorType'], lmObs['descriptor'])
                    self.graph.addNode(lmNode)
                else:
                    # update landmark guess as the average
                    lmPos = sensor.deadReckon(self.pos,lmObs['obs'])
                    lmNode.value = (lmNode.value + lmPos)/2.
                # create an edge between the nodes
                edge = ObservationEdge(sensor, lmObs['obs'], self.posNode, lmNode, "observation")
                self.graph.addEdge(edge)


    def reset(self):
        # initial position
        node = self.graph.nodes[0]
        # prior
        edge = self.graph.edges[0]

        self.posNode = node
        self.pos = node.value
        self.graph = Graph()
        self.graph.addNode(node)
        self.graph.addEdge(edge)


    def trajectory(self):
        positions = self.graph.getNodesOfType("position")
        sortedPos = sorted(positions, key=lambda x: x.descriptor)
        traj = map(lambda x: x.value, sortedPos)
        return array(traj)

    def trajectoryXY(self):
        positions = filter(lambda x: x.nodeType == "position" and x.robot == self.name, self.graph.nodes)
        sortedPos = sorted(positions, key=lambda x: x.descriptor)
        traj = map(lambda x: x.value, sortedPos)
        traj = array(traj)
        return traj[:, 0:2]  # hack off the angle

    def position(self):
        traj = self.trajectory()
        pos = traj[-1]
        return pos

    def positionXY(self):
        traj = self.trajectory()
        pos = traj[-1]
        return pos[0:2]
Example #43
 def test_getPrintAllActorYears(self):
     data = load_data("data.json")
     movies_data = data["Movie"]
     actors_data = data["Actor"]
     graph = Graph()
     graph.setMovieData(movies_data)
     graph.setActorData(actors_data)
     graph.setMovieNames()
     graph.setActorNames()
     graph.getPrintAllActorYears()
Example #44
 def test_printActorsInYear(self):
     data = load_data("data.json")
     movies_data = data["Movie"]
     actors_data = data["Actor"]
     graph = Graph()
     graph.setMovieData(movies_data)
     graph.setActorData(actors_data)
     graph.setMovieNames()
     graph.setActorNames()
     graph.printActorsInYear(2018)
Example #45
        Func.To = 6.28318530717

        if self.CircleItem:
            Graph.FunctionList[Graph.FunctionList.index(
                self.CircleItem)] = Func
        else:
            Graph.FunctionList.append(Func)
        Graph.Redraw()
        self.Close()


def execute_action(action):
    d = CircleDialog()
    d.ShowModal()


def OnEdit(Item):
    if "CircleExample" in Item.PluginData:
        d = CircleDialog(Item)
        d.ShowModal()
        return True


Action = Graph.CreateAction(Caption="Insert circle...",
                            OnExecute=execute_action,
                            Hint="Create circle from center and radius.",
                            ShortCut="Ctrl+Shift+C",
                            IconFile="Circle.bmp")
Graph.AddActionToMainMenu(Action)
Graph.OnEdit.append(OnEdit)
Example #46
    def setCase(self, case_id, act_name, act_timestamp, event_index):
        """
        The core function in the Process class.
        Sets a new case and controls the check point.
        If gpCreation is True, calculates the case metrics and uses
        them to train DenStream, recalculates the Nyquist and releases
        old cases if necessary.
        """
        self._event_count += 1
        self._total_cases.add(case_id)

        # check if case exists and creates one if it doesn't
        index = self.getCase(case_id)
        if index is None:
            self._cases.append(Case(case_id))
            self._cp_cases += 1
            index = self.getCase(case_id)
        # add activity
        self._cases[index].setActivity(act_name, act_timestamp)
        # act_conv = act_name
        act_conv = self.convertAct(act_name)
        self._cases[index]._trace.append(act_conv)
        self._cases[index]._timestamp.append(act_timestamp)
        # reorder list, putting the newest case in the first position
        self._cases.append(self._cases.pop(index))

        current_time = dt.strptime(self._cases[index].getLastTime(),
                                   '%Y/%m/%d %H:%M:%S.%f')

        if ((current_time - self._check_point).total_seconds() > self._th
                and not self._gpCreation):
            """
            Checks the first check point for CDESF initialization
            """
            self.initialiseCDESF(current_time)
        elif self._gpCreation:
            """
            If we are past the first check point, graph distances are calculated
            and DenStream triggered
            """
            gwd, twd = Graph.computeFeatures(self._process_graph,
                                             self._cases[index]._trace,
                                             self._cases[index]._timestamp)

            self._cases[index].setGwd(gwd)
            self._cases[index].setTwd(twd)

            # DENSTREAM
            self._denstream.train(self._cases[index]._point)

            # plots
            if self._gen_plot:
                self.genPlots()

            # metrics
            if self._gen_metrics:
                self.genClusterMetrics()
                self.genCaseMetrics(event_index, index)

            if (current_time - self._check_point).total_seconds() > self._th:
                """
                Check point
                """
                self._cp_count += 1

                if len(self._cases) > self._nyquist:
                    """
                    Recalculates nyquist, releases cases,
                    updates model (merges graphs)
                    """
                    self.delCases()
                    self._nyquist = self._cp_cases * 2

                    tr, ti = self.getList()
                    cp_graph = Graph.createGraph(tr, ti)
                    Graph.mergeGraphs(self._process_graph, cp_graph)

                self._cp_cases = 0

                # metrics
                if self._gen_metrics:
                    self.genPmgMetrics()
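
A hedged sketch of how setCase might be driven from an event stream; only the method signature and the timestamp format ('%Y/%m/%d %H:%M:%S.%f') come from the example above, while the Process construction and the toy events are assumptions.

# Hypothetical replay loop; constructor arguments for the Process class are not shown in the example.
# process = Process(...)
# event_log = [
#     ('case_1', 'Activity A', '2015/01/01 10:00:00.000000'),
#     ('case_1', 'Activity B', '2015/01/01 10:05:00.000000'),
#     ('case_2', 'Activity A', '2015/01/01 10:07:30.000000'),
# ]
# for event_index, (case_id, act_name, act_timestamp) in enumerate(event_log):
#     process.setCase(case_id, act_name, act_timestamp, event_index)
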
Example #47
class MyAppAction:
    algo = [
        "DFS", "BFS", "iterative_deepening", "uniform cost", "best_first",
        "Astar", "iterative_a_star"
    ]
    algorithms = [
        Algorithms.dfs, Algorithms.bfs, Algorithms.iterative_deepening,
        Algorithms.uniform_cost, Algorithms.best_first, Algorithms.a_star,
        Algorithms.iterative_a_star
    ]
    '''
    define the selection of the algorithms, delay, start and finish node;
    specify the stop, play, pause and save buttons and define the methods corresponding to clicks on those buttons
    '''
    def __init__(self, algoSelect: QComboBox,
                 delaySelect: QtWidgets.QDoubleSpinBox, startSelect: QComboBox,
                 endSelect: QComboBox, textarea: QTextBrowser,
                 playbt: QToolButton, stopbt: QToolButton,
                 pausebt: QToolButton, view):
        self.algoSelect = algoSelect
        self.delaySelect = delaySelect
        self.startSelect = startSelect
        self.startSelect.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.endSelect = endSelect
        self.endSelect.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.textarea = textarea
        self.playbt = playbt
        self.stopbt = stopbt
        self.pausebt = pausebt
        self.view = view
        self.setAlgorithmList()
        self.setDelaiList()
        self.playbt.clicked.connect(self.play)
        self.pausebt.clicked.connect(self.pause)
        self.stopbt.clicked.connect(self.stop)
        self.result = False

    '''define the algos list'''

    def setAlgorithmList(self):
        self.algoSelect.addItems(MyAppAction.algo)

    '''define the list of possible delays'''

    def setDelaiList(self):
        self.delaySelect.setValue(1)

    '''display the updates of the graph image according to the specified delay'''

    def display_image(self, img):
        self.view.setPixmap(QtGui.QPixmap(img))
        self.view.show()
        QCoreApplication.processEvents()  # let Qt do its work
        if hasattr(self, 'delay'):
            time.sleep(self.delay)

    '''create the graph and get the data from the xml or txt files'''

    def createGraph(self, name):
        edgesdata = []
        heurisiticdata = []
        d = DataFromFile()
        if ("txt" in name[1]):
            (edgesdata, heurisiticdata) = d.getDataFromFileTXT(name[0])
        if ("xml" in name[1]):
            (edgesdata, heurisiticdata) = d.getDataFromFileXML(name[0])

        self.graph = Graph(display=self.display_image,
                           edgesdict=edgesdata,
                           heuristic=heurisiticdata)

        def cmp_to_key():
            'Convert a cmp= function into a key= function'

            class K(object):
                def __init__(self, obj, *args):
                    self.obj = obj

                def __lt__(self, other):
                    try:
                        return int(self.obj) < int(other.obj)
                    except ValueError:
                        return self.obj < other.obj

            return K

        self.graph.nodes.sort(key=cmp_to_key())
        self.startSelect.clear()
        self.endSelect.clear()
        self.startSelect.addItems(self.graph.nodes)
        self.endSelect.addItems(self.graph.nodes)
        self.endSelect.setCurrentIndex(0)
        self.startSelect.setCurrentIndex(0)
        self.graph.display()
        self.textarea.setText(self.graph.__str__())

    def display_result(self, text):
        self.textarea.setText(self.textarea.toPlainText() + "\n" + text)
        self.result = True

    def saveTrace(self, name):
        if (self.result):
            with open(name[0], 'w') as f:
                f.write(self.textarea.toPlainText())
            self.textarea.setText(self.textarea.toPlainText() +
                                  "\ntrace saved (y)")
        else:
            self.textarea.setText("no trace found")

    def pause(self):
        self.t.pausef()

    def stop(self):
        self.t.killf()
        self.result = False
        self.graph.display()
        self.textarea.setText(self.graph.__str__())
        self.disable_enable(True)

    def play(self):
        self.result = False
        self.disable_enable(False)
        self.textarea.setText(
            self.textarea.toPlainText() + "\n" +
            MyAppAction.algo[self.algoSelect.currentIndex()] + ":")
        self.graph.init()
        self.curAlgo = MyAppAction.algorithms[self.algoSelect.currentIndex()]
        a = self.startSelect.currentIndex()
        b = self.endSelect.currentIndex()
        self.startNode = self.startSelect.currentText()
        self.goleNode = self.endSelect.currentText()
        self.delay = self.delaySelect.value()
        self.t = MyThread(self.display_result, self.curAlgo, self.graph,
                          self.startNode, self.goleNode)
        self.t.start()

    def disable_enable(self, state):
        self.playbt.setEnabled(state)
        self.algoSelect.setEnabled(state)
        self.startSelect.setEnabled(state)
        self.endSelect.setEnabled(state)
        self.delaySelect.setEnabled(state)
Example #48
        if self.CircleItem:
            Graph.FunctionList[Graph.FunctionList.index(
                self.CircleItem)] = Func
        else:
            Graph.FunctionList.append(Func)
        Graph.Redraw()
        self.Close()


def execute_action(action):
    d = CircleDialog()
    d.ShowModal()


def OnEdit(Item):
    if "CircleExample" in Item.PluginData:
        d = CircleDialog(Item)
        d.ShowModal()
        return True


_ = Graph.CreateTranslations(TranslationTexts).gettext
Action = Graph.CreateAction(Caption=_("Insert circle..."),
                            OnExecute=execute_action,
                            Hint=_("Create circle from center and radius."),
                            ShortCut="Ctrl+Shift+C",
                            IconFile="Circle.png")
Graph.AddActionToMainMenu(Action)
Graph.OnEdit.append(OnEdit)
Example #49
from Graph import *
from Matrix import Matrix

graph = Graph()
############################################################
vertexes = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
edges = [
    ("1", 2, "2"),
    ("1", 4, "6"),
    ("1", 12, "7"),  # (<start_node>, <int>, <end_node>)
    ("2", 10, "3"),
    ("2", 9, "7"),
    ("3", 2, "4"),
    ("4", 1, "5"),
    ("6", 11, "5"),
    ("6", 2, "8"),
    ("7", 15, "8"),
    ("8", 9, "5"),
    ("8", 1, "9"),
    ("9", 4, "3"),
    ("9", 7, "4"),
    ("9", 3, "7")
]
graph.vertexes = vertexes
for edge in edges:
    graph.edges.append(Edge(edge[0], edge[2],
                            edge[1]))  # Edge(start, end, cost)
matrix = Matrix(len(graph.vertexes), len(graph.vertexes))

for row in range(len(graph.vertexes)):
    for column in range(len(graph.vertexes)):
Example #50
import Graph
import networkx as nx
import datetime

if __name__ == '__main__':
    g = Graph.ActivityGraph()
    g.ReadEdgeList('HydroElectric.txt')
    # g.GetAttribute('label')
    # g.DrawGraph()
    datestart = input("Enter start date (mmm dd yyyy format): ")
    startdate= datetime.datetime.strptime(datestart, '%b %d %Y')
    g.FindDistCost('a','n', startdate )
    CPL, CPC, CP = g.FindCriticalPath('a','n')
    print("The critical path is: ")
    print(CP)
    enddate = startdate + datetime.timedelta(days = CPL)
    print(enddate.strftime('End date: %b %d %Y'))
    print("Total cost along critical path in thousand INRs is: "+str(CPC))
    duration = input("Enter the number of days since project start: " )
    print("The nodes that are presently awaiting completion are: ")
    print(g.FindStatusReport('a', int(duration)))
Example #51
class CompleteController:
    def __init__(self, plane1, plane2):
        # Two aircraft
        self.plane1 = Aircraft(plane1.getXPos(), plane1.getYPos(),
                               plane1.getXFinal(), plane1.getYFinal())
        self.plane2 = Aircraft(plane2.getXPos(), plane2.getYPos(),
                               plane2.getXFinal(), plane2.getYFinal())

        self.safetyMonitor = SafetyMonitor()  # External safety monitor
        self.graph = Graph(plane1, plane2)  # Empty graph

        self.last1X = True  # Plane1 last moved in the x-direction
        self.last2X = True  # Plane2 last moved in the x-direction

    # Main algorithm
    def run(self):
        # Aircraft in communication zone
        if self.communicationZone():
            # Neither plane's final destination is in other's communication zone
            if not self.plane1Check() and not self.plane2Check():
                if self.plane1.xDistance() != 0 and self.plane2.yDistance(
                ) != 0:
                    self.setXAngle1()
                    self.setYAngle2()
                elif self.plane1.yDistance() != 0 and self.plane2.xDistance(
                ) != 0:
                    self.setYAngle1()
                    self.setXAngle2()
                elif self.plane1.yDistance() != 0 and self.plane2.yDistance(
                ) != 0:  # Head-on, y-direction
                    self.setXAngle2()  # Plane 2 moves out of the way
                elif self.plane1.xDistance() != 0 and self.plane2.xDistance(
                ) != 0:  # Head on, x-direction
                    self.setYAngle2()  # Plane 2 moves out of the way
                self.plane1.advance()
                self.plane2.advance()
            # Plane2's final destination is in plane1's communication zone
            elif not self.plane1Check() and self.plane2Check():
                if self.plane2.angle == 0 or self.plane2.angle == 180:  # X-direction
                    self.setYAngle1()
                else:  # Y-direction
                    self.setXAngle1()
                self.runPlane2()  # Plane2 continues, plane1 moves
                self.plane1.advance()
            # Plane1's final destination is in plane2's communication zone
            elif self.plane1Check() and not self.plane2Check():
                if self.plane1.angle == 0 or self.plane1.angle == 180:  # X-direction
                    self.setYAngle2()
                else:  # Y-direction
                    self.setXAngle2()
                self.runPlane1()  # Plane1 continues, plane2 moves
                self.plane2.advance()

        # Aircraft cannot communicate, run normally
        else:
            if self.plane1.xDistance() != 0 or self.plane1.yDistance() != 0:
                self.runPlane1()
            if self.plane2.xDistance() != 0 or self.plane2.yDistance() != 0:
                self.runPlane2()

        # Send output to safety monitor, add points to graph
        self.safetyMonitor.error(self.plane1, self.plane2)
        self.graph.addPoints(self.plane1.getXPos(), self.plane1.getYPos(),
                             self.plane2.getXPos(), self.plane2.getYPos())
        self.showPlot()

    # Sets angle in x-direction for plane1
    def setXAngle1(self):
        if self.plane1.getXPos() - self.plane1.getXFinal() > 0 or self.plane1.angle == 180:
            self.plane1.setAngle(180)  # Left
        else:
            self.plane1.setAngle(0)  # Right

    # Sets angle in y-direction for plane1
    def setYAngle1(self):
        if self.plane1.getYPos() - self.plane1.getYFinal() > 0 or self.plane1.angle == 270:
            self.plane1.setAngle(270)  # Down
        else:
            self.plane1.setAngle(90)  # Up

    # Sets angle in x-direction for plane2
    def setXAngle2(self):
        if self.plane2.getXPos() - self.plane2.getXFinal() > 0 or self.plane2.angle == 180:
            self.plane2.setAngle(180)  # Left
        else:
            self.plane2.setAngle(0)  # Right

    # Sets angle in y-direction for plane2
    def setYAngle2(self):
        if self.plane2.getYPos() - self.plane2.getYFinal() > 0 or self.plane2.angle == 270:
            self.plane2.setAngle(270)  # Down
        else:
            self.plane2.setAngle(90)  # Up

    # Runs plane1 normally
    def runPlane1(self):
        if self.plane1.xDistance() != 0:
            self.setXAngle1()
            if not self.last1X or self.plane1.yDistance() == 0:
                self.plane1.advance()  # Move in x-direction
                self.last1X = not self.last1X

        if self.plane1.yDistance() != 0:
            self.setYAngle1()
            if self.last1X or self.plane1.xDistance() == 0:
                self.plane1.advance()  # Move in y-direction

    # Runs plane2 normally
    def runPlane2(self):
        if self.plane2.xDistance() != 0:
            self.setXAngle2()
            if not self.last2X or self.plane2.yDistance() == 0:
                self.plane2.advance()  # Move in x-direction

        if self.plane2.yDistance() != 0:
            self.setYAngle2()
            if self.last2X or self.plane2.xDistance() == 0:
                self.plane2.advance()  # Move in y-direction
                self.last2X = not self.last2X

    # Determines if one plane is at its final destination
    def finalDestinations(self):
        return (self.plane1.xDistance() == 0 and self.plane1.yDistance() == 0) and \
               (self.plane2.xDistance() == 0 and self.plane2.yDistance() == 0)

    # Check if planes are within a 2km square of each other
    def communicationZone(self):
        return (abs(self.plane1.getXPos() - self.plane2.getXPos()) <= 2 and
                abs(self.plane1.getYPos() - self.plane2.getYPos()) <= 2)

    # Check if plane1's final position is within plane2's communication zone
    def plane1Check(self):
        return (abs(self.plane1.getXFinal() - self.plane2.getXPos()) <= 2 and
                abs(self.plane1.getYFinal() - self.plane2.getYPos()) <= 2)

    # Check if plane2's final position is within plane1's communication zone
    def plane2Check(self):
        return (abs(self.plane2.getXFinal() - self.plane1.getXPos()) <= 2 and
                abs(self.plane2.getYFinal() - self.plane1.getYPos()) <= 2)

    # Show plot of run
    def showPlot(self):
        self.graph.createCompletePlot()

    def checkSafety(self):
        return self.safetyMonitor.safety()
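The Aircraft, SafetyMonitor, and Graph classes that CompleteController relies on are not included here, so any driver code is only a sketch. Assuming Aircraft takes (x, y, xFinal, yFinal) as in the constructor calls above, the controller could be stepped until both planes arrive roughly like this:

# Hypothetical driver for CompleteController (not part of the original code).
plane1 = Aircraft(0, 0, 10, 10)   # starts at (0, 0), destination (10, 10)
plane2 = Aircraft(10, 0, 0, 10)   # crossing path

controller = CompleteController(plane1, plane2)
steps = 0
while not controller.finalDestinations() and steps < 1000:   # safety cap on steps
    controller.run()              # one time step: move both planes, monitor, plot
    steps += 1
print("Run was safe:", controller.checkSafety())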
Example #52
0
def show_brute_force_a_star(graph):
    for city_name_src in graph.cities:
        print("-----------------------------------------------------------")
        for city_name_dest in graph.cities:
            show_result_all_heuristics(graph, city_name_src, city_name_dest)


def show_result_all_heuristics(graph, city_name_src, city_name_dest):
    city_src = graph.get_city_from_name(city_name_src)
    city_dest = graph.get_city_from_name(city_name_dest)

    way_h0, counter_0 = graph.a_star(city_src, city_dest, heuristic_0)
    way_h1, counter_1 = graph.a_star(city_src, city_dest, heuristic_1)
    way_h2, counter_2 = graph.a_star(city_src, city_dest, heuristic_2)
    way_h3, counter_3 = graph.a_star(city_src, city_dest, heuristic_3)
    way_h4, counter_4 = graph.a_star(city_src, city_dest, heuristic_4)
    show_way("Heuristic 0: ", way_h0, counter_0)
    show_way("Heuristic 1: ", way_h1, counter_1)
    show_way("Heuristic 2: ", way_h2, counter_2)
    show_way("Heuristic 3: ", way_h3, counter_3)
    show_way("Heuristic 4: ", way_h4, counter_4)


if __name__ == "__main__":
    graph = Graph()

    if bruteforce_mode:
        show_brute_force_a_star(graph)
    else:
        city_name_src, city_name_dest = ask_user_input()
        show_result_all_heuristics(graph, city_name_src, city_name_dest)
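The heuristic functions heuristic_0 through heuristic_4 and the a_star implementation are not included in this snippet. Purely as an illustration, assuming a_star calls each heuristic as heuristic(city, city_dest) and that city objects expose x/y coordinates (both assumptions), two typical choices might look like:

import math


def heuristic_0(city, city_dest):
    # Zero heuristic: A* degenerates to Dijkstra (admissible, least informed)
    return 0


def heuristic_1(city, city_dest):
    # Straight-line (Euclidean) distance between the two cities;
    # the x/y attribute names are assumptions, not part of the snippet above
    return math.hypot(city.x - city_dest.x, city.y - city_dest.y)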
Example #53
0
import Graph


graph = Graph.Graph()

graph.extend([Graph.Node(x + 1) for x in range(4)])
graph.add_edge(graph[1], graph[0], 5)
graph.add_edge(graph[0], graph[2], 4)
graph.add_edge(graph[1], graph[2], 6)
graph.add_edge(graph[3], graph[1], 8)
graph.add_edge(graph[2], graph[3], 12)

dists = []
for island in range(1, 4):

    # calculate min dist from server to island
    graph.dijkstra(graph[0], graph[island])

    min_dist = Graph.Graph.INFINITE
    max_dist = 0

    for vertex in graph:
        dist = vertex.get_data()            # read distance result
        if (dist != 0) and (dist < min_dist):
            min_dist = dist
        if (dist != Graph.Graph.INFINITE) and (dist > max_dist):
            max_dist = dist

    dists.append(max_dist - min_dist)
Example #54
0
def main():
    graph = Graph(8, [(0, 2), (2, 3), (2, 1), (2, 6), (3, 0), (4, 2), (5, 2),
                      (5, 7), (6, 1), (6, 7), (7, 4)], True)
    graph.make_adjacency_list()
    print(graph.get_cycles())
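The Graph class behind Example #54 is not shown, so the exact output format of get_cycles() is unknown. As a self-contained illustration of the underlying technique (not the author's implementation), a DFS with vertex coloring reports one cycle per back edge in the same directed graph:

from collections import defaultdict


def find_cycles(n, edges):
    """Return one representative cycle per back edge found by a DFS
    over a directed graph with n vertices (a sketch, not the original API)."""
    adj = defaultdict(list)
    for u, v in edges:
        adj[u].append(v)

    WHITE, GRAY, BLACK = 0, 1, 2
    color = [WHITE] * n
    stack = []          # current DFS path
    cycles = []

    def dfs(u):
        color[u] = GRAY
        stack.append(u)
        for v in adj[u]:
            if color[v] == GRAY:            # back edge -> cycle
                cycles.append(stack[stack.index(v):] + [v])
            elif color[v] == WHITE:
                dfs(v)
        stack.pop()
        color[u] = BLACK

    for u in range(n):
        if color[u] == WHITE:
            dfs(u)
    return cycles


if __name__ == "__main__":
    edges = [(0, 2), (2, 3), (2, 1), (2, 6), (3, 0), (4, 2), (5, 2),
             (5, 7), (6, 1), (6, 7), (7, 4)]
    print(find_cycles(8, edges))   # e.g. [[0, 2, 3, 0], [2, 6, 7, 4, 2]]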
Example #55
0
class SOM:
    def __init__(self,
                 data,
                 connexion_matrices,
                 eps_s=epsilon_start,
                 eps_e=epsilon_end,
                 sig_s=sigma_start,
                 sig_e=sigma_end,
                 ep_nb=epoch_nbr):
        self.epsilon = eps_s
        self.epsilon_stepping = (eps_e - eps_s) / ep_nb

        self.sigma = sig_s
        self.sigma_stepping = (sig_e - sig_s) / ep_nb

        self.data = np.array(data)
        self.vector_list = None
        data_shape = self.data.shape[1]
        data_max = np.max(self.data)
        data_min = np.min(self.data)

        # Initializing the neural grid
        self.nodes = np.empty((neuron_nbr, neuron_nbr), dtype=Neurone)
        for x in range(neuron_nbr):
            for y in range(neuron_nbr):
                self.nodes[x, y] = Neurone(x, y, data_shape, data_min,
                                           data_max, connexion_matrices[x][y])

        # Generating Connexions
        self.global_connections_graph = None
        self.neural_graph = None
        self.neural_adjacency_matrix = None
        self.neural_dist = None
        self.distance_vector = None
        self.refresh_distance_vector = True

        self.generate_global_connections_graph()
        self.neural_graph = self.global_connections_graph.extract_neurons_graph()
        self.compute_neurons_distance()

        if log_graphs:
            self.neural_graph.print_graph()
            print(self.neural_graph.to_string())
            print(self.neural_dist)

    def copy(self):
        return copy.deepcopy(self)

    def generate_global_connections_graph(self):
        self.global_connections_graph = Graph()
        # Creating the links between inputs and outputs
        for x in range(neuron_nbr):
            for y in range(neuron_nbr):
                if y != 0:
                    self.global_connections_graph.add_edge(
                        Edge("No" + str(x) + "," + str(y),
                             "Si" + str(x) + "," + str(y - 1), 0))
                if y != neuron_nbr - 1:
                    self.global_connections_graph.add_edge(
                        Edge("So" + str(x) + "," + str(y),
                             "Ni" + str(x) + "," + str(y + 1), 0))
                if x != neuron_nbr - 1:
                    self.global_connections_graph.add_edge(
                        Edge("Eo" + str(x) + "," + str(y),
                             "Wi" + str(x + 1) + "," + str(y), 0))
                if x != 0:
                    self.global_connections_graph.add_edge(
                        Edge("Wo" + str(x) + "," + str(y),
                             "Ei" + str(x - 1) + "," + str(y), 0))

        for x in range(neuron_nbr):
            for y in range(neuron_nbr):
                for i in range(5):
                    for j in range(5):
                        if self.nodes[x, y].connection_matrix[i, j] != 0:
                            input_vertex = SOM.get_index(
                                i, 'i') + str(x) + ',' + str(y)
                            output_vertex = SOM.get_index(
                                j, 'o') + str(x) + ',' + str(y)
                            e = Edge(input_vertex, output_vertex,
                                     SOM.neurons_only_weight(i, j))
                            self.global_connections_graph.add_edge(e)

    def compute_neurons_distance(self):
        self.neural_adjacency_matrix = self.neural_graph.get_adjacency_matrix()
        self.neural_dist = self.neural_graph.get_all_shortest_paths()
        # /!\ casting inf to int yields a negative value; this is relied on
        # below to mark pairs of neurons with no connection
        self.neural_dist = self.neural_dist.astype(int)
        self.distance_vector = np.empty(np.max(self.neural_dist) + 1,
                                        dtype=float)
        self.refresh_distance_vector = True

    @staticmethod
    def uniform_weight(i, j):
        return 1

    @staticmethod
    def neurons_only_weight(i, j):
        if i == 4 or j == 4:
            return 0.5
        return 0

    @staticmethod
    def get_index(index, io_suffix):  # io_suffix is 'i' (input) or 'o' (output)
        return {
            0: "N" + io_suffix,  # North
            1: "E" + io_suffix,  # East
            2: "W" + io_suffix,  # West
            3: "S" + io_suffix,  # South
            4: "n"  # neuron
        }.get(index)

    def winner(self, vector, distance=dist_quad):
        dist = np.empty_like(self.nodes, dtype=float)
        # Compute the distances between the tested vector and all nodes
        for x in range(neuron_nbr):
            for y in range(neuron_nbr):
                self.nodes[x, y].t += 1
                dist[x, y] = distance(self.nodes[x, y].weight, vector)
        # Return the Best Matching Unit's index
        return np.unravel_index(np.argmin(dist, axis=None), dist.shape)

    def winners(self):
        # datacomp[i] is the index of the winning neuron (BMU) for data sample i
        datacomp = np.zeros(len(self.data), dtype=int)
        for i in range(len(self.data)):
            bmu = self.winner(self.data[i])
            datacomp[i] = bmu[1] * neuron_nbr + bmu[0]
        return datacomp

    def train(self,
              iteration,
              epoch_time,
              vector_coordinates,
              f=normalized_gaussian,
              distance=dist_quad):
        if iteration % epoch_time == 0:
            self.epsilon += self.epsilon_stepping
            self.sigma += self.sigma_stepping
            if psom and iteration > 0:
                self.pruning_neighbors()
            self.refresh_distance_vector = True
        if self.refresh_distance_vector:
            for i in range(len(self.distance_vector)):
                self.distance_vector[i] = f(
                    i / (len(self.distance_vector) - 1), self.sigma)
            if log_gaussian_vector:
                print(self.distance_vector)

        vector = self.data[vector_coordinates]

        # Getting the Best matching unit
        bmu = self.winner(vector, distance)
        self.nodes[bmu].t = 1
        self.updating_weights(bmu, vector)

        return bmu[0], bmu[1]

    def updating_weights(self, bmu, vector):
        for x in range(neuron_nbr):  # Updating weights of all nodes
            for y in range(neuron_nbr):
                dist = self.neural_dist[bmu[1] * neuron_nbr + bmu[0],
                                        y * neuron_nbr + x]
                if dist >= 0:  # a negative distance (inf cast to int above) means no connection
                    self.nodes[x, y].weight += (
                        self.epsilon * self.distance_vector[dist] *
                        (vector - self.nodes[x, y].weight))

    def pruning_neighbors(self):
        for x in range(neuron_nbr - 1):
            for y in range(neuron_nbr - 1):
                self.pruning_check(x, y, x + 1, y)
                self.pruning_check(x, y, x, y + 1)
        self.compute_neurons_distance()

    def pruning_check(self, x1, y1, x2, y2):
        one = y1 * neuron_nbr + x1
        two = y2 * neuron_nbr + x2
        if (self.neural_adjacency_matrix[one, two] != 0
                and self.neural_adjacency_matrix[one, two] != np.inf):
            diff = manhattan_dist(self.nodes[x1, y1].weight,
                                  self.nodes[x2, y2].weight)
            probability = np.exp(
                -1 / omega * 1 /
                (diff * self.nodes[x1, y1].t * self.nodes[x2, y2].t))
            if np.random.rand() < probability:
                print("Removed (", x1, ",", y1, ") - (", x2, ",", y2,
                      ") probability : ", probability)
                self.remove_edges((x1, y1), (x2, y2))

    def remove_edges(self, v1, v2):  # remove_edges((x, y), (x2, y2))
        inp = "n" + str(v1[0]) + ',' + str(v1[1])
        out = "n" + str(v2[0]) + ',' + str(v2[1])
        self.neural_graph.remove_edge(inp, out)
        self.neural_graph.remove_edge(out, inp)

    def create_edges(self, v1, v2):  # create_edges((x, y), (x2, y2))
        inp = "n" + str(v1[0]) + ',' + str(v1[1])
        out = "n" + str(v2[0]) + ',' + str(v2[1])
        self.neural_graph.add_edge(Edge(inp, out, 1))
        self.neural_graph.add_edge(Edge(out, inp, 1))

    def fully_random_vector(self):
        return np.random.randint(np.shape(self.data)[0])

    def unique_random_vector(self):
        return self.vector_list.pop(0)

    def generate_random_list(self):
        self.vector_list = list(range(len(self.data)))
        np.random.shuffle(self.vector_list)

    def compute_mean_error(self, datacomp):
        SOMList = self.get_som_as_list()
        error = np.zeros(len(datacomp))
        for i in range(len(datacomp)):
            error[i] = np.mean(np.abs(self.data[i] - SOMList[datacomp[i]]))
        return np.mean(error)

    def peak_signal_to_noise_ratio(self, datacomp):
        SOMList = self.get_som_as_list()
        error = np.zeros(len(datacomp))
        for i in range(len(datacomp)):
            error[i] = np.mean((self.data[i] - SOMList[datacomp[i]])**2)
        return 10 * np.log10(1 / np.mean(error))

    def get_som_as_map(self):
        result = np.empty((neuron_nbr, neuron_nbr), dtype=np.ndarray)
        for x in range(neuron_nbr):
            for y in range(neuron_nbr):
                result[x, y] = self.nodes[x, y].weight
        return result

    def get_som_as_list(self):
        result = np.empty(neuron_nbr * neuron_nbr, dtype=np.ndarray)
        for x in range(neuron_nbr):
            for y in range(neuron_nbr):
                result[y * neuron_nbr + x] = self.nodes[x, y].weight
        return result

    def set_som_as_list(self, weight_list):
        for x in range(neuron_nbr):
            for y in range(neuron_nbr):
                self.nodes[x, y].weight = weight_list[y * neuron_nbr + x]
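The module-level names used throughout the class (neuron_nbr, the epsilon/sigma schedule constants, epoch_nbr, normalized_gaussian, dist_quad, Neurone, Graph, Edge, psom, omega, and the log_* flags) are defined elsewhere in the project. Assuming they exist with the meanings implied above, a training driver for an already-constructed SOM instance might look like this sketch:

# Hypothetical driver; `som` is an already-constructed SOM instance.
def train_som(som, epoch_nbr):
    """One pass over the data per epoch; epoch_time = len(som.data) so that
    epsilon/sigma are stepped exactly once per epoch (see SOM.train)."""
    epoch_time = len(som.data)
    for epoch in range(epoch_nbr):
        som.generate_random_list()                  # shuffle presentation order
        for step in range(epoch_time):
            iteration = epoch * epoch_time + step
            som.train(iteration, epoch_time, som.unique_random_vector())
    datacomp = som.winners()                        # BMU index for every sample
    return som.compute_mean_error(datacomp), som.peak_signal_to_noise_ratio(datacomp)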
Example #56
0
File: BEW.py Project: 2lcm/B.E.W
    def run(self):
        # define local variables
        fps_clk = pygame.time.Clock()
        user = self.Team1.head.next.val
        ai = self.Team2.head.next.val

        # main routine
        while True:
            # set maximum fps
            fps_clk.tick(MAXFPS)

            # draw map
            now_map = map_img.copy()
            draw_screen([self.gunfire1, self.gunfire2, self.Team1, self.Team2],
                        now_map)

            # draw screen
            self.screen.fill((0, 0, 0))
            self.screen.blit(now_map, (-user.p[0] + 400, -user.p[1] + 300))
            pygame.display.update()

            # handle bullets
            temp_fire = self.gunfire1.head.next
            while temp_fire != self.gunfire1.tail:
                next_fire = temp_fire.next
                if self.out_of_map(temp_fire.val):
                    if not temp_fire.val.move():
                        self.gunfire1.delete(temp_fire)
                else:
                    self.gunfire1.delete(temp_fire)
                temp_fire = next_fire

            temp_fire = self.gunfire2.head.next
            while temp_fire != self.gunfire2.tail:
                next_fire = temp_fire.next
                if self.out_of_map(temp_fire.val):
                    if not temp_fire.val.move():
                        self.gunfire2.delete(temp_fire)
                else:
                    self.gunfire2.delete(temp_fire)
                temp_fire = next_fire

            # handle team1 bullets
            temp_unit = self.Team2.head.next
            while temp_unit != self.Team2.tail:
                cur_unit = temp_unit.val
                temp_fire = self.gunfire1.head.next
                while temp_fire != self.gunfire1.tail:
                    if (cur_unit.p[0] - temp_fire.val.p[0])**2 + (
                            cur_unit.p[1] - temp_fire.val.p[1])**2 < UNIT_RAD2:
                        cur_unit.life -= 1
                        # print('life is :', cur_unit.life)
                        temp_fire = temp_fire.next
                        self.gunfire1.delete(temp_fire.prev)
                    else:
                        temp_fire = temp_fire.next
                temp_unit = temp_unit.next
                if cur_unit.life <= 0:
                    self.Team2.delete(temp_unit.prev)

            # handle team2 bullets
            temp_unit = self.Team1.head.next
            while temp_unit != self.Team1.tail:
                cur_unit = temp_unit.val
                temp_fire = self.gunfire2.head.next
                while temp_fire != self.gunfire2.tail:
                    if (cur_unit.p[0] - temp_fire.val.p[0])**2 + (
                            cur_unit.p[1] - temp_fire.val.p[1])**2 < UNIT_RAD2:
                        cur_unit.life -= 1
                        print('life is :', cur_unit.life)
                        temp_fire = temp_fire.next
                        self.gunfire2.delete(temp_fire.prev)
                    else:
                        temp_fire = temp_fire.next
                temp_unit = temp_unit.next
                if cur_unit.life <= 0:
                    self.Team1.delete(temp_unit.prev)

            # handle events and user unit
            for event in pygame.event.get():
                # when click x button on window
                if event.type == pygame.QUIT:
                    sys.exit()
                # when press the keyboard
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        sys.exit()
                    if event.key == pygame.K_w:
                        user.act[0] = True
                    if event.key == pygame.K_s:
                        user.act[1] = True
                    if event.key == pygame.K_a:
                        user.act[2] = True
                    if event.key == pygame.K_d:
                        user.act[3] = True

                if event.type == pygame.KEYUP:
                    if event.key == pygame.K_ESCAPE:
                        sys.exit()
                    if event.key == pygame.K_w:
                        user.act[0] = False
                    if event.key == pygame.K_s:
                        user.act[1] = False
                    if event.key == pygame.K_a:
                        user.act[2] = False
                    if event.key == pygame.K_d:
                        user.act[3] = False

            # update user attribute
            if user is not None:
                user.atk = pygame.mouse.get_pressed()[0]
                user.look = (user.p[0] - 400 + pygame.mouse.get_pos()[0],
                             user.p[1] - 300 + pygame.mouse.get_pos()[1])

            # update AI attributes
            # we will eventually turn this into a function and make the AI more intelligent
            if ai is not None:
                # ai.atk = not ai.atk
                ai.look = user.p

                arr = [  # waypoint coordinates on the map
                    (126, 112),
                    (125, 525),
                    (563, 523),
                    (569, 113),  # 0 ~ 3
                    (230, 604),
                    (232, 1077),
                    (403, 1075),
                    (396, 607),  # 4 ~ 7
                    (791, 422),
                    (553, 695),
                    (767, 874),
                    (1029, 575),  # 8 ~ 11
                    (794, 85),
                    (698, 247),
                    (986, 416),
                    (1070, 249),
                    (982, 83),  # 12 ~ 16
                    (980, 823),
                    (1040, 1160),
                    (1454, 1088),
                    (1388, 742),  # 17 ~ 20
                    (1167, 290),
                    (1523, 458)  # 21, 22
                ]
                # adjacency list: e[i] holds the waypoint indices adjacent to waypoint i
                e = [[1, 3], [0, 2, 4], [3, 7, 8, 9, 13], [0, 2, 12, 13],
                     [1, 5, 7], [4, 6], [7, 5, 9, 10, 18], [2, 4, 6, 9],
                     [2, 9, 11, 13, 14], [2, 6, 7, 8, 10], [6, 9, 11, 17, 18],
                     [8, 10, 14, 17, 20, 21], [3, 13, 16], [2, 3, 8, 12],
                     [8, 11, 15], [14, 16, 21], [12, 15], [10, 11, 18, 20],
                     [6, 10, 17, 19], [18, 20], [11, 17, 19, 22], [11, 15, 22],
                     [20, 21]]

                d = []
                for i in range(len(arr)):
                    dd = []
                    for j in range(len(e)):
                        if j in e[i]:
                            dd.append(dist(arr[i], arr[j]))
                        else:
                            dd.append(None)
                    d.append(dd)

                if ai.path_find:
                    arr2 = [dist(ai.p, arr[i]) for i in range(len(arr))]
                    arr3 = [dist(user.p, arr[i]) for i in range(len(arr))]
                    start = arr2.index(min(arr2))
                    ai.dest = arr3.index(min(arr3))

                    ai.path = Graph.Graph(d).dijkstra(start, ai.dest)
                    ai.path_status = 0
                    ai.path_find = False

                now_dest = arr[ai.path[ai.path_status]]
                ai.act[0] = ai.p[1] > now_dest[1]
                ai.act[1] = ai.p[1] < now_dest[1]
                ai.act[2] = ai.p[0] > now_dest[0]
                ai.act[3] = ai.p[0] < now_dest[0]
                if (ai.p[0] == now_dest[0] and ai.p[1] == now_dest[1]
                        and ai.path_status != len(ai.path) - 1):
                    ai.path_status += 1

            # move, change direct, attacks
            cur_node = self.Team1.head.next
            while cur_node != self.Team1.tail:
                cur_node.val.update()
                if cur_node.val.atk:
                    new_shot = M_gun()
                    new_shot.p = cur_node.val.p
                    new_shot.direct = cur_node.val.direct
                    self.gunfire1.insert(new_shot)
                cur_node = cur_node.next

            cur_node = self.Team2.head.next
            while cur_node != self.Team2.tail:
                cur_node.val.update()
                if cur_node.val.atk:
                    new_shot = M_gun()
                    new_shot.p = cur_node.val.p
                    new_shot.direct = cur_node.val.direct
                    self.gunfire2.insert(new_shot)
                cur_node = cur_node.next
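The waypoint coordinates arr, the adjacency list e, and the distance matrix d above never change, yet they are rebuilt on every frame of the game loop. A possible refactor (a sketch reusing the snippet's own dist helper and data, not part of the original) builds the matrix once at start-up and hands it to the path finder:

def build_waypoint_matrix(arr, e, dist):
    """Distance matrix for Graph.Graph(d): d[i][j] is the distance between
    waypoints i and j when they are adjacent, otherwise None."""
    return [[dist(arr[i], arr[j]) if j in e[i] else None
             for j in range(len(arr))]
            for i in range(len(arr))]


# built once before the main loop, then:
#     ai.path = Graph.Graph(d).dijkstra(start, ai.dest)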
Example #57
0
 def __getattr__(self, name):
     return Graph.GetActionAttr(self.id)[name]
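The two-line __getattr__ above forwards any unknown attribute lookup to the dictionary returned by Graph.GetActionAttr(self.id). A self-contained sketch of the same delegation pattern, with a hypothetical stand-in lookup table since Graph.GetActionAttr is not shown, is:

class Action:
    _ATTRS = {1: {"name": "move", "cost": 3}}   # stand-in for Graph.GetActionAttr

    def __init__(self, action_id):
        self.id = action_id

    def __getattr__(self, name):
        # only called when normal attribute lookup fails
        return Action._ATTRS[self.id][name]


a = Action(1)
print(a.name, a.cost)   # "move 3", both resolved through __getattr__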
Example #58
0
    clf = LinearRegression(learning_rate=1)
    clf.train(x_train, y_train)

    price_prediction = clf.predict(x_pred)

    df.dropna(inplace=True)
    df['Prediction'] = np.nan

    last_date = pd.to_datetime(dt.iloc[-1])
    last_sec = last_date.timestamp()
    one_day_sec = 86400
    next_sec = last_sec + one_day_sec

    for i in price_prediction:
        next_date = datetime.datetime.fromtimestamp(next_sec)
        next_sec += one_day_sec
        df.loc[next_date] = [np.nan for _ in range(len(df.columns) - 1)] + [i]

    df = pd.DataFrame(df)
    pred_df = df[-120:]
    copy_df = copy_df.set_index('Date')
    pred_df = pd.DataFrame(pred_df['Prediction'])
    return pred_df, copy_df


pred_df, df = calPred('ADANIPORTS')
stkdt.storeData(pred_df)
plt.plot(df, pred_df, "Stock Price Prediction of RELIANCE", 'Date', 'Price',
         'blue')
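Several names in this snippet (clf, df, dt, copy_df, stkdt, plt, LinearRegression) come from elsewhere in the project and are not shown. The core idea of the date loop, appending each predicted price under the next calendar day, can be shown in isolation with made-up data (a sketch, not the original pipeline):

import datetime
import numpy as np
import pandas as pd

df = pd.DataFrame({'Close': [100.0, 101.5, 102.2]},
                  index=pd.date_range('2021-01-01', periods=3))
df['Prediction'] = np.nan

price_prediction = [103.0, 104.1]               # stand-in for clf.predict(x_pred)
next_sec = df.index[-1].timestamp() + 86400     # one day after the last known date

for price in price_prediction:
    next_date = datetime.datetime.fromtimestamp(next_sec)
    next_sec += 86400
    # every existing column stays NaN; only the new 'Prediction' value is set
    df.loc[next_date] = [np.nan for _ in range(len(df.columns) - 1)] + [price]

print(df.tail())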
Example #59
0
 def test_vertices(self):
     g = Graph()
     g.add_edge('a', 'b')
     g.add_edge('b', 'c')
     g.add_edge('c', 'a')
     self.assertEqual(['a', 'b', 'c'], g.vertices())
Example #60
0
def sampling(settings, types_dict, types_dict_c, out, ncounterfactuals, clf,
             n_batches_train, n_samples_train, k, n_input, degree_active):

    argvals = settings.split()
    args = Helpers.getArgs(argvals)

    # Creating graph
    sess_HVAE = tf.Graph()

    with sess_HVAE.as_default():
        # args.model_name: excluded
        tf_nodes = Graph.C_CHVAE_graph(
            args.types_file,
            args.types_file_c,
            learning_rate=1e-3,
            z_dim=args.dim_latent_z,
            y_dim=args.dim_latent_y,
            s_dim=args.dim_latent_s,
            y_dim_partition=args.dim_latent_y_partition,
            nsamples=1000,
            p=2)

    # start session
    with tf.Session(graph=sess_HVAE) as session:
        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()
        print('Initializing Variables ...')
        tf.global_variables_initializer().run()

        # -----------------------------------------------------------------------------------#
        # Apply on training data

        print('Training the CHVAE ...')
        if (args.train == 1):

            start_time = time.time()
            # Training cycle

            loglik_epoch = []
            KL_s_epoch = []
            KL_z_epoch = []
            for epoch in tqdm(range(args.epochs)):
                avg_loss = 0.
                avg_KL_s = 0.
                avg_KL_z = 0.
                samples_list = []
                p_params_list = []
                q_params_list = []
                log_p_x_total = []

                # Annealing of Gumbel-Softmax parameter
                tau = np.max([1.0 - 0.001 * epoch, 1e-3])

                # Randomize the data in the mini-batches
                train_data = out['training'][1]
                train_data_c = out['training'][2]
                random_perm = np.random.permutation(
                    range(np.shape(train_data)[0]))
                train_data_aux = train_data[random_perm, :]
                train_data_aux_c = train_data_c[random_perm, :]

                for i in range(n_batches_train):
                    # Create inputs for the feed_dict
                    data_list = Helpers.next_batch(train_data_aux,
                                                   types_dict,
                                                   args.batch_size,
                                                   index_batch=i)  # DONE
                    data_list_c = Helpers.next_batch(train_data_aux_c,
                                                     types_dict_c,
                                                     args.batch_size,
                                                     index_batch=i)  # DONE

                    # Create feed dictionary
                    feedDict = {
                        i: d
                        for i, d in zip(tf_nodes['ground_batch'], data_list)
                    }
                    feedDict.update({
                        i: d
                        for i, d in zip(tf_nodes['ground_batch_c'],
                                        data_list_c)
                    })
                    feedDict[tf_nodes['tau_GS']] = tau
                    feedDict[tf_nodes['batch_size']] = args.batch_size

                    # Running VAE
                    _, X_list, loss, KL_z, KL_s, samples, log_p_x, p_params, q_params = session.run(
                        [
                            tf_nodes['optim'], tf_nodes['X'],
                            tf_nodes['loss_re'], tf_nodes['KL_z'],
                            tf_nodes['KL_s'], tf_nodes['samples'],
                            tf_nodes['log_p_x'], tf_nodes['p_params'],
                            tf_nodes['q_params']
                        ],
                        feed_dict=feedDict)

                    # Collect all samples, distribution parameters and logliks in lists
                    if i == 0:
                        samples_list = [samples]
                        p_params_list = [p_params]
                        q_params_list = [q_params]
                        log_p_x_total = [log_p_x]
                    else:
                        samples_list.append(samples)
                        p_params_list.append(p_params)
                        q_params_list.append(q_params)
                        log_p_x_total.append(log_p_x)

                    # Compute average loss
                    avg_loss += np.mean(loss)
                    avg_KL_s += np.mean(KL_s)
                    avg_KL_z += np.mean(KL_z)

                # Concatenate samples in arrays
                s_total, z_total, y_total, est_data = Helpers.samples_concatenation(
                    samples_list)

                # Transform discrete variables back to the original values
                train_data_transformed = Helpers.discrete_variables_transformation(
                    train_data_aux[:n_batches_train * args.batch_size, :],
                    types_dict)
                est_data_transformed = Helpers.discrete_variables_transformation(
                    est_data, types_dict)

                # Create global dictionary of the distribution parameters
                p_params_complete = Helpers.p_distribution_params_concatenation(
                    p_params_list,  # DONE
                    types_dict,
                    args.dim_latent_z,
                    args.dim_latent_s)

                q_params_complete = Helpers.q_distribution_params_concatenation(
                    q_params_list,  # DONE
                    args.dim_latent_z,
                    args.dim_latent_s)

                # Compute mean and mode of our loglik models: these correspond to the estimated values
                loglik_mean, loglik_mode = Helpers.statistics(
                    p_params_complete['x'], types_dict)  # DONE

                # Try this for the errors
                error_train_mean = Helpers.error_computation(
                    train_data_transformed, loglik_mean, types_dict)
                error_train_mode = Helpers.error_computation(
                    train_data_transformed, loglik_mode, types_dict)
                error_train_samples = Helpers.error_computation(
                    train_data_transformed, est_data_transformed, types_dict)

                # Display logs per epoch step
                if epoch % args.display == 0:
                    print_loss(epoch, start_time, avg_loss / n_batches_train,
                               avg_KL_s / n_batches_train,
                               avg_KL_z / n_batches_train)
                    print("")

                # Track the evolution of the training loglik per epoch
                loglik_per_variable = np.sum(np.concatenate(log_p_x_total, 1),
                                             1) / n_samples_train

                loglik_epoch.append(loglik_per_variable)

            # -----------------------------------------------------------------------------------#
            # Apply on test data

            for i in range(1):
                samples_test_list = []
                test_params_list = []
                log_p_x_test_list = []
                data_c_list = []

                test_data_counter = out['test_counter'][1]
                test_data_c_counter = out['test_counter'][2]
                y_test_counter = out['test_counter'][3]
                n_samples_test = test_data_counter.shape[0]

                # Create test minibatch
                data_list = Helpers.next_batch(test_data_counter,
                                               types_dict,
                                               n_samples_test,
                                               index_batch=i)
                data_list_c = Helpers.next_batch(test_data_c_counter,
                                                 types_dict_c,
                                                 n_samples_test,
                                                 index_batch=i)  # DONE

                # Constant Gumbel-Softmax parameter (annealing has finished)
                tau = 1e-3

                # Create feed dictionary
                feedDict = {
                    i: d
                    for i, d in zip(tf_nodes['ground_batch'], data_list)
                }
                feedDict.update({
                    i: d
                    for i, d in zip(tf_nodes['ground_batch_c'], data_list_c)
                })
                feedDict[tf_nodes['tau_GS']] = tau
                feedDict[tf_nodes[
                    'batch_size']] = ncounterfactuals  # n_samples_test

                # Get samples from the generator function (computing the mode of all distributions)
                samples_test, log_p_x_test, test_params, theta_test, normalization_params_test, X, delta_kl = session.run(
                    [
                        tf_nodes['samples_test'], tf_nodes['log_p_x_test'],
                        tf_nodes['test_params'], tf_nodes['theta_test'],
                        tf_nodes['normalization_params'], tf_nodes['X'],
                        tf_nodes['delta_kl']
                    ],
                    feed_dict=feedDict)

                samples_test_list.append(samples_test)
                test_params_list.append(test_params)
                log_p_x_test_list.append(log_p_x_test)
                data_c_list.append(data_list_c)

            # Concatenate samples in arrays
            s_total_test, z_total_test, y_total_test, samples_total_test = Helpers.samples_concatenation(
                samples_test_list)

            # Transform discrete variables back to the original values
            est_samples_transformed = Helpers.discrete_variables_transformation(
                samples_total_test, types_dict)

            # -----------------------------------------------------------------------------------#
            # Find k Attainable Counterfactuals
            print('[*] Find Attainable Counterfactuals...')

            counter_batch_size = 1  # counterfactual batch size (i.e. look for counterfactuals one by one)
            data_concat = []
            data_concat_c = []
            counterfactuals = []
            latent_tilde = []
            latent = []

            search_samples = args.search_samples
            p = args.norm_latent_space

            for i in tqdm(range(ncounterfactuals)):

                s = (k, n_input)  # preallocate k candidate rows, n_input columns
                sz = (k, args.dim_latent_z)
                s = np.zeros(s)
                sz = np.zeros(sz)
                ik = 0  # counter

                l = 0
                step = args.step_size

                x_adv, y_adv, z_adv, d_adv = None, None, None, None

                # scale test observations
                scaled_test, scaler_test = Helpers.standardize(
                    test_data_counter)

                # get one test observation
                data_list = Helpers.next_batch(test_data_counter,
                                               types_dict,
                                               counter_batch_size,
                                               index_batch=i)
                data_list_c = Helpers.next_batch(test_data_c_counter,
                                                 types_dict_c,
                                                 counter_batch_size,
                                                 index_batch=i)
                hat_y_test = np.repeat(y_test_counter[i] * 1,
                                       search_samples,
                                       axis=0)
                test_data_c_replicated = np.repeat(
                    test_data_c_counter[i, :].reshape(1, -1),
                    search_samples,
                    axis=0)
                replicated_scaled_test = np.repeat(scaled_test[i, :].reshape(
                    1, -1),
                                                   search_samples,
                                                   axis=0)

                # get replicated observations (observation replicated nsamples times)
                #replicated_scaled_test = Helpers.replicate_data_list(data_list_scaled, search_samples)
                replicated_data_list = Helpers.replicate_data_list(
                    data_list, search_samples)
                replicated_data_list_c = Helpers.replicate_data_list(
                    data_list_c, search_samples)
                replicated_z = np.repeat(z_total_test[i].reshape(
                    -1, args.dim_latent_z),
                                         search_samples,
                                         axis=0)

                h = l + step
                # counter to stop
                count = 0
                counter_step = 1
                max_step = 500

                while True:

                    count = count + counter_step

                    if count > max_step:
                        sz = None
                        s = None
                        z = z_total_test[i].reshape(-1, args.dim_latent_z)
                        break

                    if degree_active == 1:  # choose all latent features for the search

                        delta_z = np.random.randn(
                            search_samples, replicated_z.shape[1]
                        )  # http://mathworld.wolfram.com/HyperspherePointPicking.html
                        d = np.random.rand(search_samples) * (
                            h - l) + l  # length range [l, h)
                        norm_p = np.linalg.norm(delta_z, ord=p, axis=1)
                        d_norm = np.divide(d, norm_p).reshape(
                            -1, 1)  # rescale/normalize factor
                        delta_z = np.multiply(delta_z, d_norm)
                        z_tilde = replicated_z + delta_z  # z tilde

                    else:

                        delta_z = np.random.randn(
                            search_samples, replicated_z.shape[1]
                        )  # http://mathworld.wolfram.com/HyperspherePointPicking.html
                        d = np.random.rand(search_samples) * (
                            h - l) + l  # length range [l, h)
                        norm_p = np.linalg.norm(delta_z, ord=p, axis=1)
                        d_norm = np.divide(d, norm_p).reshape(
                            -1, 1)  # rescale/normalize factor
                        delta_z = np.multiply(delta_z, d_norm)

                        mask = np.tile(
                            delta_kl[3][0, :] * 1,
                            (search_samples,
                             1))  # only alter most important latent features
                        delta_z = np.multiply(delta_z, mask)

                        z_tilde = replicated_z + delta_z

                    # create feed dictionary
                    feedDict = {
                        i: d
                        for i, d in zip(tf_nodes['ground_batch'],
                                        replicated_data_list)
                    }
                    feedDict.update({
                        i: d
                        for i, d in zip(tf_nodes['ground_batch_c'],
                                        replicated_data_list_c)
                    })
                    feedDict[tf_nodes['samples_z']] = z_tilde
                    feedDict[tf_nodes['tau_GS']] = tau
                    feedDict[tf_nodes['batch_size']] = search_samples

                    theta_perturbed, samples_perturbed = session.run(
                        [
                            tf_nodes['theta_perturbed'],
                            tf_nodes['samples_perturbed']
                        ],
                        feed_dict=feedDict)

                    x_tilde, params_x_perturbed = Evaluation.loglik_evaluation_test(
                        X_list, theta_perturbed, normalization_params_test,
                        types_dict)
                    x_tilde = np.concatenate(x_tilde, axis=1)
                    scaled_tilde = scaler_test.transform(x_tilde)
                    d_scale = np.sum(np.abs(scaled_tilde -
                                            replicated_scaled_test),
                                     axis=1)

                    x_tilde = np.c_[test_data_c_replicated, x_tilde]
                    y_tilde = clf.predict(x_tilde)

                    indices_adv = np.where(y_tilde == 0)[0]

                    if len(indices_adv) == 0:  # no candidate generated
                        l = h
                        h = l + step
                    elif all(s[k - 1, :] == 0):  # fewer than k candidates generated so far

                        indx = indices_adv[np.argmin(d_scale[indices_adv])]
                        assert (y_tilde[indx] != 1)

                        s[ik, :] = x_tilde[indx, :]
                        sz[ik, :] = z_tilde[indx, :]
                        z = z_total_test[i].reshape(-1, args.dim_latent_z)

                        ik = ik + 1  # up the count
                        l = h
                        h = l + step
                    else:  # k candidates generated
                        break

                data_concat.append(np.concatenate(data_list, axis=1))
                data_concat_c.append(np.concatenate(data_list_c, axis=1))
                counterfactuals.append(s)
                latent_tilde.append(sz)
                latent.append(z)

    cchvae_counterfactuals = np.array(counterfactuals)
    return cchvae_counterfactuals
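The perturbation step above draws a Gaussian direction, normalizes it to the chosen lp norm, and rescales it to a radius sampled uniformly from [l, h) (the hypersphere point-picking trick linked in the comments). Isolated, and with hypothetical names since no such helper exists in the original code, the sampler looks like:

import numpy as np


def sample_lp_shell(center, n_samples, low, high, p=2, rng=None):
    """Draw n_samples perturbations of `center` whose lp norm lies in [low, high).

    Sketch of the perturbation used above: random Gaussian direction,
    then rescaled to a radius drawn uniformly from [low, high).
    """
    rng = np.random.default_rng() if rng is None else rng
    dim = center.shape[-1]
    delta = rng.standard_normal((n_samples, dim))          # random directions
    radius = rng.uniform(low, high, size=n_samples)        # target lp radius
    norm = np.linalg.norm(delta, ord=p, axis=1)
    delta = delta * (radius / norm).reshape(-1, 1)         # rescale each row
    return center + delta


# e.g. 5 perturbations of a 2-d latent code at lp distance in [0.5, 1.0)
print(sample_lp_shell(np.zeros(2), 5, 0.5, 1.0))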