Example #1
def graph_from_edgelist(E, directed=False):
  """Make a graph instance based on a sequence of edge tuples.

  Edges can be either of the form (origin, destination) or
  (origin, destination, element). The vertex set is presumed to be those
  vertices incident to at least one edge.

  Vertex labels are assumed to be hashable.
  """
  g = Graph(directed)
  V = set()
  for e in E:
    V.add(e[0])
    V.add(e[1])

  verts = {}  # map from vertex label to Vertex instance
  for v in V:
    verts[v] = g.insert_vertex(v)

  for e in E:
    src = e[0]
    dest = e[1]
    element = e[2] if len(e) > 2 else None
    g.insert_edge(verts[src],verts[dest],element)

  return g
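
A minimal usage sketch (not part of the original snippet; the edge list is made up, and it assumes the Graph class used above is importable):

edge_list = [('a', 'b'), ('b', 'c', 7), ('c', 'a')]   # mix of 2-tuples and 3-tuples
g = graph_from_edgelist(edge_list, directed=True)     # vertices 'a', 'b', 'c' plus three edges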
Example #2
import numpy as np
import random

def NWGraph(nodes, k=4, p=.1):
    """NW (Newman-Watts-style) small-world graph: a ring lattice where each
    node links to its k nearest neighbours, plus random shortcuts added with
    probability p."""
    N = nodes
    if N <= 2 * k or k % 2 != 0:
        raise ValueError('need N > 2*k and an even k')
    half = k // 2
    adj = np.zeros((N, N))
    for i in range(N):
        if i < half or i > N - half - 1:
            # neighbourhood wraps around the ends of the ring
            adj[i, (N + i - half) % N:] = 1
            adj[i, :(i + 1 + half) % N] = 1
        else:
            adj[i, (N + i - half) % N:(i + 1 + half) % N] = 1
        adj[i, i] = 0  # no self-loops

    edges = N * k // 2
    def _selectNodes(edges):
        # yield `edges` random pairs of distinct node indices
        while edges > 0:
            u, v = random.randint(0, N - 1), random.randint(0, N - 1)
            if u != v:
                yield u, v
                edges -= 1
    for u, v in _selectNodes(edges):
        if random.random() <= p:
            adj[u, v] = adj[v, u] = 1
            
    g = Graph()
    for i in range(N):
        g.addNode(i)
    for i in range(N):
        for j in range(N):
            if adj[i, j] == 1:
                g.addEdge(i, j)
    return g
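
The function above appears to follow the Newman-Watts small-world construction: a ring lattice in which every node is wired to its k nearest neighbours, after which N*k/2 candidate node pairs are sampled and turned into shortcuts with probability p. A hedged usage sketch (parameter values are illustrative; it assumes numpy, random and a Graph class with addNode/addEdge, as the snippet itself does):

g = NWGraph(100, k=4, p=0.1)   # 100-node ring lattice with 4 nearest neighbours plus random shortcuts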
Example #3
 def clear(self):
     # super(CandleGraph, self).clear()
     Graph.clear(self)
     self.minname_generators = [self._name_generator_factory('min')]
     self.maxname_generators = [self._name_generator_factory('max')]
     self.barminname_generators = [self._name_generator_factory('barmin')]
     self.barmaxname_generators = [self._name_generator_factory('barmax')]
Example #4
def analyze(name='CA-GrQc'):
	print('name={}'.format(name))
	start_time = time()
	raw_edges = read_graph(name)
	raw_vertices = extract_vertices(raw_edges)
	id_dict = reduce_vertex_id(raw_vertices)
	reduced_edges = set((id_dict[a], id_dict[b]) for (a, b) in raw_edges)
	reduced_vertices = set(id_dict[vertex] for vertex in raw_vertices)
	print('pre-processing done')
	community_size_distro = community_size_distribution(reduced_vertices, reduced_edges)
	print('community_size_distribution done')
	graph = Graph(reduced_edges)
	bins_by_degree = graph.bin_vertices_by_degree()
	print('graph construction done')
	degree_distro = dict((degree, len(bins_by_degree[degree])) for degree in bins_by_degree)
	print('degree_distribution done')
	hop_distro = hop_cumulative_distribution(lazy_transitive_matrix(graph, name))
	print('hop_cumulative_distribution done')
	neighbourhood_densities = dict((degree, mean([neighbourhood_density(vertex, graph) for vertex in bins_by_degree[degree]])) for degree in bins_by_degree)
	print('neighbourhood_densities done')
	max_degeneracies = dict((degree, max(degeneracy(vertex, graph) for vertex in bins_by_degree[degree])) for degree in bins_by_degree)
	print('max_degeneracies done')
	analysis_report = dict([('community_size_distro', community_size_distro),
							('degree_distro', degree_distro),
							('hop_distro', hop_distro),
							('neighbourhood_densities', neighbourhood_densities),
							('max_degeneracies', max_degeneracies)])
	with open(name + '.analysis_report.pickle', mode='wb') as file:
		pickle.dump(analysis_report, file)
	print('elapsed_time={}s'.format(time()-start_time))
Example #5
from random import random  # the bare random() call below needs this import

def randomGraph(n, p):
    g = Graph(n)
    for u in range(n):
        for v in range(u + 1, n):
            if random() <= p:
                g.connect(u, v)
    return g
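
A quick usage sketch for the G(n, p) generator above (illustrative values; Graph(n) and g.connect(u, v) come from the snippet's own Graph class):

g = randomGraph(10, 0.3)   # 10 vertices; each of the 45 possible edges is kept with probability 0.3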
Example #6
def test_has_cycle_two_vertices_linked():
    a = Vertex(Vertex._make_test_vertex())
    b = Vertex(Vertex._make_test_vertex())
    a.add_edge(Edge(b))
    b.add_edge(Edge(a))
    g = Graph([a, b])
    assert g.has_cycle()
Example #7
def file_input_test_data():
    test_data = open(r'C:\Users\Sanmaya Jolly\Documents\trial.txt')
    #test_data = file(r'C:\Users\Sanmaya Jolly\Documents\influential_bloggers_tags_data.txt')
    lineno = 0
    nodes = []
    for tagdata in test_data.readlines():
        tagdata = tagdata.rstrip()
        tags = tagdata.split(',')
        nodes.append(tuple([lineno, tags]))
        lineno += 1
    test_data.close()
    create_clusters(nodes)
    node_data = {}
    for i, tags in nodes:
        for j in range(i + 1, len(nodes)):
            weight = calc_weight(tags, nodes[j][1])
            if weight > 0:
                if i in node_data:
                    node_data[i].append(tuple([j, weight]))
                else:
                    node_data[i] = [(j, weight)]
    graph_data = create_weighted_edges_list(node_data)
    #print graph_data
    g = Graph()
    for node in nodes:
        g.create_node(node[0], node[1])
    g.create_weighted_edges(graph_data)
    return g
Example #8
def buildGraph(wordList):
	d = {}
	g = Graph()
	text = open(wordList,'r')
	for line in text:
		word = line[:-1]
		g.vertices[word] = Vertex(word)
		for i in range(len(word)):
			newKey = word[:i] + '_' + word[i+1:]
			if newKey in d:
				d[newKey].append(word)
			else:
				d[newKey] = [word]
	# print d
	for key in g.vertices:
		# print key
		for bucket in d:
			if key in d[bucket]:
				for thing in d[bucket]:
					# print thing
					if thing != key:
						g.vertices[key].addNeighbor(thing,0)
	return g

# d = buildGraph('expressions.txt')
# print d
Example #9
def test_is_connected_two_doubly_connected():
    a = Vertex(Vertex._make_test_vertex())
    b = Vertex(Vertex._make_test_vertex())
    a.add_edge(Edge(b))
    b.add_edge(Edge(a))
    g = Graph([a, b])
    assert g.is_connected()
Example #10
 def testCopy(self):
     a = Vertex(1)
     b = Vertex(2)
     c = Vertex(3)
     G1 = Graph([a, b, c], [(a, b), (a, c)])
     G2 = G1.copy()
     self.assertEquals(G1, G2) 
Example #11
def test():
    env = Graphenv()
    env.init()
    name1='JJCEX.ACS.PH.03.OAT'
    name2= 'JJCEX.ACS.PH.04.OAT'
    name3 = 'JJCEX.ACS.PH.03.COOLING.VLV'
    name4 = 'JJCEX.ACS.PH.03.STEAM.VLV.2'

    L =[(name1,env.getSensorId(name1)),\
        (name2,env.getSensorId(name2))]
    gr = Graph()



    #print a[0].head()
    expr = "pdata.apply(P.test1,axis=1)"
    a = gr.getRegular(L,'',expr)
    #a = gr.getPredict(L,'x.month == 7','','Ada Boost',4,30,'ada1',True)

    #b = gr.getPredict(L,'x.month == 9','','Ada Boost',4,0,'svm1',False)

    print a

    #a = gr.getCorrelations(L,expr)

    #ex = gr.getMSTF(L,expr)
    #ex = gr.getExpression(L,expr,a,True)
    #print a
    return a
Example #12
def find_best(old_path, new_path, graph_num, print_output=False):
	"""
		This method finds the best of the two input paths for the graph with number graph_num, 
		and returns it.

		:param: old_path - our current answer for this graph
		:param: new_path - the new path to check
		:param: graph_num - the graph file number. For example, for file 121.in, pass in 121
		:param: print_output - boolean that reflects whether you want this function to print information
		:return: the best path of the input paths
	"""
	graph = Graph(open('instances/{0}.in'.format(graph_num)).read())

	new_path_cost = graph.path_cost(new_path)
	old_path_cost = graph.path_cost(old_path)

	if print_output:
		print "New Path: ", new_path, " with cost: ", new_path_cost
		print "Old Path: ", old_path, " with cost: ", old_path_cost

	if not graph.is_valid_hamiltonian(new_path):
		if print_output:
			print "New path is not a valid hamiltonian"
		return old_path
	elif new_path_cost < old_path_cost:
		if print_output:
			print "New path selected"
		return new_path
	else:
		if print_output:
			print "Old path selected"
		return old_path
Example #13
 def load_graph_from_file_prompt(self):
     coordinates = False
     reliability = False
     weight = False
     user_input = input("Is this file a list a 2D vertex coordinates? (y/n): ")
     if user_input.lower().startswith('y'):
         corrdinates = True
     else:
         user_input = input("Does this file include reliability? (y/n): ")
         if user_input.lower().startswith('y'):
             reliability = True
         user_input = input("Does this file include weight? (y/n): ")
         if user_input.lower().startswith('y'):
             weight = True
         if reliability and weight:
             print('Reliability should be placed before weight in your file.')
     user_input = input("Enter the relative path to your file: ")
     if coordinates:
         self.graph = Graph.create_wireless_mesh_graph_from_csv(user_input)
     elif weight and reliability:
         self.graph = Graph.create_reliability_weighted_graph_from_csv(user_input)
     elif reliability:
         self.graph = Graph.create_reliability_graph_from_csv(user_input)
     elif weight:
         self.graph = Graph.create_weighted_graph_from_csv(user_input)
     else:
         self.graph = Graph.create_graph_from_csv(user_input)
Example #14
def rao_min_cut(a_germanet, a_pos, a_neg, a_neut, a_seed_pos,
                a_ext_syn_rels):
    """Extend sentiment lexicons using the min-cut method of Rao (2009).

    @param a_germanet - GermaNet instance
    @param a_pos - set of lexemes with positive polarity
    @param a_neg - set of lexemes with negative polarity
    @param a_neut - set of lexemes with neutral polarity
    @param a_seed_pos - part-of-speech class of seed synsets ("none" for no
      restriction)
    @param a_ext_syn_rels - use extended set of synonymous relations

    @return list of polar terms, their polarities, and scores

    """
    sgraph = Graph(a_germanet, a_ext_syn_rels)
    # partition the graph into subjective and objective terms
    mcs, cut_edges, _, _ = sgraph.min_cut(a_pos | a_neg, a_neut, a_seed_pos)
    print("min_cut_score (subj. vs. obj.) = {:d}".format(mcs),
          file=sys.stderr)
    # remove edges belonging to the min cut (i.e., cut the graph)
    for isrc, itrg in cut_edges:
        if isrc in sgraph.nodes:
            sgraph.nodes[isrc].pop(itrg, None)
    # separate the graph into positive and negative terms
    mcs, _, pos, neg = sgraph.min_cut(a_pos, a_neg, a_seed_pos)
    print("min_cut_score (pos. vs. neg.) = {:d}".format(mcs),
          file=sys.stderr)
    ret = [(inode[0], POSITIVE, 1.) for inode in pos]
    ret.extend((inode[0], NEGATIVE, -1.) for inode in neg)
    return ret
Example #15
    def test_lanczos_optimal_cut_2(self):
        g = Graph(5, [
                        (0, 0, 0.),
                        (1, 1, 0.),
                        (2, 2, 0.),
                        (3, 3, 0.),
                        (4, 4, 0.),

                        (1, 0, 15.1),
                        (2, 0, 14.3),
                        (3, 0, 21.3),
                        (4, 0, 11.1),

                        (2, 1, 17.),
                        (3, 1, 10.2),
                        (4, 1, 4.9),

                        (3, 2, 19.99),
                        (4, 2, 17.1),

                        (4, 3, 11.1)])

        real1, real2 = g.lanczos_optimal_cut()
        min_cut_real = g.calculate_normalized_cut(real1, real2)

        expected1, expected2 = set([0, 1, 3]), set([2, 4])
        min_cut_expected = 1.151324986

        #self.assertAlmostEqual(min_cut_expected, min_cut_real, places=3)

        all_real = set([str(real1), str(real2)])
        all_expected = set([str(expected1), str(expected2)])

        self.assertSetEqual(all_expected, all_real)
Example #16
class SpectralClusteringTestCase(unittest.TestCase):
	def test_spectral_clustering(self):
		self.graph_type = clustering_config.get_graph_type()
		self.vertex_list = clustering_config.get_vertex_list()
		self.edges_list = clustering_config.get_edges_list()
		self.number_of_clusters = clustering_config.get_number_of_clusters()
		self.test_cluster_labels = clustering_config.get_cluster_label()
		self.create_graph()
		self.find_clusters()
		self.assertItemsEqual(self.spc.cluster_labels,self.test_cluster_labels,msg='Spectral Clustering Failure')

	def create_graph(self):
		self.graph = Graph(graph_type=self.graph_type)
		# add each vertex
		for vertex in self.vertex_list:
			self.graph.add_vertex(vertex)
		# add each edge
		for edge in self.edges_list:
			src,dst,weight = edge
			self.graph.add_edge(src,dst,weight)

	def find_clusters(self):
		# find clusters
		self.spc = SpectralClustering(self.graph)
		self.spc.get_clusters(self.number_of_clusters)
Example #17
def test_add_edge():
    """Test add_edge function for nodes in empty graph."""
    from graph import Graph
    new_graph = Graph()
    new_graph.add_edge("monkeybutler", "penguinbutler")
    assert new_graph.graph["monkeybutler"] == {"penguinbutler": 0}
    assert new_graph.graph["penguinbutler"] == {}
Example #18
def my_graph():
    """Fixture for graph."""
    from graph import Graph
    new_graph = Graph()
    new_graph.add_node("monkeybutler")
    new_graph.add_node("penguinbutler")
    return new_graph
Example #19
def weighted_graph():
    from graph import Graph
    weighted_graph = Graph()
    weighted_graph.graph = {'A': {'B': 2, 'C': 3, 'G': 1}, 'B': {'D': 1, 'E': 1},
                            'C': {'D': 3, 'E': 2}, 'D': {'E': 4},
                            'E': {'A': 2, 'F': 3}, 'F': {}, 'G': {'A': 3}}
    return weighted_graph
Example #20
 def testBfs(self):
     s = Vertex('s')
     r = Vertex('r')
     v = Vertex('v')
     w = Vertex('w')
     t = Vertex('t')
     x = Vertex('x')
     u = Vertex('u')
     y = Vertex('y')
     z = Vertex('z')
     vertices = [v,r,s,w,t,x,u,y,z]
     edges = [(s, r), (s, w), (r, v), (r, s), (v, r), (w, s), (w, t), (w, x), (t, w), (t, x), (t, u), (u, t), (u, x), (u, y), (x, w), (x, t), (x, u), (x, y), (y, x), (y, u)]
     g = Graph(vertices, edges)
     #g.printAllEdges()
     #for i in g.vertices:
     #    i.printEdge()
 #        print
     g.bfs(s)
 #    g.printVertices()
     self.assertEquals(s.d, 0)
     self.assertEquals(r.d, 1)
     self.assertEquals(v.d, 2)
     self.assertEquals(w.d, 1)
     self.assertEquals(t.d, 2)
     self.assertEquals(x.d, 2)
     self.assertEquals(u.d, 3)
     self.assertEquals(y.d, 3)
Example #21
def generateWordsGraph(words):
    """generateWordsGraph(words): return
    This function is quick enough for a sequence of 50000 words."""
    d, daw = {}, {}
    g = Graph()
    #g_o = g.o; g_i = g.i # For the faster alternative.

    for w in words:
        g.addNode(w)
        #g_o[w] = {} # Faster alternative.
        alternativeWords = [ w[:i]+"*"+w[i+1:] for i in xrange(len(w)) ]
        daw[w] = alternativeWords
        for wa in alternativeWords:
            if wa in d:
                d[wa].add(w)
            else:
                d[wa] = set([w])

    for w in words: # Probably this can be made faster.
        l = set()
        for wa in daw[w]:
            l.update(d[wa])
        for n in l:
            g._fastAddBiArc(n, w, 1) # 1 is the arc weight. This creates absent nodes too.
            #g_o[n][w] = 1; g_o[w][n] = 1 # Faster alternative.
    return g
Example #22
 def testCut(self):
     a = Vertex('a')
     b = Vertex('b')
     c = Vertex('c')
     d = Vertex('d')
     e = Vertex('e')
     f = Vertex('f')
     g = Vertex('g')
     h = Vertex('h')
     i = Vertex('i')
     vertices = [a, b, c, d, e, f, g, h, i]
     edges = [(a, b), (b, c), (b, h), (c, i), (d, c), (e, d), (f, d), (f, e), (f, c), (g, f), (g, h), (g, i), (h, a), (h, i)]
     G = Graph(vertices, edges, directed = False)
     #weight = [4, 8, 11, 2, 7, 9, 14, 10, 4, 2, 2, 1, 8, 7]
     weight = [4, 8, 11, 2, 7, 9, 14, 10, 4, 2, 1, 6, 8, 7]
     z = dict()
     for x,y in zip(edges, weight):
         z[x] = y    
         z[(x[1], x[0])] = y
     def w(x, y):
         return z[(x, y)]        
     G.cut(a, h, w)
     r1 = set()
     r2 = set()
     for u in G.vertices:
         if u.root == a:
             r1.add(u)
         else:
             r2.add(u)
     self.assertEquals(r1, set([a, b]))
     self.assertEquals(r2, set([h, i, g, c, f, d, e]))
Example #23
    def testSimplified(self):
        a = Vertex('a')        
        b = Vertex('b')        
        c = Vertex('c')        
        d = Vertex('d')        
        e = Vertex('e')        
        f = Vertex('f')        
        g = Vertex('g')        
        h = Vertex('h')        
        vertices = [a, b, c, d, e, f, g, h]
        #edges = [(a, c), (b, a), (d, h), (d, f), (e, a), (a, b), (b, c), (d, c), (c, d), (b, e), (e, f), (b, f), (g, f), (f, g), (c, g), (g, h), (h, h)]    
        edges = [(e, a), (a, b), (b, c), (d, c), (c, d), (b, e), (e, f), (b, f), (g, f), (f, g), (c, g), (g, h), (h, h)]    
        G = Graph(vertices, edges)
        s = G.simplified()
#        for u in s.vertices:
#            print "u.key: {}, u.cc: {}".format(u.key, u.cc)    
#            s.printEdge(u)
        a = Vertex('a')    
        b = Vertex('b')    
        c = Vertex('c')    
        d = Vertex('d')    
        e = Vertex('e')    
        f = Vertex('f')    
        vertices = [a, b, c, d, e, f]
        edges = [(a, b), (b, a), (b, c), (b, d), (c, b), (d, b), (c, e), (b, e), (d, f), (e, f), (f, e)]
        G = Graph(vertices, edges)
        s = G.simplified()
Example #24
    def testComponentGraph(self):
        a = Vertex('a')        
        b = Vertex('b')        
        c = Vertex('c')        
        d = Vertex('d')        
        e = Vertex('e')        
        f = Vertex('f')        
        g = Vertex('g')        
        h = Vertex('h')        
        vertices = [a, b, c, d, e, f, g, h]
        edges = [(e, a), (a, b), (b, c), (d, c), (c, d), (d, h), (b, e), (e, f), (b, f), (g, f), (f, g), (c, g), (g, h), (h, h)]    
        G = Graph(vertices, edges)
        cg = G.component_graph()
#        print
#        for u in cg.vertices:
#            print "u.key: {}".format(u.key)    
#            cg.printEdge(u)
        a = Vertex('a')    
        b = Vertex('b')    
        c = Vertex('c')    
        d = Vertex('d')    
        e = Vertex('e')    
        f = Vertex('f')    
        vertices = [a, b, c, d, e, f]
        edges = [(a, b), (b, a), (b, c), (b, d), (c, b), (d, b), (c, e), (b, e), (d, f), (e, f), (f, e)]
        G = Graph(vertices, edges)
        cg = G.component_graph()
Example #25
def convert_edge_list(edge_list):
    """
        Converts the input edge list
        (it's mandatory that all edges are numbers).
    """
    #result_graph = Graph()
    result_graph = Graph(directed=True)
    parsed_graph = [tuple(entry.strip(' \r\n').split('\t')) for entry in edge_list]
    node_count = int(parsed_graph[0][0])

    # populate the graph's node dict
    input_list = [(node, None) for node in range(0, node_count)]
    result_graph.add_nodes(*input_list)

    # populate the graph's edge dict
    for line in parsed_graph[1:]:
        res_edge, attr = line, None

        if len(line) == 3:
            res_edge = (line[0], line[1])
            attr = EdgeProperty(wgt=[line[2]])

        res_edge = tuple([int(n) for n in res_edge])
        result_graph.add_edges([res_edge, attr])

    return result_graph
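
A hedged sketch of the input format the parser above expects, inferred from the code (first line: node count; every following line: one tab-separated edge, optionally with a weight in the third column). Running it requires the Graph/EdgeProperty classes the snippet relies on:

sample_edge_list = [
    "4\n",          # number of nodes; they are then numbered 0..3
    "0\t1\n",       # unweighted edge 0 -> 1
    "1\t2\t0.5\n",  # weighted edge 1 -> 2 (weight wrapped in an EdgeProperty)
    "2\t3\n",
]
g = convert_edge_list(sample_edge_list)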
Example #26
def convert_node_edge_list(node_edge_list):
    """
        Converts the input node/edge list
        used in p7 for KostenminimalX.txt
        node = nodeNumber, wgt = [Balance, Balance']
        edge = (from, to), wgt = [cost, capacity, flow]
    """
    result_graph = Graph(directed=True)
    parsed_graph = [tuple(entry.strip(' \r\n').split('\t')) for entry in node_edge_list]
    node_count = int(parsed_graph[0][0])

    # populate the graph's node dict with node_balance
    input_list = []
    for i in range(node_count):
        #wgt = Balance, Balance'
        node_atr = NodeProperty(wgt=[float(parsed_graph[i+1][0]), 0])
        input_list.append((i, node_atr))
    result_graph.add_nodes(*input_list)

    # populate the graph's edge dict
    for line in parsed_graph[node_count+1:]:
        res_edge, attr = line, None

        res_edge = (int(line[0]), int(line[1]))
        #wgt = Cost, MaxCapacity, CurrentCapacity
        attr = EdgeProperty(wgt=[float(line[2]), float(line[3]), 0])

        result_graph.add_edges([res_edge, attr])

    return result_graph
Example #27
def check_global_balance(edges, stationary, places=7):
    """
    Checks that the stationary distribution satisfies the global balance
    condition. https://en.wikipedia.org/wiki/Balance_equation

    Parameters
    ----------
    edges: list of tuples
        transitions of the Markov process
    stationary: dict
        the stationary distribution
    places: int
        decimal precision used in the almost-equal check
    """

    g = Graph(edges)

    for s1 in g.vertices():
        lhs = 0.0
        rhs = 0.0
        for s2, v in g.out_dict(s1).items():
            if s1 == s2:
                continue
            lhs += stationary[s1] * v
        for s2, v in g.in_dict(s1).items():
            if s1 == s2:
                continue
            rhs += stationary[s2] * v
        assert_almost_equal(lhs, rhs, places=places)
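
A small worked sketch for the check above, assuming the Graph used here accepts weighted edge triples as g.out_dict/g.in_dict suggest: in a two-state chain where state 0 leaves at rate 0.3 and state 1 at rate 0.6, the stationary distribution is (2/3, 1/3) and both sides of the balance equation equal 2/3 * 0.3 = 1/3 * 0.6 = 0.2:

edges = [(0, 1, 0.3), (1, 0, 0.6)]
stationary = {0: 2 / 3, 1: 1 / 3}
check_global_balance(edges, stationary)   # no assertion error: the chain is balanced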
Example #28
def main(argv):
    _file = "teste04.txt"

    try:
        opts, args = getopt.getopt(argv, 'hf:d', ['help', 'file='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit()
        elif opt == '-d':
            Settings.debug = True
        elif opt in ('-f', '--file'):
            _file = arg

    parser = FileParser(_file)
    content = parser.read_content()

    graph = Graph()
    graph.build(content['vertices'], content['caminho'], content['h'])

    start = graph.get_vertex(content['início'][0][0])
    final = graph.get_vertex(content['final'][0][0])

    final.add_heuristic(final, 0)

    dijkstra = Dijkstra(graph, start, final)
    a_star = AStar(graph, start, final)

    Menu(graph, dijkstra, a_star).run()
Example #29
class StreamEngine:
    def __init__(self, windowSize):
        self.graph = Graph()
        self.queue = MsgQueue()
        self.windowSize = windowSize

    def processPayment(self, payment):
        paytime = payment[2]

        # 1. dequeue old payments that are out of new window
        while not self.queue.isEmpty() and \
              self.queue.minTime() + self.windowSize < paytime:
            oldpayment = self.queue.peek()
            self.graph.delEdge(oldpayment[0], oldpayment[1])
            self.queue.dequeue()

        # 2. enqueue new payment
        if self.queue.isEmpty() or \
            paytime >= self.queue.maxTime() - self.windowSize:
            if not self.queue.isEmpty() and paytime < self.queue.maxTime():
                # time > window min time and < window max time -- out of order
                self.queue.insertRandomItem(payment)
            else:
                # time > window max time
                self.queue.enqueue(payment)
            self.graph.addEdge(payment[0], payment[1])

        # 3. return median degree
        return self.graph.findMedianDegree()
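
A hedged usage sketch for the sliding-window engine above, assuming payments are (sender, receiver, time) tuples as the indexing in processPayment suggests, and that the Graph and MsgQueue helpers it relies on are importable:

engine = StreamEngine(windowSize=60)
median = engine.processPayment(('alice', 'bob', 1000))    # graph now holds the edge alice-bob
median = engine.processPayment(('bob', 'carol', 1010))    # degrees alice=1, bob=2, carol=1 -> median 1
print(median)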
Example #30
class GraphUnitTests(unittest.TestCase):

	def setUp(self):
		from graph import Graph
		from data import Data
		data= Data()
		self.inst = Graph(data)

	def test_produceMain(self):
		from datetime import datetime, timedelta
		import os
		path=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
		self.setUp()
		fromTime=datetime.today()-timedelta(days=100)
		toTime=datetime.today()
		fileName=self.inst.produceMain('temp', (fromTime,toTime))
		self.assertTrue(fileName.find('mainPlot')!=-1)
		self.assertTrue(os.path.exists(path+fileName[1:len(fileName)]))

	def test_produceSmall(self):
		from datetime import datetime, timedelta
		import os
		path=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
		self.setUp()
		fromTime=datetime.today()-timedelta(days=100)
		toTime=datetime.today()
		fileName=self.inst.produceSmall('temp')
		self.assertTrue(fileName.find('smallPlot')!=-1)
		self.assertTrue(os.path.exists(path+fileName[1:len(fileName)]))
Example #31
def wtap_solver(probabilities, weapons, target_values):
    """
    Definition:

    Weapons target assignment problem (WTAP)

    The problem instance has a number of weapons which can be assigned
    to engage targets, each with a success rate of P(x). Targets have values V.
    If a weapon is engaged against a target and is successful, the value is
    reduced to zero. The expected outcome of an engagement (D, T) is thereby

        O = V * (1 - P(x))

    The optimal assignment minimises the total remaining value of the targets.

        min E( O )

    [1].

    Variations:

    - if V is unknown, use v = 1. This maximises the exploitation of the available
    probabilities.

    Solution methods:

    1. Dynamic programming problem.
    2. Alternating iterative auction.


    [1] https://en.wikipedia.org/wiki/Weapon_target_assignment_problem

    Method:

    1. initial assignment using greedy algorithm;
    2. followed by search for improvements.

    ----------------

    :param probabilities: instance of Graph, where the relationship
    between weapons and targets is given as the probability to a
    successful engagement of the device.
    :param weapons: list of devices.
    :param target_values: dict , where the d[target] = value of target.
    :return: tuple: value of target after attack, optimal assignment

    """
    assert isinstance(probabilities, Graph)
    assert isinstance(weapons, list)
    assert isinstance(target_values, dict)

    assignments = Graph()
    current_target_values = sum(target_values.values()) + 1

    improvements = {}
    while True:
        for w in weapons:
            # calculate the effect of engaging in all targets.
            effect_of_assignment = {}
            for _, t, p in probabilities.edges(from_node=w):
                current_engagement = _get_current_engagement(w, assignments)
                if current_engagement != t:
                    if w in assignments and current_engagement is not None:
                        assignments.del_edge(w, current_engagement)
                    assignments.add_edge(w, t, value=probabilities.edge(w, t))
                effect_of_assignment[t] = _damages(probabilities=probabilities,
                                                   assignment=assignments,
                                                   target_values=target_values)

            damage_and_targets = [(v, t) for t, v in effect_of_assignment.items()]
            damage_and_targets.sort()
            best_alt_damage, best_alt_target = damage_and_targets[0]
            nett_effect = current_target_values - best_alt_damage
            improvements[w] = max(0, nett_effect)

            current_engagement = _get_current_engagement(w, assignments)
            if current_engagement != best_alt_target:
                if w in assignments and current_engagement is not None:
                    assignments.del_edge(w, current_engagement)
                assignments.add_edge(w, best_alt_target, probabilities.edge(w, best_alt_target))
            current_target_values = effect_of_assignment[best_alt_target]
        if sum(improvements.values()) == 0:
            break
    return current_target_values, assignments
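
A hedged usage sketch for the solver above (toy numbers; it assumes the Graph class used throughout this listing, with add_edge(node1, node2, value) exactly as the snippet itself calls it). The probability edges encode P(success) per weapon/target pair; for instance a single weapon with P = 0.8 against a target of value 10 leaves an expected residual value of 10 * (1 - 0.8) = 2:

probabilities = Graph()
probabilities.add_edge('w1', 't1', 0.8)
probabilities.add_edge('w1', 't2', 0.4)
probabilities.add_edge('w2', 't1', 0.5)
probabilities.add_edge('w2', 't2', 0.9)
value_left, assignment = wtap_solver(probabilities,
                                     weapons=['w1', 'w2'],
                                     target_values={'t1': 10, 't2': 5})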
Example #32
    def test_simple(self):
        graph = Graph(6)

        graph.add(0, 1, 3)
        graph.add(0, 2, 15)
        graph.add(1, 2, 7)
        graph.add(1, 3, 2)
        graph.add(2, 4, 5)
        graph.add(3, 2, 1)
        graph.add(3, 5, 20)
        graph.add(4, 3, -3)
        graph.add(4, 5, 4)

        marks = floyd_warshall(graph)
        expected_marks = [
            [0, 3, 6, 5, 11, 15],
            [None, 0, 3, 2, 8, 12],
            [None, None, 0, 2, 5, 9],
            [None, None, 1, 0, 6, 10],
            [None, None, -2, -3, 0, 4],
            [None, None, None, None, None, 0],
        ]

        for v_from in xrange(len(graph)):
            self.assertListEqual(marks[v_from], expected_marks[v_from])
Example #33
#Add this point layer to the map object
map.add_children(pt_lyr)

#Add the pollution of each
max_amount = float(all_data['pm2.5'].max())
map.add_child(
    plugins.HeatMap(stationArr,
                    radius=15,
                    min_opacity=0.2,
                    max_val=max_amount,
                    blur=15,
                    max_zoom=10))

map.save('routing_test123.html')

g = Graph()
#Read data
nodes = pd.read_csv('./data/nodes_dataset.csv', encoding='cp1252', sep=",")
edges = pd.read_csv('./data/edges_dataset.csv', encoding='cp1252', sep=",")

#Add nodes to graph
for i in range(len(nodes)):
    x = nodes['x'][i]
    y = nodes['y'][i]
    n = nodes['node'][i]
    pm = nodes['pm'][i]
    g.add_node(n, x, y, pm)

#Add edges to graph
#Add nodes to graph
Example #34
# Aalto University

# Some demo code to print out a lp_solve-compatible encoding for
# the independent set problem for a given graph instance

from __future__ import print_function
from graph import Graph
import sys

if __name__ == "__main__":

    if len(sys.argv) != 2:
        print("Usage: python IndependentSetLP.py <instance>")
    else:
        # Read graph instance from given file
        g = Graph(filename=sys.argv[1])

        # Variables n1, n2, n3... for the nodes
        # n1 = 0 means node 1 is *not* in the independent set
        # n1 = 1 means node 1 *is* in the independent set

        # We want to maximise the size of the independent set
        nodelist = ["n%d" % node.index for node in g.nodes]
        # This will print out the line max: n1 + n2 + n3 + ...;
        line = "max: %s;" % " + ".join(nodelist)
        print(line)

        # Independence constraint: no two nodes chosen may be connected by an edge
        for edge in g.edges:
            print("n%d+n%d <= 1;" % (edge.source, edge.dest))
Example #35
 def __init__(self):
     self.lastID = 0
     self.users = {}
     self.friendships = {}
     self.graph = Graph()
Example #36
def ap_solver(graph):
    """

    ASSIGNMENT PROBLEM

    Definition:

    The problem instance has a number of agents and a number of tasks.
    Any agent can be assigned to perform any task, incurring some cost that may
    vary depending on the agent-task assignment. It is required to perform all
    tasks by assigning exactly one agent to each task and exactly one task to each
    agent in such a way that the total cost of the assignment is minimized.[1]

    Variations:

    - If there are more agents than tasks, the problem can be solved by creating
    "do nothing" tasks with a cost of zero. The assignment problem solver does this
    automatically.

    - If there are more tasks than agents then the problem is a knapsack problem.
    The assignment problem solver handles this case gracefully too.

    Solution methods:

    1. Using maximum flow method.
    2. Using alternating iterative auction.

    [1] https://en.wikipedia.org/wiki/Assignment_problem

    ----------------------------------------------------

    The assignment problem solver expects a bi-partite graph
    with agents, tasks and the value/cost of each task, as links,
    so that the relationship is explicit as:

        value = g.edge(agent 1, task 1)

    The optimal assignment is determined as an alternating auction
    (see Dmitri Bertsekas, MIT) which maximises the value.
    Once all agents are assigned the alternating auction halts.

    :param graph: Graph
    :return: optimal assignment as list of edges (agent, task, value)
    """
    assert isinstance(graph, Graph)
    agents = [n for n in graph.nodes(in_degree=0)]
    tasks = [n for n in graph.nodes(out_degree=0)]

    unassigned_agents = agents
    v_null = min(v for a, t, v in graph.edges()) - 1

    dummy_tasks = set()
    if len(agents) > len(tasks):  # make dummy tasks.
        dummy_tasks_needed = len(agents) - len(tasks)

        for i in range(dummy_tasks_needed):
            task = uuid4().hex
            dummy_tasks.add(task)
            tasks.append(task)
            for agent in agents:
                graph.add_edge(agent, task, v_null)
        v_null -= 1

    unassigned_tasks = set(tasks)
    assignments = Graph()

    while unassigned_agents:
        n = unassigned_agents.pop(0)  # select phase:
        value_and_task_for_n = [(v, t) for a, t, v in graph.edges(from_node=n)]
        value_and_task_for_n.sort(reverse=True)
        for v, t in value_and_task_for_n:  # for each opportunity (in ranked order)
            d = v_null
            for s, e, d in assignments.edges(from_node=t):  # if connected, get whoever it is connected to.
                break

            if v > d:  # if the opportunity is better.
                if t in assignments:  # and if there is a previous relationship.
                    unassigned_agents.append(e)  # add the removed node to unassigned.
                    assignments.del_edge(t, e)  # erase any previous relationship.
                else:
                    unassigned_tasks.remove(t)
                assignments.add_edge(t, n, v)  # record the new relationship.
                break

    return [(a, t, v) for t, a, v in assignments.edges() if t not in dummy_tasks]
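
A hedged usage sketch for the assignment-problem solver above, using a tiny bipartite graph in which agents 'a1'/'a2' have no incoming edges and tasks 't1'/'t2' have no outgoing ones, matching the in_degree/out_degree selection in the code (names and values are illustrative):

g = Graph()
g.add_edge('a1', 't1', 6)
g.add_edge('a1', 't2', 2)
g.add_edge('a2', 't1', 3)
g.add_edge('a2', 't2', 5)
print(ap_solver(g))   # expected, up to ordering: [('a1', 't1', 6), ('a2', 't2', 5)] -- the value-maximising matching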
Example #37
def resolve(input_path, output_path, test_utils, genome, is_sc, is_careful):

    grp_filename = os.path.join(input_path, 'late_pair_info_counted.grp')
    sqn_filename = os.path.join(input_path, 'late_pair_info_counted.sqn')
    cvr_filename = os.path.join(input_path, 'late_pair_info_counted.cvr')
    first_prd_filename = os.path.join(input_path,
                                      'late_pair_info_counted_0.prd')

    if experimental.filter != experimental.Filter.spades:
        prd_filename = first_prd_filename
    else:
        prd_filename = os.path.join(input_path, 'distance_estimation_0_cl.prd')
    if experimental.filter == experimental.Filter.pathsets:
        pst_filename = os.path.join(input_path, 'distance_estimation.pst')
    inf_filename = os.path.join(input_path,
                                'late_pair_info_counted_est_params.info')
    log_filename = os.path.join(output_path, 'rectangles.log')
    config = saveparser.config(inf_filename)

    d = config['median'] - config['RL']

    if d <= 0:
        print "Read length", config[
            'RL'], "is smaller than insert size", config[
                'median'], ", can't do anything"
        return

    makelogger(log_filename)
    logger = logging.getLogger('rectangles')

    logger.info("Rectangle Resolving %s..." % input_path)
    logger.info("d = %d..." % d)

    #################################
    # PARSE INITIAL BE BRUIJN GRAPH #
    #################################

    ingraph = Graph()
    ingraph.load(grp_filename, sqn_filename, cvr_filename)
    ingraph.check()
    logger.info("init rectangles set")
    rs = RectangleSet(ingraph, d, test_utils, prd_filename, first_prd_filename,
                      config)
    if experimental.filter == experimental.Filter.pathsets:
        rs.pathsets(pst_filename)
    else:
        logger.info("begin filter")
        rs.filter(prd_filename, config)
    logger.info("  RectangleSet built.")

    threshold = 0.0
    logger.info("  Checking threshold %f..." % threshold)
    maxbgraph = rs.bgraph(threshold)
    save_fasta(maxbgraph, output_path, is_sc, 'begin_rectangles.fasta',
               is_careful)
    logger.info("outputed begin rectangles")
    maxbgraph.check_tips(ingraph.K)
    save_fasta(maxbgraph, output_path, is_sc, 'delete_tips.fasta', is_careful)
    logger.info("outputed delete tips")
    edges_before_loop = maxbgraph.delete_loops(ingraph.K, 1000, 10)
    save_fasta(maxbgraph, output_path, is_sc,
               "delete_tips_delete_loops_1000.fasta", is_careful)
    logger.info("outputed delete loops")
    edges_before_loop_DG = ingraph.find_loops(10, 1000, rs)
    logger.info("find DG 1000 loops")
    to_del = set(edges_before_loop_DG.keys()) & edges_before_loop
    for eid in to_del:
        del edges_before_loop_DG[eid]

    maxbgraph.delete_missing_loops(edges_before_loop_DG, ingraph.K, 1000, 10)
    logger.info("delete missing loops")
    save_fasta(maxbgraph, output_path, is_sc,
               'delete_tips_delete_all_loops_1000.fasta', is_careful)

    edges_before_loop_DG = ingraph.find_loops(4, 10000, rs)
    to_del = set(edges_before_loop_DG.keys()) & edges_before_loop
    for eid in to_del:
        del edges_before_loop_DG[eid]

    maxbgraph.delete_missing_loops(edges_before_loop_DG, ingraph.K, 10000, 10)
    outgraph = save_fasta(maxbgraph, output_path, is_sc,
                          "after_deleting_big_loops.fasta", is_careful)

    additional_paired_info = dict()
    should_connect = maxbgraph.edges_expand(5000)
    should_connect_by_first_pair_info = maxbgraph.use_scaffold_paired_info(
        2 * maxbgraph.d, rs.prd_for_scaffold)

    for (e1id, e2id) in should_connect_by_first_pair_info:
        if e1id not in additional_paired_info and maxbgraph.es[
                e1id].conj.eid not in additional_paired_info and e2id not in additional_paired_info:
            additional_paired_info[e1id] = [
                maxbgraph.es[e1id], maxbgraph.es[e2id]
            ]
            additional_paired_info[maxbgraph.es[e1id].conj.eid] = [
                maxbgraph.es[e2id].conj, maxbgraph.es[e1id].conj
            ]

    outgraph.fasta_for_long_contigs(
        ingraph.K, maxbgraph.d, is_sc, is_careful,
        open(os.path.join(output_path, "rectangles_extend.fasta"), "w"),
        should_connect, additional_paired_info)
    outgraph.fasta_for_long_contigs(
        ingraph.K, maxbgraph.d, is_sc, is_careful,
        open(
            os.path.join(output_path,
                         "rectangles_extend_before_scaffold.fasta"), "w"),
        should_connect, dict())

    outgraph.save(os.path.join(output_path, "last_graph"))

    if genome:
        check_diags.check(
            genome, maxbgraph, maxbgraph.K,
            open(os.path.join(output_path, "check_log.txt"), "w"), test_utils)
Example #38
#!python

import sys
from graph import Graph

if __name__ == "__main__":

    # get args
    f_name = sys.argv[1]

    # create graph
    g = Graph(f_name)

    # build a minimum spanning tree with Prim's algorithm
    result = g.prims()
    # compute shortest-path distances from vertex 'a' with Dijkstra's algorithm
    result2 = g.dijkstra('a')

    # print if there is a mst
    print('There exists a mst: {}'.format(result))

    print('Distances: {}'.format(result2))
Example #39
class CC:
    def __init__(self, g):
        self.marked = {}   # vertex -> visited flag
        self.cid = {}      # vertex -> component id
        self.count = 0     # number of connected components
        for v in g.adj:
            if (v not in self.marked) or (not self.marked[v]):
                self.dfs(g, v)
                self.count += 1

    def dfs(self, g, v):
        self.marked[v] = True
        self.cid[v] = self.count
        for w in g.adj[v]:
            if (w not in self.marked) or (not self.marked[w]):
                self.dfs(g, w)

    def connected(self, v, w):
        return (self.cid[v] == self.cid[w])


if __name__ == '__main__':
    fn = "../algs4-data/tinyG.txt"
    g = Graph.fromFile(fn)
    g.printGraph()
    cc = CC(g)

    print("%d components" % cc.count)
    components = [[] for x in range(cc.count)]  # init 2d-empty-list
    for v in g.adj:
        components[cc.cid[v]].append(v)
    for c in components:
        c.reverse()
        print(c)
Example #40
class SocialGraph:
    def __init__(self):
        self.lastID = 0
        self.users = {}
        self.friendships = {}
        self.graph = Graph()

    def addFriendship(self, userID, friendID):
        """
        Creates a bi-directional friendship
        """
        if userID == friendID:
            print("WARNING: You cannot be friends with yourself")
            return False
        elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:
            print("WARNING: Friendship already exists")
            return False
        elif friendID in self.users and userID in self.users:
            self.friendships[userID].add(friendID)
            self.friendships[friendID].add(userID)
            self.graph.add_edge(str(friendID),str(userID))
            return True
        else:
            print("Error: one or both users don't exist")
            return False

    def addUser(self, name):
        """
        Create a new user with a sequential integer ID
        """
        self.lastID += 1  # automatically increment the ID to assign the new user
        self.users[self.lastID] = User(name)
        self.friendships[self.lastID] = set()
        self.graph.add_vertex(str(self.lastID))

    def populateGraph(self, numUsers, avgFriendships):
        """
        Takes a number of users and an average number of friendships
        as arguments

        Creates that number of users and randomly distributed friendships
        between those users.

        The number of users must be greater than the average number of friendships.

        >>> sg = SocialGraph()
        >>> sg.populateGraph(10, 2)  # Creates 10 users with an average of 2 friends each
        >>> print(sg.friendships)
        {1: {8, 10, 5}, 2: {10, 5, 7}, 3: {4}, 4: {9, 3}, 5: {8, 1, 2}, 6: {10}, 7: {2}, 8: {1, 5}, 9: {4}, 10: {1, 2, 6}}
        """
        # Reset graph
        self.lastID = 0
        self.users = {}
        self.friendships = {}
        # !!!! IMPLEMENT ME
        f = open('names.txt', 'r')
        names = f.read().split("\n")  # List containing 10000 names
        f.close()
        # Add users
        for userId in range(1,numUsers+1):
            self.addUser(names[userId])

        # Create friendships
        totalFriendships = numUsers * avgFriendships
        while(totalFriendships > 0):
            userId = randint(1,numUsers)
            friendId = randint(1,numUsers)
            if self.addFriendship(userId,friendId):
                totalFriendships-=2

        

    def getAllSocialPaths(self, userID):
        """
        Takes a user's userID as an argument

        Returns a dictionary containing every user in that user's
        extended network with the shortest friendship path between them.

        The key is the friend's ID and the value is the path.
        """
        visited = {}  # Note that this is a dictionary, not a set
        # !!!! IMPLEMENT ME
        u = str(userID)
        allConnections = self.graph.bft(u)
        for c in allConnections:
            if c != u and c not in self.graph.vertices[u]:
                visited[c] = self.graph.bfs_path(u,c)
        return visited
Example #41
    # =====================
    from graphfile import GraphFile
    from graph import Graph
    import datetime

    # Read data: clean paths and clean edges
    # ======================================
    filename_paths = "data/clean_manufacturing_paths.txt"
    filename_edges = "data/clean_manufacturing_edges.txt"
    edges = GraphFile(filename_edges).read_edges_from_file()
    paths = GraphFile(filename_paths).read_paths_with_count()

    # Generate graph from clean edges
    # ===============================
    edges = add_self_loops(paths, edges)
    G = Graph(edges)

    print("Number of nodes: ", len(G.nodes))
    print("Number of edges: ", len(G.edges.keys()))

    # Color code nodes
    # =======================
    node_colors = dict()
    for node in G.nodes:
        if node in {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23}: # Start nodes threshold 0.001
            node_colors[node] = "salmon"
        elif node in {24,25}:
            node_colors[node] = "palegreen"
        elif node in {26,27,28}: # End nodes threshold 0.001
            node_colors[node] = "lightblue"
        else:
            pass  # colour assignment for the remaining nodes is truncated in the source
            },
        ],
        "relation": [
            {
                "type": "ParentToChild",
                "value": "父亲",
                "offset": 3,
                "code": 0
            },
        ],
        "intent":
        0
    }
    qg = QueryGraph(data_dict)
    gr = qg.person_relation_chain
    g = Graph(gr)
    print('=========chain=====')
    # g.show()

    co = QueryGraphComponent(data_dict['entity'][0], 'person0')
    c = Graph(co)
    print('=========component=====')
    # c.show()

    # t = nx.compose(gr, nx.MultiDiGraph(co))
    t = nx.compose(gr, co)

    t = Graph(t)
    t.show()

    t = nx.relabel.relabel_nodes(t, {'person0_name': 'person0'})