def _generate_1edge_frequent_subgraphs(self):
    """Count 1-vertex and 1-edge patterns and keep the frequent ones.

    Support is per-graph: each vertex label, and each canonical
    (vlb1, elb, vlb2) triple, contributes at most once per graph gid.
    Frequent single-vertex patterns are appended to
    ``self._frequent_size1_subgraphs`` and reported when the configured
    minimum pattern size allows it.
    """
    vlb_counter = collections.Counter()
    vevlb_counter = collections.Counter()
    vlb_counted = set()
    vevlb_counted = set()
    for g in self.graphs.values():
        for v in g.vertices.values():
            # Count each vertex label at most once per graph.
            if (g.gid, v.vlb) not in vlb_counted:
                vlb_counter[v.vlb] += 1
            vlb_counted.add((g.gid, v.vlb))
            for to, e in v.edges.items():
                vlb1, vlb2 = v.vlb, g.vertices[to].vlb
                # Canonicalize the endpoint labels for undirected graphs
                # so (a, e, b) and (b, e, a) count as the same pattern.
                if self._is_undirected and vlb1 > vlb2:
                    vlb1, vlb2 = vlb2, vlb1
                # BUG FIX: membership must be tested against the per-graph
                # `vevlb_counted` set, not `vevlb_counter` (whose keys are
                # the bare triples). The old test counted each triple at
                # most once across ALL graphs, underestimating support.
                if (g.gid, (vlb1, e.elb, vlb2)) not in vevlb_counted:
                    vevlb_counter[(vlb1, e.elb, vlb2)] += 1
                vevlb_counted.add((g.gid, (vlb1, e.elb, vlb2)))
    # Add frequent single-vertex subgraphs.
    for vlb, cnt in vlb_counter.items():
        if cnt >= self._min_support:
            g = Graph(gid=next(self._counter),
                      is_undirected=self._is_undirected)
            g.add_vertex(0, vlb)
            self._frequent_size1_subgraphs.append(g)
            if self._min_num_vertices <= 1:
                self._report_size1(g, support=cnt)
    # When larger patterns are requested, the size-1 gids are discarded:
    # restart gid numbering for the edge-growing phase.
    if self._min_num_vertices > 1:
        self._counter = itertools.count()
def test_tsort_simple(self):
    """A node must be preceded by its dependencies in topological order."""
    graph = Graph('build.ninja')
    lib = Node(name='foo.so', scope='build.ninja', outputs=['foo.so'],
               rule_name='shlib', explicit_deps=['foo.o'])
    obj = Node(name='foo.o', scope='build.ninja', outputs=['foo.o'],
               rule_name='cc', explicit_deps=['foo.c'])
    for node in (lib, obj):
        graph.nodes[node.name] = node
    # foo.o must come out before foo.so, which depends on it.
    self.assertEqual(graph.tsort([lib.name]), [obj.name, lib.name])
def __init__(self):
    """Generate and solve boards until one succeeds.

    Keeps building fresh puzzles, printing each starting board and,
    once a board is solved, the completed board.
    """
    solved = False
    while not solved:
        self.graph = Graph()
        self.board = self.graph.build_table()
        print("Starting board:")
        self.graph.display()
        self.solve()
        solved = not self.get_empty()
        if not solved:
            print("\nUnable to solve, generating new board")
    print("Completed board:")
    self.graph.display()
def to_graph(self, gid=-1):
    """Materialize this DFS code as a Graph instance."""
    graph = Graph(gid)
    for edge in self:
        source_vlb, edge_lb, target_vlb = edge.vevlb
        graph.add_vertex(edge.frm, source_vlb)
        graph.add_vertex(edge.to, target_vlb)
        graph.add_edge(edge.frm, edge.to, edge_lb)
    return graph
def main():
    """Extract, merge and print constraints, then run range analysis."""
    essa = eSSA(args.data_dir, args.phase)
    # A file may contain several functions; each function carries its own
    # set of constraints.
    per_func_constraints = [extract_constraints(func, essa.input)
                            for func in essa.funcs]
    constraints = merge(essa, per_func_constraints)
    for constraint in constraints:
        print(constraint)
    graph = Graph(essa, constraints)
    print(graph)
    print(graph.vars, graph.cons)
    findRange(graph)
class SudokuSolver:
    """Solve a generated sudoku game using backtracking."""

    def __init__(self):
        """Generate and solve boards until one succeeds.

        Prints each starting board and, once a board is solved, the
        completed board.
        """
        while True:
            self.graph = Graph()
            self.board = self.graph.build_table()
            print("Starting board:")
            self.graph.display()
            self.solve()
            if not self.get_empty():
                break
            print("\nUnable to solve, generating new board")
        print("Completed board:")
        self.graph.display()

    def solve(self):
        """Fill empty cells recursively; return True once the board is done.

        Tries each digit 1-9 in the next empty cell, recursing on valid
        placements and undoing them (restoring "x") when the branch fails.
        """
        position = self.get_empty()
        if not position:
            return True
        row, col = position
        for candidate in range(1, 10):
            if not self.graph.validate(candidate, row, col):
                continue
            self.board[row][col] = candidate
            if self.solve():
                return True
            self.board[row][col] = "x"
        return False

    def get_empty(self):
        """Return (row, col) of the next "x" cell, or False when full."""
        for row, cells in enumerate(self.board):
            for col, value in enumerate(cells):
                if value == "x":
                    return (row, col)
        return False
@author: wangxindi
"""

# Analysis: are the predicted MeSH terms more precise or more general?

import numpy as np

from build_graph import Graph

# Read the parent->child MeSH mapping; each line is a space-separated pair.
connections = []
with open('MeSH_parent_child_mapping_2018.txt') as f:
    for line in f:
        item = tuple(line.strip().split(" "))
        connections.append(item)

# Build both views of the hierarchy: an undirected graph and a directed
# (parent -> child) graph.
g_undirected = Graph(connections, directed=False)
g_directed = Graph(connections, directed=True)


def intersection(lst1, lst2):
    # Common elements of the two sequences (order not preserved).
    return list(set(lst1) & set(lst2))


def find_chirdren(y, distance):
    # NOTE(review): "chirdren" is a typo for "children"; kept because the
    # name may be referenced elsewhere.
    # For each label in `y`, collect g_directed.find_node(item, distance) —
    # presumably the nodes reachable within `distance`; confirm against
    # build_graph.Graph.find_node. Body appears to continue past this chunk
    # (no return statement visible here).
    new_y = []
    for item in y:
        y_child = g_directed.find_node(item, distance)
        #y_new = list(set(y_child+item))
        new_y.append(y_child)
    #new_y = [x for x in new_y if x != []]
    #new_y = [item for sublist in new_y for item in sublist]
    # Label each scatter point with its (1-based) topic number.
    # NOTE(review): this loop references `topics`, `measure` and the two
    # averaged-centrality lists, so it belongs to a function whose header
    # is above this chunk.
    for j, txt in enumerate(topics):
        plt.annotate(
            "Topic {}".format(txt + 1),
            (average_individual_cents[j], average_overall_cents[j]))
    plt.savefig(
        "../visualizations/centrality_charts/{}_opposing_centrality_chart_(mean_of_leaders).png"
        .format(measure))
    plt.clf()  # reset the figure for the next chart


if __name__ == "__main__":
    # Leader Twitter handles; CLI arguments override the default list.
    usernames = sys.argv[1:] if sys.argv[1:] else [
        "JustinTrudeau", "ElizabethMay", "theJagmeetSingh", "AndrewScheer",
        "MaximeBernier"
    ]
    # Combined graph built over all leaders at once.
    G = Graph(usernames).G
    sum_overall_topic_centralities = centrality_per_topic(G, measure='sum')
    mean_overall_topic_centralities = centrality_per_topic(G, measure='mean')
    zscore_overall_topic_centralities = centrality_per_topic(G, measure='zscore')
    # Per-leader centralities: one single-user graph per username.
    sum_leader_cents = []
    mean_leader_cents = []
    zscore_leader_cents = []
    for username in usernames:
        single_leader_g = Graph([username]).G
        sum_leader_cents.append(
            centrality_per_topic(single_leader_g, measure='sum'))
        mean_leader_cents.append(
            centrality_per_topic(single_leader_g, measure='mean'))
        zscore_leader_cents.append(
            centrality_per_topic(single_leader_g, measure='zscore'))
import numpy as np
from scipy.sparse import issparse
from scipy import stats

from build_graph import Graph

# Each line of the mapping file holds one space-separated MeSH
# parent/child pair.
with open('MeSH_parent_child_mapping_2018.txt') as mapping_file:
    connections = [tuple(row.strip().split(" ")) for row in mapping_file]

# Undirected graph over the parent/child relations.
g = Graph(connections, directed=False)


def intersection(lst1, lst2):
    """Return the common elements of the two sequences as a list."""
    common = set(lst1) & set(lst2)
    return list(common)


def precision(p, t):
    """
    p, t: two sets of labels/integers
    >>> precision({1, 2, 3, 4}, {1})
    0.25
    """
    hits = t.intersection(p)
    return len(hits) / len(p)
def dump_dict(a_dict, file_name="heat_traces"):
    # Pickle `a_dict` to "<file_name>.json".
    # NOTE(review): the payload is pickle (binary), not JSON, despite the
    # .json extension — confirm downstream readers before renaming.
    with open("{}.json".format(file_name), 'wb') as fp:
        pickle.dump(a_dict, fp)
        fp.flush()  # redundant: closing the with-block flushes anyway


def load_dict(file_name):
    # Best-effort inverse of dump_dict: return the unpickled object, or an
    # empty dict when the file is missing or unreadable.
    try:
        with open("{}.json".format(file_name), 'rb') as fp:
            ret_dict = pickle.load(fp)
            return ret_dict
    # NOTE(review): a bare except also swallows KeyboardInterrupt and
    # SystemExit; prefer (OSError, pickle.UnpicklingError, EOFError).
    except:
        return {}


if __name__ == "__main__":
    # Retweet histogram drawn from the configured usernames.
    retweet_histogram = Graph(config["usernames"]).retweet_histogram()
    # Sampled graph limited to num_tweets tweets.
    sample_g = Graph(config["usernames"], n=config["num_tweets"])
    graph_dict = {"Original Graph": sample_g.G}
    # Cache file keyed by the hybrid-graph kwargs.
    heat_dict_fn = "heat_traces_{}".format(str(config["kwargs"]))
    heat_dict = load_dict(heat_dict_fn)
    # A single alpha from the CLI overrides the configured sweep.
    alphas = [float(sys.argv[1])] if sys.argv[1:] else config["alphas"]
    pbar = tqdm.tqdm(total=len(alphas) * config["num_per_alpha"])
    for alpha in alphas:
        gs = []
        # Only regenerate graphs for alphas missing from the cache.
        if alpha not in heat_dict:
            for _ in range(config["num_per_alpha"]):
                gs.append(stochastic_hybrid_graph(alpha, m=sample_g.num_retweeters, retweet_histogram=retweet_histogram, **config["kwargs"]))
                pbar.update(1)
            graph_dict[alpha] = gs
            # Draw only the last generated graph for this alpha.
            draw_graph(graph_dict[alpha][-1], save=config["save"], file_name="stochastic_hybrid_graph_alpha={:.3f}_{}".format(alpha, str(config["kwargs"])), title="Hybrid Graph. Alpha={}".format(alpha))
        # NOTE(review): the else-branch continues past this chunk.
        else: