def cnrg_learn_grammars_probabilistic_graph_generation(g, args):
    """Learn a CNRG grammar from graph *g* and generate one new graph from it.

    :param g: input graph the grammar is extracted from
    :param args: parsed CLI namespace; this function reads .graph, .clustering,
                 .mu and .type (.boundary and .outdir are parsed by the CLI but
                 unused here)
    :return: list of edges of the generated graph
    """
    name = args.graph
    clustering = args.clustering
    mu = args.mu
    grammar_type = args.type  # renamed local: don't shadow the builtin `type`

    grammar, orig_n = get_grammar_s(original_graph=g, name=name,
                                    grammar_type=grammar_type,
                                    clustering=clustering, mu=mu)
    # generate_graph returns (graph, rule_ordering); only the graph is needed
    generated, _ = generate_graph(rule_dict=grammar.rule_dict, target_n=orig_n)
    return list(generated.edges())
def main():
    """CLI driver: learn a grammar for the named graph and generate one graph.

    NOTE(review): the generated graph is never written or returned by this
    version of main() — it appears to be an earlier/experimental driver.
    """
    args = parse_args()
    # Pull only what is actually used; args.boundary / args.outdir are unused.
    graph_name = args.graph
    grammar_type = args.type  # avoid shadowing the builtin `type`
    grammar, orig_n = get_grammar(name=graph_name,
                                  grammar_type=grammar_type,
                                  clustering=args.clustering,
                                  mu=args.mu)
    g = generate_graph(rule_dict=grammar.rule_dict, target_n=orig_n)
def main():
    """CLI driver: learn a grammar, generate a graph, and dump its edge list.

    Writes the generated graph to ``<outdir>/<name>_CNRG.g`` (no edge data).
    """
    args = parse_args()
    graph_name = args.graph
    grammar_type = args.type  # avoid shadowing the builtin `type`

    grammar, orig_n = get_grammar(name=graph_name,
                                  grammar_type=grammar_type,
                                  clustering=args.clustering,
                                  mu=args.mu)
    g, rule_ordering = generate_graph(rule_dict=grammar.rule_dict,
                                      target_n=orig_n)
    nx.write_edgelist(g, f'{args.outdir}/{graph_name}_CNRG.g', data=False)
def dump_graphs(name: str, clustering: str, grammar_type: str) -> None:
    """Generate and pickle graphs for every stored grammar of one input graph.

    For each mu in 2..min(n, 10), loads the pickled grammar
    ``dumps/grammars/<name>/<clustering>_<grammar_type>_<mu>.pkl`` (if it
    exists), generates ``num_graphs`` graphs from it, and pickles the graphs
    and the per-graph rule orderings under ``dumps/graphs`` and
    ``dumps/rule_orders``.

    :param name: dataset / graph name
    :param clustering: clustering method used when the grammar was extracted
    :param grammar_type: one of the supported mu_* / *_dl grammar types
    :return: None
    """
    original_graph = get_graph(name)
    outdir = 'dumps'
    make_dirs(outdir, name)  # make the directories if needed

    mus = range(2, min(original_graph.order(), 11))
    grammar_types = ('mu_random', 'mu_level', 'mu_dl', 'mu_level_dl',
                     'local_dl', 'global_dl')
    assert grammar_type in grammar_types, f'Invalid grammar type: {grammar_type}'

    num_graphs = 10
    base_filename = f'{outdir}/grammars/{name}'

    for mu in mus:
        grammar_filename = f'{base_filename}/{clustering}_{grammar_type}_{mu}.pkl'
        graphs_filename = f'{outdir}/graphs/{name}/{clustering}_{grammar_type}_{mu}_graphs.pkl'
        rule_orders_filename = f'{outdir}/rule_orders/{name}/{clustering}_{grammar_type}_{mu}_orders.pkl'

        if not os.path.exists(grammar_filename):
            print('Grammar not found:', grammar_filename)
            continue
        if os.path.exists(graphs_filename):
            print('Graphs already generated')
            continue

        # Context managers: the original pickle.load(open(...)) leaked handles.
        with open(grammar_filename, 'rb') as f:
            grammar = pickle.load(f)

        graphs: List[LightMultiGraph] = []
        rule_orderings: List[List[int]] = []

        for i in range(num_graphs):
            # Shallow-copy the rule dict so generation can't mutate the grammar's
            rule_dict = dict(grammar.rule_dict)
            new_graph, rule_ordering = generate_graph(rule_dict)
            print(
                f'{name} {grammar_type}: {i+1}, n = {new_graph.order():_d} m = {new_graph.size():_d}'
            )
            graphs.append(new_graph)
            rule_orderings.append(rule_ordering)

        with open(graphs_filename, 'wb') as f:
            pickle.dump(graphs, f)
        with open(rule_orders_filename, 'wb') as f:
            pickle.dump(rule_orderings, f)
def generate_graphs(grammar: VRG, num_graphs=10):
    """Generate ``num_graphs`` graphs from a VRG grammar.

    :param grammar: VRG grammar object
    :param num_graphs: how many graphs to generate
    :return: (list of generated graphs, list of rule orderings, one per graph)
    """
    generated: List[LightMultiGraph] = []
    orderings: List[List[int]] = []

    for _ in range(num_graphs):
        # Copy the rule dict so a single generation pass can't mutate the grammar
        rules = dict(grammar.rule_dict)
        graph, ordering = generate_graph(rules)
        generated.append(graph)
        orderings.append(ordering)

    return generated, orderings
def main():
    """CLI driver for attributed graphs: learn a grammar, generate a graph,
    and dump its edges and nodes (with optional colors).

    Writes ``<outdir>/<name>_CNRG.edges`` ("u v [edge_color]" per line) and
    ``<outdir>/<name>_CNRG.nodes`` ("v [node_color]" per line), with no
    trailing newline in either file.
    """
    args = parse_args()
    name = args.graph
    grammar_type = args.type  # renamed local: don't shadow the builtin `type`

    grammar, orig_n = get_grammar(name=name, grammar_type=grammar_type,
                                  clustering=args.clustering, mu=args.mu,
                                  path_input=args.input,
                                  path_node_attrs=args.nodes,
                                  path_edge_attrs=args.edges,
                                  path_timestamps=args.timestamps)
    g, rule_ordering = generate_graph(rule_dict=grammar.rule_dict,
                                      target_n=orig_n)

    # Idiomatic replacement for try: os.mkdir / except FileExistsError: pass
    os.makedirs(args.outdir, exist_ok=True)

    # BUGFIX: the original stripped the trailing newline with
    # truncate(tell() - len(os.linesep)). In text mode tell() returns an
    # opaque cookie (arithmetic on it is undefined), os.linesep is 2 bytes on
    # Windows while only '\n' was written, and an empty graph would make the
    # argument negative. Joining the lines produces the same output safely.
    edge_lines = []
    for u, v, dd in g.edges(data=True):
        if 'edge_color' in dd:
            edge_lines.append(f'{u} {v} {dd["edge_color"]}')
        else:
            edge_lines.append(f'{u} {v}')
    with open(f'{args.outdir}/{name}_CNRG.edges', 'w') as edgefile:
        edgefile.write('\n'.join(edge_lines))

    node_lines = []
    for v, dd in g.nodes(data=True):
        if 'node_color' in dd:
            node_lines.append(f'{v} {dd["node_color"]}')
        else:
            node_lines.append(f'{v}')
    with open(f'{args.outdir}/{name}_CNRG.nodes', 'w') as nodefile:
        nodefile.write('\n'.join(node_lines))