from args_parser import Args, parse
from graph import Graph
from page_rank import PageRank

if __name__ == "__main__":
    # Parse CLI arguments; the project name doubles as the graph input path.
    args = parse("Graph Analyzer by Marcin Knap")
    path = args[Args.ProjectName.value]

    # Load and analyze the graph before ranking its nodes.
    g = Graph(path)
    g.analyze()

    # Rank nodes of the directed graph; d=0.85 is the conventional
    # PageRank damping factor.
    page_rank = PageRank(g.dig, path)
    page_rank.calc_page_rank(d=0.85)
    page_rank.print()
    page_rank.save()
def main(argv):
    """Entry point: parse *argv* into options and run prediction."""
    options = args_parser.parse(argv)
    predict(options)
# NOTE(review): whitespace-mangled paste. This span is the interior of a curses
# scene loop (the enclosing `def` header — presumably `tsnb`, invoked below via
# curses.wrapper — is not visible in this chunk), followed by the script entry
# point. Left byte-identical because the surrounding definition is incomplete
# from this view; reformatting it without its header would be guesswork.
# NOTE(review): `parsed_args.notebook_id == None` should be `is None` (PEP 8) —
# fix once the enclosing function is fully in view.
# NOTE(review): `sys.argv[1:len(sys.argv)]` is just `sys.argv[1:]`.
if parsed_args.notebook_id == None: # Start off in notebook_selection_scene import notebook_selection_scene notebook_selection_scene.init(stdscr) scene_handler.scene = notebook_selection_scene else: # Start off in selected notebook_editing_scene import notebook_editing_scene notebook_editing_scene.init(stdscr, parsed_args.notebook_id) scene_handler.scene = notebook_editing_scene run = True while run: scene_handler.scene.redraw(stdscr) stdscr.refresh() c = stdscr.getch() run = scene_handler.scene.handle_input(stdscr, c) notebooks.save_notebooks() if __name__ == "__main__": args = sys.argv[1:len(sys.argv)] parsed_args = args_parser.parse(args) curses.wrapper(tsnb, parsed_args)
def main():
    """Run the community-detection benchmark suite.

    Parses CLI flags (-v verbose, -w exclude the genetic algorithm,
    -mu value), runs every selected algorithm on the small non-LFR graphs
    and on LFR benchmark graphs of increasing size, and dumps
    coverage/performance/NMI/runtime metrics to JSON result files.
    """
    global VERBOSE
    h, v, w, mu, mu_val = parse(' '.join(sys.argv[1:]))
    if h:
        print("- Use -v to activate Verbose")
        print("- Use -w to exclude the genetic algorithm from the run")
        print("- Use -mu value to set the value of mu in the graph generators; value should be in the range (0, 1)")
        return

    VERBOSE = bool(v)

    # -w drops the genetic algorithm (gcm) from the run.
    if w:
        algorithms = [clauset_newman_moore, louvain, reneel]
    else:
        algorithms = [clauset_newman_moore, louvain, reneel, gcm]

    if VERBOSE:
        print("Start process")

    # Small (non-LFR) graphs.
    non_lfr_runs(algorithms)
    with open('results/small_c1_test.json', 'w') as fs:
        json.dump(RESULTS_S, fs)

    # LFR benchmark graphs of increasing size.
    sizes = [250, 500, 1000, 1500, 2000, 2500, 3000]
    for n in sizes:
        # NOTE(review): `genrate_lfr_graph` is a typo for "generate" in the
        # helper's own definition elsewhere — rename there, then here.
        G, target_partition, target_communities = genrate_lfr_graph(size=n, mu=mu_val)
        nodes_no = n
        edges_no = G.number_of_edges()
        avg_degree = sum(G.degree[i] for i in range(n)) / nodes_no
        print("========================================================")
        print(nodes_no, edges_no, avg_degree)
        print("========================================================")
        pos = nx.spring_layout(G)

        # Each result is (partition, communities, runtime).
        results = [alg(G) for alg in algorithms]
        partitions = [r[0] for r in results]
        # Per-algorithm (coverage, performance, NMI vs. ground truth).
        metrics = [(coverage(G, r[1]),
                    performance(G, r[1]),
                    normalized_mutual_info_score(
                        convert_to_array(target_partition),
                        convert_to_array(r[0])))
                   for r in results]
        runtimes = [r[2] for r in results]

        for idx, (cov, perf, nmi) in enumerate(metrics):
            RESULTS_LFR[NAMES[idx]][n] = {
                "coverage": cov,
                "performance": perf,
                "nmi": nmi,
                "runtime": runtimes[idx],
            }
            if VERBOSE:
                print(
                    f"The coverage obtained by {algorithms[idx].__name__} was "
                    + "%.4f" % cov)
                print(
                    f"The performance obtained by {algorithms[idx].__name__} was "
                    + "%.4f" % perf)
                print(
                    f"The NMI score obtained by {algorithms[idx].__name__} was "
                    + "%.4f" % nmi)
                print("========================================================")

        parallel_display(algorithms, partitions, G, pos)

    with open('results/lfr_c1_test.json', 'w') as fb:
        json.dump(RESULTS_LFR, fb)
def main(argv):
    """Entry point: parse *argv* and launch training with model export."""
    training_opts = args_parser.parse(argv)
    train.train(training_opts, export=True)
def __init__(self):
    """Parse CLI audio settings and set up an empty sample buffer."""
    self.parser, self.args = args_parser.parse()
    cfg = self.args
    # Zero-row array: rows are appended as audio arrives, one column
    # per channel, stored in the configured sample dtype.
    self.data = np.empty(shape=(0, cfg.channels), dtype=cfg.audio_data_type)
    # NOTE(review): despite the name, this is sample_rate / buffer_size —
    # presumably buffers per second rather than a sample count; verify
    # against the code that consumes it.
    self.buffer_max_samples = cfg.sample_rate / cfg.buffer_size
    self.buffer_num_samples = 0
def main():
    """Parse crawler CLI arguments and start a parallel crawl."""
    args = parse("WebCrawler by Marcin Knap")
    home_page = args[Args.HomePage.value]
    project_name = args[Args.ProjectName.value]
    save_flag = args[Args.SaveFlag.value]
    ParallelCrawler(home_page, project_name, save_flag).crawl()