def main(): print("PROJEKCIK 3 GRAFY") print("--------AD1--------") graph = random_connected_graph() graph = WeightedGraph(graph) weights, nodes_and_neighbours = dijkstra(graph) print("--------AD2--------") print_dijkstry(weights, nodes_and_neighbours) draw_graph(graph) print("--------AD3--------") dist_matrix = distance_matrix(graph) print_distance_matrix(dist_matrix) print("--------AD4--------") center_vertex = center_index(graph) + 1 minmax_center_vertex = minmax_center_index(graph) + 1 print("graph center vertex: " + str(center_vertex)) print("graph minmax center vertex: " + str(minmax_center_vertex)) draw_graph(graph) print("--------AD5--------") graph = minimum_spanning_tree_PRIM_V2(graph) draw_weighted_graph(graph)
def run():
    logo()
    selection = validate()  # Select log file to analyse
    if selection == "exit":  # Exit option
        os.system("python3 hello.py")
        sys.exit()
    count_ID(selection)  # Count IDs
    clock = timestamp.get_time()  # Get time
    fw = open(C5235_directory + "Visdata_Log_" + clock, "w+")  # Create log file
    fw.write("Matched Event ID statistics:\r\n\r\n")
    for ids, info in idlist.items():  # Print out each item and its values in the dictionary
        print("Event ID:", ids)
        fw.write("Event ID: " + str(ids) + "\n")
        item_array.append(ids)  # Append all IDs to the array for the graph
        for key in info:  # Print out the nested dictionary
            print(key + ':', info[key], "\n")
            fw.write(str(key) + ': ' + str(info[key]) + "\n\n")
    fw.close()
    input("\n# Log file saved. Press enter to visualise data #")
    count_array = get_graph_data(clock)
    graph.draw_graph(item_array, count_array)  # Call script to draw graph
def _updateMode(self):
    self.ui.tabWidget.setTabEnabled(
        self.ui.tabWidget.indexOf(self.ui.editorTab), not self.no_alg)
    self.ui.tabWidget.setTabEnabled(
        self.ui.tabWidget.indexOf(self.ui.modelTab), bool(self.model))
    self.ui.tabWidget.setTabEnabled(
        self.ui.tabWidget.indexOf(self.ui.analysisTab), bool(self.model))
    self.ui.tabWidget.setTabEnabled(
        self.ui.tabWidget.indexOf(self.ui.machineTab), bool(self.machine))
    self.ui.tabWidget.setTabEnabled(
        self.ui.tabWidget.indexOf(self.ui.tableTab), bool(self.tr_table))
    self.ui.tabWidget.setTabEnabled(
        self.ui.tabWidget.indexOf(self.ui.vhdlTab), bool(self.machine))
    # use the full ABSOLUTE path to the image, not a relative one
    ren = None
    self._update_graph()
    if self.machine:
        ren = graph.renumerate(self.machine[0])
        graph.draw_machine(*self.machine)
        self._fill_signals()
    if self.model:
        txt = self.ui.info.toPlainText()
        self.ui.info.setPlainText(
            "%s\nInput signals: %d\nOutput signals: %d"
            % (txt, len(self.model.in_signals), len(self.model.out_signals)))
        graph.draw_graph(self.model.barenodes, self.model.connections,
                         self.model.matrix, loop=self.model.loop,
                         renumerated=ren, added=self.machine[3])
    if os.path.exists(IMG_PATH):
        self.canvas.setPixmap(QtGui.QPixmap(IMG_PATH))
        self.canvas.adjustSize()
    if os.path.exists(IMG_MACHINE_PATH):
        self.m_canvas.setPixmap(QtGui.QPixmap(IMG_MACHINE_PATH))
        self.m_canvas.adjustSize()
    if os.path.exists(IMG_FORMULAS_PATH):
        self.f_canvas.setPixmap(QtGui.QPixmap(IMG_FORMULAS_PATH))
        self.f_canvas.adjustSize()
def main():
    f = open("result.csv", "w")
    N = []
    tempos = []
    iterations = range(1, 23)
    k = 0  # only used to print a progress percentage
    f.write("N, tempo(s)\n")
    for i in iterations:
        array = generate_array(2**i)
        start = t.time()
        root = tree.build_tree(array)
        total_time = t.time() - start
        tempos.append(total_time)
        N.append(2**i)
        f.write(f"{2**i},{total_time}\n")
        k += 1
        print(f"{((k/len(iterations))*100):.0f}%")  # print progress
    g.draw_graph(N, tempos)
    f.close()
def btn_get_pressed(self):
    if (self.selected_faculty is not None) and (self.selected_program is not None) \
            and (self.selected_term is not None):
        self.db.update_courses(self.selected_term)
        self.db.update_dmajor_courses(self.selected_dmajor_term)
        graph.draw_graph(self.selected_term, self.selected_dmajor_term)
def process():
    character = request.args.get('character0')
    query = load_data.form_query4(character)
    data = load_data.load_data(query, "http://3.101.82.158:3030/SER531")
    result = load_data.clean_data(data)
    print(result)
    graph.draw_graph(character, result)
    # return send_from_directory(app.config['CLIENT_IMAGES'], "unix.gv.pdf", as_attachment=True)
    return send_file('unix.gv.pdf', attachment_filename='something.pdf')
def next_solution(number_of_genes, adjacency_matrix, number_of_color, show_solution):
    global idx_solution
    global number_of_solution
    idx_solution += 1
    if idx_solution == number_of_solution:
        Button_solution = Button(root, text="Quit ...", width=15, command=root.destroy)
        Button_solution.grid(row=13, column=2, padx=5, pady=5)
    else:
        Button_solution = Button(root, text="next solution", width=15,
                                 command=lambda: next_solution(number_of_genes,
                                                               adjacency_matrix,
                                                               number_of_color,
                                                               final_solution[idx_solution]))
        Button_solution.grid(row=13, column=2, padx=5, pady=5)
    draw_graph(number_of_genes, adjacency_matrix, number_of_color, show_solution)
def run_task_6(self, doc_id: str, user_id: str):
    """
    Run task 6 using the methods and functions written in this class.
    It simply displays the graph.

    :param doc_id: The input document ID
    :param user_id: The input user ID
    """
    readers = self.get_relevant_readers(doc_id, user_id)
    d = self.get_documents(readers)
    out = self.sort_graph_nodes(d)
    out[user_id] = {doc_id}
    graph.draw_graph(out, doc_id, user_id)
def eval_cplex_lp(links, paths, capacity, demand_k, draw=False):
    # parse solution
    fp = open(os.path.join(cplex_dir, 'solution.txt'), 'r')
    solution = ''.join(fp.readlines())
    fp.close()
    z, dvars = parse_solution(solution)
    # initialise route dict to store paths and demands
    route = {}
    for s in paths:
        route[s] = {}
        for t in paths[s]:
            route[s][t] = {}
    # fill values into route
    k = -1
    for s in paths:
        for t in paths[s]:
            k += 1
            p = dvars[k].index(1)
            route[s][t]['path'] = paths[s][t][p]['path']
            route[s][t]['demand'] = demand_k[k]
    # calculate link capacity utilization rate
    link_util = exec_route(links, capacity, route)
    if draw:
        # draw graph labelled with both directions' utilization rates
        edge_labels = {}
        for edge in G.edges:
            i, j = edge
            if i > j:
                i, j = j, i
            edge_labels[edge] = '({}%/{}%)'.format(link_util[(i, j)] * 100,
                                                   link_util[(j, i)] * 100)
        draw_graph(G, edge_labels)
        # draw distribution of link utilization rate
        draw_dist(link_util, interval=0.1, prefix='CPLEX ROUTE')
    link_max = None
    util_max = 0.
    for link in link_util:
        if link_util[link] >= util_max:
            util_max = link_util[link]
            link_max = link
    assert util_max == z, 'util_max={}, z={}'.format(util_max, z)
    return util_max, link_max
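# exec_route(links, capacity, route) is not shown in this snippet. A plausible
# sketch, under the assumption that it accumulates each flow's demand on every
# link of its path and divides by that link's capacity; the body is a guess,
# not the original implementation:
def exec_route(links, capacity, route):
    load = {link: 0.0 for link in links}
    for s in route:
        for t in route[s]:
            path = route[s][t]['path']
            demand = route[s][t]['demand']
            for u, v in zip(path, path[1:]):  # consecutive nodes form a link
                load[(u, v)] += demand
    # utilization rate = carried demand / link capacity
    return {link: load[link] / capacity[link] for link in load}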
def wrap_up_4_topology(self, path):
    for port1 in UDP_PORTs:
        for port2 in UDP_PORTs:
            elapsed_time = int(time.time()) - self.topology_last_update_time[
                self.make_node_id(port1)][self.make_node_id(port2)]
            if elapsed_time >= DISCONNECT_TIME_LIMIT:
                self.topology[self.make_node_id(port1)][self.make_node_id(port2)] = False
                self.topology[self.make_node_id(port2)][self.make_node_id(port1)] = False
    for uni_port in self.current_unidirectional_neighbors:
        self.topology[self.make_node_id(uni_port)][self.make_node_id(self.port)] = True
    for port in UDP_PORTs:
        if port in self.current_bidirectional_neighbors:
            self.topology[self.make_node_id(port)][self.make_node_id(self.port)] = True
            self.topology[self.make_node_id(self.port)][self.make_node_id(port)] = True
        else:
            self.topology[self.make_node_id(self.port)][self.make_node_id(port)] = False
    with open(path + "/4_topology.json", "w") as available_file:
        available_file.write(json.dumps(self.topology))
    plt = draw_graph(self.topology, [self.make_node_id(port) for port in UDP_PORTs])
    plt.savefig(path + "/4_topology.png", bbox_inches='tight')
def compare_trajectory():
    n = None
    set_x = []
    set_y = []
    while True:
        try:
            n = int(input("How many trajectories? "))
            if n > 0:
                break
            print("Enter a positive integer number")
        except Exception as e:
            print(e)
    for _ in range(n):
        while True:
            try:
                ivelo = float(input("Enter initial velocity (m/s): "))
                if ivelo > 0:
                    break
                print("Initial velocity must be positive")
            except Exception as e:
                print(e)
        while True:
            try:
                angle = float(input("Enter angle of trajectory (deg): "))
                if 0 < angle < 180:
                    break
                print("Enter an angle between 0 and 180 degrees")
            except Exception as e:
                print(e)
        print(f"{(n, ivelo, angle)}")
        t_x, t_y, t_ax = get_coords_trajectory(ivelo, angle)
        set_x.extend(t_x)
        set_x = list(set(set_x))
        set_y.extend(t_y)
        set_y = list(set(set_y))
        t_ax = [min(set_x), max(set_x), min(set_y), max(set_y)]
        graph.draw_graph(t_x, t_y, t_ax)
    graph.show_graph()
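# get_coords_trajectory(ivelo, angle) is not defined in this snippet. A minimal
# sketch using standard projectile kinematics
# (x = v*cos(a)*t, y = v*sin(a)*t - g*t^2/2); the sampling resolution and the
# axis-limit return convention are assumptions based on the call site:
import math

def get_coords_trajectory(ivelo, angle, g=9.81, steps=100):
    theta = math.radians(angle)
    t_flight = 2 * ivelo * math.sin(theta) / g  # time until y returns to 0
    xs, ys = [], []
    for i in range(steps + 1):
        t = t_flight * i / steps
        xs.append(ivelo * math.cos(theta) * t)
        ys.append(ivelo * math.sin(theta) * t - 0.5 * g * t * t)
    ax = [min(xs), max(xs), min(ys), max(ys)]  # axis limits, as the caller expects
    return xs, ys, ax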
def compute_graph(g, f_times=None, draw=False):
    t1 = datetime.now()
    print(g.name, file=stderr)
    if draw:
        layout = nx.spring_layout(g)
    measures = {}
    for pert in [0, .05, .1, .2, .5, 1]:
        print(' perturbation ({:.0%} of edges)...'.format(pert), file=stderr)
        pert_graph = anonymity.perturbation(g, pert)
        if draw:
            graph.draw_graph(pert_graph, pert, layout)
        print(' measurements...', file=stderr)
        measurements = graph.get_measurements(pert_graph)
        print(' h...', file=stderr)
        h = [anonymity.deanonymize_h(pert_graph, i) for i in range(0, 5)]
        print(' edge facts...', file=stderr)
        ef = []  # [anonymity.deanonymize_edgefacts(g, pert_graph, n) for n in range(0, 51, 10)]
        measures[pert] = pd.concat([measurements, *h, *ef])
    t2 = datetime.now()
    t = t2 - t1
    print(' execution time: {}'.format(t), file=stderr)
    if f_times is not None:
        print('{},{}'.format(g.name, t.total_seconds()), file=f_times)
    df = pd.DataFrame(measures)
    # print(df.to_string(), file=stderr)
    df.to_csv('out/{}.csv'.format(g.name))
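# anonymity.perturbation(g, pert) is not shown. One common reading, sketched
# here purely as an assumption, is to rewire a fraction `pert` of the edges:
# remove that many random edges and insert the same number of random new ones.
import random

def perturbation(g, pert):
    pg = g.copy()
    k = round(pert * g.number_of_edges())
    removed = random.sample(list(pg.edges()), k)
    pg.remove_edges_from(removed)
    nodes = list(pg.nodes())
    while pg.number_of_edges() < g.number_of_edges():
        u, v = random.sample(nodes, 2)  # two distinct endpoints
        if not pg.has_edge(u, v):
            pg.add_edge(u, v)
    return pg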
def teste(arq):
    cot = int(arq[1])
    G = [[0 for _i in range(2)] for _i in range(cot)]
    weights = [[0 for _i in range(1)] for _i in range(cot)]
    i = 3
    j = 0
    while i < len(arq):
        G[j][0] = arq[i]
        i = i + 1
        G[j][1] = arq[i]
        i = i + 1
        weights[j][0] = arq[i]
        i = i + 1
        j = j + 1
    edge_labels = {}
    for i in range(cot):
        edge_labels.setdefault((G[i][0], G[i][1]), weights[i][0])
    graph.draw_graph(G, edge_labels)
def dynamic_generate_bi(file_path, out_dir):
    # pd.DataFrame.from_csv has been removed from pandas; read_csv with an
    # index column and parsed dates is the equivalent call.
    df = pd.read_csv(file_path, index_col=0, parse_dates=True)
    bar_generator = BarGenerator()
    bi_generator = BiGenerator()
    for index, row in df.iterrows():
        raw_bar = Bar(index, row.High, row.Close, 0)
        result, new_bar = bar_generator.process_bar(raw_bar)
        if result == 'merge':
            bi_generator.replace_last_bar(new_bar)
        elif result == 'new':
            bi_generator.append_bar(new_bar)
        else:
            pass
    ended_bi = bi_generator.ended_bi
    trend_confirmed_bi = bi_generator.trend_confirmed_bi
    processed_bars = bar_generator.processed_bars
    calc_gravity_and_log_change(processed_bars)
    lines = []
    for bi in ended_bi:
        start, end = bi.to_line()
        lines.append(start)
        lines.append(end)
    df2 = bars_to_dataframe(processed_bars)
    df2['gravity'] = (df2.high + df2.low) / 2
    calc_log_change(df2)
    del df2['gravity']
    ticker = os.path.basename(file_path).split('.')[0]
    # df2.to_csv(os.path.join(data_dir, ticker + '_processed.csv'))
    os.chdir(out_dir)
    draw_graph(ticker, df2, lines)
def page_rank_tags(tags, pages=10, related_tags=2, num_tags=25, min_weight=3):
    all_media = {}
    for tag in tags:
        iSearch = InstagramSearch()
        if related_tags:
            similar_tags = iSearch.tag_search(tag)[0][:related_tags]
            print(similar_tags)
            if similar_tags:
                similar_tags = [tg.name for tg in similar_tags]
            else:
                similar_tags = [tag]
            print("searching for {0} similar tags {1}".format(related_tags, "\t".join(similar_tags)))
        else:
            similar_tags = [tag]
        for t in similar_tags:
            res = iSearch.tag_recent_media(t, pages=pages)
            all_media.update(res)
    # {'tag_info': tag_info, 'tag_page_rank': tpr, 'graph': graph}
    result = results.run_tag_pagerank(all_media, num_tags, min_weight)
    tag_info = result['tag_info']
    tag_page_rank = result['tag_page_rank']
    graph = result['graph']
    draw_graph(graph, file_name="{0}.png".format('_'.join(tags)), rank=tag_page_rank)
    results.print_page_rank(tag_page_rank, tag_info)
def run(option, Max=12, MAX_T=0.01, p=90):
    global MAX, T_MAX
    MAX = Max
    T_MAX = MAX_T
    nodes = 20
    edges = 30
    graph = gr.create_graph()
    if len(graph.edges()) > edges or len(graph.nodes) != nodes:
        print("Invalid number of edges or nodes!")
        return
    N = c.init(graph, MAX, CONNECTED, p, option)
    if N is not None:
        g = gr.draw_graph(graph)
        c.simulation(N, graph, T_MAX, option, 100)
def draw_graph(db, subreddit_name, a_month_ago, a_week_ago):
    subreddit.logging.info("Started making graph...")
    actions_to_get = db.get_chart_colors(subreddit_name, "line")
    longterm_actions = db.get_graph_stats(subreddit_name,
                                          actions_to_get=actions_to_get,
                                          since=a_month_ago)
    subreddit.logging.info("Got longterm actions...")
    shortterm_actions = db.get_graph_stats(subreddit_name,
                                           actions_to_get=actions_to_get,
                                           since=a_week_ago)
    subreddit.logging.info("Got shortterm actions...")
    actions_to_get = db.get_chart_colors(subreddit_name, "bar")
    stats = db.get_bar_graph_stats(actions_to_get,
                                   subreddit.get_mods(subreddit_name, db),
                                   subreddit_name, a_week_ago)
    subreddit.logging.info("Got bar chart actions...")
    path = graph.draw_graph(longterm_actions, shortterm_actions, stats)
    subreddit.logging.info("Drew graph... Completed.")
    return path
def train(model, num_epochs, learning_rate, mini_batch_size):
    print('Start Training...\nModel:', model, '\nTotal Epochs:', num_epochs,
          '\nLearning Rate:', learning_rate, '\nMini Batch Size:', mini_batch_size)
    w2i, embedding = set_embedding(model=model, pretrained_path='./pretrained',
                                   train_data_path='./data/rt-polaritydata')
    longest_sentence_length = longest_sentence(path='./data/rt-polaritydata')
    divide_train_and_test_set(path='./data/rt-polaritydata')
    train_data, train_labels = train_data_ready(path='./data/rt-polaritydata',
                                                w2i=w2i, length=longest_sentence_length)
    test_data, answers = test_data_ready(path='./data/rt-polaritydata',
                                         w2i=w2i, length=longest_sentence_length)
    classifier = Classifier(model, embedding, len(w2i), learning_rate)
    epoch_lst = []
    accuracy_lst = []
    best_accuracy = 0.0
    best_epoch = 0
    # print(classifier.static_check(0))  # check that CNN-static's embedding doesn't change during training
    for epoch in range(1, num_epochs + 1):
        start_time = time.time()
        print('-------------------------------------------------------------------')
        print('Epoch:', epoch)
        train_data, train_labels = shuffle_train_data(train_data, train_labels)
        train_num = len(train_data)
        i = 0
        while i < train_num:
            if i + mini_batch_size <= train_num:
                loss = classifier.train(train_data[i:i + mini_batch_size],
                                        train_labels[i:i + mini_batch_size],
                                        constraint=3.0)
            else:
                loss = classifier.train(train_data[i:], train_labels[i:], constraint=3.0)
            i += mini_batch_size
            if i % 1500 == 0:
                print('loss: %.7f' % loss)
        end_time = time.time()
        time_elapsed = end_time - start_time
        print('Time Elapsed for This Epoch: %02d:%02d:%02d\n'
              % (time_elapsed // 3600, (time_elapsed % 3600 // 60), (time_elapsed % 60 // 1)))
        test_classified = classifier.test(test_data)  # [N, 2]
        accuracy = validate(test_classified, answers)
        print('accuracy: %.5f %%' % accuracy)
        epoch_lst.append(epoch)
        accuracy_lst.append(accuracy)
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_epoch = epoch
            classifier.save(epoch=epoch, model=model)
        print('Best Accuracy: %.5f %%, Best Epoch: %04d' % (best_accuracy, best_epoch))
        print('-------------------------------------------------------------------\n')
    # print(classifier.static_check(0))  # check that CNN-static's embedding doesn't change during training
    print('Training has been Completed')
    print('Best Model has been Saved')
    print(model)
    print('Best Accuracy: %.5f, Best Epoch: %04d' % (best_accuracy, best_epoch))
    draw_graph(epoch_lst, accuracy_lst, model)
import sys
sys.path.append('../')

import matplotlib.pyplot as plt

from graph import load_graph
from graph import draw_graph

if __name__ == "__main__":
    G = load_graph(data_dir='../data')
    draw_graph(G)
    plt.show()
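# The graph module imported above is not shown. A minimal sketch of a
# compatible load_graph/draw_graph pair built on networkx; the edge-list file
# name and the layout choice are illustrative assumptions:
import os
import networkx as nx
import matplotlib.pyplot as plt

def load_graph(data_dir):
    # read a whitespace-separated edge list, e.g. "0 1\n1 2\n..."
    return nx.read_edgelist(os.path.join(data_dir, 'edges.txt'))

def draw_graph(G):
    # draw onto the current matplotlib figure; the caller calls plt.show()
    pos = nx.spring_layout(G, seed=42)
    nx.draw(G, pos, with_labels=True, node_color='lightblue', edge_color='gray')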
if master_on:
    state_machine = master
    events = master_events
else:
    state_machine = slave
    events = slave_events
print('--------------------------------')
print('Current state:', state_machine.current_state.name)
tranzycje = state_machine.allowed_transitions
id = state_machine.states.index(state_machine.current_state)
if master_on:
    state = 'm' + str(id)
else:
    state = 's' + str(id)
draw_graph(state)

# --------------------------------
# Manual transition selection
# print('Available transitions:')
# for i in range(len(tranzycje)):
#     print(str(i + 1) + '.', events[tranzycje[i].identifier], '->', tranzycje[i].destinations[0].name)
#
# print('Choose an event:', end=' ')
# zd = int(input()) - 1
# while zd not in range(len(tranzycje)):
#     print('Invalid event.')
#     print('Enter the event number:', end=' ')
#     zd = int(input()) - 1
#
# t = tranzycje[zd]
                   help='input output filetypes separated by space')
group.add_argument('--show', '-s', action="store_true", help='show the graph')
parser.add_argument('--dir', '-d', default=str(Path.cwd()) + "/out",
                    help='input directory for output files')
parser.add_argument('--name', '-n', help='input filename for the output file')
args = parser.parse_args()
assets = args.asset
filetypes = args.output
# TODO: if filename exists, auto-increment
try:
    current_fig = graph.draw_graph(assets)
except (data.AssetError, data.RequestError, graph.StyleError):
    pass
else:
    if filetypes:
        if not args.name:
            args.name = '_'.join(assets) + datetime.date.today().strftime("%y%m%d")
        print("Program running...Press ^C to exit.")
        # generate output path with dir/filename
        path = Path(args.dir) / args.name
        graph.get_exports(current_fig, filetypes, path)
        print("Export succeeded.")
    else:
        graph.show_graph()
import graph
import matplotlib.pyplot as plt


def gravity_force(m1, m2, r):
    # Newton's law of universal gravitation: F = G * m1 * m2 / r^2
    gravity_constant = 6.674e-11
    force = (gravity_constant * (m1 * m2)) / (r**2)
    return force


def get_coords_gravity(m1, m2, radius):
    x = []
    y = []
    for distance in radius:
        newtons = gravity_force(m1, m2, distance)
        x.append(distance)
        y.append(newtons)
    ax = []
    ax.extend([min(x), max(x), min(y), max(y)])
    plt.title(f"Gravity between {m1}kg & {m2}kg")
    plt.xlabel("Distance [m] between each mass")
    plt.ylabel("Force [N] acting on each mass")
    return x, y, ax


if __name__ == "__main__":
    g_x, g_y, g_ax = get_coords_gravity(0.5, 1.5, graph.ranger(1, 11, 0.1))
    graph.draw_graph(g_x, g_y, g_ax)
    graph.show_graph()
start = time.perf_counter()  # time.clock() was removed in Python 3.8
branch = branch.upper()
print('------------------%s ------------------' % branch)
print('Loading parameters ...')
# get config information
parameters = read_para.read_conf(path, branch)
print('Updating chart ...')
# update data in chart
chart.update_chart(path, branch, parameters, unstable)
print('Reading chart ...')
# get data for graph
datelist, datadict = chart.read_chart(path, branch)
print('Drawing graphs ...')
# draw graph for CI result
pic1, pic2 = graph.draw_graph(path, branch, datelist, datadict,
                              parameters['graph']['day_num'])
# get_labels(path, branch, parameters)
sources = {'s1': pic1, 's2': pic2}
print('Creating email content ...')
# create the html of the email
sources = mail.create_email(path, branch, datelist[0], sources, parameters)
if r == 'a':
    rec = parameters['to']
    cop = parameters['cc']
elif r == 'm':
    rec = [parameters['from']]
    cop = [parameters['from']]
mail.send_email(path, branch, datelist[0], parameters['from'], rec, cop, sources)
print('Email(' + str(datelist[0]) + ') sent to:\n\t\t' + '\n\t\t'.join(rec))
print('          copied to:\n\t\t' + '\n\t\t'.join(cop))
stop = time.perf_counter()
#!/bin/python
# %%
import os
import pandas as pd
from dataset import merge_tweets_of_all_groups
from graph import create_graph, graph_measures, merge_graph_feats_with_tweet_feats, draw_graph

# merge_tweets_of_all_groups()
g = create_graph(only_classified_users=True, override=True)
# meas = graph_measures(g)
# merge_graph_feats_with_tweet_feats(meas)
draw_graph(False)
def fuzzificate(self, rule_no):
    # The firing strength (alpha cut) is the min of the antecedent memberships.
    alpha_cut = min(self.distance_membership, self.brightness_membership)
    ranges = self.ranges
    # Each rule clips one output membership function at the alpha cut, so the
    # rule-to-function mapping can be expressed as a table.
    rule_funcs = {
        1: power.very_high, 2: power.very_high,
        3: power.high, 4: power.high,
        5: power.low, 6: power.low, 7: power.low, 8: power.low,
        9: power.very_low, 10: power.low,
        11: power.very_low, 12: power.very_low,
    }
    func = rule_funcs.get(rule_no)
    if func is None:  # unknown rule number
        return None
    y = [round(func(r, alpha_cut), 2) for r in ranges]
    draw_graph(ranges, y, 'rule' + str(rule_no), rule_no)
    return y
import graph as gp

print("PROJEKCIK 1 GRAFY")
graph_in_file = gp.read_graph_from_file('projekt1/graph_examples.txt')
graph_type = type(graph_in_file)
if graph_type == gp.AdjacencyMatrix:
    print(gp.convert(graph_in_file, gp.AdjacencyList))
    print(gp.convert(graph_in_file, gp.IncidenceMatrix))
elif graph_type == gp.IncidenceMatrix:
    print(gp.convert(graph_in_file, gp.AdjacencyMatrix))
    print(gp.convert(graph_in_file, gp.AdjacencyList))
elif graph_type == gp.AdjacencyList:
    print(gp.convert(graph_in_file, gp.AdjacencyMatrix))
    print(gp.convert(graph_in_file, gp.IncidenceMatrix))
gp.draw_graph(graph_in_file)

rnd_graph = gp.random_graph(7, 10)
gp.draw_graph(rnd_graph)
rnd1_graph = gp.random_graph(7, edge_probability=0.5)
gp.draw_graph(rnd1_graph)
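# gp.random_graph is called above both with a fixed edge count and with an
# edge probability, which matches the two classic Erdos-Renyi models. A sketch
# of that dual interface, written with networkx for brevity (the original
# module's implementation is not shown):
import networkx as nx

def random_graph(n, edges=None, edge_probability=None):
    if edges is not None:
        return nx.gnm_random_graph(n, edges)         # G(n, m): exactly m edges
    return nx.gnp_random_graph(n, edge_probability)  # G(n, p): each edge kept with prob. p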
louvain_res = [
    "louvain_n400_p0.3_q0.3.txt",
    "louvain_n400_p0.6_q0.3.txt",
    "louvain_n400_p0.8_q0.1.txt",
    "louvain_n400_p0.8_q0.2.txt",
    "louvain_benchmark_n128_e1024.txt",
    "louvain_benchmark_n1000_e8000.txt"
]

if __name__ == '__main__':
    print("Loading Louvain Algorithm's results")
    for i in range(len(louvain_res)):
        louvain_filename = louvain_res[i]
        graph_filename = graphs[i]
        print("---")
        print("Louvain Algorithm's result of graph " + graph_filename)
        cluster = load_community("results/" + louvain_filename, delimiter=" ")
        graph, _, _ = load_graph("data/" + graph_filename)
        labels_unique, partition = np.unique(cluster, return_counts=True)
        number_communities = len(labels_unique)
        community_filename = os.path.splitext(louvain_filename)[0]
        draw_graph(graph, cluster, community_filename + ".png")
        print("Picture of communities exported to graph/" + community_filename + ".png")
        print("Number of communities:", number_communities)
        print("Partition of nodes in different clusters/labels:", [item for item in partition])
def opening():
    os.system("cd fabric8-analytics.github.io; git log --pretty=oneline > logs.txt")
    with open("fabric8-analytics.github.io/logs.txt", "r") as f:
        for line in f:
            splitted_line = line.split()
            if splitted_line[1] == "Dashboard" and len(splitted_line) == 3:
                commits.append(splitted_line)
    commits.reverse()
    print(commits, len(commits))


def checkout():
    for commit in commits:
        os.system("cd fabric8-analytics.github.io; git checkout %s" % (commit[0]))
        print(os.path.exists(file))
        if os.path.exists(file):
            graph.opening(file, commit[2])


opening()
checkout()
for file in graph.lines:
    if len(file) > 1:
        graph.draw_graph(file[0])
def get_coords_poly(fn, coefficients, lo, hi):
    x = []
    y = []
    for n in range(int(lo), int(hi + 1)):
        x.append(n)
        y.append(fn(n, coefficients))
    ax = []
    ax.extend([min(x), max(x), min(y), max(y)])
    plt.title("Plot of polynomial function")
    plt.xlabel("x-values")
    plt.ylabel("y-values")
    return x, y, ax


if __name__ == "__main__":
    coe = [1, 2, 1]
    x_values = list(graph.ranger(-10, 10, 1))
    y_values = []
    for n in x_values:
        y_values.append(polynomial(n, coe))
    value_pairs = list(zip(x_values, y_values))
    print(f"Function values: {value_pairs}")

    coefficients = [0.5, 2, 0.25]
    x, y, ax = get_coords_poly(polynomial, coefficients, 0, 100)
    graph.draw_graph(x, y, ax)
    graph.show_graph()
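# polynomial(n, coefficients) is referenced above but not defined here. A
# minimal sketch using Horner's rule, assuming coefficients are ordered from
# the highest-degree term down (the ordering is an assumption):
def polynomial(x, coefficients):
    result = 0
    for c in coefficients:
        result = result * x + c  # e.g. [1, 2, 1] evaluates x**2 + 2*x + 1
    return result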
def angel_mortal_arrange(player_list):
    '''
    Depending on the gender preferences to follow, run the edge-finding
    algorithm, generate a graph and find a Hamiltonian circuit.
    '''
    print("Arranging player list: %s" % player_list)
    # Convert the list of players into a list of valid edges
    player_edges = get_player_edges_from_player_list(player_list)
    # Generate the overall graph from all edges
    overall_graph = get_graph_from_edges(player_edges)
    print("Number of nodes in overall graph: " + str(overall_graph.number_of_nodes()))
    # Find all connected components and find cycles for all
    # (strongly_connected_component_subgraphs was removed in networkx 2.4,
    # so build the subgraphs from strongly_connected_components instead)
    graphs = [overall_graph.subgraph(c).copy()
              for c in nx.strongly_connected_components(overall_graph)]
    print("\nConnected components detected: %s" % len(graphs))
    print("Printing original player list: ")
    for player in player_list:
        print(player)
    print("\n\n")
    print("Player list size: " + str(len(player_list)))
    list_of_player_chains = []
    for G in graphs:
        print("Printing players in current graph:")
        for graph_player in G.nodes():
            print(graph_player)
        # Draw this intermediate graph
        print("Number of nodes in graph: " + str(G.number_of_nodes()))
        if DISPLAY_GRAPH:
            draw_graph(G)
        # Find out if there is DEFINITELY no Hamiltonian cycle
        definitely_no_full_cycle = is_there_definitely_no_hamiltonian_cycle(G)
        print("Is there DEFINITELY no full cycle? - %s" % definitely_no_full_cycle)
        # Sleep for a few seconds
        time.sleep(2)
        '''
        # Output all cycles that encompass all nodes (valid pairings)
        full_cycles = get_full_cycles_from_graph(G)
        # Pick any full cycle to draw, or draw nothing if there are no full cycles
        full_cycle = get_one_full_cycle(full_cycles)
        '''
        full_cycle = hamilton(G)  # get_one_full_cycle_from_graph(G)
        # full_cycle = get_hamiltonian_path_from_graph(G)
        # Draw the full cycle if it exists
        if full_cycle is not None:
            G_with_full_cycle = convert_full_cycle_to_graph(full_cycle)
            draw_graph(G_with_full_cycle)
            list_of_player_chains.append(full_cycle)
        else:
            print("There is no full cycle - sorry! This means that the current set of "
                  "players cannot form a perfect chain given the arrangement requirements")
    return list_of_player_chains
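# hamilton(G) is not shown. A minimal backtracking sketch of a Hamiltonian
# cycle search over a directed networkx graph; exponential in the worst case,
# so only viable for small player graphs. The name matches the call site, but
# the body and the node-list return convention are assumptions:
def hamilton(G):
    nodes = list(G.nodes())
    if not nodes:
        return None
    start = nodes[0]

    def extend(path, visited):
        if len(path) == len(nodes):
            # accept only if the last node closes the cycle back to the start
            return path if G.has_edge(path[-1], start) else None
        for nxt in G.successors(path[-1]):
            if nxt not in visited:
                found = extend(path + [nxt], visited | {nxt})
                if found is not None:
                    return found
        return None

    return extend([start], {start})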
def reconstruct_path(x):
    v = 0
    try:
        (i, j) = x[v][0]
    except IndexError:
        return []
    path = [i, v]
    while j is not None:
        j = find_match_and_remove(x[v], i)
        path.append(j)
        i, v = v, j
    return path[:-2]


ea_algo = ga.EA(fitness=fitness, mutation=mutation)
best_x = ea_algo.run(n=n, x_init=x_init, offspring_size=10, n_generations=100, p=0.5)
print('fitness =', fitness(best_x))
print(best_x)
path = reconstruct_path(best_x)
print(path)
graph.draw_graph(edges, 'graph.png')
graph.draw_graph(edges, 'resources/graph_cycle.png', path=path)
def create_triangulation(points):
    # Sort all points by (y, x)
    points.sort(key=lambda point: (point.y, point.x))
    # Build a triangle on the first 3 points
    points[0].bind(points[1])
    points[1].bind(points[2])
    points[2].bind(points[0])
    convex_hall = points[:3]
    # Then add the remaining points one by one
    # convex_hall = add_point(convex_hall, points[3])
    for i in range(3, len(points)):
        convex_hall = add_point(convex_hall, points[i])
    return points


if __name__ == "__main__":
    fig, ax = plt.subplots()
    # points1 = [Point(0, 8.5), Point(8.2, 0), Point(8, 4), Point(14, 4), Point(19, 5.5), Point(3, 11),
    #            Point(7, 12), Point(12, 11.5), Point(17, 9), Point(19, 5.5)]
    coords = [(35, 425), (123, 365), (240, 192), (480, 67), (512, 212), (671, 161), (897, 431),
              (800, 383), (674, 377), (553, 445), (454, 542), (374, 452), (266, 394), (344, 374)]
    points2 = [Point(coord[0], coord[1]) for coord in coords]
    # draw_graph(create_triangulation(points1))
    # for point in points1:
    #     plt.plot(point.x, point.y, 'ro')
    # plt.show()
    draw_graph(create_triangulation(points2))
    for point in points2:
        plt.plot(point.x, point.y, 'ro')
    plt.show()
from google_sheet import read_google_sheet
from topology import topological_sort, preprocess_data
from file_process import generate_deploy_files
from graph import draw_graph

if __name__ == '__main__':
    # read data from the Google Sheet
    list_of_records = read_google_sheet(secret_key_file='input_files/secret.json')
    print(list_of_records)
    # parse data
    apps = preprocess_data(list_of_records)
    print(apps)
    # topological sort
    result, jobs_order = topological_sort(apps)
    print(jobs_order)
    if result == 'OK':
        generate_deploy_files(jobs_order)
        draw_graph(apps)
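# topological_sort(apps) is imported from the project's topology module and
# not shown. A minimal Kahn's-algorithm sketch matching the
# (result, jobs_order) return convention checked above; that `apps` maps each
# app to the list of apps it depends on is an assumption:
from collections import deque

def topological_sort(apps):
    indegree = {name: 0 for name in apps}
    dependents = {name: [] for name in apps}
    for name, deps in apps.items():
        for dep in deps:
            indegree[name] += 1
            dependents[dep].append(name)
    queue = deque(name for name, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        name = queue.popleft()
        order.append(name)
        for nxt in dependents[name]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    if len(order) != len(apps):
        return 'CYCLE', []  # a dependency cycle: no valid deploy order exists
    return 'OK', order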
                    y_train,
                    validation_data=(x_test, y_test),
                    epochs=FLAGS.n_e,
                    batch_size=FLAGS.b_s,
                    verbose=FLAGS.verbose)

# Evaluate the model
scores = model.evaluate(x_test, y_test)
log.logger.info("ACC(test):\t" + str(scores[2] * 100) + "%\t" + log.filename + " s" + str(FLAGS.seed) + "\t")
log.logger.info("MSE(test):\t" + str(scores[1]) + "\t" + log.filename + " s" + str(FLAGS.seed) + "\t")
scores = model.evaluate(x_data, y_data)
log.logger.info("ACC(all):\t" + str(scores[2] * 100) + "%\t" + log.filename + " s" + str(FLAGS.seed) + "\t")
log.logger.info("MSE(all):\t" + str(scores[1]) + "\t" + log.filename + " s" + str(FLAGS.seed) + "\t")

# Save model
model_json = model.to_json()
with open("result/model/" + log.filename + ".json", "w") as json_file:
    json_file.write(model_json)  # serialize model to JSON
model.save_weights("result/model/" + log.filename + ".h5")  # weights
print("Save model ... done")

# Make graph
if FLAGS.graph == 1:
    est = model.predict(x_data)
    graph.draw_graph(x_data[:, -1], y_data, est)
    print("Save graph ... done")