def calculate():
    """Run the performance model for the user-supplied configuration,
    draw the graph/plots, and print the statistics into the output window."""
    # Parse the configuration entered by the user.
    request_limits = [int(tok) for tok in
                      configuration.field_input_configuration.get().split()]
    graph.Vertex.max_requests = request_limits
    graph.Vertex.number_of_types = len(graph.Vertex.max_requests)

    # Primary performance parameters (also published on the `program` module).
    op_times = program.TO = [float(tok) for tok in
                             configuration.field_TO.get().split()]  # operation execution times
    cycle_times = program.τ = [float(tok) for tok in
                               configuration.field_τ.get().split()]  # RAM cycle times
    connectivity = program.ξ = float(configuration.field_ξ.get())  # connectivity coefficient

    # Derived parameters (computed from the primary ones).
    intensities = program.ν = [connectivity / (t_op + connectivity * t_cyc)
                               for t_op, t_cyc in zip(op_times, cycle_times)]  # intensity parameter
    service_rates = program.μ = [1 / t_cyc for t_cyc in cycle_times]  # service parameter
    program.ρ = [nu / mu for nu, mu in zip(intensities, service_rates)]

    # Draw the state graph.
    graph.plot_graph(request_limits)

    # Plot performance versus the number of processors.
    plt_data = program.plot_graph_for(
        request_limits,
        int(plot_data.field_input_plot_for.get()) - 1,
        int(plot_data.field_plot_up_to.get()))

    # Render the statistics into the (read-only) output window.
    rows = []
    for n_procs, q_val, perf in zip(plt_data[0], plt_data[1], plt_data[2]):
        rows.append('{:<10} {:<34} {:<30}\n'.format(n_procs, q_val, perf))
    txt = ''.join(rows)
    output_window.text.config(state='normal')
    output_window.text.delete(1.0, 'end')
    output_window.text.insert('end', txt)
    output_window.text.config(state='disabled')
def main():
    """Run the multi-objective GA experiment.

    Parameters come either entirely from the command line (11 argv entries)
    or interactively (2 argv entries: program name + instance filename).
    Executes the GA ``nExecutions`` times, plots a convergence graph per
    execution plus an averaged one, and writes a summary log.
    """
    print("\nObjective Genetic Algorithm\n")
    if len(sys.argv) != 11 and len(sys.argv) != 2:
        print("Wrong number of parameters (expected 2 or 11) but got "
              + str(len(sys.argv)))
        exit()
    if len(sys.argv) == 2:
        # Interactive mode: only the instance filename was given on the CLI.
        print("Number of tasks: ", end="")
        nTasks = int(input())
        print("Number of machines: ", end="")
        nMachines = int(input())
        print("Size of MAKESPAN population: ", end="")
        populationSize = int(input())
        print("Mutation factor (percentage): ", end="")
        mutationFactor = int(input())
        print("Crossover factor (percentage): ", end="")
        crossoverFactor = int(input())
        print("Elitism factor (percentage): ", end="")
        elitismFactor = int(input())
        print("Crossover Operator (1..4): ", end="")
        crossoverOperator = int(input())
        print("Mutation Operator (1..2): ", end="")
        mutationOperator = int(input())
        print("\nNumber of executions: ", end="")
        # BUG FIX: this value was previously bound to `executions`, which was
        # never read again -- the loops below use `nExecutions`, so the
        # interactive mode crashed with NameError.
        nExecutions = int(input())
    else:
        nTasks = int(sys.argv[2])
        nMachines = int(sys.argv[3])
        populationSize = int(sys.argv[4])
        mutationFactor = int(sys.argv[5])
        crossoverFactor = int(sys.argv[6])
        elitismFactor = int(sys.argv[7])
        crossoverOperator = int(sys.argv[8])
        mutationOperator = int(sys.argv[9])
        nExecutions = int(sys.argv[10])
    filename = sys.argv[1]
    iterations = 50000

    summaryLogLines = []
    makespans = []
    flowtimes = []
    averages = []
    arguments = ' '.join(sys.argv[1:])  # hoisted: loop-invariant
    for i in range(nExecutions):
        print("EXECUTION " + str(i + 1) + " of " + arguments)
        # Pick the GA variant module for the requested operator pair.
        # NOTE(review): crossover operators 3 and 4 both fall through to
        # genetic_algorithm41 -- confirm that is intended.
        if crossoverOperator == 1:
            gaModule = genetic_algorithm11 if mutationOperator == 1 else genetic_algorithm12
        elif crossoverOperator == 2:
            gaModule = genetic_algorithm21 if mutationOperator == 1 else genetic_algorithm22
        else:
            gaModule = genetic_algorithm41
        (bestIndividual, executionGenerations, executionAverages,
         executionMakespans, executionFlowtimes) = gaModule.GA(
            filename, nTasks, nMachines, mutationFactor, crossoverFactor,
            elitismFactor, populationSize, crossoverOperator,
            mutationOperator, iterations).execute()

        makespans.append(executionMakespans)
        flowtimes.append(executionFlowtimes)
        averages.append(executionAverages)

        graphName = '-'.join(sys.argv[1:]).replace(
            ".txt", "") + "-" + str(i + 1) + "-graph.png"
        print(graphName)
        graph.plot_graph(graphName, executionGenerations, executionMakespans,
                         executionAverages, graphName)
        summaryLogLine = logLine(filename, bestIndividual, executionAverages,
                                 populationSize, crossoverOperator,
                                 crossoverFactor, mutationOperator,
                                 mutationFactor, elitismFactor)
        print(summaryLogLine)
        summaryLogLines.append(summaryLogLine)

    # NOTE(review): computed but never used in this function -- confirm
    # whether a per-instance log file was meant to be written here.
    logFilename = filename.replace(".txt", "_log.txt")

    # Average the per-generation statistics across all executions.
    # (Uses executionGenerations from the last run as the generation axis.)
    logAverages = []
    logMakespans = []
    for gen in range(len(executionGenerations)):
        averageSum = 0.0
        makespansSum = 0.0
        for run in range(nExecutions):
            averageSum += averages[run][gen]
            makespansSum += makespans[run][gen]
        logAverages.append(averageSum / nExecutions)
        logMakespans.append(makespansSum / nExecutions)

    graphName = '-'.join(sys.argv[1:]).replace(".txt", "") + "-graph.png"
    graph.plot_graph(filename, executionGenerations, logMakespans,
                     logAverages, graphName)
    logSummary(summaryLogLines)
# NOTE(review): this chunk opens mid-function -- the enclosing definition
# (a random-graph builder that uses nodes1/nodes2/p_inter/G) begins above
# this view, so the nesting level of the first statements is reconstructed
# and should be confirmed against the full file.
        for n2 in nodes2:
            # Add a cross-group edge with probability p_inter.
            if np.random.rand() < p_inter:
                G.add_edge(n1, n2)
                has_inter_edge = True
    if not has_inter_edge:
        # Guarantee at least one edge between the two node groups.
        G.add_edge(nodes1[0], nodes2[0])
    #print('connected comp: ', len(list(nx.connected_component_subgraphs(G))))
    # Convert the networkx graph into the project's own graph type.
    G = G.to_directed()
    G = gl.networkx2graph(G)
    return G


def max_n_nodes(graphs):
    # Return the largest node count among the given graphs (0 if empty).
    max_nodes = 0
    for graph in graphs:
        n_nodes = len(graph.nodes)
        if n_nodes > max_nodes:
            max_nodes = n_nodes
    return max_nodes


if __name__ == "__main__":
    # NOTE(review): gnf_dataset_keys is defined but unused in this chunk --
    # presumably a reference list of supported dataset names.
    gnf_dataset_keys = [
        'ego_small', 'community_small', 'graph_rnn_protein', 'ego', 'grid',
        'community', 'community_medium'
    ]
    # Smoke-test: plot every graph of the overfit Erdos-Renyi test split.
    dataset = DatasetErdosRenyiNodes(partition='test', overfit=True)
    for graph in dataset.graphs:
        gl.plot_graph(graph)
from graph import author_graph, plot_graph
from submissions import count_deltas, submission_authors

# Connect to Reddit. client_id/client_secret/user_agent are presumably
# defined elsewhere in this file -- not visible in this chunk.
REDDIT = Reddit(client_id=client_id,
                client_secret=client_secret,
                user_agent=user_agent)
cmv = REDDIT.subreddit('changemyview')

# For the five hottest r/changemyview submissions: count awarded deltas,
# build and plot the author-interaction graph, and report how many
# communities best_partition finds (presumably python-louvain -- verify).
for submission in cmv.hot(limit=5):
    print('\n')
    print(submission.url)
    nDelta = count_deltas(submission, REDDIT)
    print(nDelta, 'deltas')
    authorSubs = submission_authors(submission)
    G = author_graph(authorSubs)
    plot_graph(G)
    partition = best_partition(G)
    nGroups = len(set(partition.values()))
    print(nGroups, 'group(s)')
# NOTE(review): the first line below closes a call that begins above this
# view -- its receiver and earlier arguments are not visible here.
        distance_matrix, static_connection_graph)
# Flatten the punishment graph into matrix form for the ACO solver.
punishment_matrix, transport_matrix = graph.merge_graph_to_matrix(
    punishment_graph)
# Report how many city-to-city connections are actually defined (non-NaN).
print(
    f'{np.count_nonzero(~np.isnan(punishment_matrix))} / {nbr_cities*nbr_cities} connections'
)
#travel_time_graph, punishment_graph, score_graph = graph.generate_punishment_graph_from_distance(nbr_transport_types, distance_matrix, city_extra_points)

# Set arguments shared by every colony run.
args = (punishment_matrix, transport_matrix, city_extra_points)
kwargs = {
    'start_city': 2,
    'target_city': 9,
    'nbr_ants': 50,
    'verbose': True,
    'evaporation': 0.5,
    'alpha': 1.0,  # pheromones
    'beta': 3.0  # scores
}

# Run colonies in parallel
best_path, best_score, all_results = aco.run_parallel_colonies(
    nbr_parallel_jobs, nbr_colonies, args, kwargs)

# Run 1 colony
#best_path, best_score = aco.summon_the_ergodic_colony(*args, **kwargs)

# Plot the graph with the best path found.
graph.plot_graph(city_locations, punishment_graph, best_path)
def graph(self):
    # Thin delegate to the module-level plot_graph helper; ignores self and
    # returns nothing.
    # NOTE(review): plot_graph is called with no arguments -- confirm it
    # obtains its data from module/global state.
    plot_graph()
# NOTE(review): this chunk opens inside the docstring of a function defined
# above this view (retrieve_dataset, judging by the __main__ block below);
# the reconstructed indentation should be confirmed against the full file.
    'community_overfit'
    'erdosrenyinodes_0.25_overfit'
    :return:
    '''
    # Dispatch on the dataset-name prefix; names encode their parameters
    # as underscore-separated fields.
    if dataset_name.startswith("erdosrenyinodes"):
        _, p, overfit = dataset_name.split("_")
        dataset = d_creator.DatasetErdosRenyiNodes(
            p=float(p), partition=partition, overfit=overfit == "overfit")
    elif dataset_name.startswith("erdosrenyi"):
        # NOTE(review): n_samples is parsed but not forwarded -- confirm
        # whether DatasetErdosRenyi's first argument should receive it.
        _, n_samples, n_nodes, n_edges = dataset_name.split("_")
        dataset = d_creator.DatasetErdosRenyi(None, int(n_nodes),
                                              int(n_edges), partition,
                                              directed)
    elif dataset_name == "community_ours":
        dataset = d_creator.DatasetCommunity(partition=partition)
    elif dataset_name == "community_overfit":
        dataset = d_creator.DatasetCommunity(n_samples=100)
    else:
        raise Exception("Wrong dataset %s" % dataset_name)
    return dataset


if __name__ == "__main__":
    # Smoke-test: load each listed dataset and plot every graph in it.
    datasets = ['erdosrenyinodes_0.25_none']
    for dataset in datasets:
        print(dataset)
        dataset = retrieve_dataset(dataset, "test")
        for sample in dataset.graphs:
            graph.plot_graph(sample)
# Example lookup: an author's ID by name.
ps.author_id(data, 'aris anagnostopoulos')

# Given the author's ID, find her/his publications:
ps.publications_for_author_id(data, 256176)

# Create the set of all author IDs in order to build the graph:
list_of_authors_IDs = ps.return_all_the_authors_IDs(data)
len(list_of_authors_IDs)

# Graph:
G = g.create_graph(list_of_authors_IDs)
g.plot_graph(G)

# We need a dictionary containing all the publications for a given author
# in order to give a relationship author-publication:
# FIX: the original wrapped this in a redundant outer
# `for author in data[i]['authors']` loop whose variable was never used,
# repeating identical work len(authors) times per publication (accidental
# O(n^2)); the duplicate check made every repeat a no-op, so dropping the
# outer loop yields exactly the same dict. Also replaced the
# `key in authorsDict.keys()` test with setdefault.
authorsDict = {}
for record in data:
    pub_id = record['id_publication_int']
    for author in record['authors']:
        key = author['author_id']  # the key is the ID for every author
        pubs = authorsDict.setdefault(key, [])
        if pub_id not in pubs:
            pubs.append(pub_id)
authorsDict
def entry_point():
    """Flask view: plot the stock history for the requested company,
    then redirect the client to the graph page."""
    company = request.args.get('lname')
    y_values, x_values = get_stock_value(company)
    plot_graph(app, x_values, y_values, company)
    return redirect('/graph')