def genetic(param_grid_dic, fun):
    #%%
    from importlib import reload
    import genetic as g
    reload(g)

    gene_names = list(param_grid_dic.keys())
    genes_grid = param_grid_dic
    gene_result = g.genetic_algorithm(fun, genes_grid,
                                      init_pop=None, pop_size=30, n_gen=10,
                                      mutation_prob=0.1,
                                      normalize=g.normalizer(2.0, 0.01),
                                      seed=1336)
    #%% 0.7407
    # second experiment cell: re-run with a higher mutation rate; this result is returned
    gene_result = g.genetic_algorithm(fun, gene_names, genes_grid,
                                      init_pop=None, pop_size=30, n_gen=10,
                                      mutation_prob=0.2,
                                      #normalize=g.normalizer(1.0, 0.3),
                                      seed=1337)
    #%%
    return gene_result
def genetic(param_grid_dic, fun, seed=1336):
    #%%
    from importlib import reload
    import genetic as G
    reload(G)

    genes_grid = param_grid_dic
    best_val, best_idxs, fun_eval = G.genetic_algorithm(fun, genes_grid,
                                                        init_pop=None, pop_size=10, n_gen=30,
                                                        mutation_prob=0.1,
                                                        normalize=G.normalizer(2.0, 0.01),
                                                        seed=seed)
    # First set of experiments
    #best_val, best_idxs, fun_eval = G.genetic_algorithm(fun, genes_grid,
    #                                                    init_pop=None, pop_size=30, n_gen=10,
    #                                                    mutation_prob=0.1,
    #                                                    normalize=G.normalizer(2.0, 0.01),
    #                                                    seed=seed)
    print(best_val, fun_eval.eval_cnt())
    #%%
    return fun_eval
def genetic_with_local_search(random_constructor, edgelist):
    # Seed a small population with the genetic algorithm, then polish each
    # individual with local search and keep the overall best solution.
    population = genetic_algorithm(edgelist, random_constructor, 5, 1.2, 10)
    neighborhood_factory = NeighborhoodFactory(edgelist, 'Reversal')
    best = None
    for p in population:
        new = local_search(p, best_improvement, neighborhood_factory)
        if best is None or new < best:
            best = new
            print('New best is {}'.format(best.obj))
    return best
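# Hedged sketch (illustrative only): a minimal best-improvement local search of the kind
# genetic_with_local_search() relies on. The solution interface (an `obj` cost attribute)
# and the `neighbors()` method on the neighborhood factory are assumptions for the sketch,
# not the project's actual API.
def best_improvement_local_search_sketch(solution, neighborhood_factory):
    current = solution
    while True:
        # evaluate the whole neighborhood and keep the single best neighbor
        best_neighbor = min(neighborhood_factory.neighbors(current),
                            key=lambda s: s.obj, default=None)
        if best_neighbor is None or best_neighbor.obj >= current.obj:
            return current  # no improving move left: local optimum
        current = best_neighbor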
import random
import numpy

import Problem, genetic
from utils import *

# generate a random TSP problem
size = 5
loc = list(range(size))
start = random.randint(0, size - 1)
map = init_matrix(numpy.random.random((size, 2)))
print("size=%s" % (size))

# genetic algorithm
ga_result = genetic.genetic_algorithm(loc, map, 2000, 10)
print("Genetic algorithm's optimal result is %s and the order is %s" % (ga_result[0], ga_result[1]))
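# Hedged sketch (assumption, not the project's init_matrix): with random 2-D city
# coordinates as above, a TSP "map" is typically the pairwise Euclidean distance matrix.
def euclidean_distance_matrix(points):
    """points: (n, 2) array of coordinates -> (n, n) symmetric distance matrix."""
    diff = points[:, None, :] - points[None, :, :]
    return numpy.sqrt((diff ** 2).sum(axis=-1))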
from hillclimb import hillclimb
from simulated_annealing import simulated_annealing
from genetic import genetic_algorithm

print("List of available algorithms:\n" +
      "1. Hill Climbing\n" +
      "2. Simulated Annealing\n" +
      "3. Genetic Algorithm\n")
input_algorithm = int(input("Choose the desired algorithm: "))
while input_algorithm < 1 or input_algorithm > 3:
    print("That algorithm is not among the choices.")
    input_algorithm = int(input("Choose the desired algorithm: "))

input_file = input("Enter the name of the input file: ")

if input_algorithm == 1:
    hillclimb(input_file)
elif input_algorithm == 2:
    simulated_annealing(input_file)
elif input_algorithm == 3:
    init_pop = input("Enter the initial population size. Must be a power of 2 (4096): ")
    epoch_length = input("Enter the epoch length (1000): ")
    if init_pop == "" and epoch_length == "":
        genetic_algorithm(input_file)
    else:
        if init_pop == "":
            genetic_algorithm(input_file, int(epoch_length))
        elif epoch_length == "":
            genetic_algorithm(input_file, int(init_pop))
        else:
            genetic_algorithm(input_file, int(init_pop), int(epoch_length))
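# Hedged sketch (illustrative only, not part of the original program): the prompt above
# says the initial population must be a power of 2, but the value is never validated;
# a hypothetical helper like this could be used before calling genetic_algorithm:
def _is_power_of_two(n):
    return n > 0 and (n & (n - 1)) == 0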
def train(iterations, sample_size, reduce, positive_train, negative_train, test_data, test_label):
    f1_original_clr = []
    f1_original = []
    f1_dca = []
    f1_clr = []
    f1_ilr = []
    roc_original_clr = []
    roc_original = []
    roc_dca = []
    roc_clr = []
    roc_ilr = []

    for _ in range(iterations):
        # Select a smaller, random sample from the training data
        train_sample_data, train_sample_label = split_train_test(positive_train, negative_train, sample_size)

        f1_original_data, roc_original_data = train_svm(train_sample_data, train_sample_label, test_data, test_label)
        f1_original.append(f1_original_data)
        roc_original.append(roc_original_data)

        # Replace zeros with a tiny value before the log-ratio transforms
        train_sample_data[train_sample_data == 0] = 0.1e-32
        test_data[test_data == 0] = 0.1e-32
        clr_original_train = clr(train_sample_data)
        clr_original_test = clr(test_data)

        scaler = StandardScaler()
        clr_original_train = np.nan_to_num(scaler.fit_transform(clr_original_train))
        # scale the test data with the statistics fitted on the training data
        clr_original_test = np.nan_to_num(scaler.transform(clr_original_test))

        f1_original_data_clr, roc_original_data_clr = train_svm(clr_original_train, train_sample_label,
                                                                clr_original_test, test_label)
        f1_original_clr.append(f1_original_data_clr)
        roc_original_clr.append(roc_original_data_clr)

        # DCA: evaluate every candidate reduction matrix and keep the best ROC
        matrices = genetic_algorithm(train_sample_data, reduce)
        roc_dca_iterations = []
        for br_matrix in matrices:
            reduced_data = np.matmul(br_matrix, train_sample_data.transpose()).transpose()
            reduced_test = np.matmul(br_matrix, test_data.transpose()).transpose()
            f1_dca_data, roc_dca_data = train_svm(reduced_data, train_sample_label, reduced_test, test_label)
            roc_dca_iterations.append(roc_dca_data)
        roc_dca.append(max(roc_dca_iterations))

        # CLR and ILR transformations (zeros set to small values first)
        train_sample_data[train_sample_data == 0] = 0.1e-32
        test_data[test_data == 0] = 0.1e-32
        clr_data_train = clr(train_sample_data)
        clr_test = clr(test_data)
        ilr_data_train = ilr(train_sample_data)
        ilr_test = ilr(test_data)
        np.savetxt("ilr_data.csv", ilr_data_train, delimiter=",")

        # PCA to reduce dimensions
        pca_clr = PCA(n_components=reduce)
        pca_ilr = PCA(n_components=reduce)
        fit_train_clr = np.ascontiguousarray(pca_clr.fit_transform(clr_data_train))
        fit_test_clr = np.ascontiguousarray(pca_clr.transform(clr_test))
        fit_train_ilr = np.ascontiguousarray(pca_ilr.fit_transform(ilr_data_train))
        fit_test_ilr = np.ascontiguousarray(pca_ilr.transform(ilr_test))
        np.savetxt("ilr_data_pca.csv", fit_train_ilr, delimiter=",")

        pca_clr_reduced_train = np.nan_to_num(fit_train_clr)
        pca_ilr_reduced_train = np.nan_to_num(fit_train_ilr)
        fit_test_clr = np.nan_to_num(fit_test_clr)
        fit_test_ilr = np.nan_to_num(fit_test_ilr)

        f1_pca_clr_data, roc_pca_clr_data = train_svm(pca_clr_reduced_train, train_sample_label,
                                                      fit_test_clr, test_label)
        f1_pca_ilr_data, roc_pca_ilr_data = train_svm(pca_ilr_reduced_train, train_sample_label,
                                                      fit_test_ilr, test_label)
        f1_clr.append(f1_pca_clr_data)
        roc_clr.append(roc_pca_clr_data)
        f1_ilr.append(f1_pca_ilr_data)
        roc_ilr.append(roc_pca_ilr_data)

    return (sum(roc_original) / iterations,
            sum(roc_original_clr) / iterations,
            sum(roc_dca) / iterations,
            sum(roc_clr) / iterations,
            sum(roc_ilr) / iterations)
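# Hedged sketch: clr() above is assumed to be the standard centered log-ratio transform
# for compositional data, clr(x) = log(x / geometric_mean(x)) applied row-wise. A minimal
# NumPy version (illustrative, not necessarily the implementation used by train()):
def clr_sketch(X):
    """X: (n_samples, n_parts) strictly positive array -> row-wise CLR transform."""
    log_X = np.log(X)
    return log_X - log_X.mean(axis=1, keepdims=True)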
def generate_distance_matrix(filename):
    # (the opening of this function -- reading `filename` into `f` and defining
    # `pattern` -- is not shown in the original snippet)
    m = re.search(pattern, "".join(f.readlines()))
    name = m.group(1)
    size = int(m.group(2))
    numbers = [int(x) for x in "".join(m.group(3).split()).split(",")]

    # Rebuild the full symmetric distance matrix from the flattened upper triangle.
    distances = []
    position = 0
    for i in range(size):
        row = []
        for j in range(i):
            row.append(distances[j][i])   # mirror the already-built columns
        row.append(0)                     # zero on the diagonal
        row += numbers[position:position + size - i - 1]
        position += size - i - 1
        distances.append(row)
    return distances


if __name__ == "__main__":
    filename = sys.argv[2]
    distances = generate_distance_matrix(filename)
    algorithm = sys.argv[1]
    if algorithm == 'genetic':
        genetic.distances = distances
        solution, length = genetic.genetic_algorithm()
    elif algorithm == 'annealing':
        annealing.distances = distances
        solution, length = annealing.annealing_algorithm()
    print(solution)
    print(length)
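# Worked example (illustrative values): for size = 3 and numbers = [12, 34, 56], the loop
# in generate_distance_matrix() above rebuilds the symmetric matrix with a zero diagonal:
#   [[ 0, 12, 34],
#    [12,  0, 56],
#    [34, 56,  0]]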
#print(newp_cisat)
#scene.simul.trace(newp_cisat)
#raw_input("hit enter:")
"""
basepath = shortest.shortest_two(state)
tot, newp = pathcost(basepath, scene.simul.drone, state)
#scene.simul.trace(newp)
print("total cost of base path 2:", tot)
"""

print("Computing genetic algorithm path")
start = time.time()
best_path, max_list, avg_list = genetic.genetic_algorithm(
    state, genetic.A_star_dist, genetic.fitness_d_water,
    pop_size=10000, num_generation=300, lamda=10000, mutation_prob=0.5)
end = time.time() - start

print("best answer: ", best_path)
print("max list: ", max_list)
print("avg list: ", avg_list)

tot2, newp2 = pathcost(best_path, scene.simul.drone, state)
tofile = [end, tot2, newp2]
with open(str(randseed) + "-gapath.pickle", "wb") as out:
    pickle.dump(tofile, out)
print("Total cost of GA path:", tot2)
print("Elapsed Time: ", end)

if tot2 < tot:
    print("Woohoo! Genetic path cost is shorter!")
def global_plus_local(f):
    """Run the whole approach: global search (genetic algorithm) followed by local search."""
    iterations_genetic = 1   # number of genetic algorithm runs
    knowledge = 0.8          # level of knowledge
    list_best_individuals = list()  # best individual of each genetic algorithm run
    list_best_fitness = list()      # best fitness of the best individual of each run
    list_generations = list()       # generations
    list_drones_climbing = list()
    list_evolution_max = list()
    type_global = "Genetic"
    type_local = "Hill Climbing"

    f.write(type_global)
    f.write("\n")
    scenarios.generate_victim_positions_traces()
    scenarios.partial_knowledge_generation(knowledge)

    # GLOBAL: genetic algorithm runs
    for i in range(0, iterations_genetic):
        individual, fitness, generation, evol_max = genetic.genetic_algorithm(
            "global_plus_local", i, knowledge)
        list_best_individuals.append(individual)
        list_best_fitness.append(fitness)
        list_generations.append(generation)
        list_evolution_max.append(evol_max)

    length = len(list_best_fitness)
    mean = sum(list_best_fitness) / length
    sum2 = sum(x * x for x in list_best_fitness)
    std = abs(sum2 / length - mean**2)**0.5

    f.write("Results \n")
    f.write("Max, Min, Mean, Std \n")
    f.write(str(max(list_best_fitness)) + "," + str(min(list_best_fitness)) + "," +
            str(mean) + "," + str(std))
    f.write("\n")

    global_max = max(list_best_fitness)
    index = miscelleneous.find_max(list_best_fitness)
    plots.print_drones_data(list_best_individuals[index], f)

    # LOCAL: hill climbing from the best genetic individual
    f.write(type_local)
    f.write("\n")
    # To simulate a different number of drones for the initial deployment and for the
    # adaptation to the real conditions:
    #list_dr = quality.init_modified(list_best_individuals[index], global_variables.num_drones)
    #list_drones_climbing, records = local.hill_climbing(list_dr, list_best_individuals[index].fitness.values)
    list_drones_climbing, records = local.hill_climbing(
        list_best_individuals[index], list_best_individuals[index].fitness.values)

    f.write("Results \n")
    f.write(str(quality.evaluate(list_drones_climbing)))
    plots.print_drones_data(list_drones_climbing, f)
    plots.positions(list_best_individuals[index], list_drones_climbing, type_global, type_local)
    plots.evolution_local(records, type_local)
    plots.evolution_global(list_evolution_max[index], type_global)

    print("######### FIRST DEPLOYMENT STATISTICS ################")
    print(" Min %s" % min(list_best_fitness))
    print(" Max %s" % max(list_best_fitness))
    print(" Avg %s" % mean)
    print(" Std %s" % std)
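# Quick check of the population-standard-deviation formula used above,
# std = sqrt(E[x^2] - mean^2) (illustrative values): for fitnesses [2.0, 4.0, 6.0],
# mean = 4.0, sum2 = 56.0, std = abs(56.0 / 3 - 16.0) ** 0.5 ≈ 1.633 = sqrt(8/3).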
def only_global(f, type_algorithm, knowledge, fil, argument):
    #iterations_genetic = 120
    iterations_genetic = 30   # how many times we run the algorithm
    iterations_pso = 1

    list_best_individuals = list()  # best individuals
    list_best_fitness = list()      # best fitnesses
    list_generations = list()       # generations
    list_evolution_max = list()
    list_covergence = list()

    list_best_individuals_f1 = list()
    list_best_fitness_f1 = list()
    list_generations_f1 = list()
    list_evolution_max_f1 = list()
    list_id_f1 = list()
    list_covergence_f1 = list()

    list_best_individuals_f2 = list()
    list_best_fitness_f2 = list()
    list_generations_f2 = list()
    list_evolution_max_f2 = list()
    list_id_f2 = list()
    list_covergence_f2 = list()

    list_best_individuals_f3 = list()
    list_best_fitness_f3 = list()
    list_generations_f3 = list()
    list_evolution_max_f3 = list()
    list_id_f3 = list()
    list_covergence_f3 = list()

    list_best_individuals_f4 = list()
    list_best_fitness_f4 = list()
    list_generations_f4 = list()
    list_evolution_max_f4 = list()
    list_id_f4 = list()
    list_covergence_f4 = list()

    results_f1 = list()
    results_f2 = list()
    results_f3 = list()
    results_f4 = list()
    res = list()

    # Scenario generation: the victims are split into four quadrants
    # (1: up-right, 2: down-left, 3: up-left, 4: down-right); given the preferred number
    # of ground nodes to display, the corresponding quadrants will appear.
    scenarios.generate_victim_positions_traces()
    scenarios.partial_knowledge_generation(knowledge)

    if type_algorithm == "genetic":
        for i in range(0, iterations_genetic):
            individual, fitness, generation, evol_max, convergence = genetic.genetic_algorithm(
                "genetic", i, knowledge)
            list_best_individuals.append(individual)
            list_best_fitness.append(fitness)
            list_generations.append(generation)
            list_evolution_max.append(evol_max)
            list_covergence.append(convergence)
            print("list_covergence STEP ", list_covergence)

        stat = miscelleneous.statistics(list_best_fitness)
        stat_convergence = miscelleneous.statistics(list_covergence)
        data_results = pd.DataFrame(
            [stat], columns=["maximum", "minimum", "mean", "std", "index"])
        data_convergence = pd.DataFrame(
            [stat_convergence], columns=["maximum", "minimum", "mean", "std", "index"])
        data_results.to_csv(fil + "results.csv")
        data_convergence.to_csv(fil + "convergence.csv")
        f.write("The best solution of population 1\n")
        plots.print_drones_data(list_best_individuals[stat["index"]],
                                list_evolution_max[stat["index"]], f)

    if type_algorithm == "pso":
        for i in range(0, iterations_pso):
            individual, fitness = pso.pso_algorithm()
            list_best_individuals.append(individual)
            list_best_fitness.append(fitness)

    if type_algorithm == "multi_population":
        for i in range(0, iterations_genetic):
            print("Iteration genetic : ", i)
            if len(argument) > 1:
                # input: initial positions
                res = ga_multi_population.ga_multi_population(argument, type_algorithm, i, knowledge)
            else:
                # classic search with no individuals to start with
                res = ga_multi_population.ga_multi_population(None, type_algorithm, i, knowledge)
            # ga_multi_population returns one result per node in the ring schema (4 populations);
            # after all runs each results_fN list holds iterations_genetic (30) results.
            for j, r in enumerate(res):
                if j == 0:
                    results_f1.append(r)
                if j == 1:
                    results_f2.append(r)
                if j == 2:
                    results_f3.append(r)
                if j == 3:
                    results_f4.append(r)

    if type_algorithm == "multi_population":
        for r in results_f1:
            list_best_fitness_f1.append(r.best_fitness)
            list_best_individuals_f1.append(r.best)
            list_evolution_max_f1.append(r.best_evolution)
            list_id_f1.append(r.id)
            list_covergence_f1.append(r.convergence_generation)
        for r in results_f2:
            list_best_fitness_f2.append(r.best_fitness)
            list_best_individuals_f2.append(r.best)
            list_evolution_max_f2.append(r.best_evolution)
            list_id_f2.append(r.id)
            list_covergence_f2.append(r.convergence_generation)
        for r in results_f3:
            list_best_fitness_f3.append(r.best_fitness)
            list_best_individuals_f3.append(r.best)
            list_evolution_max_f3.append(r.best_evolution)
            list_id_f3.append(r.id)
            list_covergence_f3.append(r.convergence_generation)
        for r in results_f4:
            list_best_fitness_f4.append(r.best_fitness)
            list_best_individuals_f4.append(r.best)
            list_evolution_max_f4.append(r.best_evolution)
            list_id_f4.append(r.id)
            list_covergence_f4.append(r.convergence_generation)

        stat1 = miscelleneous.statistics(list_best_fitness_f1)
        stat2 = miscelleneous.statistics(list_best_fitness_f2)
        stat3 = miscelleneous.statistics(list_best_fitness_f3)
        stat4 = miscelleneous.statistics(list_best_fitness_f4)

        stat_convergence1 = miscelleneous.statistics(list_covergence_f1)
        stat_convergence2 = miscelleneous.statistics(list_covergence_f2)
        stat_convergence3 = miscelleneous.statistics(list_covergence_f3)
        stat_convergence4 = miscelleneous.statistics(list_covergence_f4)

        data_results = pd.DataFrame(
            [stat1, stat2, stat3, stat4],
            columns=["maximum", "minimum", "mean", "std", "index"])
        data_results.to_csv(fil + "results.csv")
        data_convergence = pd.DataFrame(
            [stat_convergence1, stat_convergence2, stat_convergence3, stat_convergence4],
            columns=["maximum", "minimum", "mean", "std", "index"])
        data_convergence.to_csv(fil + "convergence.csv")

        f.write("The best solution of population 1\n")
        plots.print_drones_data(list_best_individuals_f1[stat1["index"]],
                                list_evolution_max_f1[stat1["index"]], f)
        f.write("The best solution of population 2\n")
        plots.print_drones_data(list_best_individuals_f2[stat2["index"]],
                                list_evolution_max_f2[stat2["index"]], f)
        f.write("The best solution of population 3\n")
        plots.print_drones_data(list_best_individuals_f3[stat3["index"]],
                                list_evolution_max_f3[stat3["index"]], f)
        f.write("The best solution of population 4\n")
        plots.print_drones_data(list_best_individuals_f4[stat4["index"]],
                                list_evolution_max_f4[stat4["index"]], f)

    if type_algorithm == "multi_objective":
        pareto_global = tools.ParetoFront()
        for i in range(0, iterations_genetic):
            pareto = ga_multi_objective.genetic_algorithm("multi_objective", i, knowledge)
            pareto_global.update(pareto)
        plots.print_pareto(pareto_global, f)