def executar(self, nome_topologia, nome_problema, numero_iteracoes, numero_particulas,
             executions, dispersion, dispersion_iteration, iteration_criteria):
    # Measure the elapsed time
    time_inicio = time()
    TSPConstants.NUMERO_ITERACOES = numero_iteracoes
    TSPConstants.TAM_BANDO = numero_particulas
    TSPConstants.DISPERSION_ITERACAO = dispersion_iteration
    if nome_problema in Input.problemas.keys():
        problema = Input.problemas[nome_problema]
        PSO.cria_mapa(problema[0], problema[1])
        TSPConstants.STOP_CRITERIA = problema[2]
    else:
        print('Nome do problema invalido. Veja nomes disponiveis no pacote "input/Dados."')
    TSPConstants.N_DIMENSION = len(PSO.mapa)
    algoritmo = None
    for i in range(executions):
        if nome_topologia == 'ESTRELA':
            algoritmo = TSP_PSO(PSO.mapa, Estrela, dispersion, iteration_criteria)
        elif nome_topologia == 'LOCAL':
            algoritmo = TSP_PSO(PSO.mapa, Local, dispersion, iteration_criteria)
        elif nome_topologia == 'FOCAL':
            algoritmo = TSP_PSO(PSO.mapa, Focal, dispersion, iteration_criteria)
        elif nome_topologia == 'VONNEUMANN':
            algoritmo = TSP_PSO(PSO.mapa, VonNeumann, dispersion, iteration_criteria)
        elif nome_topologia == 'CLAN':
            TSPClanConstants.N_DIMENSION = len(PSO.mapa)
            algoritmo = TSP_PSO_Clan(PSO.mapa, Clan, dispersion, iteration_criteria)
        else:
            print('Nao existe topologia com este nome.')
        if algoritmo is not None:
            algoritmo.simular()
            mean_fitnesses_evolution.append(PSO.fitnesses)
            fitnesses_of_best.append(PSO.fitnesses[-1])
            best_particles.append(PSO.best_particle[0])
            PSO.fitnesses = []
            PSO.best_particle = []
    time_fim = time()
    time_total = float(time_fim - time_inicio) / executions
    mean_simulation.append(Relatorio.imprimir_resultado_final(
        fitnesses_of_best, mean_fitnesses_evolution, best_particles, time_total))
def compLearn(inputs, numHNodes, iterations, learnRate):
    clusters = []
    cluster_num = 0
    final_clusters = []
    inputs_copy = []
    # Normalize inputs to [0, 1]
    minIn = 10000
    maxIn = 0
    for x in inputs:
        for y in x:
            minIn = min(minIn, y)
            maxIn = max(maxIn, y)
    inputs_copy = PSO.rescaleMatrix(inputs, minIn, maxIn, 0, 1)
    clusters = competitiveLearn(inputs_copy, numHNodes, iterations, learnRate)
    for c in range(len(clusters)):
        final_clusters.append([])
    # Calculate distances to find which cluster center each input lies in
    dist = 10000
    for i in range(len(inputs_copy)):
        for j in range(len(clusters)):
            tmpDist = PSO.EuclideanDistance(clusters[j], inputs_copy[i])
            if tmpDist < dist:
                dist = tmpDist
                cluster_num = j
        dist = 10000
        final_clusters[cluster_num].append(inputs[i])
    # print("Clusters: ")
    # for i in range(len(final_clusters)):
    #     print(clusters[i], final_clusters[i])
    return [x for x in final_clusters if x != []]
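# Hypothetical usage sketch for compLearn above. It assumes this module already
# imports a PSO helper module providing rescaleMatrix and EuclideanDistance and
# defines competitiveLearn; the data and hyperparameters below are made up for
# illustration, not values taken from the source.
if __name__ == "__main__":
    data = [[0.1, 0.2], [0.15, 0.22], [0.9, 0.8], [0.95, 0.85]]
    found_clusters = compLearn(data, numHNodes=2, iterations=500, learnRate=0.05)
    for c in found_clusters:
        print(len(c), "points:", c)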
def getBestCrossover(self): from pprint import pprint #self.tickerSymbol = self.tickerTextBox.get() if self.stockVar.get() == 4: self.tickerSymbol = self.tickerTextBox.get() print(self.pressed) if self.pressed == 0: #Box not checked self.startDate = self.date1TextBox.get() self.endDate = self.date2TextBox.get() #print(startDate, endDate) #print((ystockquote.get_historical_prices(tickerSymbol, startDate, endDate).keys())) #print(str(ystockquote.get_historical_prices(tickerSymbol, startDate, endDate))) entireDictionaryOfData = ystockquote.get_historical_prices(self.tickerSymbol, self.startDate, self.endDate) #entireDictionaryOfData = ystockquote.get_historical_prices('CAS', '2013-11-01', '2013-11-05') #pprint(entireDictionaryOfData.keys()) listOfDates = self.getListOfDatesFromHistoricalData(entireDictionaryOfData) #print(listOfDates) listOfCloses = self.getListOfClosesFromHistoricalData(entireDictionaryOfData,listOfDates) #print(entireStringOfData.find("'Close'")) #print(ystockquote.get_trade_date('GOOG')) #theBestCrossover = PSO.runPSO(listOfCloses) if self.algorithmVar.get() == 0: theBestCrossover = HillClimbing.runHillClimbing(listOfCloses) if self.algorithmVar.get() == 1: theBestCrossover = NelderMeadNew.runNelderMead(listOfCloses) if self.algorithmVar.get() == 2: theBestCrossover = GeneticAlgorithms.runGA(listOfCloses) if self.algorithmVar.get() == 3: theBestCrossover = DifferentialEvolution.runDE(listOfCloses) if self.algorithmVar.get() == 4: theBestCrossover = PSO.runPSO(listOfCloses) if self.algorithmVar.get() == 5: theBestCrossover = SA.runSA(listOfCloses) #theBestCrossover = PSO.figureOutBestConstants(listOfCloses) #theBestCrossover = NelderMeadNew.runNelderMead(listOfCloses) shortLength = theBestCrossover[0] longLength = theBestCrossover[1] if shortLength > longLength: shortLength, longLength = longLength, shortLength #return ('Short Length = '', Long Length = ').format(shortLength, longLength #print(listOfDates) self.bestCrossoverLabel['text'] = 'The Best Crossover is: ' self.answerLabel['text'] = 'Short Length = '+ str(shortLength) + ', Long Length = ' + str(longLength) self.makeGraph(listOfCloses, listOfDates, self.tickerSymbol, shortLength, longLength)
def runPS4(xSize, params, method):
    print('PS4 - {}:\n'.format(method))
    bounds = array([(0, 10)] * xSize)
    x0 = random.uniform(0.5, 5.5, xSize)
    if method == 'PSO':
        return PSO(P4, x0, bounds, numParticles=max([200, xSize * 10]),
                   maxRepeat=10, maxIter=100, params=params).optimize(verbose=False)
    elif method == 'PSO Hybrid':
        return PSOHybrid(P4, x0, bounds, numParticles=max([200, xSize * 10]),
                         maxRepeat=10, maxIter=100, params=params).optimize(verbose=False)
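# Hypothetical call to runPS4 above. P4, PSO, PSOHybrid and numpy's array/random
# are assumed to be imported in this module; the contents of params depend on the
# PSO implementation and are illustrative placeholders, not values from the source.
if __name__ == "__main__":
    best = runPS4(xSize=5, params={'w': 0.7, 'c1': 1.5, 'c2': 1.5}, method='PSO')
    print(best)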
def gopso(id, niter, popsize, nhood_size):
    res = np.inf
    if (id == 1):
        print("paraboloid")
        numvar = 7
        xmin = 0, 5
        xmax = 0, 8
    if (id == 2):
        print("rosenbrock")
        numvar = 7
        xmin = 0, 5
        xmax = 0, 8
    # gopso(idfunc, niter, popsize, nhood_size)
    # Run the optimization algorithm
    PSO = ParSwarm.ParSwarmOpt(xmin, xmax)
    res = PSO.pso_solve(popsize, id, numvar, niter, nhood_size)
    return res
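# Hypothetical invocation of gopso above, assuming numpy (np) and the ParSwarm
# module used inside the function are importable in this project; the iteration
# and population counts are illustrative, not taken from the source.
if __name__ == "__main__":
    best_value = gopso(id=1, niter=200, popsize=40, nhood_size=5)
    print(best_value)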
def selector(algo, func_details, popSize, Iter):
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]
    if (algo == 0):
        x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 1):
        x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 2):
        x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 3):
        x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 4):
        x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 5):
        x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 6):
        x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 7):
        x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 8):
        x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 9):
        x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 10):
        x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    return x
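# Hypothetical driver loop around the selector above. The benchmarks module and
# the optimizer modules (pso, mvo, gwo, ...) are assumed to be importable in this
# project; the function name, bounds, and dimension below are made-up placeholders.
if __name__ == "__main__":
    func_details = ["F1", -100, 100, 30]  # [name, lower bound, upper bound, dimension]
    for algo_id in range(11):
        result = selector(algo_id, func_details, popSize=30, Iter=500)
        print(algo_id, result)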
def selector(algo, func_details, popSize, Iter):
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = 30
    if (algo == 'PSO'):
        x = pso.PSO(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'SSA'):
        x = ssa.SSA(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'GOA'):
        x = goa.GOA(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'IGOA'):
        x = igoa.IGOA(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'MVO'):
        x = mvo.MVO(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'GWO'):
        x = gwo.GWO(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'MFO'):
        x = mfo.MFO(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'CS'):
        x = cs.CS(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'BAT'):
        x = bat.BAT(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'WOA'):
        x = woa.WOA(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    if (algo == 'FFA'):
        x = ffa.FFA(getattr(cec2005, function_name), lb, ub, dim, popSize, Iter)
    return x
def pso_test(doc_dir: str, ref_dir: str, weights: List[float], config): docs = sorted(os.listdir(doc_dir)) refs = sorted(os.listdir(ref_dir)) documents: List[List[List[str]]] = [] references: List[str] = [] features = [] docs_wo_title = [] for d, r in zip(docs, refs): doc, ref = Utils.load_document(doc_dir + "/" + d, ref_dir + "/" + r) doc_wo_title = Utils.remove_headings(doc) p_doc: List[List[str]] = Utils.process_document( doc, config.use_stopwords, config.use_lemmatizer) p_doc_wo_title: List[List[str]] = Utils.process_document( doc_wo_title, config.use_stopwords, config.use_lemmatizer) p_ref: str = Utils.join_sentences( Utils.process_document(ref, config.use_stopwords, config.use_lemmatizer)) features.append(PSO.extract_features(p_doc, config)) documents.append(p_doc_wo_title) references.append(p_ref) docs_wo_title.append(doc_wo_title) weights = np.array(weights) # Generate summary with weights. rouge_scores = [0.0] * len(documents) test_summaries = [] for i, feature in enumerate(features): p_sum_idx = np.argsort(np.dot(feature, weights))[-config.summary_size:] p_sum = Utils.join_sentences([documents[i][idx] for idx in p_sum_idx]) test_summaries.append( Utils.generate_summary( [docs_wo_title[i][idx] for idx in p_sum_idx])) rouge_scores[i] = Utils.calculate_rouge(p_sum, [references[i]], 1) print(sum(rouge_scores)) print(rouge_scores) return test_summaries
def competitiveLearn(inputs, numHNodes, iterations, learnRate):
    index = 0
    nodes = []
    winner = []
    weights = []
    wcount = 0
    tmp_wt = []
    minWt = 10000
    maxWt = 0
    # Randomly assign numHNodes input vectors to be the weights
    for i in range(numHNodes):
        weights.append(random.choice(inputs))
    for i in range(iterations):
        print("{:>7.2%}".format(i / iterations), end="\r")
        # Randomly select an input vector for comparison
        selectedInput = random.choice(inputs)
        # Calculate a starting point for the winner
        winner = PSO.EuclideanDistance(weights[0], selectedInput)
        # Find the winning weight vector
        index = 0
        for w in weights:
            tmp_w = w
            temp = PSO.EuclideanDistance(tmp_w, selectedInput)
            if winner >= temp:  # want the shortest distance
                winner = temp
                index = weights.index(w)  # winning index
        # Update the weight at the winning index
        dist = PSO.EuclideanDistance(selectedInput, weights[index])
        for j in range(len(weights[index])):
            weights[index][j] += learnRate * (dist)
    # Renormalize weights
    for x in weights:
        for y in x:
            minWt = min(minWt, y)
            maxWt = max(maxWt, y)
    weights = PSO.rescaleMatrix(weights, minWt, maxWt, 0, 1)
    # The weights are now the cluster centers
    return weights
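# A minimal sketch of the distance helper competitiveLearn relies on.
# PSO.EuclideanDistance is assumed to compute the straight-line distance between
# two equal-length vectors; this stand-in shows that computation.
import math

def euclidean_distance(a, b):
    # Square the per-component differences, sum them, and take the root.
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))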
def main():
    PSO().run()
import time

import numpy as np               # needed for np.abs below
import matplotlib.pyplot as plt  # needed for the plt.plot calls below

import GA
import PSO


def closest(lst, K):
    idx = (np.abs(lst - K)).argmin()
    return lst[idx]


# Salomon is assumed to be imported from the project's benchmark definitions.
benchmarkType = Salomon()

timeStartGA = time.time()
resultGA = GA.executeGA(benchmarkType)
timeEndGA = time.time()

timeStartPSO = time.time()
resultPSO = PSO.executePSO(benchmarkType)
timeEndPSO = time.time()

benchmarkTimeGA = (timeEndGA - timeStartGA)
benchmarkTimePSO = (timeEndPSO - timeStartPSO)

minorValueGA = (closest(resultGA[2].x, 0))
minorValuePSO = (closest(resultPSO[2], 0))

plt.plot(resultGA[0].n_evals, resultGA[0].x_f_vals)
plt.plot(resultPSO[0].n_evals, resultPSO[0].x_f_vals)
plt.plot(0, 0)
plt.plot(0, 0)
plt.plot(0, 0)
plt.plot(0, 0)
plt.plot(0, 0)
def principal(): while True: afisareMeniu() x = input("Introdu numarul corespunzaotr: ") if (x == "1"): populationNR = int(input("population number:")) n = int(input("Dimensiunea(n):")) k = int(input("cate elemente random pentru turnir se aleg")) nrGeneratii = int(input("Cate generatii sa fie:")) nrRulari = int(input("Cate rulari:")) alpha = float(input("Alpha:")) parentSel = "turnir" start_time = time.time() topRulari = list( evolutionaryAlg(populationNR, n, nrGeneratii, nrRulari, k, alpha, parentSel)) print("--- %s seconds ---" % (time.time() - start_time)) i = topRulari.index(valMinima(topRulari)) vect = citesteVectorTuple("REAL.txt", i) print(topRulari[i]) avg = list() best = list() for i in range(len(vect)): avg.append(float(vect[i][1])) best.append(float(vect[i][0])) plt.figure() plt.title("PB. 4 ") plt.plot(avg, 'r', label="Valoarea medie") plt.plot(best, 'g', label="Valoarea cea mai buna") plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2) plt.show() if (x == "2"): populationNR = int(input("population number:")) n = int(input("Dimensiunea(n):")) k = int(input("cate elemente random pentru turnir se aleg")) nrGeneratii = int(input("Cate generatii sa fie:")) nrRulari = int(input("Cate rulari:")) pb = float(input("Probabilitatea:")) parentSel = "rank" start_time = time.time() topRulari = list( evolutionaryAlg2(populationNR, n, nrGeneratii, nrRulari, k, pb, parentSel)) print("--- %s seconds ---" % (time.time() - start_time)) i = topRulari.index(valMinima(topRulari)) vect = citesteVectorTuple("REAL.txt", i) print(topRulari[i]) avg = list() best = list() for i in range(len(vect)): avg.append(float(vect[i][1])) best.append(float(vect[i][0])) plt.figure() plt.title("PB. 4 ") plt.plot(avg, 'r', label="Valoarea medie") plt.plot(best, 'g', label="Valoarea cea mai buna") plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2) plt.show() if x == "3": populationNR = int(input("population number:")) n = int(input("Dimensiunea(n):")) maxIteration = int(input("Nr maxim iteratii:")) nrRulari = int(input("Cate rulari:")) c1 = float(input("c1:")) c2 = float(input("c2:")) w = float(input("w:")) racire = float(input("Factor racire:")) start_time = time.time() topRulari = list( PSO(c1, c2, w, populationNR, n, maxIteration, nrRulari, racire)) print("--- %s seconds ---" % (time.time() - start_time)) i = topRulari.index(valMinima(topRulari)) vect = citesteVectorTuple("PSO.txt", i) print(topRulari) print(i) print(topRulari[i]) print(vect) avg = list() best = list() for i in range(len(vect)): avg.append(float(vect[i][1])) best.append(float(vect[i][0])) plt.figure() plt.title("PB.4(PSO) ") #plt.plot(avg, 'r', label="Valoarea medie") plt.plot(best, 'g', label="Valoarea cea mai buna") plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=2) plt.show() if x == "4": pop = generatePopulation(3, 2) print("FITNESS POP") for i in pop: print(fitness(i)) bestP = rankSelection(pop) print("FITNESS BEST") for i in bestP: print(fitness(i)) if (x == "0"): print("codul s-a terminat cu succes")
def run(test_problem, max_iterations: int, number_of_runs: int, file_prefix: str, tol=-1, visualisation=False, aPreCallback=None, aPostCallback=None): global g_test_problem global g_iterations g_test_problem = test_problem # Store the results for each optimisation method columns = ['Run', 'Methods'] for i in range(test_problem.number_of_dimensions): columns.append("X_" + str(i)) columns.append("Objective value") columns.append("Euclidean distance") columns.append("Evaluations") df = pd.DataFrame(columns=columns) for run_id in range(number_of_runs): print("Run #", run_id) # Create a random guess common to all the optimisation methods initial_guess = g_test_problem.initialRandomGuess() # Optimisation methods implemented in scipy.optimize methods = [ 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP' ] for method in methods: g_test_problem.number_of_evaluation = 0 optimiser = ScipyMinimize(g_test_problem, method, tol=tol, initial_guess=initial_guess) print("\tOptimiser:", optimiser.full_name) if not isinstance(aPreCallback, (str, type(None))): aPreCallback(optimiser, file_prefix, run_id) optimiser.setMaxIterations(max_iterations) if run_id == 0 and visualisation: optimiser.plotAnimation( aNumberOfIterations=max_iterations, aCallback=None, aFileName=(file_prefix + "_" + optimiser.short_name + "_%d.png")) else: optimiser.run() df = appendResultToDataFrame(run_id, optimiser, df, columns, file_prefix) if not isinstance(aPostCallback, (str, type(None))): aPostCallback(optimiser, file_prefix, run_id) # Parameters for EA g_iterations = int(max_iterations / g_number_of_individuals) # Optimisation and visualisation g_test_problem.number_of_evaluation = 0 optimiser = EvolutionaryAlgorithm(g_test_problem, g_number_of_individuals, initial_guess=initial_guess) print("\tOptimiser:", optimiser.full_name) if not isinstance(aPreCallback, (str, type(None))): aPreCallback(optimiser, file_prefix, run_id) # Set the selection operator #optimiser.setSelectionOperator(TournamentSelection(3)); #optimiser.setSelectionOperator(RouletteWheel()); optimiser.setSelectionOperator(RankSelection()) # Create the genetic operators gaussian_mutation = GaussianMutationOperator(0.1, 0.3) elitism = ElitismOperator(0.1) new_blood = NewBloodOperator(0.0) blend_cross_over = BlendCrossoverOperator(0.6, gaussian_mutation) # Add the genetic operators to the EA optimiser.addGeneticOperator(new_blood) optimiser.addGeneticOperator(gaussian_mutation) optimiser.addGeneticOperator(blend_cross_over) optimiser.addGeneticOperator(elitism) if run_id == 0 and visualisation: optimiser.plotAnimation( aNumberOfIterations=g_iterations, aCallback=visualisationCallback, aFileName=(file_prefix + "_" + optimiser.short_name + "_%d.png")) else: for _ in range(1, g_iterations): optimiser.runIteration() visualisationCallback() df = appendResultToDataFrame(run_id, optimiser, df, columns, file_prefix) if not isinstance(aPostCallback, (str, type(None))): aPostCallback(optimiser, file_prefix, run_id) # Parameters for PSO # Optimisation and visualisation g_test_problem.number_of_evaluation = 0 optimiser = PSO(g_test_problem, g_number_of_individuals, initial_guess=initial_guess) print("\tOptimiser:", optimiser.full_name) if not isinstance(aPreCallback, (str, type(None))): aPreCallback(optimiser, file_prefix, run_id) if run_id == 0 and visualisation: optimiser.plotAnimation( aNumberOfIterations=g_iterations, aCallback=visualisationCallback, aFileName=(file_prefix + "_" + optimiser.short_name + "_%d.png")) else: for _ in range(1, 
g_iterations): optimiser.runIteration() visualisationCallback() df = appendResultToDataFrame(run_id, optimiser, df, columns, file_prefix) if not isinstance(aPostCallback, (str, type(None))): aPostCallback(optimiser, file_prefix, run_id) # Optimisation and visualisation optimiser = PureRandomSearch(g_test_problem, max_iterations, initial_guess=initial_guess) print("\tOptimiser:", optimiser.full_name) if not isinstance(aPreCallback, (str, type(None))): aPreCallback(optimiser, file_prefix, run_id) g_test_problem.number_of_evaluation = 0 if run_id == 0 and visualisation: optimiser.plotAnimation( aNumberOfIterations=max_iterations, aCallback=None, aFileName=(file_prefix + "_" + optimiser.short_name + "_%d.png")) else: for _ in range(max_iterations): optimiser.runIteration() df = appendResultToDataFrame(run_id, optimiser, df, columns, file_prefix) if not isinstance(aPostCallback, (str, type(None))): aPostCallback(optimiser, file_prefix, run_id) # Optimisation and visualisation g_test_problem.number_of_evaluation = 0 optimiser = SimulatedAnnealing(g_test_problem, 5000, 0.04, initial_guess=initial_guess) print("\tOptimiser:", optimiser.full_name) optimiser.cooling_schedule = cooling if not isinstance(aPreCallback, (str, type(None))): aPreCallback(optimiser, file_prefix, run_id) if run_id == 0 and visualisation: optimiser.plotAnimation( aNumberOfIterations=max_iterations, aCallback=None, aFileName=(file_prefix + "_" + optimiser.short_name + "_%d.png")) else: for _ in range(1, max_iterations): optimiser.runIteration() #print(optimiser.current_temperature) df = appendResultToDataFrame(run_id, optimiser, df, columns, file_prefix) if not isinstance(aPostCallback, (str, type(None))): aPostCallback(optimiser, file_prefix, run_id) title_prefix = "" if g_test_problem.name != "": if g_test_problem.flag == 1: title_prefix = "Minimisation of " + g_test_problem.name + "\n" else: title_prefix = "Maximisation of " + g_test_problem.name + "\n" boxplot(df, 'Evaluations', title_prefix + 'Number of evaluations', file_prefix + 'evaluations.pdf', False) boxplot( df, 'Euclidean distance', title_prefix + 'Euclidean distance between\nsolution and ground truth', file_prefix + 'distance.pdf', False) plt.show()
def rescaleSet(data, fromMinVal, fromMaxVal, toMinVal, toMaxVal):
    return [PSO.rescaleMatrix(x, fromMinVal, fromMaxVal, toMinVal, toMaxVal) for x in data]
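# A minimal sketch of the linear rescaling that PSO.rescaleMatrix is assumed to
# perform, based on how it is called throughout these snippets: map every value
# from [from_min, from_max] onto [to_min, to_max]. This is an illustrative
# stand-in, not the project's actual implementation.
def rescale_matrix(matrix, from_min, from_max, to_min, to_max):
    span = (from_max - from_min) or 1  # guard against a zero-width input range
    return [[to_min + (y - from_min) * (to_max - to_min) / span for y in row]
            for row in matrix]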
def ACO(data, ants, iterations): '''Take some input matrix of initial data points and values for the number of ants and the number of iterations. Data is randomly placed on a 2D field that is directly related to the number of input values. This keeps a desirable density on the field at all times. Ants carry out their actions, one per iteration, and then die. The points are then assessed and clusters are determiend.''' global inputs # Scale the inputs minVal = 10000 maxVal = 0 for x in data: for y in x: minVal = min(minVal, y) maxVal = max(maxVal, y) inputs = PSO.rescaleMatrix(data, minVal, maxVal, 0, 1) iterCount = len(data) * iterations global maxDimensions maxDimensions = int(math.sqrt(10 * len(inputs)) + 0.5) global inputLocations inputLocations = [] # Every input needs a location for x in inputs: pos = [random.randint(0, maxDimensions), random.randint(0, maxDimensions)] while pos in inputLocations: pos = [random.randint(0, maxDimensions), random.randint(0, maxDimensions)] inputLocations.append(pos) # for i in range(len(inputLocations)): # print(inputLocations[i], ' \t@', inputs[i]) antCollection = [] for a in range(ants): antCollection.append(ant(int(math.sqrt(2 * maxDimensions)))) for i in range(iterCount): print("{:>7.2%}".format(i / iterCount), end="\r") for x in antCollection: x.logic(int(((i * 5) / iterCount) + 1)) if i % 100 == 0: for x in antCollection: x.updateActivity() for x in antCollection: x.die() # for i in range(len(inputLocations)): # print(inputLocations[i], ' \t@', inputs[i]) global cluster cluster = [] global clusterList clusterList = [] #print(inputLocations, len(inputLocations)) for pos in inputLocations: findNear(pos) if clusterList[-1] != None: clusterList.append(None) # print(clusterList) finalClusters = [] temp = [] for x in clusterList: if x != None: temp.append(data[inputLocations.index(x)]) elif x == None: finalClusters.append(temp) temp = [] return finalClusters
blend_cross_over = BlendCrossoverOperator(0.6, gaussian_mutation) # Add the genetic operators to the EA optimiser.addGeneticOperator(new_blood) optimiser.addGeneticOperator(gaussian_mutation) optimiser.addGeneticOperator(blend_cross_over) optimiser.addGeneticOperator(elitism) test_problem.number_of_evaluation = 0 optimiser.plotAnimation(g_iterations, visualisationCallback) EA_number_of_evaluation = test_problem.number_of_evaluation EA_solution = optimiser.best_solution # Optimisation and visualisation test_problem.number_of_evaluation = 0 optimiser = PSO(test_problem, g_number_of_individuals) optimiser.plotAnimation(g_iterations) PSO_number_of_evaluation = test_problem.number_of_evaluation PSO_solution = optimiser.best_solution # Optimisation and visualisation test_problem.number_of_evaluation = 0 optimiser = SimulatedAnnealing(test_problem, 5000, 0.04) optimiser.plotAnimation(211) SA_number_of_evaluation = test_problem.number_of_evaluation SA_solution = optimiser.best_solution for method, number_of_evaluation, solution in zip(methods, number_of_evaluation_set, solution_set): print(method, ".number_of_evaluation:\t", number_of_evaluation)
for j in range(31):  # int((len(tree)) / 4) - 1
    if j == 0:
        Tree1.append([Tree[0][0], Tree[1][0]])
    else:
        Tree1.append([Tree[j][0], Tree[2 * j][0]])
        Tree1.append([Tree[j][0], Tree[2 * j + 1][0]])

temp = [Tree[2 * i + 2], Tree[2 * i + 3]]
temp = [Tree[int((i + 1) / 2)]] + temp
triangle = [temp[0][0], temp[1][0], temp[1][0]]
Tree2 = Tree1.copy()
del Tree2[i]
del Tree2[2 * i + 1]
del Tree2[2 * i + 1]
if i == 0:
    psw = PSO(func, initial, 2, bounds, num_particles=25, maxiter=130)
else:
    psw = PSO(func, initial, 2, bounds, num_particles=25, maxiter=130)
Tree[i + 1][0] = optimise(tree[i][0], psw, Tree2, triangle)
sol.append(func(Tree[i + 1][0]))
fun.append(sum(sol))
print(k)
k = k + 1


def Draw_branch(start, end):
    pt1 = Point(start[0], start[1])
    pt2 = Point(end[0], end[1])
    ln = Line(pt1, pt2)
    ln.setWidth(3)
    ln.draw(win)
def selector(algo, func_details, popSize, Iter, trainDataset, testDataset): function_name = func_details[0] lb = func_details[1] ub = func_details[2] DatasetSplitRatio = 2 / 3 dataTrain = "datasets/" + trainDataset dataTest = "datasets/" + testDataset Dataset_train = numpy.loadtxt(open(dataTrain, "rb"), delimiter=",", skiprows=0) Dataset_test = numpy.loadtxt(open(dataTest, "rb"), delimiter=",", skiprows=0) numRowsTrain = numpy.shape(Dataset_train)[ 0] # number of instances in the train dataset numInputsTrain = numpy.shape( Dataset_train)[1] - 1 #number of features in the train dataset numRowsTest = numpy.shape(Dataset_test)[ 0] # number of instances in the test dataset numInputsTest = numpy.shape( Dataset_test)[1] - 1 #number of features in the test dataset trainInput = Dataset_train[0:numRowsTrain, 0:-1] trainOutput = Dataset_train[0:numRowsTrain, -1] testInput = Dataset_test[0:numRowsTest, 0:-1] testOutput = Dataset_test[0:numRowsTest, -1] #number of hidden neurons HiddenNeurons = numInputsTrain * 2 + 1 net = nl.net.newff([[0, 1]] * numInputsTrain, [HiddenNeurons, 1]) dim = (numInputsTrain * HiddenNeurons) + (2 * HiddenNeurons) + 1 if (algo == 0): x = pso.PSO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter, trainInput, trainOutput, net) if (algo == 1): x = mvo.MVO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter, trainInput, trainOutput, net) if (algo == 2): x = gwo.GWO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter, trainInput, trainOutput, net) if (algo == 3): x = mfo.MFO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter, trainInput, trainOutput, net) if (algo == 4): x = cs.CS(getattr(costNN, function_name), lb, ub, dim, popSize, Iter, trainInput, trainOutput, net) if (algo == 5): x = bat.BAT(getattr(costNN, function_name), lb, ub, dim, popSize, Iter, trainInput, trainOutput, net) # Evaluate MLP classification model based on the training set trainClassification_results = evalNet.evaluateNetClassifier( x, trainInput, trainOutput, net) x.trainAcc = trainClassification_results[0] x.trainTP = trainClassification_results[1] x.trainFN = trainClassification_results[2] x.trainFP = trainClassification_results[3] x.trainTN = trainClassification_results[4] # Evaluate MLP classification model based on the testing set testClassification_results = evalNet.evaluateNetClassifier( x, testInput, testOutput, net) x.testAcc = testClassification_results[0] x.testTP = testClassification_results[1] x.testFN = testClassification_results[2] x.testFP = testClassification_results[3] x.testTN = testClassification_results[4] return x
import Utils
import PSO
import numpy as np

fn = "history_ncert_class10/chap_3.txt"  # sys.argv[1]
ref_dir_n = "history_ncert_class10/annotations/chapter3"  # sys.argv[2]

# Load file
document, ref_sum = Utils.load_documents(fn, ref_dir_n)

# Pre-process with Stemmer and/or Lemmatizer.
processed_doc = Utils.process_document(document)
processed_ref_sum = Utils.process_documents(ref_sum)

# Extract features
features = PSO.extract_features(processed_doc)

# Initialize a Binary PSO model.
model = PSO.Swarm(processed_doc, processed_ref_sum)

# Train the model with extracted features.
weights = model.train(features)

# Generate summary with weights.
p_sum_idx = np.argsort(np.dot(features, weights))[-PSO.SUMMARY_SIZE:]
p_sum = Utils.generate_summary([document[idx] for idx in p_sum_idx])
p_sum1 = Utils.join_sentences([processed_doc[idx] for idx in p_sum_idx])
ref_sum = Utils.join_docs(processed_ref_sum)

print("Final Rouge Score: ", Utils.calculate_rouge(p_sum1, ref_sum, 1))

f = open("predicted_summary.txt", 'w', encoding='utf-8')
thRef2 = [] for xref, yref in zip(xReference, yReference): distance = sqrt((xref - 0) * (xref - 0) + (yref - 282.84) * (yref - 282.84)) thref2 = (pi) - acos( ((200 * 200) + (200 * 200) - (distance * distance)) / (2 * 200 * 200)) thref1 = atan((0 - xref) / (yref - 282.84)) thref1 = thref1 - acos(((200 * 200) - (200 * 200) + (distance * distance)) / (2 * 200 * distance)) thRef1.append(thref1) thRef2.append(thref2) yCostPSO, KpidPSO = PSO.runPSO(itr) yCostGWO, KpidGWO = GWO1.runGWO(itr) # yCostGA, KpidGA = gademo.runGA(itr) # yCostGA = [1000-x for x in yCostGA] trackingErrorPSO = test.resultPlot(KpidPSO[0], KpidPSO[1], KpidPSO[2]) trackingErrorGWO = test.resultPlot(KpidGWO[0], KpidGWO[1], KpidGWO[2]) print(trackingErrorGWO) # graphs.plotPerformanceGraph(plt, xRef, yRef, list(range(1,5)), list(range(1,5)), list(range(1,5)), list(range(1,5)), list(range(1,5)), list(range(1,5))) # graphs.plotPoseGraph(plt, list(range(len(trackingErrorGWO))), trackingErrorGWO, list(range(len(trackingErrorPSO))), trackingErrorGWO, list(range(5, 10)), list(range(5, 10))) # graphs.plotTorqueGraph(plt, list(range(10, 15)), list(range(10, 15)), list(range(10, 15)), list(range(10, 15)), list(range(10, 15)), list(range(10, 15))) graphs.plotCostGraph(plt, list(range(itr)), yCostGWO, list(range(itr)), yCostPSO, list(range(itr)), list(range(itr)))
t = 0

# Minimization problem
pop = 30
itr = 300
c1 = 2.4
vmax = 6
desired = 0
objfname = 'Multi-threshold OTSU'
lim = [threshMin, threshMax]
runs = 1

'''Using PSO Optimization to find out optimal threshold values for segmentation'''
thr = []
fits = []
psnrs = []
runtime = []
for r in range(runs):
    sol = PSO.pso(itr, thresh, fun_multiotsu, pop, lim, c1, vmax, desired, objfname)
    print('Objective Function: ', sol.objfname)
    print('Method: ', sol.optimizer)
    print('Global best: ', sol.best)
    print('Best Threshold: ', sol.bestIndividual)
    print('Execution Time: ', sol.executionTime, ' secs')
    thresholds = sorted(sol.bestIndividual)
    print('Thresholds: ', thresholds)
    thr.append(thresholds)
    runtime.append(sol.executionTime)
    fits.append(sol.best)

regions = np.digitize(img, bins=thresholds)
output = img_as_ubyte(regions)
def main(): print("Program Start") headers = [ "Data set", "layers", "pop", "Beta", "CR", "generations", "loss1", "loss2" ] filename = 'VIDEORESULTS.csv' Per = Performance.Results() Per.PipeToFile([], headers, filename) data_sets = [ "soybean", "glass", "abalone", "Cancer", "forestfires", "machine" ] regression_data_set = { "soybean": False, "Cancer": False, "glass": False, "forestfires": True, "machine": True, "abalone": True } categorical_attribute_indices = { "soybean": [], "Cancer": [], "glass": [], "forestfires": [], "machine": [], "abalone": [] } tuned_0_hl = { "soybean": { "omega": .5, "c1": .1, "c2": 5, "hidden_layer": [] }, "Cancer": { "omega": .5, "c1": .5, "c2": 5, "hidden_layer": [] }, "glass": { "omega": .2, "c1": .9, "c2": 5, "hidden_layer": [] }, "forestfires": { "omega": .2, "c1": 5, "c2": .5, "hidden_layer": [] }, "machine": { "omega": .5, "c1": .9, "c2": 5, "hidden_layer": [] }, "abalone": { "omega": .2, "c1": 5, "c2": .9, "hidden_layer": [] } } tuned_1_hl = { "soybean": { "omega": .5, "c1": .5, "c2": 1, "hidden_layer": [7] }, "Cancer": { "omega": .2, "c1": .5, "c2": 5, "hidden_layer": [4] }, "glass": { "omega": .2, "c1": .9, "c2": 5, "hidden_layer": [8] }, "forestfires": { "omega": .2, "c1": 5, "c2": 5, "hidden_layer": [8] }, "machine": { "omega": .5, "c1": 5, "c2": .5, "hidden_layer": [4] }, "abalone": { "omega": .2, "c1": .1, "c2": 5, "hidden_layer": [8] } } tuned_2_hl = { "soybean": { "omega": .5, "c1": .9, "c2": .1, "hidden_layer": [7, 12] }, "Cancer": { "omega": .2, "c1": .5, "c2": 5, "hidden_layer": [4, 4] }, "glass": { "omega": .2, "c1": .9, "c2": 5, "hidden_layer": [8, 6] }, "forestfires": { "omega": .2, "c1": .9, "c2": 5, "hidden_layer": [8, 8] }, "machine": { "omega": .2, "c1": .9, "c2": .1, "hidden_layer": [7, 2] }, "abalone": { "omega": .2, "c1": 5, "c2": 5, "hidden_layer": [6, 8] } } du = DataUtility.DataUtility(categorical_attribute_indices, regression_data_set) total_counter = 0 for data_set in data_sets: if data_set != 'Cancer': continue data_set_counter = 0 # ten fold data and labels is a list of [data, labels] pairs, where # data and labels are numpy arrays: tenfold_data_and_labels = du.Dataset_and_Labels(data_set) for j in range(10): test_data, test_labels = copy.deepcopy(tenfold_data_and_labels[j]) #Append all data folds to the training data set remaining_data = [ x[0] for i, x in enumerate(tenfold_data_and_labels) if i != j ] remaining_labels = [ y[1] for i, y in enumerate(tenfold_data_and_labels) if i != j ] #Store off a set of the remaining dataset X = np.concatenate(remaining_data, axis=1) #Store the remaining data set labels labels = np.concatenate(remaining_labels, axis=1) print(data_set, "training data prepared") regression = regression_data_set[data_set] #If the data set is a regression dataset if regression == True: #The number of output nodes is 1 output_size = 1 #else it is a classification data set else: #Count the number of classes in the label data set output_size = du.CountClasses(labels) #Get the test data labels in one hot encoding test_labels = du.ConvertLabels(test_labels, output_size) #Get the Labels into a One hot encoding labels = du.ConvertLabels(labels, output_size) input_size = X.shape[0] data_set_size = X.shape[1] + test_data.shape[1] tuned_parameters = [ tuned_0_hl[data_set], tuned_1_hl[data_set], tuned_2_hl[data_set] ] for z in range(1): hidden_layers = tuned_parameters[z]["hidden_layer"] layers = [input_size] + hidden_layers + [output_size] nn = NeuralNetwork(input_size, hidden_layers, regression, output_size) 
nn.set_input_data(X, labels) nn1 = NeuralNetwork(input_size, hidden_layers, regression, output_size) nn1.set_input_data(X, labels) nn2 = NeuralNetwork(input_size, hidden_layers, regression, output_size) nn2.set_input_data(X, labels) total_weights = 0 for i in range(len(layers) - 1): total_weights += layers[i] * layers[i + 1] hyperparameters = { "population_size": 10 * total_weights, "beta": .5, "crossover_rate": .6, "max_gen": 100 } hyperparameterss = { "maxGen": 100, "pop_size": 100, "mutation_rate": .5, "mutation_range": 10, "crossover_rate": .5 } hyperparametersss = { "position_range": 10, "velocity_range": 1, "omega": .1, # tuned_parameters[z]["omega"], "c1": .9, # tuned_parameters[z]["c1"], "c2": .1, # tuned_parameters[z]["c2"], "vmax": 1, "pop_size": 1000, "max_t": 50 } de = DE.DE(hyperparameters, total_weights, nn) ga = GA.GA(hyperparameterss, total_weights, nn1) pso = PSO.PSO(layers, hyperparametersss, nn2) learning_rate = 3 momentum = 0 VNN = VideoNN.NeuralNetworks(input_size, hidden_layers, regression, output_size, learning_rate, momentum) VNN.set_input_data(X, labels) for gen in range(de.maxgens): de.mutate_and_crossover() for gen in range(ga.maxGen): ga.fitness() ga.selection() ga.crossover() counter = 0 for epoch in range(pso.max_t): pso.update_fitness() pso.update_position_and_velocity() for epoch in range(100): VNN.forward_pass() VNN.backpropagation_pass() bestSolution = de.bestChromie.getchromie() bestWeights = de.nn.weight_transform(bestSolution) de.nn.weights = bestWeights Estimation_Values = de.nn.classify(test_data, test_labels) Estimation_Values1 = ga.nn.classify(test_data, test_labels) Estimation_Values2 = pso.NN.classify(test_data, test_labels) Estimation_Values3 = VNN.classify(test_data, test_labels) if regression == False: #Decode the One Hot encoding Value Estimation_Values = de.nn.PickLargest(Estimation_Values) test_labels_list = de.nn.PickLargest(test_labels) Estimation_Values1 = ga.nn.PickLargest(Estimation_Values1) Tll = ga.nn.PickLargest(test_labels) Estimation_Values2 = pso.NN.PickLargest(Estimation_Values2) tll1 = pso.NN.PickLargest(test_labels) Estimation_Values3 = VNN.PickLargest(Estimation_Values3) tll = VNN.PickLargest(test_labels) # print("ESTiMATION VALUES BY GIVEN INDEX (CLASS GUESS) ") # print(Estimation_Values) else: Estimation_Values = Estimation_Values.tolist() test_labels_list = test_labels.tolist()[0] Estimation_Values = Estimation_Values[0] Estimat = Estimation_Values groun = test_labels_list meta = list() Nice = Per.ConvertResultsDataStructure(groun, Estimat) Nice1 = Per.ConvertResultsDataStructure( Tll, Estimation_Values1) Nice2 = Per.ConvertResultsDataStructure( tll1, Estimation_Values2) Nice3 = Per.ConvertResultsDataStructure( tll, Estimation_Values3) DEss = Per.StartLossFunction(regression, Nice, meta) GAss = Per.StartLossFunction(regression, Nice1, meta) PSOSS = Per.StartLossFunction(regression, Nice2, meta) VNNS = Per.StartLossFunction(regression, Nice3, meta) print("DE") print(DEss) print("GA") print(GAss) print("PSO") print(PSOSS) print("NN Back prop.") print(VNNS) # print("THE GROUND VERSUS ESTIMATION:") # print(Nice) # headers = ["Data set", "layers", "pop", "Beta", "CR", "generations", "loss1", "loss2"] Meta = [ data_set, len(hidden_layers), hyperparameters["population_size"], hyperparameters["beta"], hyperparameters["crossover_rate"], hyperparameters["max_gen"] ] Per.StartLossFunction(regression, Nice, Meta, filename) data_set_counter += 1 total_counter += 1 print("Program End ")
def runPSO(iterations, c1, c2, w, execution):
    pso = PSO(SWARM_SIZE, iterations, w, c1, c2)
    pso.generateSwarm()
    for iteration in range(pso.iterations):
        for idx, particle in enumerate(pso.swarm):
            pso.calculateFitness(particle)
            if (particle.fitness < particle.pbest[FITNESS]):
                particle.pbest = [particle.fitness, particle.x]
            pso.setNeighborGBest(particle, idx)
            particle.v = pso.updateParticleVelocity(particle)
            particle.x = pso.updateParticleSolution(particle)
        (best, worst, average) = pso.getMetricsOfIteration()
        fileOperations(best, worst, average, iterations, c1, c2, w, execution)
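# Hypothetical sweep using runPSO above. The PSO class, SWARM_SIZE, FITNESS and
# fileOperations are assumed to be defined elsewhere in this module; the
# hyperparameter values below are illustrative, not taken from the source.
if __name__ == "__main__":
    for execution in range(10):
        runPSO(iterations=100, c1=2.05, c2=2.05, w=0.72, execution=execution)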
def algorithm(x_train, y_train): N = settings.goa_population_size grasshoppers = give_N_random_solutions(N, len(x_train[0])) # A' = [[list([0, 1]) '101' '110' array([0, 0]) array([0, 0])] print(N, "random Solutions generated") print(grasshoppers) # exit() previous_weights_of_ghs = [] # saves previous weights of ith grasshopper previous_ghs = [] # saves previous ith grasshopper best_sol = [0, -1, -1] # accuracy, grasshopper, corresponding_weights for i in range(len(grasshoppers)): no_of_hidden_neurons1 = int(grasshoppers[i][1], 2) no_of_hidden_neurons2 = int(grasshoppers[i][2], 2) tf1 = grasshoppers[i][3] tf2 = grasshoppers[i][4] print("Running PSO on", i, "solution:") print(grasshoppers[i]) updated_x_train = updated_X(x_train, grasshoppers[i][0]) accuracy, corresponding_weights = PSO.model( updated_x_train, y_train, no_of_input_neurons=len(updated_x_train[0]), no_of_hidden_neurons1=no_of_hidden_neurons1, no_of_hidden_neurons2=no_of_hidden_neurons2, no_of_output_neurons=settings.no_of_classes, tf1=tf1, tf2=tf2) previous_ghs.append(copy.deepcopy(grasshoppers[i])) previous_weights_of_ghs.append(copy.deepcopy(corresponding_weights)) print(accuracy, "\n") if accuracy > best_sol[0]: best_sol[0] = accuracy best_sol[1] = copy.deepcopy(grasshoppers[i]) best_sol[2] = copy.deepcopy(corresponding_weights) print("Initial Best", best_sol[0:1], "\n\n") # exit() max_it = settings.goa_max_iteration cMax = 1 cMin = 0.00004 l = 2 ub = len(grasshoppers[0][0]) + len( grasshoppers[0][1]) ##................????? lb = 0 while l < max_it: c = cMax - l * ((cMax - cMin) / max_it) print( l, "iteration, c", c, "----------------------------------------------------------------------------->" ) for i in range(len(grasshoppers)): j = 0 Xi = 0 # for every ith grasshopper's position is updated according to the position of every jth while j < len(grasshoppers): if j != i: # Normalize grasshoppers[i] = normalize_distance( grasshoppers[i], grasshoppers[j]) # grasshoppers[j] = normalize_distance(grasshoppers[j], grasshoppers[i]) dist = distance(grasshoppers[j], grasshoppers[i]) Xi += c * ((ub - lb) / 2) * (0.5 * np.exp(-dist / 1.5) - np.exp(-dist)) j += 1 # print(Xi) Xi *= c # print(Xi) Td = distance(grasshoppers[i], best_sol[1]) # print("Dist", Td) Xi += Td # print("Final Xi", Xi) change_value = np.ceil(Xi) grasshoppers[i] = update_position(grasshoppers[i], abs(change_value)) print( "current GOA grasshopper----------------------------------------->>>", grasshoppers[i]) no_of_hidden_neurons1 = int(grasshoppers[i][1], 2) no_of_hidden_neurons2 = int(grasshoppers[i][2], 2) tf1 = grasshoppers[i][3] tf2 = grasshoppers[i][4] updated_x_train = updated_X(x_train, grasshoppers[i][0]) # guess initial weights from previous weights # print("PReviouysfnefe\n", previous_weights_of_ghs[i]) guessed_weights = guess_weight( grasshoppers[i], copy.deepcopy(previous_ghs[i]), copy.deepcopy(previous_weights_of_ghs[i])) # print("guesssss\n", guessed_weights) # exit() accuracy, corresponding_weights = PSO.model( updated_x_train, y_train, no_of_input_neurons=len(updated_x_train[0]), no_of_hidden_neurons1=no_of_hidden_neurons1, no_of_hidden_neurons2=no_of_hidden_neurons2, no_of_output_neurons=settings.no_of_classes, tf1=tf1, tf2=tf2, guessed_weights=guessed_weights) previous_ghs[i] = copy.deepcopy(grasshoppers[i]) previous_weights_of_ghs[i] = copy.deepcopy(corresponding_weights) if accuracy > best_sol[0]: best_sol[0] = accuracy best_sol[1] = copy.deepcopy(grasshoppers[i]) best_sol[2] = copy.deepcopy(corresponding_weights) print("\n\nBEST UPDATED: ", 
best_sol[0:1], "\n\n") print( "----------------------------------------------------------------------------->" ) print("Best accuracy so far", best_sol[0:1]) l += 1 return best_sol
import PSO
import functions

number = input("What function do you want to test? \n press 1 for ackley \n press 2 for sumOfSquare \n press 3 for sphere\n")

func = functions.functions()
initial = [5, 5]              # initial starting location [x1, x2...]
bounds = [(-5, 5), (-5, 5)]   # input bounds [(x1_min, x1_max), (x2_min, x2_max)...]

try:
    val = int(number)
    if val == 1:
        PSO.PSO(func.ackley, initial, bounds, num_particles=15, maxiter=30, verbose=True)
    elif val == 2:
        PSO.PSO(func.sumOfSquare, initial, bounds, num_particles=15, maxiter=30, verbose=True)
    elif val == 3:
        PSO.PSO(func.sphere, initial, bounds, num_particles=15, maxiter=30, verbose=True)
    else:
        print("Invalid Input")
except ValueError:
    print("That's not an int!")
    print("No.. input string is not an Integer. It's a string")
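# A minimal sketch of what the sphere benchmark offered by the menu conventionally
# computes (f(x) = sum of x_i^2). This is an assumption about the functions module,
# shown for illustration; it is not code taken from that module.
def sphere(x):
    return sum(xi * xi for xi in x)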
# Set up the PSO algorithm and run.
# run_session returns a list of length 10000 of 1s and 0s, with more 1s towards the end.
# To graph, look at the average over some increment: for example, graph the average
# over the first 1000 episodes, then the next. The average is a number in [0, 1];
# this corresponds to "fitness".
IR = [q.run_session(random.random(), random.random()) for _ in range(30)]
IRAvg = sum([sum(i[-1000:]) / 1000 for i in IR]) / 30
InitialRewardPerEpisode = IR
print("Before tuning, QLearner achieves average fitness of ", IRAvg,
      "(S.D. = ", statistics.stdev([sum(i[-1000:]) / 1000 for i in IR]),
      ") in 30 runs of 10,000 episodes.")

dataplot = DataPlotter()

# The eval function/fitness is the average of the last 1000 episodes' rewards.
# It should converge to ~0.7; in other words, a reward is obtained 70% of the time.
pso = PSO.ParticleSwarm((lambda a, b: sum(q.run_session(a, b)[-1000:]) / 1000),
                        5, 100, 0.00, 1.00, dataplot)
pso.minV = -1
pso.maxV = 1
particles = [PSO.Particle(0.00, 1.00, dataplot) for _ in range(pso.numParticles)]
pso.particles = particles
pso.display_particles()
alpha, gamma = pso.algorithm()

FR = [q.run_session(alpha, gamma) for _ in range(30)]
FRAvg = sum([sum(i[-1000:]) / 1000 for i in FR]) / 30
FinalRewardPerEpisode = FR
print("After tuning, QLearner achieves average fitness of ", FRAvg,
      "(S.D. = ", statistics.stdev([sum(i[-1000:]) / 1000 for i in FR]),
      ") in 30 runs of 10,000 episodes.")

dataplot.appendqlRVals(IR, FR)
dataplot.outputGraphs()
class EnsembleOptimizer: def __init__(self, obj, class_obj, num_agents, num_gen, seed, obj_dim, communicate, scheme, wts_decay, floor=-5, ceiling=5, **kwargs): self.obj = obj self.class_obj = class_obj self.num_agents = num_agents self.num_gen = num_gen self.seed = seed self.obj_dim = obj_dim self.scheme = scheme self.decay = wts_decay self.floor = floor self.ceiling = ceiling self.ind_best = None self.ensemble_best = None self.runner = None self.randomize = None self.fitness = None self.err_tracker = [] self.DE = None self.PSO = None self.PSOL = None self.Cuckoo = None self.flower = None self.Bat = None self.BatL = None self.rank_weights = None self.final_best = None self.comms = communicate self.final_best_params = None self.loss_tracker = [] def new_intensity(self, param): return self.obj(param) def init_optimization(self): self.runner = init_EO(self.obj, self.obj_dim, self.num_agents, self.ceiling, self.floor, self.seed) self.population = self.runner.init_pop self.fitness = self.runner.fitness def custom_sort(self, arr): l = list(arr) l.sort(key=lambda crunch: self.new_intensity(crunch)) sort_ary = np.asarray(l) return sort_ary def meta_weighing_scheme(self, best_list): '''weighing schemes used 1.best, 2. average, 3. rank weighted, 4. exponential rank weiging''' weighted_best = self.custom_sort(best_list) best_param_1 = weighted_best[len(weighted_best) - 1] weighted_best_2 = np.mean(best_list, axis=0) best_param_2 = weighted_best_2 self.rank_weights = list(range(1, 8, 1)) weighted_best_3 = self.custom_sort(best_list) best_param_3 = np.average(weighted_best_3, weights=self.rank_weights, axis=0) weighted_best_4 = self.custom_sort(best_list) self.rank_weights = list(range(7, 0, -1)) exp_wts = [] for i in range(len(self.rank_weights)): tmp = self.rank_weights[i] * self.decay**abs( (self.rank_weights[i] - self.rank_weights[0])) exp_wts.append(tmp) exp_np = np.array(exp_wts) exp_wts = exp_np / np.sum(exp_np) best_param_4 = np.average(weighted_best_4, weights=np.flip(exp_wts, axis=0), axis=0) a = best_param_1.shape[0] tmp_best = np.asarray(np.zeros( (4, best_param_1.shape[0]))).astype(float) tmp_best[0, :], tmp_best[1, :], tmp_best[2, :], tmp_best[ 3, :] = best_param_1, best_param_2, best_param_3, best_param_4 meta_param = np.mean(tmp_best, axis=0) return meta_param def weighing_scheme(self, best_list): '''weighing schemes used 1.best, 2. average, 3. rank weighted, 4. exponential rank weiging''' if self.scheme == 'best': weighted_best = self.custom_sort(best_list) best_param = weighted_best[len(weighted_best) - 1] elif self.scheme == 'average': weighted_best = np.mean(best_list, axis=0) best_param = weighted_best elif self.scheme == 'rank': self.rank_weights = list(range(1, 8, 1)) weighted_best = self.custom_sort(best_list) best_param = np.average(weighted_best, weights=self.rank_weights, axis=0) elif self.scheme == 'exponential': weighted_best = self.custom_sort(best_list) self.rank_weights = list(range(7, 0, -1)) exp_wts = [] for i in range(len(self.rank_weights)): tmp = self.rank_weights[i] * self.decay**abs( (self.rank_weights[i] - self.rank_weights[0])) exp_wts.append(tmp) exp_np = np.array(exp_wts) exp_wts = exp_np / np.sum(exp_np) best_param = np.average(weighted_best, weights=np.flip(exp_wts, axis=0), axis=0) return best_param def assemble_optimizers(self): '''initialize all optimizers 1. DE, 2. POS, 3. APSO, 4.FF, 5. 
CuckooSearch, 6.flower pollinatiion, 7.Bat Algorithm''' self.DE = Differential_Evolution(self.obj, self.class_obj, num_agents=self.num_agents, num_gen=self.num_gen, scaling=0.9, seed=self.seed, cross_prob=0.50, target=0.0001, obj_dim=self.obj_dim) self.PSO = PSO(self.obj, self.class_obj, num_agents=self.num_agents, num_gen=self.num_gen, seed=self.seed, obj_dim=self.obj_dim, alpha=2, alpha_decay=0.03, beta=2, floor=self.class_obj.floor, ceiling=self.class_obj.ceiling) self.PSOL = PSOLevy(self.obj, self.class_obj, num_agents=self.num_agents, num_gen=self.num_gen, seed=self.seed, obj_dim=self.obj_dim, alpha=2, alpha_decay=0.03, beta=2, floor=self.class_obj.floor, ceiling=self.class_obj.ceiling) self.Cuckoo = CuckooSearch(self.obj, self.class_obj, num_agents=self.num_agents, num_gen=self.num_gen, seed=self.seed, obj_dim=self.obj_dim, alpha=0.01, p=0.25, beta=1.5, floor=self.class_obj.floor, ceiling=self.class_obj.ceiling) self.flower = FlowerPollination(self.obj, self.class_obj, num_agents=self.num_agents, num_gen=self.num_gen, seed=self.seed, obj_dim=self.obj_dim, gamma=0.1, p=0.5, beta=1.5, floor=self.class_obj.floor, ceiling=self.class_obj.ceiling) self.Bat = BAT(self.obj, self.class_obj, num_agents=self.num_agents, num_gen=self.num_gen, seed=self.seed, obj_dim=self.obj_dim, fmin=0.01, fmax=0.25, pulse=0.2, amplitude=1, scaling=0.01, floor=self.class_obj.floor, ceiling=self.class_obj.ceiling) self.BatL = BATLevy(self.obj, self.class_obj, num_agents=self.num_agents, num_gen=self.num_gen, seed=self.seed, obj_dim=self.obj_dim, fmin=0.01, fmax=0.25, pulse=0.2, amplitude=1, scaling=0.01, floor=self.class_obj.floor, ceiling=self.class_obj.ceiling) def model_fit(self): self.assemble_optimizers() for n in range(self.num_gen): de_best = self.DE.ensemble_fit() pso_best = self.PSO.ensemble_fit() psol_best = self.PSOL.ensemble_fit() bat_best = self.Bat.ensemble_fit() batl_best = self.BatL.ensemble_fit() cuckoo_best = self.Cuckoo.ensemble_fit() flower_best = self.flower.ensemble_fit() best_list = np.array([ de_best, pso_best, psol_best, bat_best, batl_best, cuckoo_best, flower_best ]) if self.scheme != 'meta': self.ensemble_best = self.weighing_scheme(best_list) else: self.ensemble_best = self.meta_weighing_scheme(best_list) '''now communicate this ensemble best result to all the optimizers''' ''' 1. For DE update the current best with the ensemble best 2. For PSO update the current best with the ensemble best 3. For apso update the current best with the ensemble best 4. For Firefly update the worst with the ensemble best 5. 
For Cuckoo update the worst in tmp nest with the ensemble best ''' if n % self.comms == 0: #print('communicating') self.DE.current_best = self.ensemble_best self.PSO.current_best = self.ensemble_best self.PSOL.current_best = self.ensemble_best self.Bat.current_best = self.ensemble_best self.BatL.current_best = self.ensemble_best #randint(0, 9) self.Cuckoo.population[random.randint(0, self.num_agents - 1)] = self.ensemble_best self.flower.population[random.randint(0, self.num_agents - 1)] = self.ensemble_best self.final_best = np.array([ de_best, pso_best, psol_best, bat_best, batl_best, cuckoo_best, flower_best ]) self.final_best_params = self.custom_sort(best_list)[len(best_list) - 1] self.err_tracker.append( abs( self.class_obj.return_value(self.final_best_params) - self.class_obj.min)) #self.current_best = self.custom_sort(self.population)[len(self.population) - 1] self.final_best_params = self.custom_sort(best_list)[len(best_list) - 1] return self.final_best_params def best_param(self): return self.custom_sort(self.final_best)[len(self.final_best) - 1]
        dist = np.linalg.norm(rule_values - f_t)
        dist_to_rules.append(dist)
    selected_rule = np.argmin(dist_to_rules)
    weight, profit, index = selector[selected_rule, -1](matrix)
    if total_weight + weight <= capacity:
        total_weight += weight
        total_profit += profit
    matrix = np.delete(matrix, index, axis=0)
    # return total_weight, total_profit
    return total_profit


params = [SELECTOR, MATRIX, CAPACITY]
pso_instance = pso.PSO(2, 250, 100, 0.2, 0.5, 0.5, kp_hyper_heuristic, params)
results = pso_instance.run_pso()
print(results)
# print("Initial capacity: " + str(CAPACITY))
# val = kp_hyper_heuristic(FEATURE, SELECTOR, MATRIX, CAPACITY)
# print("Final profit: " + str(final_profit))

"""
Questions
1. Which algorithms tune the weight values best?

Proposals
* add a features class
* add a rules class
* add a KP hyper-heuristic class composed of the rules class
    if (x[1] > 511):
        x[1] = 511
    if (x[0] < 0):
        x[0] = 0
    if (x[1] < 0):
        x[1] = 0
    return imagen[int(x[0])][int(x[1])]


lower = [-10, -10]
upper = [10, 10]
breakCriteria = -250
imagen = cv2.imread('imagen.jpg', 0)
algoritmo = pso.PSO(dimention=2, lower=lower, upper=upper, function=bohachevsky,
                    populationSize=30, maxIter=100, breakCriteria=breakCriteria,
                    wInertia=.5, c1=2, c2=2, imagen=imagen)
solucion, fitness = algoritmo.run()
print('terminó algoritmo')
print(solucion)
print(fitness)
def selector(algo, func_details, popSize, Iter, trainDataset, testDataset, actv):
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]

    Dataset_train = trainDataset
    Dataset_test = testDataset

    numRowsTrain = numpy.shape(Dataset_train)[0]        # number of instances in the train dataset
    numInputsTrain = numpy.shape(Dataset_train)[1] - 1  # number of features in the train dataset
    numRowsTest = numpy.shape(Dataset_test)[0]          # number of instances in the test dataset
    numInputsTest = numpy.shape(Dataset_test)[1] - 1    # number of features in the test dataset

    trainInput = Dataset_train[0:numRowsTrain, 0:-1]
    trainOutput = Dataset_train[0:numRowsTrain, -1]
    testInput = Dataset_test[0:numRowsTest, 0:-1]
    testOutput = Dataset_test[0:numRowsTest, -1]

    # number of hidden neurons
    HiddenNeurons = numInputsTrain * 2 + 1
    net = nl.net.newff([[0, 1]] * numInputsTrain, [HiddenNeurons, 1])
    if actv == 1:
        net = nl.net.newff([[0, 1]] * numInputsTrain, [HiddenNeurons, 1],
                           [nl.trans.LogSig(), nl.trans.LogSig()])
    if actv == 2:
        net = nl.net.newff([[0, 1]] * numInputsTrain, [HiddenNeurons, 1],
                           [nl.trans.SatLinPrm(1, 0, 1), nl.trans.SatLinPrm(1, 0, 1)])

    # search-space dimensionality: all weights plus all biases of the MLP
    dim = (numInputsTrain * HiddenNeurons) + (2 * HiddenNeurons) + 1

    # metaheuristic trainers
    if algo == 12:
        x = adam.adamse(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                        trainInput, trainOutput, net)
    if algo == 0:
        x = pso.PSO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    if algo == 1:
        x = mvo.MVO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    if algo == 2:
        x = gwo.GWO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    if algo == 3:
        x = cs.CS(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                  trainInput, trainOutput, net)
    if algo == 4:
        x = bat.BAT(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)
    if algo == 5:
        x = de.DE(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                  trainInput, trainOutput, net)
    if algo == 6:
        x = ga.GA(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                  trainInput, trainOutput, net)
    if algo == 7:
        x = fa.FFA(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                   trainInput, trainOutput, net)
    if algo == 8:
        x = bbo.BBO(getattr(costNN, function_name), lb, ub, dim, popSize, Iter,
                    trainInput, trainOutput, net)

    # plain backpropagation (gradient descent)
    if algo == 9:
        printAcc = []
        printAcc2 = []
        x = solution()
        timerStart = time.time()
        x.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        if function_name == "costNN":
            net.trainf.defaults['trainf'] = nl.error.MSE()
        elif function_name == "costNN4":
            net.trainf.defaults['trainf'] = nl.error.CEE()
        else:
            return x
        net.trainf = nl.train.train_gd
        newOutput = [[x] for x in trainOutput]
        newOutput = numpy.asarray(newOutput)
        e = net.train(trainInput, newOutput, epochs=Iter * popSize)
        timerEnd = time.time()
        x.optimizer = "BP"
        x.objfname = function_name
        x.popnum = 0
        x.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        x.executionTime = timerEnd - timerStart
        x.convergence = e

        # training-set evaluation
        pred = net.sim(trainInput).reshape(len(trainOutput))
        pred = numpy.round(pred).astype(int)
        trainOutput = trainOutput.astype(int)
        pred = numpy.clip(pred, 0, 1)
        ConfMatrix = confusion_matrix(trainOutput, pred)
        ConfMatrix1D = ConfMatrix.flatten()
        printAcc.append(accuracy_score(trainOutput, pred, normalize=True))
        classification_results = numpy.concatenate((printAcc, ConfMatrix1D))
        x.trainAcc = classification_results[0]
        x.trainTP = classification_results[1]
        x.trainFN = classification_results[2]
        x.trainFP = classification_results[3]
        x.trainTN = classification_results[4]

        # test-set evaluation
        pred = net.sim(testInput).reshape(len(testOutput))
        pred = numpy.round(pred).astype(int)
        testOutput = testOutput.astype(int)
        pred = numpy.clip(pred, 0, 1)
        ConfMatrix = confusion_matrix(testOutput, pred)
        ConfMatrix1D = ConfMatrix.flatten()
        printAcc2.append(accuracy_score(testOutput, pred, normalize=True))
        classification_results2 = numpy.concatenate((printAcc2, ConfMatrix1D))
        x.testAcc = classification_results2[0]
        x.testTP = classification_results2[1]
        x.testFN = classification_results2[2]
        x.testFP = classification_results2[3]
        x.testTN = classification_results2[4]
        return x

    # backpropagation with momentum and adaptive learning rate
    if algo == 10:
        printAcc = []
        printAcc2 = []
        x = solution()
        timerStart = time.time()
        x.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        if function_name == "costNN":
            net.trainf.defaults['trainf'] = nl.error.MSE()
        elif function_name == "costNN4":
            net.trainf.defaults['trainf'] = nl.error.CEE()
        else:
            return x
        net.trainf = nl.train.train_gdx
        newOutput = [[x] for x in trainOutput]
        newOutput = numpy.asarray(newOutput)
        e = net.train(trainInput, newOutput, epochs=Iter * popSize)
        timerEnd = time.time()
        x.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        x.optimizer = "BPMA"
        x.objfname = function_name
        x.popnum = 0
        x.executionTime = timerEnd - timerStart
        x.convergence = e

        # training-set evaluation
        pred = net.sim(trainInput).reshape(len(trainOutput))
        pred = numpy.round(pred).astype(int)
        trainOutput = trainOutput.astype(int)
        pred = numpy.clip(pred, 0, 1)
        ConfMatrix = confusion_matrix(trainOutput, pred)
        ConfMatrix1D = ConfMatrix.flatten()
        printAcc.append(accuracy_score(trainOutput, pred, normalize=True))
        classification_results = numpy.concatenate((printAcc, ConfMatrix1D))
        x.trainAcc = classification_results[0]
        x.trainTP = classification_results[1]
        x.trainFN = classification_results[2]
        x.trainFP = classification_results[3]
        x.trainTN = classification_results[4]

        # test-set evaluation
        pred = net.sim(testInput).reshape(len(testOutput))
        pred = numpy.round(pred).astype(int)
        testOutput = testOutput.astype(int)
        pred = numpy.clip(pred, 0, 1)
        ConfMatrix = confusion_matrix(testOutput, pred)
        ConfMatrix1D = ConfMatrix.flatten()
        printAcc2.append(accuracy_score(testOutput, pred, normalize=True))
        classification_results2 = numpy.concatenate((printAcc2, ConfMatrix1D))
        x.testAcc = classification_results2[0]
        x.testTP = classification_results2[1]
        x.testFN = classification_results2[2]
        x.testFP = classification_results2[3]
        x.testTN = classification_results2[4]
        return x

    # scikit-learn MLPClassifier trained with Adam
    if algo == 11:
        x = solution()
        printAcc = []
        printAcc2 = []
        if function_name == "costNN4":
            if actv == 0:
                timerStart = time.time()
                x.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
                clf = MLPClassifier(hidden_layer_sizes=HiddenNeurons,
                                    activation='tanh',
                                    max_iter=Iter * popSize,
                                    learning_rate_init=0.01,
                                    n_iter_no_change=7500).fit(trainInput, trainOutput)
                timerEnd = time.time()
                x.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
                x.optimizer = "Adam"
                x.objfname = function_name
                x.popnum = 0
                x.executionTime = timerEnd - timerStart
                x.convergence = clf.loss_curve_

                pred = clf.predict(trainInput)
                ConfMatrix = confusion_matrix(trainOutput, pred)
                ConfMatrix1D = ConfMatrix.flatten()
                printAcc.append(accuracy_score(trainOutput, pred, normalize=True))
                classification_results = numpy.concatenate((printAcc, ConfMatrix1D))
                x.trainAcc = classification_results[0]
                x.trainTP = classification_results[1]
                x.trainFN = classification_results[2]
                x.trainFP = classification_results[3]
                x.trainTN = classification_results[4]

                pred = clf.predict(testInput)
                ConfMatrix = confusion_matrix(testOutput, pred)
                ConfMatrix1D = ConfMatrix.flatten()
                printAcc2.append(accuracy_score(testOutput, pred, normalize=True))
                classification_results2 = numpy.concatenate((printAcc2, ConfMatrix1D))
                x.testAcc = classification_results2[0]
                x.testTP = classification_results2[1]
                x.testFN = classification_results2[2]
                x.testFP = classification_results2[3]
                x.testTN = classification_results2[4]
                return x
            elif actv == 1:
                timerStart = time.time()
                x.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
                clf = MLPClassifier(hidden_layer_sizes=HiddenNeurons,
                                    activation='logistic',
                                    max_iter=Iter * popSize,
                                    learning_rate_init=0.01,
                                    n_iter_no_change=7500).fit(trainInput, trainOutput)
                timerEnd = time.time()
                x.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
                x.optimizer = "Adam"
                x.objfname = function_name
                x.popnum = 0
                x.executionTime = timerEnd - timerStart
                x.convergence = clf.loss_curve_

                pred = clf.predict(trainInput)
                ConfMatrix = confusion_matrix(trainOutput, pred)
                ConfMatrix1D = ConfMatrix.flatten()
                printAcc.append(accuracy_score(trainOutput, pred, normalize=True))
                classification_results = numpy.concatenate((printAcc, ConfMatrix1D))
                x.trainAcc = classification_results[0]
                x.trainTP = classification_results[1]
                x.trainFN = classification_results[2]
                x.trainFP = classification_results[3]
                x.trainTN = classification_results[4]

                pred = clf.predict(testInput)
                ConfMatrix = confusion_matrix(testOutput, pred)
                ConfMatrix1D = ConfMatrix.flatten()
                printAcc2.append(accuracy_score(testOutput, pred, normalize=True))
                classification_results2 = numpy.concatenate((printAcc2, ConfMatrix1D))
                x.testAcc = classification_results2[0]
                x.testTP = classification_results2[1]
                x.testFN = classification_results2[2]
                x.testFP = classification_results2[3]
                x.testTN = classification_results2[4]
                return x
            elif actv == 2:
                timerStart = time.time()
                x.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
                clf = MLPClassifier(hidden_layer_sizes=HiddenNeurons,
                                    activation='relu',
                                    max_iter=Iter * popSize,
                                    learning_rate_init=0.01,
                                    n_iter_no_change=7500).fit(trainInput, trainOutput)
                timerEnd = time.time()
                x.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
                x.optimizer = "Adam"
                x.objfname = function_name
                x.popnum = 0
                x.executionTime = timerEnd - timerStart
                x.convergence = clf.loss_curve_

                pred = clf.predict(trainInput)
                ConfMatrix = confusion_matrix(trainOutput, pred)
                ConfMatrix1D = ConfMatrix.flatten()
                printAcc.append(accuracy_score(trainOutput, pred, normalize=True))
                classification_results = numpy.concatenate((printAcc, ConfMatrix1D))
                x.trainAcc = classification_results[0]
                x.trainTP = classification_results[1]
                x.trainFN = classification_results[2]
                x.trainFP = classification_results[3]
                x.trainTN = classification_results[4]

                pred = clf.predict(testInput)
                ConfMatrix = confusion_matrix(testOutput, pred)
                ConfMatrix1D = ConfMatrix.flatten()
                printAcc2.append(accuracy_score(testOutput, pred, normalize=True))
                classification_results2 = numpy.concatenate((printAcc2, ConfMatrix1D))
                x.testAcc = classification_results2[0]
                x.testTP = classification_results2[1]
                x.testFN = classification_results2[2]
                x.testFP = classification_results2[3]
                x.testTN = classification_results2[4]
                return x
        else:
            return x

    # Evaluate MLP classification model based on the training set
    trainClassification_results = evalNet.evaluateNetClassifier(x, trainInput, trainOutput, net)
    x.trainAcc = trainClassification_results[0]
    x.trainTP = trainClassification_results[1]
    x.trainFN = trainClassification_results[2]
    x.trainFP = trainClassification_results[3]
    x.trainTN = trainClassification_results[4]

    # Evaluate MLP classification model based on the testing set
    testClassification_results = evalNet.evaluateNetClassifier(x, testInput, testOutput, net)
    x.testAcc = testClassification_results[0]
    x.testTP = testClassification_results[1]
    x.testFN = testClassification_results[2]
    x.testFP = testClassification_results[3]
    x.testTN = testClassification_results[4]

    return x
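# A hypothetical driver for selector() above, shown only to illustrate the argument
# layout that the function unpacks: func_details = [objective name inside costNN, lb, ub],
# algo 0 selects the PSO branch, and actv 0 keeps the default transfer functions.
# The file names, bounds, and population/iteration sizes below are assumptions,
# not part of the original project.
import numpy

train = numpy.loadtxt("train.csv", delimiter=",")  # last column is the class label
test = numpy.loadtxt("test.csv", delimiter=",")

result = selector(algo=0,
                  func_details=["costNN", -1, 1],
                  popSize=50, Iter=100,
                  trainDataset=train, testDataset=test,
                  actv=0)
print(result.optimizer, result.trainAcc, result.testAcc)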
    print_figure(plt.gcf())
    return yfore, horizon_data_length


if len(sys.argv) == 2:
    forecast(sys.argv[1])
else:
    for i in range(len(indices)):
        f, horizon_data_length = forecast(indices[i])
        result_forecasts.append(f)

    portfolioInitialValue = 100000
    numvar = 7
    xmin = 0.05
    xmax = 0.7
    niter = 2
    popsize = 50
    nhood_size = 10

    PSO = ParSwarm.ParSwarmOpt(xmin, xmax)
    res = PSO.pso_solve(popsize, numvar, niter, nhood_size, portfolioInitialValue,
                        horizon_data_length, result_forecasts)

    print("Portfolio: ", end='')
    for value in res.xsolbest:
        print(value, end=' ')
    print("")
    print("Return: {}".format(res.return_valuebest))
    print("Devst: {}".format(res.devstbest))
def pruebaPSO(d, k):
    # run the PSO five times, one execution per index i
    for i in range(5):
        PSO.ejecutarPSO(d, i, k)
ga = GA.Genetic(population, data.N, data.K, data.testN)
ga.setInputsOutputs(data.inputs, data.outputs)
ga.setTestInputsOutputs(data.testInputs, data.testOutputs)
ga.setWeights(data.population)
statsGA = ga.calc(20)
print(ga.parents[0][1], ga.test(ga.parents[0][0]))

plt.subplot(1, 3, 2)
plt.ylim(0, maxY)
plt.semilogx(statsGA)
plt.title("Genetic Algorithm")

##================== Particle Swarm Optimization ================
print("\n\n##================== Particle Swarm Optimization ================\n\n")

pso = PSO.PSO(population, data.N, data.K, data.testN)
pso.setInputsOutputs(data.inputs, data.outputs)
pso.resetPopulation(data.population)
pso.setTestInputsOutputs(data.testInputs, data.testOutputs)
statsPSO = pso.calc(50)
print("Train: ", pso.globalBest.cost,
      "\nTest: ", pso.test(pso.globalBest.position))

plt.subplot(1, 3, 3)
plt.ylim(0, maxY)
plt.semilogx(statsPSO)
plt.title("\n\nParticle Swarm Optimization")
plt.show()
time_start = time.time()

trainData = [[0.91, 0.21, 0.02, 0.04, 0.06],
             [0.88, 0.23, 0.04, 0.03, 0.05],
             [0.90, 0.20, 0.05, 0.03, 0.02],
             [0.04, 0.98, 0.10, 0.02, 0.02],
             [0.02, 0.97, 0.08, 0.01, 0.01],
             [0.03, 0.99, 0.09, 0.02, 0.02],
             [0.02, 0.41, 0.43, 0.34, 0.15],
             [0.01, 0.47, 0.40, 0.32, 0.10],
             [0.02, 0.52, 0.41, 0.31, 0.14],
             [0.01, 0.04, 0.01, 0.01, 0.03],
             [0.02, 0.03, 0.06, 0.04, 0.02],
             [0.02, 0.03, 0.05, 0.03, 0.02]]
Y = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]

maxi = 50
my_pso = PSO(pN=15, dim=9, max_iter=maxi, data=trainData, Y=Y)
my_pso.init_Population()
fitness = my_pso.iterator()

time_end = time.time()
print('Training time:', time_end - time_start)

plt.figure(1)
plt.title("Figure1")
plt.xlabel("iterators", size=14)
plt.ylabel("fitness", size=14)
t = np.array([t for t in range(0, maxi)])
fitness = np.array(fitness)
plt.plot(t, fitness, color='b', linewidth=1)
plt.show()

bestRbf = my_pso.layoutBest()