def ejecutando_algoritmo(a):
    # Mark the algorithm's state as 'ejecutando' (running) in the shared
    # JSON status file, then dispatch to the requested metaheuristic.
    save_path = '/home/optimizacion_final/datos_json'
    name_of_file = 'estado'
    completeName = os.path.join(save_path, name_of_file + ".txt")
    with open(completeName) as json_file:
        data = json.load(json_file)
    data[a]['estado'] = 'ejecutando'
    with open(completeName, 'w') as outfile:
        json.dump(data, outfile)
    if a == 'GA':
        GA.ejecutarGA()
    elif a == 'SA':
        SA.ejecutarSA()
    elif a == 'PSO':
        PSO.ejecutarPSO()
    elif a == 'ACO':
        ACO.ejecutarACO()
def get_information(file_path):  # using target_MDG and a benchmark, create result
    """
    Execute each algorithm for the given file and return TurboMQ, cohesion, and coupling
    :param file_path: A path of dot file
    :return: Clustering result - value of TurboMQ, A list of [Cohesion, Coupling], A list of result clusters
    """
    targetMDG = make_target_MDG(file_path)
    methods = ['WCA', 'HC', 'WCA_HC', 'SA', 'WCA_SA', 'PSO', 'WCA_PSO']
    clusters_set = []
    TMQ = []
    cohe_coup = []

    print("====WCA start====\n")
    clusters_set.append(WCA(targetMDG))
    print("====WCA end====\n\n")

    print("====HC start====\n")
    clusters_set.append(HC.HC(targetMDG))
    print("====HC end====\n\n")

    print("====WCA_HC start====\n")
    clusters_set.append(HC.WCA_HC(targetMDG, WCA(targetMDG)))
    print("====WCA_HC end====\n\n")

    print("====SA start====\n")
    clusters_set.append(SA.SA(targetMDG))
    print("====SA end====\n\n")

    print("====WCA_SA start====\n")
    clusters_set.append(SA.WCA_SA(targetMDG, WCA(targetMDG)))
    print("====WCA_SA end====\n\n")

    print("====PSO start====\n")
    clusters_set.append(PSO.PSO(targetMDG))
    print("====PSO end====\n\n")

    print("====WCA_PSO start====\n")
    clusters_set.append(PSO.WCA_PSO(targetMDG, WCA(targetMDG)))
    print("====WCA_PSO end====\n\n")

    # get TMQ data
    for clusters in clusters_set:
        TMQ.append(TurboMQ.calculate_fitness(clusters, targetMDG))
        cohe_coup.append(TurboMQ.get_cohesion_coupling(clusters, targetMDG))

    # write result files
    for i in range(len(methods)):
        DotParser.write_file(file_path, methods[i], clusters_set[i], targetMDG)

    return TMQ, cohe_coup, clusters_set
def TSP(stops, Alg, steps, param, seed=None, coordfile='xycoords.txt'):
    '''A wrapper function that attempts to optimize the traveling
    salesperson problem using a specified algorithm. If coordfile
    exists, a preexisting set of coordinates will be used. Otherwise,
    a new set of "stops" coordinates will be generated for the person
    to traverse, and will be written to the specified file.'''

    # Create the distance matrix, which will be used to calculate
    # the fitness of a given path
    if os.path.isfile(coordfile):
        coords = scipy.genfromtxt(coordfile)
        distMat = DistanceMatrix(coords)
    else:
        distMat = GenerateMap(stops, fname=coordfile, seed=seed)

    if Alg == 'HC':
        # param is the number of solutions to try per step
        bestSol, fitHistory = hc.HillClimber(steps, param, distMat, seed)
    elif Alg == 'SA':
        # param is a placeholder
        bestSol, fitHistory = sa.SimulatedAnnealing(steps, param, distMat, seed)
    elif Alg == 'MC3':
        # param is the number of chains
        bestSol, fitHistory = mc3.MCMCMC(steps, param, distMat, seed)
    elif Alg == 'GA':
        # param is the population size
        bestSol, fitHistory = ga.GeneticAlgorithm(steps, param, distMat, seed)
    else:
        raise ValueError('Algorithm must be "HC", "SA", "MC3", or "GA".')

    outfname = coordfile + '-' + Alg + '-' + str(steps) + '-' + str(param) + '.txt'
    scipy.savetxt(outfname, scipy.array(bestSol), fmt='%i')

    return bestSol, fitHistory
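# A minimal usage sketch for the wrapper above (illustrative values, not from
# the source; assumes the hc/sa/mc3/ga modules and GenerateMap are importable):
#
#     bestSol, fitHistory = TSP(stops=50, Alg='SA', steps=1000, param=0,
#                               seed=42, coordfile='xycoords.txt')
#
# With these arguments the best tour found is written to
# 'xycoords.txt-SA-1000-0.txt', following the outfname scheme above.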
def main():
    global day
    # Read the order data: the order list and the list of order totals
    order_list, order_money = read_data()
    # Process orders day by day while the order queue is not empty
    while len(order_list) > 0:
        # Select today's orders to form today's order matrix
        today_order, today_money = select_today(order_list)
        # Optimize today's schedule with simulated annealing
        sa = SA.SA(today_order, today_money)
        result = sa.slove()
        print('Day %s scheduling result: %s' % (str(day), str(result)))
        # Update the order queue according to the scheduling result
        update_order(result, order_list, order_money, today_order)
        print('Order queue after day %s: %s' % (str(day), str(order_list)))
        # Update the cost consumed according to the scheduling result
        update_lost(result, order_money, today_order)
        # Advance to the next day
        day += 1
    # Compute the final profit
    total_money = 0
    for i in range(len(order_money)):
        total_money += order_money[i]
    print("Final profit: %d" % total_money)
def runGrasp(self, graph, maxIter):
    bestSolution = self.initialSolution(graph)
    bestSolutionColors, sumOfBestColors = graph.checkColor()
    for i in range(maxIter):
        # Reset the colors of the graph
        self.resetGraph(graph)
        # listColors starts as a boolean list of length = number of vertices
        # in the graph, all False, indicating that the color has not yet been
        # fully checked in the RCL
        self.listColors = self.initListColors(graph)
        # Run the constructive phase of GRASP
        newGraph = self.constructivePhase(graph)
        solutionColors, sumOfColors = newGraph.checkColor()
        sa = SA.SA()
        solutionColors, graph = sa.runSa(None, 10000, 20, 0.95, 1000, False,
                                         newGraph, solutionColors)
        solution = graph.getVertices().copy()
        bestSolution, bestSolutionColors = self.updateSolution(
            solution, solutionColors, bestSolution, bestSolutionColors)
    return bestSolutionColors
def algrun():
    global t, list
    time_start = time.time()
    map = Map.Map()
    x, y = Read.Read()
    map.path[0] = x
    map.path[1] = y
    # state == 0: SA, state == 1: LS
    state = 0
    if state == 0:
        # SA(map, T, Coolrate)
        alg = SA.SA(map, 100, 0.001)
        alg.run()
    elif state == 1:
        # LS(map, state), state = 0: LS1, state = 1: LS2, state = 2: 2-OPT
        alg = LS.LS(map, 2)
        alg.run()
    list.append(alg.path.distance())
    time_end = time.time()
    t += (time_end - time_start)
    print('total time cost:', time_end - time_start, 's')
def run(FacilityNum, CustomerNum, Capacity, OpeningCost, Demand, Assignment):
    test = SA.SA(FacilityNum, CustomerNum, Capacity, OpeningCost, Demand,
                 Assignment)
    time_start = time.time()
    Str = test.run()
    time_end = time.time()
    Str += "Time cost: " + str(time_end - time_start) + 's\n\n'
    return Str
def getBestCrossover(self):
    from pprint import pprint
    if self.stockVar.get() == 4:
        self.tickerSymbol = self.tickerTextBox.get()
    print(self.pressed)
    if self.pressed == 0:  # Box not checked
        self.startDate = self.date1TextBox.get()
        self.endDate = self.date2TextBox.get()
    entireDictionaryOfData = ystockquote.get_historical_prices(
        self.tickerSymbol, self.startDate, self.endDate)
    listOfDates = self.getListOfDatesFromHistoricalData(entireDictionaryOfData)
    listOfCloses = self.getListOfClosesFromHistoricalData(
        entireDictionaryOfData, listOfDates)

    # Dispatch to the optimizer selected in the UI
    if self.algorithmVar.get() == 0:
        theBestCrossover = HillClimbing.runHillClimbing(listOfCloses)
    elif self.algorithmVar.get() == 1:
        theBestCrossover = NelderMeadNew.runNelderMead(listOfCloses)
    elif self.algorithmVar.get() == 2:
        theBestCrossover = GeneticAlgorithms.runGA(listOfCloses)
    elif self.algorithmVar.get() == 3:
        theBestCrossover = DifferentialEvolution.runDE(listOfCloses)
    elif self.algorithmVar.get() == 4:
        theBestCrossover = PSO.runPSO(listOfCloses)
    elif self.algorithmVar.get() == 5:
        theBestCrossover = SA.runSA(listOfCloses)

    shortLength = theBestCrossover[0]
    longLength = theBestCrossover[1]
    if shortLength > longLength:
        shortLength, longLength = longLength, shortLength

    self.bestCrossoverLabel['text'] = 'The Best Crossover is: '
    self.answerLabel['text'] = ('Short Length = ' + str(shortLength) +
                                ', Long Length = ' + str(longLength))
    self.makeGraph(listOfCloses, listOfDates, self.tickerSymbol,
                   shortLength, longLength)
def main():
    parser = argparse.ArgumentParser(description='Modularize given dot file')
    parser.add_argument('file_path', metavar='F', type=str, nargs='+',
                        help='File path for dot file')
    parser.add_argument(
        '-a',
        help='Algorithm for modularization. '
             'All, WCA, HC, WCA_HC, SA, WCA_SA, PSO, WCA_PSO')
    args = parser.parse_args()
    file_path = args.file_path

    if args.a:
        modularizeMethod = args.a
    else:
        modularizeMethod = 'All'

    for single_file in file_path:
        targetMDG = MakeResult.make_target_MDG(single_file)
        clusters = None
        if modularizeMethod == 'WCA':
            clusters = WCA(targetMDG)
        elif modularizeMethod == 'HC':
            clusters = HC.HC(targetMDG)
        elif modularizeMethod == 'WCA_HC':
            clusters = HC.WCA_HC(targetMDG, WCA(targetMDG))
        elif modularizeMethod == 'SA':
            clusters = SA.SA(targetMDG)
        elif modularizeMethod == 'WCA_SA':
            clusters = SA.WCA_SA(targetMDG, WCA(targetMDG))
        elif modularizeMethod == 'PSO':
            clusters = PSO.PSO(targetMDG)
        elif modularizeMethod == 'WCA_PSO':
            clusters = PSO.WCA_PSO(targetMDG, WCA(targetMDG))
        elif modularizeMethod == 'All':
            MakeResult.print_result(single_file)

        if modularizeMethod != 'All':
            DotParser.write_file(single_file, modularizeMethod, clusters,
                                 targetMDG)
def SA(self):
    begin = timeit.default_timer()
    sa = SA.SA()
    cores_usadas, self.graph = sa.runSa(self.graph, 10000, 20, 0.95, 1000,
                                        True, None, None)
    end = timeit.default_timer()
    # Generate the output file
    self.outFile(cores_usadas, (end - begin))
def run_trials(T0, alpha, out_avg_file):
    beta = 1
    m_initial = 1
    max_time = 1100
    trials = 30
    solution_sets = []
    best_s_sets = []
    total_cpu_time = 0

    for i in range(trials):
        t = time()
        (solution, best_s) = SA.simulated_annealing(Z[i], T0, alpha, beta,
                                                    m_initial, max_time)
        total_cpu_time += (time() - t)
        solution_sets.append(solution)
        best_s_sets.append(best_s)

    # Best vs. current cost, averaged over all trials
    with open(out_avg_file, 'w') as out_file:
        for i in range(max_time):
            total_cost_current = 0
            total_cost_best = 0
            for sol in range(trials):
                total_cost_current += solution_sets[sol][i][1]
                total_cost_best += solution_sets[sol][i][2]
                iteration = solution_sets[sol][i][0]
            avg_cost_current = total_cost_current / trials
            avg_cost_best = total_cost_best / trials
            out_file.write("%d, %f, %f\n" % (iteration, avg_cost_current,
                                             avg_cost_best))

    # Average and standard deviation of the best cost at iteration G
    G = 1000
    best_costs = numpy.zeros([trials, 1])
    for sol in range(trials):
        best_costs[sol] = solution_sets[sol][G][2]
    avg_cost = numpy.mean(best_costs)
    std_cost = numpy.std(best_costs)
    print("Average best cost: %f, Std Dev: %f" % (avg_cost, std_cost))

    # Average CPU time
    avg_cpu_time = total_cpu_time / trials
    print("Average CPU time: %f" % avg_cpu_time)
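# Note on the trace layout assumed by the averaging loops above and below
# (inferred from the indexing, not stated explicitly in the source): each
# trace returned by SA.simulated_annealing appears to be a list of
# (iteration, current_cost, best_cost) triples, one per time step up to
# max_time, so index 1 averages the current cost and index 2 the best-so-far
# cost across the trials.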
def run_trials(T0, alpha, out_avg_file):
    solution_sets = []
    best_s_sets = []
    total_cpu_time = time()
    for i in range(trials):
        (solution, best_s) = SA.simulated_annealing(Z[i], T0, alpha, beta,
                                                    m_initial, max_time)
        solution_sets.append(solution)
        best_s_sets.append(best_s)
    total_cpu_time = time() - total_cpu_time

    # Best vs. current cost, averaged over all trials
    with open(out_avg_file, 'w') as out_file:
        for i in range(max_time):
            total_cost_current = 0
            total_cost_best = 0
            for sol in range(trials):
                total_cost_current += solution_sets[sol][i][1]
                total_cost_best += solution_sets[sol][i][2]
                iteration = solution_sets[sol][i][0]
            avg_cost_current = total_cost_current / trials
            avg_cost_best = total_cost_best / trials
            out_file.write("%d, %f, %f\n" % (iteration, avg_cost_current,
                                             avg_cost_best))

    # Average and standard deviation of the final best costs
    best_costs = numpy.zeros([trials, 1])
    for sol in range(trials):
        best_costs[sol] = solution_sets[sol][max_time - 1][2]
    avg_cost = numpy.mean(best_costs)
    std_cost = numpy.std(best_costs)
    print("Average best cost: %f, Std Dev: %f" % (avg_cost, std_cost))

    # Average CPU time
    avg_cpu_time = total_cpu_time / trials
    print("Average CPU time: %f" % avg_cpu_time)

    with open("best_solutions_SA.txt", 'w') as output:
        output.write('SA = [')
        for best_cost in best_costs:
            output.write(str(best_cost[0]) + ' ')
        output.write('];')
def run(self):
    for index in range(len(result)):
        threadLock.acquire()
        global current
        current = index
        threadLock.release()
        num = result[index][0]
        ch = result[index][1]
        po = result[index][2]
        string = SA.bi(ch, po)
        cursor.execute("update raw2 set bilingual=%s where num=%s",
                       [string, num])
        db.commit()
import SA
import random

ap = 20
min_s = 0
max_s = 127
s_values = []
cost_values = []

# Set initial values
s = [random.randint(min_s, max_s), random.randint(min_s, max_s)]
cost_s = SA.cost(s)
s_values.append(s)
cost_values.append(cost_s)
best_s = s
best_cost = cost_s

# Find other s_values and calculate their costs
while len(s_values) != 20:
    s = [random.randint(min_s, max_s), random.randint(min_s, max_s)]
    if s not in s_values:
        cost_s = SA.cost(s)
        if cost_s < best_cost:
            best_s = s
            best_cost = cost_s
        s_values.append(s)
        cost_values.append(cost_s)
          (119.3, 26.08), (115.89, 28.68), (113, 28.21), (114.31, 30.52),
          (113.23, 23.16), (121.5, 25.05), (110.35, 20.02), (103.73, 36.03),
          (108.95, 34.27), (104.06, 30.67), (106.71, 26.57), (102.73, 25.04),
          (114.1, 22.2), (113.33, 22.13)]

city = city34
d = cal_distance(city)
aco_round = 5000
ga_round = 5000
ts_round = 5000
sa_round = 5000

aco_champions, aco_best, aco_t = ACO.tsp_aco(city, d, aco_round)
ga_champions, ga_best, ga_t = GA.tsp_ga(city, d, ga_round)
ts_champions, ts_best, ts_t = TS.tsp_ts(city, d, ts_round)
sa_champions, sa_best, sa_t = SA.tsp_sa(city, d, sa_round)

# SA result
plot_champions(sa_champions)
plot_current_best(city, sa_best)
print("sa best", sa_best[0], "spend", sa_t, "round", sa_round)

# TS result
plot_champions(ts_champions)
plot_current_best(city, ts_best)
print("ts best", ts_best[0], "spend", ts_t, "round", ts_round)

# GA result
plot_champions(ga_champions)
plot_current_best(city, ga_best)
print("ga best", ga_best[0], "spend", ga_t, "round", ga_round)
    if Track == []:
        f.write('FAIL')
        return 0
    else:
        f.write('OK')
        for i in range(n):
            f.write('\n')
            for j in range(n):
                if (i, j) in Track:
                    f.write('1')
                elif (i, j) in blocks:
                    f.write('2')
                else:
                    f.write('0')
        return 1


method, n, p, segments, blocks = init()
if len(segments) < p - n:
    Track = []
elif segments == [[]] and p > 0:
    Track = []
else:
    if method == 'DFS' or method == 'BFS':
        Track = Traverse.Traverse_Main(blocks, method, n, p, segments)
    elif method == 'SA':
        Track = SA.SA_Main(blocks, n, p, segments)
    else:
        Track = []
output(blocks, Track, n)
def main(graph, algo, cutoff, seed):
    random.seed(seed)
    graph_name = graph.split('/')[-1].split('.')[0]
    sol_file = "_".join([graph_name, algo, str(cutoff), str(seed)]) + '.sol'
    trace_file = "_".join([graph_name, algo, str(cutoff), str(seed)]) + '.trace'
    output_dir = './output/'  # './{}_output/'.format(algo)
    start_time = time.time()

    # Create output directory if it does not exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    fo = open(os.path.join(output_dir, trace_file), 'w')

    if algo == 'BnB':
        if graph_name not in opt_cutoff:
            return
        G = BnB.parse_edges(graph)
        num_vc_nodes, vc = BnB.Branch_and_Bound(G, start_time, cutoff, fo,
                                                opt_cutoff[graph_name], seed)
        fo.close()
        total_time = round((time.time() - start_time), 5)
        print('BnB Algo Runtime: ' + str(total_time))
        with open(os.path.join(output_dir, sol_file), 'w') as f:
            f.write(str(num_vc_nodes) + "\n")
            f.write(','.join([str(n) for n in sorted(vc)]))

    if algo == 'SA':
        sa_obj = SA.SA()
        G, nV, nE = sa_obj.parse_edges(graph)
        G_init = G.copy()
        sol = sa_obj.initial_solution(G=G_init, fo=fo, start_time=start_time,
                                      cutoff=cutoff, input_file=graph)
        final_solution = sa_obj.simulate_annealing(
            G, fo, sol, cutoff, nV, start_time, graph,
            opt_cutoff.get(graph_name, 10))
        fo.close()
        print('SA Solution: ({}) {}'.format(len(final_solution),
                                            final_solution))
        total_time = round((time.time() - start_time), 5)
        print('SA Runtime (s): {}'.format(total_time))
        with open(os.path.join(output_dir, sol_file), 'w') as f:
            f.write(str(nV) + "\n")
            f.write(','.join([str(n) for n in sorted(final_solution)]))

    if algo == 'approx':
        G = approx.parse_edges(graph)
        num_vc_nodes, vc = approx.mdg(G, start_time, cutoff)
        total_time = round((time.time() - start_time), 5)
        print('Approx Algo Runtime: ' + str(total_time))
        with open(os.path.join(output_dir, sol_file), 'w') as f:
            f.write(str(num_vc_nodes) + "\n")
            f.write(','.join([str(n) for n in sorted(vc)]))
        with open(os.path.join(output_dir, trace_file), 'w') as f:
            f.write(' '.join([str(total_time), str(num_vc_nodes)]))

    if algo == 'GA':
        runner = GA.manual_runner(graph)
        pop_size = 3 * len(runner.graph.vert_dict)
        best_ind = runner.run_test(MU=pop_size, MUTPB=0.07, CXPB=0.8,
                                   NGEN=2000, verbose=False, cutoff=cutoff,
                                   trace_path=os.path.join(output_dir,
                                                           trace_file),
                                   start=start_time)
        total_time = round((time.time() - start_time), 5)
        print('GA Runtime: ' + str(total_time))
        num_nodes = sum(best_ind)
        solution_vertices = runner.sorted_vertex_ids(best_ind)
        with open(os.path.join(output_dir, sol_file), 'w') as f:
            f.write(str(num_nodes) + "\n")
            f.write(','.join([str(n) for n in sorted(solution_vertices)]))
        with open(os.path.join(output_dir, trace_file), 'a') as f:
            f.write(','.join([str(total_time), str(num_nodes)]))
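# Example invocation (hypothetical graph path and seed; the .sol/.trace file
# names follow the "_".join(...) scheme above):
#
#     main('./DATA/karate.graph', 'SA', cutoff=600, seed=42)
#
# would write ./output/karate_SA_600_42.sol and ./output/karate_SA_600_42.trace.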
if opt == "--iter": iter = long(arg) if opt == "--reset": reset = long(arg) if opt == "--seed": seed = long(arg) if opt == "--freq": stat_freq = long(arg) if opt == "--steps": step_limit = int(arg) if opt == "--time": time_limit = int(arg) if opt == "--states": num_states = int(arg) if opt == "--symbols": num_symbols = int(arg) print "BB_Anneal_Accel.py --T0=%f --Tf=%f --iter=%d --reset=%d --seed=%s --freq=%d --steps=%d --time=%d --states=%d --symbols=%d" % \ (T0,Tf,iter,reset,seed,stat_freq,step_limit,time_limit,num_states,num_symbols) print a = 1.0 / reset * (math.exp(math.pow(T0 / Tf, reset / float(iter))) - math.e) print "a = ", a print tm_obj = TM_Object(num_states, num_symbols, step_limit, time_limit, seed) make_TMs = SA.SA(T0, Tf, a, tm_obj, reset, stat_freq, seed) (best_TM, best_energy, best_extra) = make_TMs.run()
def predict_vm(ecs_infor_array, input_file_array):
    ## Determine the earliest date, used later as the time base
    ID, flavor, y, s = ecs_infor_array[0].rstrip('\n').split()
    t = y + ' ' + s
    t = time.strptime(t, '%Y-%m-%d %H:%M:%S')
    initial_year = t.tm_year  # year of the first record in the history data

    first = input_file_array[0].rstrip('\n').split()
    serve_cpu = int(first[0])
    serve_ram = int(first[1])

    vm_number = input_file_array[2].rstrip('\n')
    vm_number = int(vm_number)
    vm = []
    i = 3
    # while input_file_array[i] != '\n':
    while i < vm_number + 3:
        line = input_file_array[i]
        flavor, vm_cpu, vm_ram = line.rstrip('\n').split()
        flavor = int(flavor.strip('flavor'))
        vm_cpu = int(vm_cpu)
        vm_ram = int(vm_ram)
        vm.append([flavor, vm_cpu, vm_ram])
        i += 1
    vm_vector = []
    for item in vm:
        vm_vector.append(item[0])

    resource_type = input_file_array[i + 1].rstrip('\n')

    ## Prediction window (start and end)
    predict_begin, zero = input_file_array[i + 3].rstrip('\n').split()
    predict_begin = time.strptime(predict_begin, '%Y-%m-%d')
    predict_begin = predict_begin.tm_yday + 365 * (predict_begin.tm_year - initial_year)
    predict_end, zero = input_file_array[i + 4].rstrip('\n').split()
    predict_end = time.strptime(predict_end, '%Y-%m-%d')
    predict_end = predict_end.tm_yday + 365 * (predict_end.tm_year - initial_year)
    delta_day = predict_end - predict_begin

    ## Special holidays (Singles' Day, National Day, Christmas, Spring Festival)
    d_0 = time.strptime('2015-11-11', '%Y-%m-%d').tm_yday + 365 * (2015 - initial_year)
    d_1 = time.strptime('2015-10-01', '%Y-%m-%d').tm_yday + 365 * (2015 - initial_year)
    d_2 = time.strptime('2015-12-25', '%Y-%m-%d').tm_yday + 365 * (2015 - initial_year)
    d_3 = time.strptime('2016-02-07', '%Y-%m-%d').tm_yday + 365 * (2016 - initial_year)
    d_4 = time.strptime('2016-10-01', '%Y-%m-%d').tm_yday + 365 * (2016 - initial_year)
    d_5 = time.strptime('2016-11-11', '%Y-%m-%d').tm_yday + 365 * (2016 - initial_year)
    d_6 = time.strptime('2016-12-25', '%Y-%m-%d').tm_yday + 365 * (2016 - initial_year)
    d_7 = time.strptime('2017-01-27', '%Y-%m-%d').tm_yday + 365 * (2017 - initial_year)
    d_8 = time.strptime('2017-10-01', '%Y-%m-%d').tm_yday + 365 * (2017 - initial_year)
    d_9 = time.strptime('2017-11-11', '%Y-%m-%d').tm_yday + 365 * (2017 - initial_year)
    d_10 = time.strptime('2017-12-25', '%Y-%m-%d').tm_yday + 365 * (2017 - initial_year)

    ## Exponential-smoothing prediction over all historical data (per day)
    flag = 1
    for line in ecs_infor_array:
        line = line.rstrip('\n')  # strip the trailing newline
        ID, flavor, y, s = line.split()  # split each line on whitespace
        t = y + ' ' + s
        t = time.strptime(t, '%Y-%m-%d %H:%M:%S')
        day_in2015 = t.tm_yday + 365 * (t.tm_year - initial_year)
        flavor = flavor.strip('flavor')  # drop the 'flavor' prefix, keep the number
        flavor = int(flavor)
        ## While reading, accumulate directly into X, one row per flavor
        if vm_vector.count(flavor) != 0:  # the flavor is within the prediction scope
            if flag == 1:
                # Besides the first year, the day of the first record also
                # sets the sequence length for the later prediction
                initial_day = day_in2015
                total_days = predict_begin - initial_day
                X = [[0 for i in range(total_days)] for j in range(vm_number)]
                flag = 0
            # indices start at zero; the shape stays vm_number * total_days
            X[vm_vector.index(flavor)][day_in2015 - initial_day] += 1

    ## Alternative: second-order exponential smoothing for every flavor with
    ## one shared parameter (disabled):
    # A = [0.15] * 15
    # a = [A[item - 1] for item in vm_vector]
    # y_test = exp_smoothing.double_exp_smoothing(X, a, delta_day, vm_number, total_days)

    ## Flavors 3, 4, 6, 7, 10, 12, 13, 14, 15 do not appear in the
    ## preliminary round; 1, 2, 5, 8, 9, 11 do
    y_test = []

    ## Noise-ceiling parameters
    # If the drop-the-last-bin trick is used, consider predicting a bit more
    # or filtering less noise. The smaller the value, the lower the threshold
    # and the more noise gets filtered out.
    over_lie = 3.5  # a day is abnormal if its total exceeds this multiple of the per-day average (column sum vs. average over all columns)
    over_ratio = 0.2  # within an abnormal day, the share of that day's total a single flavor must exceed to be treated as abnormal

    ## Second-order smoothing parameters
    # fix_a = 0.1  ## one fixed value for every flavor:
    # flavor1 = flavor2 = ... = flavor15 = fix_a
    ## Per-flavor values; the smaller the flavor, the fewer resources it uses
    flavor1 = 0.10  ## confirmed; little effect on the later packing, so over-predicting is fine, 0.1-0.15
    flavor2 = 0.12  ## confirmed, 0.1-0.15
    flavor3 = 0.2   ## similar to flavor9, 0.2-0.3
    flavor4 = 0.1   ## very stable with occasional spikes, 0.1-0.2
    flavor5 = 0.05  ## confirmed; 0.1 scored 81.56; predictions may land right in the dense region, so usually 0.05-0.1
    flavor6 = 0.2   ## 0.2-0.25
    flavor7 = 0.2   ## similar to flavor9
    flavor8 = 0.1   ## confirmed; sensitive, 0.1-0.15
    flavor9 = 0.3   ## confirmed; 0.2-0.3 barely changes the score, theoretically about 0.2
    flavor10 = 0.2  ## too sparse to show a pattern
    flavor11 = 0.1  ## confirmed
    flavor12 = 0.1  ## similar to flavor1
    flavor13 = 0.2  ## too sparse to show a pattern
    flavor14 = 0.2  ## sparse, large fluctuations
    flavor15 = 0.2  ## sparse, large fluctuations

    ss = []
    for i in range(total_days):
        ss.append(sum([x[i] for x in X]))  # daily total across all flavors

    for i in range(vm_number):
        ## Denoising, covering holidays and outliers
        for j in range(total_days):
            # Holiday smoothing for flavor 8 had little effect and is disabled:
            # if vm_vector[i] == 8 and (j + initial_day in (d_0, d_1, d_2, d_3, d_4, d_5, d_6)):
            #     X[i][j] = (X[i][j - 1] + X[i][j + 1] + X[i][j]) / 3
            sum_flavor = sum([x[j] for x in X])  # total requests across flavors on day j
            # If one day's total is unusually large, locate the abnormal
            # flavor by its share within that day and shrink it
            if sum_flavor > over_lie * sum(ss) / total_days and X[i][j] / sum_flavor > over_ratio:
                X[i][j] *= 0.8
                # X[i][j] = sum_flavor * over_ratio  # 0.2-82.27, 0.5-83.866, 0.3-82.0

        if vm_vector[i] == 1:
            y_test.append(simple_predictor.double_exp_smoothing(X[i], flavor1, delta_day))
        elif vm_vector[i] == 2:
            y_test.append(simple_predictor.double_exp_smoothing(X[i], flavor2, delta_day))
        elif vm_vector[i] == 5:
            y_test.append(simple_predictor.double_exp_smoothing(X[i], flavor5, delta_day))
        elif vm_vector[i] == 8:
            y_test.append(simple_predictor.double_exp_smoothing(X[i], flavor8, delta_day))
        elif vm_vector[i] == 9:
            y_test.append(simple_predictor.double_exp_smoothing(X[i], flavor9, delta_day))
        elif vm_vector[i] == 11:
            y_test.append(simple_predictor.double_exp_smoothing(X[i], flavor11, delta_day))
        elif vm_vector[i] == 12:  ## absent from the sample data
            y_test.append(simple_predictor.double_exp_smoothing(X[i], flavor12, delta_day))
        elif vm_vector[i] in (3, 4, 6, 7, 10, 13, 14, 15):  ## absent from the sample data
            y_test.append(simple_predictor.oneweek(X[i], predict_begin, predict_end))

    ## y_test holds, in prediction order, the predicted count of each flavor
    print('y_test')
    print(y_test)
    flavor_total_number = sum(y_test)

    ## items expands y_test into flavor labels, one label per predicted instance
    items = []
    for i in range(len(y_test)):
        if y_test[i] != 0:
            for j in range(y_test[i]):
                index_vm = vm_vector[i]
                items.append(index_vm)
    print('vm_vector')
    print(vm_vector)
    print(items)

    ## items_w attaches the resource demands to items, ready for packing
    items_w = []
    bin_height_cpu = serve_cpu
    bin_height_mem = serve_ram * 1024
    if resource_type == 'CPU':
        resource = 1
    else:
        resource = 2
    for it in items:
        items_w.append([it, vm[vm_vector.index(it)][1], vm[vm_vector.index(it)][2]])
    print(items_w)
    items_w.sort(key=lambda item_for_sort: item_for_sort[resource], reverse=True)

    first_fit_sort, min_goal = SA.sa(items_w, bin_height_cpu, bin_height_mem, resource)
    print('SA packing before dropping the last bin')
    print(first_fit_sort)

    # th = 0.01  # near zero essentially never drops data, i.e. no intervention after packing
    th = 0.65  # lowering this parameter lowers the score proportionally, suggesting the packing matters more than the prediction; above about 0.7 the score drops as well

    ## Post-adjust the packed resources
    if 0 < (min_goal - min_goal // 1) < th:
        print('Drop the last bin and adjust the prediction to raise utilization')
        it = first_fit_sort.pop()
        print('items before the change')
        print(items)
        for item in it:
            items.remove(item)
            xuhao = vm_vector.index(item)
            y_test[xuhao] -= 1
        print('items after the change')
        print(items)
        print('y_test after the change')
        print(y_test)
        result = [str(sum(y_test))]
        for i in range(len(y_test)):
            result.append('flavor' + str(vm_vector[i]) + ' ' + str(y_test[i]))
        result.append('')
        result.append(str(len(first_fit_sort)))
        for i in range(len(first_fit_sort)):
            temp = []
            first_fit_set = set(first_fit_sort[i])
            for item in first_fit_set:
                if temp:
                    temp = temp + ' flavor' + str(item) + ' ' + str(first_fit_sort[i].count(item))
                else:
                    temp = str(i + 1) + ' flavor' + str(item) + ' ' + str(first_fit_sort[i].count(item))
            result.append(temp)
        ## END ##
        return result
    else:
        ## Original output
        result = [str(flavor_total_number)]
        for i in range(len(y_test)):
            result.append('flavor' + str(vm_vector[i]) + ' ' + str(y_test[i]))
        result.append('')
        result.append(str(len(first_fit_sort)))
        for i in range(len(first_fit_sort)):
            temp = []
            first_fit_set = set(first_fit_sort[i])
            for item in first_fit_set:
                if temp:
                    temp = temp + ' flavor' + str(item) + ' ' + str(first_fit_sort[i].count(item))
                else:
                    temp = str(i + 1) + ' flavor' + str(item) + ' ' + str(first_fit_sort[i].count(item))
            result.append(temp)
        ## END ##
        return result
# Data Normalization
Xtrain = Xtrain.astype('float32')
Xvalid = Xvalid.astype('float32')
Xtrain /= 255
Xvalid /= 255

# -----------------------------------------------------
# Set hyper-parameters
learning_rate = 0.0001
epoch = 50
batch_size = 32

# SA Parameters
parameters = {
    'x_train': Xtrain,
    'y_train': Ytrain,
    'x_valid': Xvalid,
    'y_valid': Yvalid,
    'batch_size': batch_size,
    'learning_rate': learning_rate
}

# Start SA Algorithm
alg = sannealing.SA(**parameters)
alg.startAlgorithm()

## Outputs:
# model_history.txt: stores the per-epoch loss and accuracy values of each
#   model produced, for training and validation.
# models.txt: stores iteration number, model_no, #parameters, FLOPs, train
#   accuracy, validation accuracy, and model topology.
# sau_sols.pickle: stores information about the solutions on the archive.
# *.json files: store the topology of the solutions on the archive
#   (Keras models).
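# A minimal sketch (an assumption, not part of the original script) of how the
# archive written to sau_sols.pickle could be inspected after a run; the exact
# structure of the stored object is not documented above:
import pickle

with open('sau_sols.pickle', 'rb') as f:
    archive = pickle.load(f)
print('archive type:', type(archive))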
import SA
import random

min_s = 0.0
max_s = 10.0
s_values = []
cost_values = []

# Set initial values
start = []
for i in range(20):
    start.append(random.uniform(min_s, max_s))
cost_s = SA.cost(start)
s_values.append(start)
cost_values.append(cost_s)
best_s = start
best_cost = cost_s

# Find other s_values and calculate their costs
while len(s_values) != 20:
    s = []
    for i in range(20):
        s.append(random.uniform(min_s, max_s))
    if s not in s_values:
        cost_s = SA.cost(s)
        if cost_s > best_cost:
            best_s = s
            best_cost = cost_s
        s_values.append(s)
        cost_values.append(cost_s)
def pruebaSA(d, k):
    for i in range(5):
        SA.ejecutarSA(d, i, k)
        sys.exit()
    if opt == "--T0":
        T0 = float(arg)
    if opt == "--Tf":
        Tf = float(arg)
    if opt == "--iter":
        iter = int(arg)
    if opt == "--reset":
        reset = int(arg)
    if opt == "--seed":
        seed = int(arg)
    if opt == "--freq":
        stat_freq = int(arg)
    if opt == "--m":
        m = int(arg)
    if opt == "--n":
        n = int(arg)

print("Cube_Cube_Anneal.py --T0=%f --Tf=%f --iter=%d --reset=%d --seed=%s "
      "--freq=%d --m=%d --n=%d" %
      (T0, Tf, iter, reset, seed, stat_freq, m, n))
print()

a = 1.0 / reset * (math.exp(math.pow(T0 / Tf, reset / float(iter))) - math.e)
print("a = ", a)
print()

cube_obj = Cube_Object(m, n, 1.0, seed)
make_cubes = SA.SA(T0, Tf, a, cube_obj, reset, stat_freq, seed)
(best_cube, best_energy, best_extra) = make_cubes.run()
                 [m.city[t[i]][1], m.city[t[i - 1]][1]])
    plt.plot(m.city[0][0], m.city[0][1], 'o')
    plt.plot([m.city[t[0]][0], m.city[t[m.N - 1]][0]],
             [m.city[t[0]][1], m.city[t[m.N - 1]][1]])
    st.pyplot(p4)
    st.text("Best order:")
    t
    st.text("Best value: {}".format(value))
    st.text("Total iterations: {}".format(c))
    st.text("Total time: {}".format(time_end - time_start))
    p5 = plt.figure()
    plt.plot(res)
    st.pyplot(p5)

if st.button("SA3"):
    time_start = time.time()
    c, t, value, res = m3.SA()
    time_end = time.time()
    p6 = plt.figure()
    for i in range(1, m.N):
        plt.plot(m.city[i][0], m.city[i][1], 'o')
        plt.plot([m.city[t[i]][0], m.city[t[i - 1]][0]],
                 [m.city[t[i]][1], m.city[t[i - 1]][1]])
    plt.plot(m.city[0][0], m.city[0][1], 'o')
    plt.plot([m.city[t[0]][0], m.city[t[m.N - 1]][0]],
             [m.city[t[0]][1], m.city[t[m.N - 1]][1]])
    st.pyplot(p6)
    st.text("Best order:")
    t
    st.text("Best value: {}".format(value))
    st.text("Total iterations: {}".format(c))
    st.text("Total time: {}".format(time_end - time_start))
# Quality: Overall shortest tour found
# Route: Array of node_ids in order of travel [node_1, node_2, ...]
if options.method == "BnB":
    branch = BranchAndBound.BranchAndBound(cities, options.cutoff)
    branch.main()
    quality = branch.minimum
    route = branch.bestSolution
    trace = branch.trace
elif options.method == "Approx":
    trace, quality, route = construction_heuristic.nearest_neighbor(
        params, cities, options.cutoff)
elif options.method == "LS1":
    # the second argument is the cooling rate; the default is 0.001
    s = SA.SimulatedAnnealing(cities, 0.00001)
    s.anneal(options.cutoff)
    quality = s.best_distance
    route = s.best_route
    trace = s.trace
elif options.method == "LS2":
    g = genetic.genetic(params, cities, options.cutoff)
    trace, quality, route = g.evolve()

print(quality)
for id in route:
    print(id, end=" ")
print("")
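# Note (an assumption -- SA.SimulatedAnnealing's internals are not shown
# here): with a standard geometric schedule T <- T * (1 - rate), the cooling
# rate passed above (0.00001) cools far more slowly than the stated default
# (0.001), trading runtime for a longer exploration phase before the tour
# freezes.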
#                          [4, 12, 3, 13], [10, 9, 6, 7], [5, 8, 13, 11], [5, 7, 9, 1]])
# print(MatrixWithSolution.Pos)
# print(MatrixWithSolution.FitnessPosSelf())
# print(MatrixWithSolution.FirstAxiom())
# print(MatrixWithSolution.ThirdAxiom())
# print('compare result', Matrix.CompareLines([1, 2, 4], [1, 2, 3]))

Curs = list()
Q = 3  # dimension of the plane
try_count = 0
last_iter_count = 0
while True:
    time_start = time.time()
    Matrix = SaMatrix.Matrix(Q)
    SaRun = SA.SA(Matrix)
    Curs.clear()
    try_count += 1
    iter_count = 0
    for n in range(100000):
        iter_count += 1
        if SaRun.T < 0.000001:
            break  # lower bound on the temperature
        # print(n)
        cur = SaRun.Run()
        # print("current: ", Matrix.Pos)
        # print("temp: ", SaRun.T)
        Curs.append(Matrix.FitnessPosSelf())
        last_iter_count = n
        if cur == 0:
            print("N:", n)
from SA import *

c = SA("ffield1.reax", "params", "Trainingfile.txt", "Inputstructurefile.txt")
c.anneal()
print(c.costs)
print([c.sol_, c.cost_])
print "Do you want to add your own file for inputs or let me create some random inputs" print "press 1 for random otherwise enter your full file location" s = raw_input() n = 0 if s == "1": # creating random inputs print "enter the amount of inputs (preferrably close to 200)" n = input() input_data = np.random.randint(1000, size=(n, 2)) else: print "Input should be in the format as described in the Readme section" #fetching file from user's database with open(s) as f: for line in f: numbers_str = line.split() x = numbers_str[0] y = numbers_str[1] input_data.append((x, y)) final_arr = lib.sa_algorithm(input_data) final_l = lib.total_length(final_arr, n) print "minimum length using simulated annealing- " print final_l print "\nfinal order of coordinates to visit for near optimal solution--\n" print final_arr
f_2 = []
for i in list(range(len(f_1))):
    f_2.append((f_1[i] - average_f)**2)
f_2 = sum(f_2) / (len(f_2) - 1)  # sample variance of the objective values
detal_0 = 6 * math.sqrt(f_2)     # 6 sigma of the sampled objective
K = 3
T = [detal_0 * K]                # initial temperature
M = 800
for i in list(range(M)):
    T.append(T[i] * 0.98)        # geometric cooling schedule

f = open('SA.txt', 'a+')
f.write(str(T[0]))
for n in list(range(10)):
    x_0 = []
    x_0.append(random.random())
    x_0.append(random.random() * math.sqrt((1 - x_0[0]) / 2))
    x_0.append(random.random() * pow((1 - x_0[0] - 2 * x_0[1]**2) / 3, 1 / 3))
    x = []
    x.append(x_0)
    for i in list(range(len(T))):
        x.append(SA.iterative_inner(f=obf, x_0=x[i], g=supg, t_0=T[i],
                                    iter_num=200))
        f.write('dat:' + str(x[i]) + '\n')
    f.write('coordinate:' + str(x[M]) + '\n')
    f.write('object:' + str(-obf(x[M])) + '\n')
    f.write('max:' + str(-obf([0.642, 0.3964, 0.302])) +
            '\n---------------------\n')
f.close()
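# A brief note on the schedule above (interpretation, not from the source):
# T[0] = K * 6 * sigma, with sigma the sample standard deviation of the
# sampled objective values, is a common heuristic for choosing an initial
# temperature high enough that early uphill moves are almost always accepted;
# the geometric factor 0.98 then shrinks it to T[0] * 0.98**800, roughly 1e-7
# of the starting value, over the M = 800 cooling steps.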
    houseList.append(Class.Maison(tuple[0], tuple[1]))
    return houseList


def decoder(houseList):
    decodedList = []
    for house in houseList:
        tempList = []
        tempList.extend((house.x, house.y, house.freespace, house.width,
                         house.height))
        decodedList.append(tempList)
    return str(decodedList)


def encoder(decodedList):
    houseList = []
    for entry in decodedList:
        if entry[2] == 2:
            single = Class.SingleHouse(entry[0], entry[1])
            houseList.append(single)
        elif entry[2] == 3:
            bunga = Class.Bungalow(entry[0], entry[1])
            houseList.append(bunga)
        elif entry[2] == 6:
            maison = Class.Maison(entry[0], entry[1])
            houseList.append(maison)
        elif entry[2] == 0:
            water = Class.Water(entry[0], entry[1], entry[3], entry[4], 0)
            houseList.append(water)
    return houseList


p = encoder([[17, 17, 0, 38, 38], [105, 17, 0, 38, 38], [17, 125, 0, 38, 38],
             [105, 125, 0, 38, 38], [6, 6, 6, 11, 10.5],
             [143, 163.5, 6, 11, 10.5], [143, 6, 6, 11, 10.5],
             [123, 100, 3, 10, 7.5], [20, 87, 3, 10, 7.5],
             [78, 163, 3, 10, 7.5], [90, 108, 3, 10, 7.5],
             [50, 105, 3, 10, 7.5], [76, 3, 2, 8, 8], [74, 38, 2, 8, 8],
             [46, 72, 2, 8, 8], [28, 106, 2, 8, 8], [76, 130, 2, 8, 8],
             [117, 76, 2, 8, 8], [77, 105, 2, 8, 8], [54, 83, 2, 8, 8],
             [91, 120, 2, 8, 8], [74, 72, 2, 8, 8], [6, 117, 2, 8, 8],
             [36, 166, 2, 8, 8]])
values, climbed = SA.SA(SA.exponentialCooling, 50000, 1000, p, 20, 10000, 20,
                        True, True)
def train(FIS_name, data, target_col, mf, Ncentroids, overlap, alpha=0.5,
          iterations=50, sa=False, sa_plot=False):
    '''
    Trains a FIS and writes all of its properties to a FIS file using the
    write function.

    Inputs:
        data: numpy array of size > number of centroids x 2
        target_col: integer index of the target column
        Ncentroids: either an integer (the same for each feature) or an
            array of size = number of features
        mf: 'triangle', 'trapezoid' or 'Gaussian'
        overlap: number between 0 and 1; for Gaussian mfs, overlap is the
            variance; for triangle/trapezoid mfs, half of the base
        iterations: number of iterations for the simulated annealing
    Outputs:
        RB: list of lists of integer rules
        target_centroids: list with scaled target centroids
        feature_centroids: the other feature centroids
    '''
    # scale the data
    data, min_x, max_x = scale(data)
    # get centroids
    centroids = cluster(data, target_col, Ncentroids, plot=False)
    # learn WM rules
    RB = WM.learn(data, centroids, overlap, mf, target_col)

    # return everything needed for testing
    target_centroids = centroids[target_col]
    # delete target centroid for testing
    feature_centroids = np.delete(centroids, target_col, 0)
    # delete target values for testing
    targets = data[:, target_col]
    data = np.delete(data, target_col, 1)

    method = 'WM'
    # for simulated annealing, get the new rule base
    if sa:
        method = 'WM+SA'
        RB = SA.search(data, targets, RB, alpha, feature_centroids, overlap,
                       mf, target_centroids, min_x[target_col],
                       max_x[target_col], plot=sa_plot, iterations=iterations)

    # Write FIS file in the format: FIS_name.FIS
    with open(FIS_name + '.FIS', "w") as fis_file:
        write(fis_file, method, mf, overlap, target_centroids,
              feature_centroids, RB)
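# A minimal usage sketch (hypothetical data and FIS name; train() is defined
# above and writes demo.FIS as a side effect):
#
#     import numpy as np
#     data = np.random.rand(100, 3)  # 100 samples, column 2 as the target
#     train('demo', data, target_col=2, mf='Gaussian', Ncentroids=3,
#           overlap=0.2, sa=True, iterations=50)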