def SA():
    SA_iters = 10
    correctCount = 0
    t = 0
    totalTime = 0
    totalIters = 0
    global sa
    sa = SimulatedAnnealing(1e11, .85, hcp)
    while correctCount < NUM_RIGHT:
        start = time.time()
        fit = FixedIterationTrainer(sa, SA_iters)
        fitness = fit.train()
        t = time.time() - start
        totalTime += t
        totalIters += SA_iters
        myWriter.addValue(fitness, "SA_fitness", runNum)
        myWriter.addValue(t, "SA_searchTimes", runNum)
        v = ef.value(sa.getOptimal())
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
            #SA_iters += 1
    myWriter.addValue(t, "SA_times", 0)
    myWriter.addValue(int(SA_iters), "SA_iters", 0)
    # was printing totalIters for the elapsed time; report totalTime instead
    print str(N) + ": SA: " + str(ef.value(sa.getOptimal())) + " took " + str(totalTime) + " seconds and " + str(totalIters) + " iterations"
def run_four_peaks_exploringSA():
    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)
    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 30000, 35000, 40000, 45000, 50000]
    num_repeats = 5
    all_sa_results = []
    all_sa_times = []
    coolings = [0.15, 0.35, 0.55, 0.75, 0.95]
    for cooling in coolings:
        sa_results = []
        sa_times = []
        for i in iters:
            print(i)
            for j in range(num_repeats):
                start = time.time()
                sa = SimulatedAnnealing(1E11, cooling, hcp)
                fit = FixedIterationTrainer(sa, i)
                fit.train()
                end = time.time()
                sa_results.append(ef.value(sa.getOptimal()))
                sa_times.append(end - start)
                print "SA cooling " + str(cooling) + ": " + str(ef.value(sa.getOptimal()))
        all_sa_results.append(sa_results)
        all_sa_times.append(sa_times)  # was appended to all_sa_results, leaving all_sa_times empty

    with open('four_peaks_exploringSA.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for sa_results in all_sa_results:
            writer.writerow(sa_results)
        for sa_times in all_sa_times:
            writer.writerow(sa_times)
    return all_sa_results, all_sa_times
def run_sa(t, CE):
    fname = outfile.format('SA{}'.format(CE), str(t + 1))
    with open(fname, 'a+') as f:
        content = f.read()
        if "fitness" not in content:
            f.write('iterations,fitness,time,fevals\n')
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    sa = SimulatedAnnealing(1E10, CE, hcp)
    fit = FixedIterationTrainer(sa, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = clock() - start  # was time.clock(), mixed with the bare clock() above
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(sa.getOptimal())
        ef.fevals -= 1  # don't count the score lookup as a function evaluation
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
def run_sa(hcp, ef, iterations=200000):
    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, iterations)
    fit.train()
    optimal_result = str(ef.value(sa.getOptimal()))
    print "SA: " + optimal_result
    return optimal_result, iterations
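# A hypothetical usage sketch for run_sa above (not from the original source):
# it wires up a FlipFlop problem the same way other snippets in this section
# do. N and the problem objects here are illustrative assumptions.
#
# N = 80
# ranges = array('i', [2] * N)
# ef = FlipFlopEvaluationFunction()
# odd = DiscreteUniformDistribution(ranges)
# nf = DiscreteChangeOneNeighbor(ranges)
# hcp = GenericHillClimbingProblem(ef, odd, nf)
# best_value, iters = run_sa(hcp, ef, iterations=100000)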
def run_experiment(self, opName):
    """Run a simulated annealing optimization experiment for a given
    optimization problem.

    Args:
        opName (str): Name of the optimization problem.

    """
    outdir = 'results/OPT/{}'.format(opName)  # results directory
    outfile = 'SA_{}_results.csv'.format(self.cr)
    fname = get_abspath(outfile, outdir)  # output filename

    # delete existing results file, if it already exists
    try:
        os.remove(fname)
    except Exception as e:
        print e
        pass
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals,trial\n')

    # start experiment
    for t in range(self.numTrials):
        # initialize optimization problem and training functions
        ranges, ef = self.op.get_ef()
        nf = None
        if opName == 'TSP':
            nf = SwapNeighbor()
        else:
            nf = DiscreteChangeOneNeighbor(ranges)
        odd = DiscreteUniformDistribution(ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        sa = SimulatedAnnealing(1E10, self.cr, hcp)
        fit = FixedIterationTrainer(sa, 10)

        # run experiment and train evaluation function
        start = time.clock()
        for i in range(0, self.maxIters, 10):
            fit.train()
            elapsed = time.clock() - start
            fe = ef.valueCallCount
            score = ef.value(sa.getOptimal())
            ef.valueCallCount -= 1

            # write results to output file
            s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
            with open(fname, 'a+') as f:
                f.write(s)
def run_sa(t, CE):
    fname = outfile.format('SA{}'.format(CE), str(t + 1))
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    sa = SimulatedAnnealing(1E10, CE, hcp)
    fit = FixedIterationTrainer(sa, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = clock() - start  # was time.clock(), mixed with the bare clock() above
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(sa.getOptimal())
        ef.fevals -= 1  # don't count the score lookup as a function evaluation
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
def run_sa(t, CE):
    fname = outfile.format('SA{}'.format(CE), str(t + 1))
    base.write_header(fname)
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    sa = SimulatedAnnealing(1E10, CE, hcp)
    fit = FixedIterationTrainer(sa, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = clock() - start  # was time.clock(), mixed with the bare clock() above
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(sa.getOptimal())
        ef.fevals -= 1  # don't count the score lookup as a function evaluation
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
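# The run_sa variants above call into a base module that is not shown in these
# snippets. A minimal sketch of what those two helpers would need to do,
# assuming plain CSV files (an assumption, not the author's implementation):
def write_header(fname):
    # create/overwrite the results file with the CSV header row
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals\n')

def write_to_file(fname, line):
    # append one pre-formatted CSV row
    with open(fname, 'a') as f:
        f.write(line)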
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals\n')
        ef = FlipFlopEvaluationFunction()
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        sa = SimulatedAnnealing(1E10, CE, hcp)
        fit = FixedIterationTrainer(sa, 10)
        times = [0]
        for i in range(0, maxIters, 10):
            start = clock()
            fit.train()
            elapsed = clock() - start  # was time.clock(), mixed with the bare clock() above
            times.append(times[-1] + elapsed)
            fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            ef.fevals -= 1
            st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
            print(st)
            with open(fname, 'a') as f:
                f.write(st)

# GA
for t in range(numTrials):
    for pop, mate, mutate in product([100], [50, 30, 10], [50, 30, 10]):
        fname = outfile.format('GA{}_{}_{}'.format(pop, mate, mutate), str(t + 1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals\n')
        ef = FlipFlopEvaluationFunction()
        odd = DiscreteUniformDistribution(ranges)
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
expt = "expt_avg"

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
score_RHC.append(train(rhc, "RHC", ef, 200000, "test", expt))
print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(1E9, .98, hcp)
fit = FixedIterationTrainer(sa, 200000)
score_SA.append(train(sa, "SA", ef, 200000, "test", expt))
print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(225, 40, 5, gap)
fit = FixedIterationTrainer(ga, 1000)
score_GA.append(train(ga, "GA", ef, 40000, "test", expt))
print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))

# for mimic we use a sort encoding
ef = TravelingSalesmanSortEvaluationFunction(points)
fill = [N] * N
ranges = array('i', fill)
odd = DiscreteUniformDistribution(ranges)
df = DiscreteDependencyTree(.1, ranges)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
mimic = MIMIC(150, 20, pop)
        fit.train()
        value += ef.value(rhc.getOptimal())
    end = time.time()
    clock_time = (end - start) / nsample
    value = round(value / nsample, 2)
    print "RHC " + str(value), iters, clock_time

#-- Simulated Annealing
sa = SimulatedAnnealing(1E11, .95, hcp)
for iters in niters:
    start = time.time()
    fit = FixedIterationTrainer(sa, iters)
    value = 0
    for isample in range(nsample):
        fit.train()
        value += ef.value(sa.getOptimal())
    end = time.time()
    clock_time = (end - start) / nsample
    value = round(value / nsample, 2)
    print "SA " + str(value), iters, clock_time

#-- Genetic Algorithm
ga = StandardGeneticAlgorithm(200, 100, 10, gap)
for iters in niters:
    start = time.time()
    fit = FixedIterationTrainer(ga, iters)
    value = 0
    for isample in range(nsample):
        fit.train()
        value += ef.value(ga.getOptimal())
    end = time.time()
for SA_COOLING_FACTOR in SA_COOLING_FACTOR_pool:
    sa = SimulatedAnnealing(SA_TEMPERATURE, SA_COOLING_FACTOR, hcp)
    fit_sa = FixedIterationTrainer(sa, n_iteration)
    print("calculating for cooling rate = %f" % SA_COOLING_FACTOR)

    # Training
    start_sa = time.time()
    fit_sa.train()
    end_sa = time.time()

    # Result extracting
    last_training_time_sa = end_sa - start_sa
    sa_training_time[n].append(last_training_time_sa)
    sa_fitness[n].append(ef.value(sa.getOptimal()))

overall_sa_training_time = list_avg(*sa_training_time)
overall_sa_fitness = list_avg(*sa_fitness)

with open(OUTPUT_FILE, "w") as outFile:
    outFile.write(','.join(["sa_cooling_factor", "sa_fitness", "sa_training_time"]) + '\n')
    for i in range(len(SA_COOLING_FACTOR_pool)):
        outFile.write(','.join([
            str(SA_COOLING_FACTOR_pool[i]),
            str(overall_sa_fitness[i]),
            str(overall_sa_training_time[i])
        ]) + '\n')
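# list_avg above is assumed, not shown in these snippets. A minimal sketch
# consistent with how it is called: average the per-repetition lists
# element-wise, so entry i is the mean across repetitions for cooling factor i.
def list_avg(*lists):
    return [sum(vals) / float(len(vals)) for vals in zip(*lists)]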
fit = FixedIterationTrainer(rhc, it_rhc)
fit.train()
op = rhc.getOptimal()
network_rhc.setWeights(op.getData())
print "\nRHC training error:", errorRate(network_rhc, train)
print "RHC training confusion matrix:", confusionMatrix(network_rhc, train)
print "   RHC test error:", errorRate(network_rhc, test)
print "   RHC test confusion matrix:", confusionMatrix(network_rhc, test)

# learn weights with simulated annealing
network_sa = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
nnop_sa = NeuralNetworkOptimizationProblem(set, network_sa, measure)
sa = SimulatedAnnealing(1E11, 0.95, nnop_sa)
fit = FixedIterationTrainer(sa, it_sa)
fit.train()
op = sa.getOptimal()
network_sa.setWeights(op.getData())
print "\nSA training error:", errorRate(network_sa, train)
print "SA training confusion matrix:", confusionMatrix(network_sa, train)
print "   SA test error:", errorRate(network_sa, test)
print "   SA test confusion matrix:", confusionMatrix(network_sa, test)
exit()

# learn weights with genetic algorithms
network_ga = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
nnop_ga = NeuralNetworkOptimizationProblem(set, network_ga, measure)
ga = StandardGeneticAlgorithm(200, 100, 10, nnop_ga)
fit = FixedIterationTrainer(ga, it_ga)
fit.train()
op = ga.getOptimal()
start_ga = time.time()
fit_ga.train()
end_ga = time.time()

start_mimic = time.time()
fit_mimic.train()
end_mimic = time.time()

# Result handling
last_train_time_rhc = end_rhc - start_rhc
rhc_train_time[repetition].append(last_train_time_rhc)
rhc_accuracy[repetition].append(ef.value(rhc.getOptimal()))

last_train_time_sa = end_sa - start_sa
sa_train_time[repetition].append(last_train_time_sa)
sa_accuracy[repetition].append(ef.value(sa.getOptimal()))

last_train_time_ga = end_ga - start_ga
ga_train_time[repetition].append(last_train_time_ga)
ga_accuracy[repetition].append(ef.value(ga.getOptimal()))

last_train_time_mimic = end_mimic - start_mimic
mimic_train_time[repetition].append(last_train_time_mimic)
mimic_accuracy[repetition].append(ef.value(mimic.getOptimal()))

while current_iteration_count <= MAX_ITERATION - ITERATION_STEP:
    print("Computing for %d iterations" % (current_iteration_count + ITERATION_STEP))
    # Trainer declaration
    fit_rhc = FixedIterationTrainer(rhc, ITERATION_STEP)
    fit_sa = FixedIterationTrainer(sa, ITERATION_STEP)
i = 0
while (max < goal and i < timeout):
    rhc.train()
    i += 1
    max = ef.value(rhc.getOptimal())
    #print "rhc,", i, ",", max, ',', goal
print "rhc,", i, ",", max, ',', goal

# run SA
sa = SimulatedAnnealing(1E11, .95, hcp)
max = 0
i = 0
while (max < goal and i < timeout):
    sa.train()
    i += 1
    max = ef.value(sa.getOptimal())
    #print "sa,", i, ",", max, ',', goal
print "sa,", i, ",", max, ',', goal

# run GA
ga = StandardGeneticAlgorithm(200, 100, 25, gap)
max = 0
i = 0
while (max < goal and i < timeout):
    ga.train()
    i += 200
    max = ef.value(ga.getOptimal())
    #print "ga,", i, ",", max, ',', goal
print "ga,", i, ",", max, ',', goal

# run MIMIC
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

from time import time

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 600000)
t0 = time()
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal())), "time taken", time() - t0

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 600000)
t0 = time()
fit.train()
print "SA: " + str(ef.value(sa.getOptimal())), "time taken", time() - t0

ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 20000)
t0 = time()
fit.train()
print "GA: " + str(ef.value(ga.getOptimal())), "time taken", time() - t0

mimic = MIMIC(50, 10, pop)
fit = FixedIterationTrainer(mimic, 10000)
t0 = time()
fit.train()
def run_count_ones_experiments():
    OUTPUT_DIRECTORY = './output'
    N = 80
    fill = [2] * N
    ranges = array('i', fill)
    ef = CountOnesEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/count_ones_{}_log.csv'

    # Randomized Hill Climber
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iteration,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time  # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(100, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time  # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product([20], [4, 8, 16, 20], [0, 2, 4, 6]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate, gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time  # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate, to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([50], [10], [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)  # was MIMIC(samples, 20, pop), which ignored to_keep
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time  # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
def main():
    iterations = 200000
    alg = 'all'
    gaPop = 2000
    gaMate = 1500
    gaMutate = 250
    mimicSamples = 500
    mimicToKeep = 100
    saTemp = 1E12
    saCooling = .999
    gaIters = 1000
    mimicIters = 1000
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:i:",
                                   ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=",
                                    "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'travelingsalesman.py -i <iterations>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'travelingsalesman.py -i <iterations>'
            sys.exit(1)
        elif opt == '-i':
            # args come in as strings; the original compared arg < 1 directly,
            # which never fires in Python 2
            if int(arg) < 1:
                print 'Iterations must be greater than 0'
                sys.exit(2)
            iterations = int(arg)
        elif opt == '-a':
            alg = 'all'
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '--gaPop':
            if int(arg) < 1:
                print 'Population must be greater than 0'
                sys.exit(2)
            gaPop = int(arg)
        elif opt == '--gaMate':
            if int(arg) < 1:
                print 'Mating must be greater than 0'
                sys.exit(2)
            gaMate = int(arg)
        elif opt == '--gaMutate':
            if int(arg) < 1:
                print 'Mutators must be greater than 0'
                sys.exit(2)
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            if int(arg) < 1:
                print 'MIMIC samples must be greater than 0'
                sys.exit(2)
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            if int(arg) < 1:
                print 'MIMIC to keep must be greater than 0'
                sys.exit(2)
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '--gaIters':
            if int(arg) < 1:
                print 'GA Iterations must be greater than 0'
                sys.exit(2)
            gaIters = int(arg)
        elif opt == '--mimicIters':
            if int(arg) < 1:
                print 'MIMIC Iterations must be greater than 0'
                sys.exit(2)
            mimicIters = int(arg)

    vars = {
        'iterations': iterations,
        'alg': alg,
        'gaPop': gaPop,
        'gaMate': gaMate,
        'gaMutate': gaMutate,
        'mimicSamples': mimicSamples,
        'mimicToKeep': mimicToKeep,
        'saTemp': saTemp,
        'saCooling': saCooling,
        'gaIters': gaIters,
        'mimicIters': mimicIters,
        'run': run
    }

    settings = getSettings(alg, settings, vars)
    if gaPop < gaMate or gaPop < gaMutate or gaMate < gaMutate:
        pebkac({gaPop: 'total population', gaMate: 'mating population', gaMutate: 'mutating population'},
               alg, 'total population', settings)
    if mimicSamples < mimicToKeep:
        pebkac({mimicSamples: 'mimic samples', mimicToKeep: 'mimic to keep'},
               alg, 'mimic samples', settings)

    prob = 'Traveling Sales Problem'
    invDist = {}
    cities = CityList()
    N = len(cities)
    #random = Random()
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        coords = cities.getCoords(i)
        points[i][0] = coords[0]
        points[i][1] = coords[1]
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    rows = []
    if alg == 'RHC' or alg == 'all':
        print '\n----------------------------------'
        print 'Using Random Hill Climbing'
        for label, setting in settings:
            print label + ":" + str(setting)
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        path = []
        for x in range(0, N):
            path.append(rhc.getOptimal().getDiscrete(x))
        output(prob, 'RHC', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        invDist['RHC'] = ef.value(rhc.getOptimal())
        buildFooter(prob, 'RHC', rows, settings)
        outputFooter(prob, 'RHC', rows, settings)
    if alg == 'SA' or alg == 'all':
        print 'Using Simulated Annealing'
        for label, setting in settings:
            print label + ":" + str(setting)
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        path = []
        for x in range(0, N):
            path.append(sa.getOptimal().getDiscrete(x))
        output(prob, 'SA', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        invDist['SA'] = ef.value(sa.getOptimal())
        buildFooter(prob, 'SA', rows, settings)
        outputFooter(prob, 'SA', rows, settings)

    if alg == 'GA' or alg == 'all':
        print '\n----------------------------------'
        print 'Using Genetic Algorithm'
        for label, setting in settings:
            print label + ":" + str(setting)
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        path = []
        for x in range(0, N):
            path.append(ga.getOptimal().getDiscrete(x))
        output(prob, 'GA', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        invDist['GA'] = ef.value(ga.getOptimal())
        buildFooter(prob, 'GA', rows, settings)
        outputFooter(prob, 'GA', rows, settings)

    if alg == 'MIMIC' or alg == 'all':
        print '\n----------------------------------'
        print 'Using MIMIC'
        for label, setting in settings:
            print label + ":" + str(setting)
        # for mimic we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        optimal = mimic.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0, len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        output(prob, 'MIMIC', order, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        invDist['MIMIC'] = ef.value(mimic.getOptimal())
        buildFooter(prob, 'MIMIC', rows, settings)
        outputFooter(prob, 'MIMIC', rows, settings)

    maxn = max(len(key) for key in invDist)
    maxd = max(len(str(invDist[key])) for key in invDist)
    print "Results"
    for result in invDist:
        print "%-*s %s %-*s" % (len('Best Alg') + 2, result, ':', maxd, invDist[result])
    if alg == 'all':
        print "%-*s %s %-*s" % (len('Best Alg') + 2, 'Best Alg', ':', maxd,
                                max(invDist.iterkeys(), key=(lambda key: invDist[key])))
    print '----------------------------------'
for _ in range(runs):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    fitness = fit.train()
    results.append(ef.value(rhc.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "RHC, average results , " + str(sum(results) / float(runs)) + ", countones-%d.txt" % N
print "RHC, average feval calls , " + str(sum(calls) / float(runs)) + ", countones-%d.txt" % N
t1 = time.time() - t0
print "RHC, average time , " + str(float(t1) / runs) + ", countones-%d.txt" % N

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    sa = SimulatedAnnealing(1e10, .95, hcp)
    fit = FixedIterationTrainer(sa, iters)
    fitness = fit.train()
    results.append(ef.value(sa.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "SA, average results , " + str(sum(results) / float(runs)) + ", countones-%d.txt" % N
print "SA, average feval calls , " + str(sum(calls) / float(runs)) + ", countones-%d.txt" % N
t1 = time.time() - t0
print "SA, average time , " + str(t1 / float(runs)) + ", countones-%d.txt" % N
def fourpeaksfunc(N, iterations):
    rhcMult = 200
    saMult = 200
    gaMult = 2
    mimicMult = 1
    optimalOut = []
    timeOut = []
    evalsOut = []
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)
    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for niter in iterations:
        iterOptimalOut = [N, niter]
        iterTimeOut = [N, niter]
        iterEvals = [N, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(1E20, .8, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 100, 10, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        mimic = MIMIC(200, 20, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# rhc = RandomizedHillClimbing(hcp)
# fit = FixedIterationTrainer(rhc, num_iterations)
# fit.train()

start = time.time()
sa = SimulatedAnnealing(param[0], param[1], hcp)
fit = FixedIterationTrainer(sa, num_iterations)
fit.train()
end = time.time()
value = str(ef.value(sa.getOptimal()))
results = {
    'num_iterations': num_iterations,
    'value': value,
    'time': end - start
}
print 'SA', param, results
writer.writerow(results)

# ga = StandardGeneticAlgorithm(param[0], param[1], param[2], gap)
# fit = FixedIterationTrainer(ga, num_iterations)
# fit.train()
# print "GA: " + str(ef.value(ga.getOptimal()))
#
# mimic = MIMIC(200, 100, pop)
# fit = FixedIterationTrainer(mimic, 1000)
    max_fit = ef.value(rhc.getOptimal())
    time_optimum = [total_time, max_fit]
    hill_climbing.append(time_optimum)
    print "RHC Optimum: " + str(ef.value(rhc.getOptimal()))

# ANNEALING
for i in range(trials):
    sa = SimulatedAnnealing(1E12, .999, hcp)
    fit = FixedIterationTrainer(sa, 200000)
    start = clock()
    fit.train()
    end = clock()
    total_time = end - start
    max_fit = ef.value(sa.getOptimal())
    time_optimum = [total_time, max_fit]
    annealing.append(time_optimum)
    print("SA Optimum: " + str(ef.value(sa.getOptimal())))

# GENETIC ALGO
for i in range(trials):
    ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
    fit = FixedIterationTrainer(ga, 1000)
    start = clock()
    fit.train()
    end = clock()
    total_time = end - start
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(rhc.getOptimal().getDiscrete(x))
print path

sa = SimulatedAnnealing(1e12, 0.999, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(sa.getOptimal().getDiscrete(x))
print path

ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(ga.getOptimal().getDiscrete(x))
print "RHC: " + str(ef.value(rhc.getOptimal())), "time taken", time() - t0, "Iterations:", iters f.write("starting SA\n") sa = SimulatedAnnealing(1E13, .95, hill_climbing_problem) t0 = time() iters = 0 score = 0 while iters < 80000: score = sa.train() f.write(str(iters) + "," + str(score) + "\n") iters += 1 print "SA: " + str(ef.value(sa.getOptimal())), "time taken", time() - t0, "Iterations", iters ga = StandardGeneticAlgorithm(200, 100, 10, genetic_problem) t0 = time() iters = 0 score = 0 f.write("starting GA\n") while iters < 5000: ga.train() score = ef.value(ga.getOptimal()) f.write(str(iters) + "," + str(score) +"\n") iters += 1 print "GA: " + str(ef.value(ga.getOptimal())), "time taken", time() - t0, "Iterations", iters
    ef.clearCount()
print "RHC, average results , " + str(sum(results) / float(runs))
print "RHC, average feval calls , " + str(sum(calls) / float(runs))
t1 = time.time() - t0
print "RHC, average time , " + str(float(t1) / runs)

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, iters)
    fitness = fit.train()
    results.append(ef.value(sa.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "SA95, average results , " + str(sum(results) / float(runs))
print "SA95, average feval calls , " + str(sum(calls) / float(runs))
t1 = time.time() - t0
print "SA95, average time , " + str(t1 / float(runs))

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    sa = SimulatedAnnealing(1E11, .8, hcp)
    fit = FixedIterationTrainer(sa, iters)
    fitness = fit.train()
def run_knapsack():
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies of each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            #print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(100, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            #print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 150, 25, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))  # was sa.getOptimal(), a copy-paste bug
            ga_times.append(end - start)
            #print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            #print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('knapsack.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, iteration)
start = time.time()
fit.train()
end = time.time()
rhc_times.append(end - start)
rhc_acc.append(ef.value(rhc.getOptimal()))
print "RHC: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, iteration)
start = time.time()
fit.train()
end = time.time()
sa_times.append(end - start)
sa_acc.append(ef.value(sa.getOptimal()))
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, iteration)
start = time.time()
fit.train()
end = time.time()
ga_times.append(end - start)
ga_acc.append(ef.value(ga.getOptimal()))
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 20, pop)
fit = FixedIterationTrainer(mimic, iteration)
start = time.time()
fit.train()
print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal())) print "Route:" path = [] for x in range(0, N): path.append(rhc.getOptimal().getDiscrete(x)) print path # ANNEALING for i in range(trials): sa = SimulatedAnnealing(1E12, .999, hcp) fit = FixedIterationTrainer(sa, 200000) start = clock() fit.train() end = clock() total_time = end - start max_fit = ef.value(sa.getOptimal()) time_optimum = [total_time, max_fit] annealing.append(time_optimum) print("SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))) print("Route:") path = [] for x in range(0, N): path.append(sa.getOptimal().getDiscrete(x)) print(path) # GENETIC ALGO for i in range(trials): ga = StandardGeneticAlgorithm(2000, 1500, 250, gap) fit = FixedIterationTrainer(ga, 1000) start = clock() fit.train()
def main():
    N = 200
    tempDenom = 5
    T = N / tempDenom
    fill = [2] * N
    ranges = array('i', fill)
    iterations = 2000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 100
    gaMutate = 10
    mimicSamples = 200
    mimicToKeep = 20
    saTemp = 1E11
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahn:rsgN:m:t:i:",
                                   ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=",
                                    "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            N = int(arg)
        elif opt == '-t':
            T = float(arg)
        elif opt == '-d':
            tempDenom = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)
        elif opt == '-n':
            run = int(arg)

    vars = {
        'N': N,
        'tempDenom': tempDenom,
        'T': T,
        'fill': fill,
        'ranges': ranges,
        'iterations': iterations,
        'gaIters': gaIters,
        'mimicIters': mimicIters,
        'gaPop': gaPop,
        'gaMate': gaMate,
        'gaMutate': gaMutate,
        'mimicSamples': mimicSamples,
        'mimicToKeep': mimicToKeep,
        'saTemp': saTemp,
        'saCooling': saCooling,
        'alg': alg,
        'run': run
    }
    settings = getSettings(alg, settings, vars)

    T = N / tempDenom
    fill = [2] * N
    ranges = array('i', fill)
    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        output2('4Peaks', 'RHC', rows, settings)
        rows = []
        buildFooter("4Peaks", "RHC", rows, settings)  # stray trailing comma removed
        outputFooter("4Peaks", "RHC", rows, settings)

    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('4Peaks', 'SA', rows, settings)
        rows = []
        buildFooter("4Peaks", "SA", rows, settings)
        outputFooter("4Peaks", "SA", rows, settings)

    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        print "GA: " + str(ef.value(ga.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'GA', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
outputFooter("4Peaks", "GA", rows , settings) if alg == 'MIMIC' or alg == 'all': mimic = MIMIC(mimicSamples, mimicToKeep, pop) fit = FixedIterationTrainer(mimic, mimicIters) fit.train() print "MIMIC: " + str(ef.value(mimic.getOptimal())) rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(mimic.getOptimal())) rows.append(row) output2('4Peaks', 'MIMIC', rows, settings) rows = [] buildFooter("4Peaks", "GA", rows, settings) outputFooter("4Peaks", "MIMIC", rows, settings)
def run_traveling_salesman():
    # set N value. This is the number of points
    N = 50
    random = Random()
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(rhc.getOptimal().getDiscrete(x))
            # print path

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(1E12, .999, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()  # was missing, so SA rows reused the last RHC timestamp
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(sa.getOptimal().getDiscrete(x))
            # print path

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(ga.getOptimal().getDiscrete(x))
            # print path

    # for mimic we use a sort encoding
    ef = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(500, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))
            # print "Route:"
            # path = []
            # optimal = mimic.getOptimal()
            # fill = [0] * optimal.size()
            # ddata = array('d', fill)
            # for i in range(0,len(ddata)):
            #     ddata[i] = optimal.getContinuous(i)
            # order = ABAGAILArrays.indices(optimal.size())
            # ABAGAILArrays.quicksort(ddata, order)
            # print order
            mimic_times.append(end - start)

    with open('travelingsalesman.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
start = time.time()
fit.train()
end = time.time()
training_time = end - start
print "RHC: " + str(ef.value(rhc.getOptimal()))
OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "RHC")
with open(OUTFILE, 'a+') as f:
    f.write("%d,%f,%f\n" % (N, training_time, ef.value(rhc.getOptimal())))

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 100)
start = time.time()
fit.train()
end = time.time()
training_time = end - start
print "SA: " + str(ef.value(sa.getOptimal()))
OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "SA")
with open(OUTFILE, 'a+') as f:
    f.write("%d,%f,%f\n" % (N, training_time, ef.value(sa.getOptimal())))

ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 10)
start = time.time()
fit.train()
end = time.time()
training_time = end - start
print "GA: " + str(ef.value(ga.getOptimal()))
OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "GA")
with open(OUTFILE, 'a+') as f:
    f.write("%d,%f,%f\n" % (N, training_time, ef.value(ga.getOptimal())))
def main():
    # The number of items
    NUM_ITEMS = 40
    # The number of copies of each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    iterations = 20000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 150
    gaMutate = 25
    mimicSamples = 200
    mimicToKeep = 100
    saTemp = 100
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:c:w:v:i:",
                                   ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=",
                                    "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            NUM_ITEMS = int(arg)
        elif opt == '-c':
            COPIES_EACH = int(arg)
        elif opt == '-w':
            MAX_WEIGHT = int(arg)
        elif opt == '-v':
            MAX_VOLUME = int(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)

    vars = {
        'NUM_ITEMS': NUM_ITEMS,
        'COPIES_EACH': COPIES_EACH,
        'MAX_WEIGHT': MAX_WEIGHT,
        'MAX_VOLUME': MAX_VOLUME,
        'iterations': iterations,
        'gaIters': gaIters,
        'mimicIters': mimicIters,
        'gaPop': gaPop,
        'gaMate': gaMate,
        'gaMutate': gaMutate,
        'mimicSamples': mimicSamples,
        'mimicToKeep': mimicToKeep,
        'saTemp': saTemp,
        'saCooling': saCooling,
        'alg': alg,
        'run': run
    }
    settings = getSettings(alg, settings, vars)

    # Random number generator
    random = Random()
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(str(ef.value(rhc.getOptimal())))
        rows.append(row)
        output2('Knapsack', 'RHC', rows, settings)
        rows = []
        buildFooter("Knapsack", "RHC", rows, settings)
        outputFooter("Knapsack", "RHC", rows, settings)

    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('Knapsack', 'SA', rows, settings)
        rows = []
        buildFooter("Knapsack", "SA", rows, settings)
        outputFooter("Knapsack", "SA", rows, settings)

    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        print "GA: " + str(ef.value(ga.getOptimal()))
        output2('Knapsack', 'GA', rows, settings)
        buildFooter("Knapsack", "GA", rows, settings)
        outputFooter("Knapsack", "GA", rows, settings)

    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('Knapsack', 'MIMIC', rows, settings)
        rows = []
        buildFooter("Knapsack", "MIMIC", rows, settings)
        outputFooter("Knapsack", "MIMIC", rows, settings)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
rhc_opt = ef.value(rhc.getOptimal())
print("RHC: " + str(rhc_opt))
# print "RHC: Board Position: "
# print(ef.boardPositions())
print("============================")

sa = SimulatedAnnealing(1E1, .1, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
sa_opt = ef.value(sa.getOptimal())
print("SA: " + str(sa_opt))
# print("SA: Board Position: ")
# print(ef.boardPositions())
print("============================")

ga = StandardGeneticAlgorithm(200, 0, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
ga_opt = ef.value(ga.getOptimal())
print("GA: " + str(ga_opt))
# print("GA: Board Position: ")
# print(ef.boardPositions())
print("============================")
ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 20, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(ef.value(rhc.getOptimal())) + ", Dur: " + str(dur)

print "Simulated Annealing"
temp = 100000
cooling_rate = 0.85
sa = SimulatedAnnealing(temp, cooling_rate, hcp)  # was a literal 0.85, ignoring cooling_rate
for iters in iters_list:
    fit = FixedIterationTrainer(sa, iters)
    start = time.time()
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(ef.value(sa.getOptimal())) + ", Dur: " + str(dur)

print "Genetic Algorithm"
ga = StandardGeneticAlgorithm(2 * N, 300, 100, gap)
for iters in iters_list:
    fit = FixedIterationTrainer(ga, iters)
    start = time.time()
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(ef.value(ga.getOptimal())) + ", Dur: " + str(dur)

print "MIMIC"
# the number of samples to take each iteration
# the number of samples to keep
mimic = MIMIC(250, 25, pop)
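# For context on the temp/cooling_rate knobs above: ABAGAIL's SimulatedAnnealing
# cools geometrically (t <- t * cooling each iteration) and accepts a worse
# neighbor with Metropolis probability. A minimal sketch of that acceptance
# rule under those assumptions (not ABAGAIL's source):
import math
import random

def accept_neighbor(delta, t):
    # delta = neighbor fitness - current fitness; improvements always pass,
    # and worse moves pass with probability exp(delta / t), which shrinks as
    # the temperature t decays
    return delta >= 0 or random.random() < math.exp(delta / t)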
#""" #======================= # Simulated Annealing #======================= print "Starting Simulated Annealing Seacrh..." sa = SimulatedAnnealing(SA_start_temp, SA_temp_decay, hcp) sa_iters = [] sa_fitness = [] sa_time = [] for i in maxiters_sa: fit = FixedIterationTrainer(sa, i) t1=time.time() fit.train() t2=time.time() fitness = ef.value(sa.getOptimal()) time_ms=round(1000*(t2-t1),2) sa_fitness.append(fitness) sa_time.append(time_ms) sa_iters.append(i) print "SA fitness using "+ str(i)+" fixed iterations: " + str(fitness) print "Time taken for SA using fixed iterations: "+str(time_ms)+" milliseconds" print "Finished Simulated Annealing Seacrh." print "="*100 #""" """ #======================= # Genetic Algorithm #=======================
    fit = FixedIterationTrainer(rhc, iterations)
    fit.train()
    print(str(ef.value(rhc.getOptimal())))
    end = time.time()
    times += "\n%0.03f" % (end - start)
print(times)

times = ""
print "SA:"
for x in range(20):
    start = time.time()
    iterations = (x + 1) * 2500
    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, iterations)
    fit.train()
    print(str(ef.value(sa.getOptimal())))
    end = time.time()
    times += "\n%0.03f" % (end - start)
print(times)

times = ""
print "GA:"
for x in range(20):
    start = time.time()
    iterations = (x + 1) * 2500
    ga = StandardGeneticAlgorithm(200, 100, 10, gap)
    fit = FixedIterationTrainer(ga, iterations)
    fit.train()
    print(str(ef.value(ga.getOptimal())))
    end = time.time()
    times += "\n%0.03f" % (end - start)
def travelingsalesmanfunc(N, iterations):
    rhcMult = 1500
    saMult = 1500
    gaMult = 1
    mimicMult = 3

    random = Random()
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:
        ef = TravelingSalesmanRouteEvaluationFunction(points)
        odd = DiscretePermutationDistribution(N)
        nf = SwapNeighbor()
        mf = SwapMutation()
        cf = TravelingSalesmanCrossOver(ef)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

        iterOptimalOut = [N, niter]
        iterTimeOut = [N, niter]
        iterEvals = [N, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC Inverse of Distance: optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(rhc.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(1E12, .999, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA Inverse of Distance optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(sa.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA Inverse of Distance optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(ga.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        # for mimic we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        start = time.time()
        mimic = MIMIC(500, 100, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC Inverse of Distance optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        print "Route:"
        optimal = mimic.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0, len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        print order
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]