# create range
fill = [COPIES_EACH + 1] * NUM_ITEMS
ranges = array('i', fill)

numTrials = 1
maxIters = 5000
outfile = './../logs/KS_@ALG@_@N@_LOG.csv'

ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# MIMIC: sweep the dependency-tree prior m with samples/keep held fixed
for t in range(numTrials):
    for samples, keep, m in product([100], [50], [0.1, 0.3, 0.5, 0.7, 0.9]):
        fname = outfile.replace('@ALG@', 'MIMIC{}_{}_{}'.format(samples, keep, m)).replace('@N@', str(t + 1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time\n')
        df = DiscreteDependencyTree(m, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(samples, keep, pop)
        fit = FixedIterationTrainer(mimic, 10)
        times = [0]
        for i in range(0, maxIters, 10):
            start = clock()
            fit.train()
            # The original snippet is truncated here. A plausible completion,
            # matching the 'iterations,fitness,time' header written above:
            elapsed = clock() - start
            times.append(times[-1] + elapsed)
            score = ef.value(mimic.getOptimal())
            with open(fname, 'a') as f:
                f.write('{},{},{}\n'.format(i + 10, score, times[-1]))
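# The snippets in this section assume a Jython preamble along these lines;
# a minimal sketch, with the jar path as a placeholder and package paths
# following the standard ABAGAIL layout:
import sys
import os
import time
from time import clock
import itertools
from itertools import product
import csv
import math
from array import array

sys.path.append('./ABAGAIL.jar')  # adjust to your checkout

from java.util import Random
from dist import (DiscreteDependencyTree, DiscreteUniformDistribution,
                  DiscretePermutationDistribution)
from opt import (DiscreteChangeOneNeighbor, SwapNeighbor,
                 GenericHillClimbingProblem, RandomizedHillClimbing,
                 SimulatedAnnealing)
from opt.example import (FourPeaksEvaluationFunction, FlipFlopEvaluationFunction,
                         CountOnesEvaluationFunction, KnapsackEvaluationFunction,
                         TravelingSalesmanRouteEvaluationFunction,
                         TravelingSalesmanSortEvaluationFunction,
                         TravelingSalesmanCrossOver)
from opt.ga import (DiscreteChangeOneMutation, SwapMutation, SingleCrossOver,
                    UniformCrossOver, GenericGeneticAlgorithmProblem,
                    StandardGeneticAlgorithm)
from opt.prob import GenericProbabilisticOptimizationProblem, MIMIC
from shared import FixedIterationTrainer
from util import ABAGAILArrays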
def run_four_peaks_experiments():
    OUTPUT_DIRECTORY = './output'
    if not os.path.exists(OUTPUT_DIRECTORY):
        os.makedirs(OUTPUT_DIRECTORY)

    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/four_peaks_{}_log.csv'

    # Randomized Hill Climbing
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iterations,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,elapsed_time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(1E11, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product(
            [200], [25, 50, 75, 100], [10, 20, 30, 40]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate, gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate,
                                                to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([200], [20], [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score,
                                                elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
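# A minimal analysis sketch (an addition, not part of the experiment script):
# the logs above are written by Jython but are plain CSV, so they can be
# aggregated from CPython. Assumes pandas is installed; the column names come
# from the headers written by run_four_peaks_experiments.
import pandas as pd

sa_log = pd.read_csv('./output/four_peaks_sa_log.csv')
# best fitness reached per cooling schedule
print(sa_log.groupby('cooling_value')['fitness'].max())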
def travelingsalesmanfunc(N, iterations):
    rhcMult = 1500
    saMult = 1500
    gaMult = 1
    mimicMult = 3

    random = Random()
    # N random points in the unit square
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:
        ef = TravelingSalesmanRouteEvaluationFunction(points)
        odd = DiscretePermutationDistribution(N)
        nf = SwapNeighbor()
        mf = SwapMutation()
        cf = TravelingSalesmanCrossOver(ef)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

        iterOptimalOut = [N, niter]
        iterTimeOut = [N, niter]
        iterEvals = [N, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC Inverse of Distance optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(rhc.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(1E12, .999, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA Inverse of Distance optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(sa.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA Inverse of Distance optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(ga.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        # for MIMIC we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

        start = time.time()
        mimic = MIMIC(500, 100, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC Inverse of Distance optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        print "Route:"
        # decode the sort encoding: sort the keys, then report the index order
        optimal = mimic.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0, len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        print order
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
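# A hedged usage sketch for travelingsalesmanfunc: the city count (50) and
# the iteration schedule below are illustrative values, not from the
# original runs.
if __name__ == '__main__':
    optimal, times, evals = travelingsalesmanfunc(50, [10, 20, 30])
    # each row is [N, niter, RHC, SA, GA, MIMIC] (fitness = inverse distance)
    for row in optimal:
        print row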
def run_all_2(N=200, T=40, fout=None):
    problem = 'flipflop'
    maxEpochs = 10 ** 5
    maxTime = 300  # 5 minutes
    fill = [2] * N
    ranges = array('i', fill)

    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    def run_algo(alg, fit, label, difficulty, iters):
        trainTimes = [0.]
        trainTime = []
        scoreChange = [0.]
        stuckCount = 10 ** 3
        prev = 0.
        for epoch in range(0, maxEpochs, 1):
            st = time.clock()
            fit.train()
            et = time.clock()
            trainTimes.append(trainTimes[-1] + (et - st))
            trainTime.append(et - st)
            rollingMean = 10
            avgTime = math.fsum(trainTime[-rollingMean:]) / float(rollingMean)
            score = ef.value(alg.getOptimal())
            trialData = [problem, difficulty, label, score, epoch,
                         trainTimes[-1], avgTime, iters]
            print(trialData)
            # stop once the known optimum (N - 1 for flip flop) is reached
            optimum = difficulty - 1
            if score >= optimum:
                break
            # stop if the score has not moved in the last stuckCount epochs
            scoreChange.append(abs(score - prev))
            prev = score
            scoreChange = scoreChange[-stuckCount:]
            if max(scoreChange) == 0:
                break
            # stop on the wall-clock budget
            if trainTimes[-1] > maxTime:
                break
        fout.writerow(trialData)

    iters = 10
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc, fit, 'RHC', N, iters)

    iters = 10
    startTemp = 1E10
    coolingFactor = .99
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa, fit, 'SA', N, iters)

    iters = 10
    population = 300
    mates = 40
    mutations = 10
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga, fit, 'GA', N, iters)

    iters = 10
    samples = 200
    keep = 5
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic, fit, 'MIMIC', N, iters)
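# run_all_2 expects fout to be a csv writer. A minimal wiring sketch; the
# file name and header row are assumptions inferred from trialData's
# [problem, difficulty, label, score, epoch, time, avg_time, iters] layout.
if __name__ == '__main__':
    with open('flipflop_results.csv', 'wb') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['problem', 'difficulty', 'algorithm', 'score',
                         'epoch', 'total_time', 'avg_epoch_time', 'iters'])
        run_all_2(N=200, fout=writer)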
def run_four_peaks():
    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(1E11, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 100, 10, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    # MIMIC is far slower per iteration, so only the six smallest budgets run
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 20, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('four_peaks.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return (rhc_results, rhc_times, sa_results, sa_times,
            ga_results, ga_times, mimic_results, mimic_times)
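# The flat lists returned by run_four_peaks hold num_repeats consecutive
# entries per iteration budget. A small averaging helper (an assumed
# convenience, not part of the original script):
def mean_per_budget(results, num_repeats=5):
    # average each consecutive block of num_repeats repeats
    return [sum(results[i:i + num_repeats]) / float(num_repeats)
            for i in range(0, len(results), num_repeats)]

# e.g. mean_per_budget(rhc_results) lines up index-for-index with iters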
def run_knapsack_experiments():
    OUTPUT_DIRECTORY = './output'
    if not os.path.exists(OUTPUT_DIRECTORY):
        os.makedirs(OUTPUT_DIRECTORY)

    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies of each item
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/knapsack_{}_log.csv'

    # Randomized Hill Climbing
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iterations,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(200, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product(
            [200], [110, 120, 130, 140, 150], [2, 4, 6, 8]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate, gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate,
                                                to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([200], [100], [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score,
                                                elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
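# Entry point; a minimal sketch assuming this script is run directly under
# Jython. Note the knapsack SA sweep above starts at temperature 200, far
# lower than the 1E11 used for four peaks.
if __name__ == '__main__':
    run_knapsack_experiments()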
fill = [2] * N
ranges = array('i', fill)
ef = FourPeaksEvaluationFunction(T)
initial_distribution = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mutation_function = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hill_climbing_problem = GenericHillClimbingProblem(ef, initial_distribution, nf)
genetic_problem = GenericGeneticAlgorithmProblem(ef, initial_distribution,
                                                 mutation_function, cf)
probablistic_optimization = GenericProbabilisticOptimizationProblem(
    ef, initial_distribution, df)

from time import time

f = open("experiments/results/fourpeaks_optimal_1000.txt", "w")
f.write("starting RHC\n")
rhc = RandomizedHillClimbing(hill_climbing_problem)
score = 0
iters = 0
t0 = time()
while iters < 60000:
    # train() runs one step and returns the fitness of the current candidate
    score = rhc.train()
    f.write(str(iters) + "," + str(score) + "\n")
    iters += 1
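# A hedged continuation in the same per-iteration style for simulated
# annealing: the temperature (1E11) and cooling rate (.95) are assumed
# values, and the trace is appended to the same file opened above.
f.write("starting SA\n")
sa = SimulatedAnnealing(1E11, .95, hill_climbing_problem)
iters = 0
while iters < 60000:
    score = sa.train()  # one step; returns the current fitness
    f.write(str(iters) + "," + str(score) + "\n")
    iters += 1
f.close()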
def run_algorithm_test(ranges, algorithms, output_file_name, trial_number,
                       iterations=False):
    with open(output_file_name, 'w') as f:
        f.write('algorithm,optimal_result,iterations,time,trial\n')

    ef = CountOnesEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for trial in range(trial_number):
        if iterations is False:
            for item in algorithms:
                start_time = time.time()
                if item == 'rhc':
                    optimal_result, run_iters = run_rhc(hcp, ef)
                elif item == 'sa':
                    optimal_result, run_iters = run_sa(hcp, ef)
                elif item == 'ga':
                    optimal_result, run_iters = run_ga(gap, ef)
                elif item == 'mimic':
                    optimal_result, run_iters = run_mimic(pop, ef)
                else:
                    print "The algorithm type {} is not supported.".format(item)
                    continue
                end_time = time.time()
                time_elapsed = end_time - start_time
                run_output = '{},{},{},{},{}\n'.format(
                    item, optimal_result, run_iters, time_elapsed, trial)
                with open(output_file_name, 'a') as f:
                    f.write(run_output)
        else:
            for num_iters in iterations:
                for item in algorithms:
                    start_time = time.time()
                    if item == 'rhc':
                        optimal_result, run_iters = run_rhc(hcp, ef, num_iters)
                    elif item == 'sa':
                        optimal_result, run_iters = run_sa(hcp, ef, num_iters)
                    elif item == 'ga':
                        optimal_result, run_iters = run_ga(gap, ef, num_iters)
                    elif item == 'mimic':
                        optimal_result, run_iters = run_mimic(pop, ef, num_iters)
                    else:
                        print "The algorithm type {} is not supported.".format(item)
                        continue
                    end_time = time.time()
                    time_elapsed = end_time - start_time
                    run_output = '{},{},{},{},{}\n'.format(
                        item, optimal_result, run_iters, time_elapsed, trial)
                    with open(output_file_name, 'a') as f:
                        f.write(run_output)
                    print "time elapsed is {}".format(time_elapsed)
    return
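# A hedged usage sketch for run_algorithm_test on count ones: N and the
# iteration schedule are illustrative, and run_rhc/run_sa/run_ga/run_mimic
# are helpers defined elsewhere in this repo.
if __name__ == '__main__':
    N = 80
    bit_ranges = array('i', [2] * N)
    run_algorithm_test(bit_ranges, ['rhc', 'sa', 'ga', 'mimic'],
                       'count_ones_log.csv', trial_number=5,
                       iterations=[100, 500, 1000])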
ef = TravelingSalesmanRouteEvaluationFunction(points)
odd = DiscretePermutationDistribution(N)
nf = SwapNeighbor()
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

# for MIMIC we use a sort encoding
efm = TravelingSalesmanSortEvaluationFunction(points)
fill = [N] * N
ranges = array('i', fill)
oddm = DiscreteUniformDistribution(ranges)
dfm = DiscreteDependencyTree(.1, ranges)
pop = GenericProbabilisticOptimizationProblem(efm, oddm, dfm)


def merge_two_dicts(x, y):
    z = x.copy()   # start with x's keys and values
    z.update(y)    # modify z with y's keys and values
    return z


def rhc_fac(args={}):
    constant_params = {'hcp': hcp}
    params = merge_two_dicts(args, constant_params)
    print(params)
    rhc = RandomizedHillClimbing(hcp)
    return FixedIterationTrainer(rhc, num_iterations)


def sa_fac(args={}):
    constant_params = {'hcp': hcp}
    # The original snippet is truncated here. A plausible completion,
    # mirroring rhc_fac; the temperature and cooling rate are assumed values:
    params = merge_two_dicts(args, constant_params)
    print(params)
    sa = SimulatedAnnealing(1E11, .95, hcp)
    return FixedIterationTrainer(sa, num_iterations)
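# A hedged example of consuming the factories above. num_iterations is the
# module-level budget that rhc_fac/sa_fac close over; its value here is an
# assumption.
num_iterations = 1000
for label, fac in [('RHC', rhc_fac), ('SA', sa_fac)]:
    trainer = fac()
    trainer.train()
    print label + ' trained for ' + str(num_iterations) + ' iterations'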
def knapsackfunc(NUM_ITEMS, iterations):
    rhcMult = 600
    saMult = 600
    gaMult = 4
    mimicMult = 3

    # Random number generator
    random = Random()
    # The number of copies of each item
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:
        iterOptimalOut = [NUM_ITEMS, niter]
        iterTimeOut = [NUM_ITEMS, niter]
        iterEvals = [NUM_ITEMS, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(100, .95, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        mimic = MIMIC(200, 100, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
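# A hedged driver for knapsackfunc; the item count and the iteration
# schedule are illustrative values only.
if __name__ == '__main__':
    optimal, times, evals = knapsackfunc(40, [1, 2, 5, 10])
    # each row is [NUM_ITEMS, niter, RHC, SA, GA, MIMIC]
    for row in optimal:
        print row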