def run_rhc(t):
    """Run one RHC trial on Continuous Peaks.

    Appends one `iterations,fitness,time,fevals` row per 10 training
    iterations to the trial's output file (header written once).
    """
    fname = outfile.format('RHC', str(t + 1))
    with open(fname, 'a+') as f:
        # Bug fix: the read position after opening with 'a+' is
        # platform-dependent (often EOF), so f.read() could return ''
        # and the header would be re-written on every call. seek(0)
        # makes the header-presence check reliable.
        f.seek(0)
        content = f.read()
        if "fitness" not in content:
            f.write('iterations,fitness,time,fevals\n')
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]  # cumulative wall-clock time per checkpoint
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        ef.fevals -= 1  # don't count the reporting evaluation above
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print fname, st
        base.write_to_file(fname, st)
    return
def run_sa(t, CE):
    """Run one Simulated Annealing trial on Flip Flop.

    Args:
        t: trial index (0-based; file name uses t + 1).
        CE: cooling exponent passed to SimulatedAnnealing.
    """
    fname = outfile.format('SA{}'.format(CE), str(t + 1))
    with open(fname, 'a+') as f:
        # Bug fix: 'a+' read position is platform-dependent (often EOF),
        # so the header check could see '' and duplicate the header.
        f.seek(0)
        content = f.read()
        if "fitness" not in content:
            f.write('iterations,fitness,time,fevals\n')
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    sa = SimulatedAnnealing(1E10, CE, hcp)
    fit = FixedIterationTrainer(sa, 10)
    times = [0]  # cumulative wall-clock time per checkpoint
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(sa.getOptimal())
        ef.fevals -= 1  # don't count the reporting evaluation above
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
def run_mimic(t, samples, keep, m):
    """Run one MIMIC trial on the Traveling Salesman route problem.

    Args:
        t: trial index (0-based; file name uses t + 1).
        samples: number of samples per MIMIC generation.
        keep: number of samples kept per generation.
        m: dependency-tree smoothing parameter.
    """
    fill = [N] * N
    ranges = array('i', fill)
    # Bug fix: the evaluation function was constructed twice; build it once.
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscreteUniformDistribution(ranges)
    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m), str(t + 1))
    base.write_header(fname)
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]  # cumulative wall-clock time per checkpoint
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1  # don't count the reporting evaluation above
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
def run_mimic(t, samples, keep, m):
    """Run one MIMIC trial on Continuous Peaks.

    Args:
        t: trial index (0-based; file name uses t + 1).
        samples: number of samples per MIMIC generation.
        keep: number of samples kept per generation.
        m: dependency-tree smoothing parameter.

    NOTE(review): unlike the sibling runners, no header is written to
    fname here — confirm whether base.write_header(fname) was intended.
    """
    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m), str(t + 1))
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    # Cleanup: removed unused neighbor/mutation/crossover objects and the
    # GA problem (nf, mf, cf, gap) — MIMIC only needs the dependency tree.
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]  # cumulative wall-clock time per checkpoint
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1  # don't count the reporting evaluation above
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
def run_ga(t, pop, mate, mutate):
    """Run one Standard GA trial on Flip Flop.

    Args:
        t: trial index (0-based; file name uses t + 1).
        pop: GA population size.
        mate: number of matings per generation.
        mutate: number of mutations per generation.
    """
    fname = outfile.format('GA{}_{}_{}'.format(pop, mate, mutate), str(t + 1))
    with open(fname, 'a+') as f:
        # Bug fix: 'a+' read position is platform-dependent (often EOF),
        # so the header check could see '' and duplicate the header.
        f.seek(0)
        content = f.read()
        if "fitness" not in content:
            f.write('iterations,fitness,time,fevals\n')
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    # Cleanup: removed unused DiscreteChangeOneNeighbor (GA uses mf/cf only).
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    ga = StandardGeneticAlgorithm(pop, mate, mutate, gap)
    fit = FixedIterationTrainer(ga, 10)
    times = [0]  # cumulative wall-clock time per checkpoint
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(ga.getOptimal())
        ef.fevals -= 1  # don't count the reporting evaluation above
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
def test_iter(algorithm, arguments, no_loops, no_iter):
    """Instantiate and train `algorithm` `no_loops` times for `no_iter`
    iterations each, returning the list of best fitness values found
    (evaluated with the module-level `ef`)."""
    scores = []
    for _ in range(no_loops):
        instance = algorithm(*arguments)
        trainer = FixedIterationTrainer(instance, no_iter)
        trainer.train()
        scores.append(ef.value(instance.getOptimal()))
    return scores
def run_mimic(pop, ef, iterations=1000): mimic = MIMIC(200, 20, pop) fit = FixedIterationTrainer(mimic, iterations) fit.train() optimal_result = str(ef.value(mimic.getOptimal())) print "MIMIC: " + optimal_result return optimal_result, iterations
def run_sa(hcp, ef, iterations=200000): sa = SimulatedAnnealing(1E11, .95, hcp) fit = FixedIterationTrainer(sa, iterations) fit.train() optimal_result = str(ef.value(sa.getOptimal())) print "SA: " + optimal_result return optimal_result, iterations
def eval_algo(ef, algo, fixed_iter):
    """Train `algo` for `fixed_iter` iterations and return
    (best_fitness, call_count, runtime_seconds).

    NOTE(review): call_count is always 0 — no evaluation counter is read
    from `ef` here; confirm whether fevals tracking was intended.
    """
    trainer = FixedIterationTrainer(algo, fixed_iter)
    t0 = time.time()
    trainer.train()
    t1 = time.time()
    best_fitness = ef.value(algo.getOptimal())
    return best_fitness, 0, t1 - t0
def run_ga(gap, ef, iterations=1000): ga = StandardGeneticAlgorithm(200, 100, 10, gap) fit = FixedIterationTrainer(ga, iterations) fit.train() optimal_result = str(ef.value(ga.getOptimal())) print "GA: " + optimal_result return optimal_result, iterations
def run_rhc(hcp, ef, iterations=200000): rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, iterations) fit.train() optimal_result = str(ef.value(rhc.getOptimal())) print "RHC: " + optimal_result return optimal_result, iterations
def run_four_peaks_exploringSA(): N=200 T=N/5 fill = [2] * N ranges = array('i', fill) ef = FourPeaksEvaluationFunction(T) odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = SingleCrossOver() df = DiscreteDependencyTree(.1, ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 30000, 35000, 40000, 45000, 50000] num_repeats = 5 all_sa_results = [] all_sa_times = [] coolings = [0.15, 0.35, 0.55, 0.75, 0.95] for cooling in coolings: sa_results = [] sa_times = [] for i in iters: print(i) for j in range(num_repeats): start = time.time() sa = SimulatedAnnealing(1E11, cooling, hcp) fit = FixedIterationTrainer(sa, i) fit.train() end = time.time() sa_results.append(ef.value(sa.getOptimal())) sa_times.append(end - start) print "SA cooling " + str(cooling) + ": " + str(ef.value(sa.getOptimal())) all_sa_results.append(sa_results) all_sa_results.append(sa_times) with open('four_peaks_exploringSA.csv', 'w') as csvfile: writer = csv.writer(csvfile) for sa_results in all_sa_results: writer.writerow(sa_results) for sa_times in all_sa_times: writer.writerow(sa_times) return all_sa_results, all_sa_times
def run_experiment(self, opName):
    """Run a genetic algorithms optimization experiment for a given
    optimization problem.

    Args:
        ef (AbstractEvaluationFunction): Evaluation function.
        ranges (array): Search space ranges.
        op (str): Name of optimization problem.

    """
    outdir = 'results/OPT/{}'.format(opName)  # get results directory
    outfile = 'GA_{}_{}_{}_results.csv'.format(self.p, self.ma, self.mu)
    fname = get_abspath(outfile, outdir)  # get output filename
    # delete existing results file, if it already exists
    try:
        os.remove(fname)
    except Exception as e:
        print e
        pass
    # fresh CSV header for this parameter combination
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals,trial\n')
    # start experiment
    for t in range(self.numTrials):
        # initialize optimization problem and training functions;
        # TSP gets permutation-specific mutation/crossover operators
        ranges, ef = self.op.get_ef()
        mf = None
        cf = None
        if opName == 'TSP':
            mf = SwapMutation()
            cf = TravelingSalesmanCrossOver(ef)
        else:
            mf = DiscreteChangeOneMutation(ranges)
            cf = SingleCrossOver()
        odd = DiscreteUniformDistribution(ranges)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        ga = StandardGeneticAlgorithm(self.p, self.ma, self.mu, gap)
        fit = FixedIterationTrainer(ga, 10)  # trains in 10-iteration chunks
        # run experiment and train evaluation function
        start = time.clock()
        for i in range(0, self.maxIters, 10):
            fit.train()
            # elapsed is cumulative wall time since the trial started
            elapsed = time.clock() - start
            fe = ef.valueCallCount
            score = ef.value(ga.getOptimal())
            ef.valueCallCount -= 1  # exclude the reporting evaluation above
            # write results to output file
            s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
            with open(fname, 'a+') as f:
                f.write(s)
def perform(alg, fname):
    """Train `alg` in 10-iteration chunks up to the module-level
    `maxIters`, appending an `iteration,fitness,cumulative-time` row to
    `fname` after each chunk."""
    trainer = FixedIterationTrainer(alg, 10)
    cumulative = [0]
    for it in range(0, maxIters, 10):
        t0 = clock()
        trainer.train()
        cumulative.append(cumulative[-1] + (time.clock() - t0))
        best = ef.value(alg.getOptimal())
        row = '{},{},{}\n'.format(it, best, cumulative[-1])
        # print row
        with open(fname, 'a') as out:
            out.write(row)
def run_experiment(self, opName):
    """Run a simulated annealing optimization experiment for a given
    optimization problem.

    Args:
        ef (AbstractEvaluationFunction): Evaluation function.
        ranges (array): Search space ranges.
        op (str): Name of optimization problem.

    """
    outdir = 'results/OPT/{}'.format(opName)  # get results directory
    outfile = 'SA_{}_results.csv'.format(self.cr)
    fname = get_abspath(outfile, outdir)  # get output filename
    # delete existing results file, if it already exists
    try:
        os.remove(fname)
    except Exception as e:
        print e
        pass
    # fresh CSV header for this cooling-rate setting
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals,trial\n')
    # start experiment
    for t in range(self.numTrials):
        # initialize optimization problem and training functions;
        # TSP uses a permutation-specific neighbor operator
        ranges, ef = self.op.get_ef()
        nf = None
        if opName == 'TSP':
            nf = SwapNeighbor()
        else:
            nf = DiscreteChangeOneNeighbor(ranges)
        odd = DiscreteUniformDistribution(ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        sa = SimulatedAnnealing(1E10, self.cr, hcp)  # T0=1E10, cooling=self.cr
        fit = FixedIterationTrainer(sa, 10)  # trains in 10-iteration chunks
        # run experiment and train evaluation function
        start = time.clock()
        for i in range(0, self.maxIters, 10):
            fit.train()
            # elapsed is cumulative wall time since the trial started
            elapsed = time.clock() - start
            fe = ef.valueCallCount
            score = ef.value(sa.getOptimal())
            ef.valueCallCount -= 1  # exclude the reporting evaluation above
            # write results to output file
            s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
            with open(fname, 'a+') as f:
                f.write(s)
def run_experiment(self, opName):
    """Run a MIMIC optimization experiment for a given optimization
    problem.

    Args:
        ef (AbstractEvaluationFunction): Evaluation function.
        ranges (array): Search space ranges.
        op (str): Name of optimization problem.

    """
    outdir = 'results/OPT/{}'.format(opName)  # get results directory
    outfile = 'MIMIC_{}_{}_{}_results.csv'.format(self.s, self.k, self.m)
    fname = get_abspath(outfile, outdir)  # get output filename
    # delete existing results file, if it already exists
    try:
        os.remove(fname)
    except Exception as e:
        print e
        pass
    # fresh CSV header for this (samples, keep, m) combination
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals,trial\n')
    # start experiment
    for t in range(self.numTrials):
        # initialize optimization problem and training functions
        ranges, ef = self.op.get_ef()
        mimic = None
        df = DiscreteDependencyTree(self.m, ranges)
        odd = DiscreteUniformDistribution(ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(self.s, self.k, pop)  # s samples, keep k per generation
        fit = FixedIterationTrainer(mimic, 10)  # trains in 10-iteration chunks
        # run experiment and train evaluation function
        start = time.clock()
        for i in range(0, self.maxIters, 10):
            fit.train()
            # elapsed is cumulative wall time since the trial started
            elapsed = time.clock() - start
            fe = ef.valueCallCount
            score = ef.value(mimic.getOptimal())
            ef.valueCallCount -= 1  # exclude the reporting evaluation above
            # write results to output file
            s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
            with open(fname, 'a+') as f:
                f.write(s)
def MIMICtest():
    """Grow MIMIC's iteration and sample budgets (×1.1 per round) until
    it finds the optimum NUM_RIGHT times in a row or the iteration
    budget exceeds 500; log per-round stats via the module-level
    myWriter and print a summary.

    Uses module-level globals: N, NUM_RIGHT, pop, ef, myWriter, runNum.
    """
    correctCount = 0          # consecutive rounds that hit the optimum
    MIMIC_iters = 10
    MIMIC_samples = 5 * N  #max(1,int(N/10))
    MIMIC_keep = int(.1 * MIMIC_samples)
    t = 0                     # wall time of the most recent round
    while correctCount < NUM_RIGHT and MIMIC_iters <= 500:
        # keep 10% of samples, at least 1
        MIMIC_keep = int(max(.1 * MIMIC_samples, 1))
        mimic = MIMIC(int(MIMIC_samples), int(MIMIC_keep), pop)
        start = time.time()
        fit = FixedIterationTrainer(mimic, int(MIMIC_iters))
        fitness = fit.train()
        t = time.time() - start
        v = ef.value(mimic.getOptimal())
        myWriter.addValue(fitness, "MIMIC_fitness", runNum)
        myWriter.addValue(t, "MIMIC_searchTimes", runNum)
        # streak counting: reset on any miss
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
        # budgets become floats here; re-int()'d at each use
        MIMIC_iters *= 1.1
        MIMIC_samples *= 1.1
    myWriter.addValue(t, "MIMIC_times", 0)
    myWriter.addValue(int(MIMIC_iters), "MIMIC_iters", 0)
    myWriter.addValue(int(MIMIC_samples), "MIMIC_samples", 0)
    myWriter.addValue(int(MIMIC_keep), "MIMIC_keep", 0)
    print(
        str(N) + ": MIMIC: " + str(ef.value(mimic.getOptimal())) + " took "
        + str(t) + " seconds and " + str(int(MIMIC_iters))
        + " iterations and " + str(int(MIMIC_samples))
        + " samples with keep " + str(int(MIMIC_keep)))
def SA(): SA_iters = 10 correctCount = 0 t = 0 totalTime = 0 totalIters = 0 global sa sa = SimulatedAnnealing(1e11, .85, hcp) while correctCount < NUM_RIGHT: start = time.time() fit = FixedIterationTrainer(sa, SA_iters) fitness = fit.train() t = time.time() - start totalTime += t totalIters += SA_iters myWriter.addValue(fitness, "SA_fitness", runNum) myWriter.addValue(t, "SA_searchTimes", runNum) v = ef.value(sa.getOptimal()) if v == N: correctCount += 1 else: correctCount = 0 #SA_iters += 1 myWriter.addValue(t, "SA_times", 0) myWriter.addValue(int(SA_iters), "SA_iters", 0) print str(N) + ": SA: " + str(ef.value(sa.getOptimal())) + " took " + str( totalIters) + " seconds and " + str(totalIters) + " iterations"
def RHC():
    """Run RHC in fixed 10-iteration batches until it finds the optimum
    NUM_RIGHT times in a row; log per-batch and total stats via the
    module-level myWriter and print a summary.

    Uses module-level globals: N, NUM_RIGHT, hcp, ef, myWriter, runNum.
    """
    correctCount = 0   # consecutive batches that hit the optimum
    RHC_iters = 10     # fixed batch size
    t = 0
    totalTime = 0
    totalIters = 0
    global rhc
    rhc = RandomizedHillClimbing(hcp)
    while correctCount < NUM_RIGHT:
        # print str(correctCount)+ " / 20 correct in RHC w/ iters " + str(RHC_iters)
        fit = FixedIterationTrainer(rhc, RHC_iters)
        start = time.time()
        fitness = fit.train()
        t = time.time() - start
        totalIters += RHC_iters
        totalTime += t
        myWriter.addValue(fitness, "RHC_fitness", runNum)
        myWriter.addValue(t, "RHC_searchTimes", runNum)
        v = ef.value(rhc.getOptimal())
        # streak counting: reset on any miss
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
        #RHC_iters += 1
    myWriter.addValue(totalTime, "RHC_times", runNum)
    myWriter.addValue(totalIters, "RHC_iters", runNum)
    print str(N) + ": RHC: " + str(ef.value(
        rhc.getOptimal())) + " took " + str(totalTime) + " seconds and " + str(
        totalIters) + " iterations"
def RHC(): correctCount = 0 RHC_iters = 10 t=0 totalTime =0 totalIters = 0 global rhc rhc = RandomizedHillClimbing(hcp) while correctCount < NUM_RIGHT: # print str(correctCount)+ " / 20 correct in RHC w/ iters " + str(RHC_iters) fit = FixedIterationTrainer(rhc, RHC_iters) start = time.time() fitness = fit.train() t = time.time() - start totalIters+=RHC_iters totalTime += t; myWriter.addValue(fitness, "RHC_fitness", runNum) myWriter.addValue(t, "RHC_searchTimes",runNum) v = ef.value(rhc.getOptimal()) if v == N: correctCount += 1 else: correctCount = 0 #RHC_iters += 1 myWriter.addValue(totalTime,"RHC_times",runNum) myWriter.addValue(totalIters,"RHC_iters",runNum) print str(N) + ": RHC: " + str(ef.value(rhc.getOptimal()))+" took "+str(totalTime)+" seconds and " + str(totalIters) + " iterations"
def SA(): SA_iters = 10 correctCount = 0 t=0 totalTime=0 totalIters =0 global sa sa = SimulatedAnnealing(1e11, .85, hcp) while correctCount < NUM_RIGHT: start = time.time() fit = FixedIterationTrainer(sa, SA_iters) fitness = fit.train() t = time.time() - start totalTime+=t totalIters+= SA_iters myWriter.addValue(fitness, "SA_fitness", runNum) myWriter.addValue(t, "SA_searchTimes",runNum) v = ef.value(sa.getOptimal()) if v == N: correctCount += 1 else: correctCount = 0 #SA_iters += 1 myWriter.addValue(t,"SA_times",0) myWriter.addValue(int(SA_iters),"SA_iters",0) print str(N) + ": SA: " + str(ef.value(sa.getOptimal())) + " took "+str(totalIters)+ " seconds and " + str(totalIters) + " iterations"
def MIMICtest():
    """Grow MIMIC's iteration and sample budgets (×1.1 per round) until
    it finds the optimum NUM_RIGHT times in a row or the iteration
    budget exceeds 500; log per-round stats via the module-level
    myWriter and print a summary.

    Uses module-level globals: N, NUM_RIGHT, pop, ef, myWriter, runNum.
    """
    correctCount = 0          # consecutive rounds that hit the optimum
    MIMIC_iters = 10
    MIMIC_samples = 5*N  #max(1,int(N/10))
    MIMIC_keep = int(.1 * MIMIC_samples)
    t = 0                     # wall time of the most recent round
    while correctCount < NUM_RIGHT and MIMIC_iters <= 500:
        # keep 10% of samples, at least 1
        MIMIC_keep = int(max(.1 * MIMIC_samples, 1))
        mimic = MIMIC(int(MIMIC_samples), int(MIMIC_keep), pop)
        start = time.time()
        fit = FixedIterationTrainer(mimic, int(MIMIC_iters))
        fitness = fit.train()
        t = time.time() - start
        v = ef.value(mimic.getOptimal())
        myWriter.addValue(fitness, "MIMIC_fitness", runNum)
        myWriter.addValue(t, "MIMIC_searchTimes", runNum)
        # streak counting: reset on any miss
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
        # budgets become floats here; re-int()'d at each use
        MIMIC_iters *= 1.1
        MIMIC_samples *= 1.1
    myWriter.addValue(t, "MIMIC_times", 0)
    myWriter.addValue(int(MIMIC_iters), "MIMIC_iters", 0)
    myWriter.addValue(int(MIMIC_samples), "MIMIC_samples", 0)
    myWriter.addValue(int(MIMIC_keep), "MIMIC_keep", 0)
    print(str(N) + ": MIMIC: " + str(ef.value(mimic.getOptimal()))
          + " took " + str(t) + " seconds and " + str(int(MIMIC_iters))
          + " iterations and " + str(int(MIMIC_samples))
          + " samples with keep " + str(int(MIMIC_keep)))
def mimicGATest():
    """Meta-optimization experiment: use MIMIC to search over GA
    hyper-parameters (population, keep, mutate, iterations) via the
    project-local RamysEvalMetafunc / RamysMimicDistribution helpers,
    then print the best configuration's value.

    Uses module-level globals: N, ranges, ef.
    """
    # parameter-range bounds for the four GA hyper-parameters
    popBegin = 1
    popEnd = 101
    keepBegin = 1
    keepEnd = 90
    mutBegin = 1
    mutEnd = 90
    itersBegin = 1
    itersEnd = 200
    samples = 10        # MIMIC samples per generation
    keep = 2            # MIMIC samples kept per generation
    problemSize = N
    mimicRange = (problemSize)
    iters = 1           # MIMIC training iterations
    # java.util.Vector of [begin, end] pairs consumed by RamysMimicDistribution
    paramRanges = Vector(8)
    paramRanges.addElement(popBegin)
    paramRanges.addElement(popEnd)
    paramRanges.addElement(keepBegin)
    paramRanges.addElement(keepEnd)
    paramRanges.addElement(mutBegin)
    paramRanges.addElement(mutEnd)
    paramRanges.addElement(itersBegin)
    paramRanges.addElement(itersEnd)
    totalParamSize1 = (popEnd - popBegin + 1) + (keepEnd - keepBegin + 1) + (
        mutEnd - mutBegin + 1) + (itersEnd - itersBegin + 1)
    # flat list of every candidate value across all four parameters
    allParamValues = range(popBegin, popEnd + 1) + range(
        keepBegin, keepEnd + 1) + range(mutBegin, mutEnd + 1) + range(
        itersBegin, itersEnd + 1)
    totalParamSize = len(allParamValues)
    metaFun = RamysEvalMetafunc(ranges)
    discreteDist = RamysMimicDistribution(
        paramRanges)  #DiscreteUniformDistribution(problemSize)
    distFunc = DiscreteDependencyTree(.1, allParamValues)
    findGA = GenericProbabilisticOptimizationProblem(metaFun, discreteDist,
                                                     distFunc)
    mimic = MIMIC(samples, keep, findGA)
    fit = FixedIterationTrainer(mimic, iters)
    fit.train()
    print str(N) + ": MIMIC finds GA : " + str(ef.value(mimic.getOptimal()))
def run_ga(t, pop, mate, mutate):
    """Run one Standard GA trial on the Traveling Salesman route problem,
    appending `iteration,fitness,cumulative-time,fevals` rows via
    base.write_to_file (uses module-level odd/mf/cf/points/maxIters)."""
    fname = outfile.format('GA{}_{}_{}'.format(pop, mate, mutate), str(t + 1))
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    ga = StandardGeneticAlgorithm(pop, mate, mutate, gap)
    trainer = FixedIterationTrainer(ga, 10)
    cumulative = [0]
    for iteration in range(0, maxIters, 10):
        t0 = clock()
        trainer.train()
        cumulative.append(cumulative[-1] + (time.clock() - t0))
        n_evals = ef.fevals
        best = ef.value(ga.getOptimal())
        ef.fevals -= 1  # exclude the reporting evaluation above
        row = '{},{},{},{}\n'.format(iteration, best, cumulative[-1], n_evals)
        base.write_to_file(fname, row)
    return
def run_sa(t, CE):
    """Run one Simulated Annealing trial (T0=1E10, cooling CE) on the
    Traveling Salesman route problem, appending
    `iteration,fitness,cumulative-time,fevals` rows via
    base.write_to_file (uses module-level odd/nf/points/maxIters)."""
    fname = outfile.format('SA{}'.format(CE), str(t + 1))
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    annealer = SimulatedAnnealing(1E10, CE, hcp)
    trainer = FixedIterationTrainer(annealer, 10)
    cumulative = [0]
    for iteration in range(0, maxIters, 10):
        t0 = clock()
        trainer.train()
        cumulative.append(cumulative[-1] + (time.clock() - t0))
        n_evals = ef.fevals
        best = ef.value(annealer.getOptimal())
        ef.fevals -= 1  # exclude the reporting evaluation above
        row = '{},{},{},{}\n'.format(iteration, best, cumulative[-1], n_evals)
        base.write_to_file(fname, row)
    return
def run_rhc(t):
    """Run one Randomized Hill Climbing trial on the Traveling Salesman
    route problem, appending `iteration,fitness,cumulative-time,fevals`
    rows via base.write_to_file (uses module-level odd/nf/points/maxIters)."""
    fname = outfile.format('RHC', str(t + 1))
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    climber = RandomizedHillClimbing(hcp)
    trainer = FixedIterationTrainer(climber, 10)
    cumulative = [0]
    for iteration in range(0, maxIters, 10):
        t0 = clock()
        trainer.train()
        cumulative.append(cumulative[-1] + (time.clock() - t0))
        n_evals = ef.fevals
        best = ef.value(climber.getOptimal())
        ef.fevals -= 1  # exclude the reporting evaluation above
        row = '{},{},{},{}\n'.format(iteration, best, cumulative[-1], n_evals)
        base.write_to_file(fname, row)
    return
def run_sa(t, CE):
    """Run one Simulated Annealing trial (T0=1E10, cooling CE) on
    Continuous Peaks, writing a header then appending
    `iteration,fitness,cumulative-time,fevals` rows via base helpers
    (uses module-level T/ranges/maxIters)."""
    fname = outfile.format('SA{}'.format(CE), str(t + 1))
    base.write_header(fname)
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    annealer = SimulatedAnnealing(1E10, CE, hcp)
    trainer = FixedIterationTrainer(annealer, 10)
    cumulative = [0]
    for iteration in range(0, maxIters, 10):
        t0 = clock()
        trainer.train()
        cumulative.append(cumulative[-1] + (time.clock() - t0))
        n_evals = ef.fevals
        best = ef.value(annealer.getOptimal())
        ef.fevals -= 1  # exclude the reporting evaluation above
        row = '{},{},{},{}\n'.format(iteration, best, cumulative[-1], n_evals)
        base.write_to_file(fname, row)
    return
def run_rhc(t):
    """Run one Randomized Hill Climbing trial on Flip Flop, writing a
    header then appending `iteration,fitness,cumulative-time,fevals`
    rows via base helpers (uses module-level ranges/maxIters)."""
    fname = outfile.format('RHC', str(t + 1))
    base.write_header(fname)
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    climber = RandomizedHillClimbing(hcp)
    trainer = FixedIterationTrainer(climber, 10)
    cumulative = [0]
    for iteration in range(0, maxIters, 10):
        t0 = clock()
        trainer.train()
        cumulative.append(cumulative[-1] + (time.clock() - t0))
        n_evals = ef.fevals
        best = ef.value(climber.getOptimal())
        ef.fevals -= 1  # exclude the reporting evaluation above
        row = '{},{},{},{}\n'.format(iteration, best, cumulative[-1], n_evals)
        base.write_to_file(fname, row)
    return
def mimicGATest():
    """Meta-optimization experiment: use MIMIC to search over GA
    hyper-parameters (population, keep, mutate, iterations) via the
    project-local RamysEvalMetafunc / RamysMimicDistribution helpers,
    then print the best configuration's value.

    Uses module-level globals: N, ranges, ef.
    """
    # parameter-range bounds for the four GA hyper-parameters
    popBegin = 1
    popEnd = 101
    keepBegin = 1
    keepEnd = 90
    mutBegin = 1
    mutEnd = 90
    itersBegin = 1
    itersEnd = 200
    samples = 10        # MIMIC samples per generation
    keep = 2            # MIMIC samples kept per generation
    problemSize = N
    mimicRange = (problemSize)
    iters = 1           # MIMIC training iterations
    # java.util.Vector of [begin, end] pairs consumed by RamysMimicDistribution
    paramRanges = Vector(8)
    paramRanges.addElement(popBegin)
    paramRanges.addElement(popEnd)
    paramRanges.addElement(keepBegin)
    paramRanges.addElement(keepEnd)
    paramRanges.addElement(mutBegin)
    paramRanges.addElement(mutEnd)
    paramRanges.addElement(itersBegin)
    paramRanges.addElement(itersEnd)
    totalParamSize1 = (popEnd - popBegin +1) + (keepEnd - keepBegin +1) + (mutEnd - mutBegin +1) + (itersEnd - itersBegin +1)
    # flat list of every candidate value across all four parameters
    allParamValues = range(popBegin, popEnd+1)+range(keepBegin, keepEnd+1)+range(mutBegin, mutEnd+1)+range(itersBegin, itersEnd+1)
    totalParamSize = len(allParamValues)
    metaFun = RamysEvalMetafunc(ranges)
    discreteDist = RamysMimicDistribution(paramRanges)  #DiscreteUniformDistribution(problemSize)
    distFunc = DiscreteDependencyTree(.1, allParamValues)
    findGA = GenericProbabilisticOptimizationProblem(metaFun, discreteDist, distFunc)
    mimic = MIMIC(samples, keep, findGA)
    fit = FixedIterationTrainer(mimic, iters)
    fit.train()
    print str(N) + ": MIMIC finds GA : " + str(ef.value(mimic.getOptimal()))
def main():
    """Command-line driver for the 4-Peaks experiments: parse getopt
    flags into algorithm hyper-parameters, build the problem objects,
    then run whichever of RHC / SA / GA / MIMIC was selected (or all),
    reporting via output2 / buildFooter / outputFooter."""
    # --- defaults, overridable from the command line ---
    N=200
    tempDenom = 5
    T=N/tempDenom          # 4-Peaks threshold (integer division in Py2)
    fill = [2] * N
    ranges = array('i', fill)
    iterations = 2000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 100
    gaMutate = 10
    mimicSamples = 200
    mimicToKeep = 20
    saTemp = 1E11
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahn:rsgN:m:t:i:",
                                   ["gaIters=", "mimicIters=","gaPop=",
                                    "gaMate=", "gaMutate=", "mimicSamples=",
                                    "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
        sys.exit(2)
    # map each flag onto its parameter
    for opt, arg in opts:
        if opt == '-h':
            print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            N = int(arg)
        elif opt == '-t':
            T = float(arg)
        elif opt == '-d':
            tempDenom = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)
        elif opt == '-n':
            run = int(arg)
    # collect the effective settings for the report footers
    vars = {
        'N':N,
        'tempDenom':tempDenom,
        'T':T,
        'fill':fill,
        'ranges':ranges,
        'iterations' :iterations,
        'gaIters':gaIters,
        'mimicIters':mimicIters,
        'gaPop' :gaPop,
        'gaMate' :gaMate,
        'gaMutate' :gaMutate,
        'mimicSamples' : mimicSamples,
        'mimicToKeep' : mimicToKeep,
        'saTemp' : saTemp,
        'saCooling' : saCooling,
        'alg' : alg,
        'run' : run
    }
    settings = getSettings(alg, settings, vars)
    # rebuild problem structures in case -N / -d changed them
    T=N/tempDenom
    fill = [2] * N
    ranges = array('i', fill)
    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        output2('4Peaks', 'RHC', rows, settings)
        rows = []
        buildFooter("4Peaks", "RHC", rows, settings),
        outputFooter("4Peaks", "RHC", rows, settings)
    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('4Peaks', 'SA', rows, settings)
        rows = []
        buildFooter("4Peaks", "SA", rows, settings)
        outputFooter("4Peaks", "SA", rows, settings)
    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        print "GA: " + str(ef.value(ga.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'GA', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "GA", rows , settings)
    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'MIMIC', rows, settings)
        rows = []
        # NOTE(review): footer built under "GA" but output under "MIMIC" —
        # likely a copy-paste slip; confirm intended label.
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "MIMIC", rows, settings)
# RHC for t in range(numTrials): fname = outfile.format('RHC', str(t + 1)) with open(fname, 'w') as f: f.write('iterations,fitness,time,fevals\n') ef = FlipFlopEvaluationFunction() odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, 10) times = [0] for i in range(0, maxIters, 10): start = clock() fit.train() elapsed = time.clock() - start times.append(times[-1] + elapsed) fevals = ef.fevals score = ef.value(rhc.getOptimal()) ef.fevals -= 1 st = '{},{},{},{}\n'.format(i, score, times[-1], fevals) print(st) with open(fname, 'a') as f: f.write(st) # SA for t in range(numTrials): for CE in [0.15, 0.35, 0.55, 0.75, 0.95]: fname = outfile.format('SA{}'.format(CE), str(t + 1)) with open(fname, 'w') as f:
# Count Ones / GA benchmark: run the GA `runs` times for 150 iterations
# each, averaging fitness, evaluation-call counts, and wall time.
# NOTE(review): relies on names defined elsewhere in this file
# (ranges, runs, ga_pop, ga_keep, ga_mut, co_type, N) — presumably the
# body of an experiment function; confirm against the enclosing scope.
ef = CountOnesEvaluationFunction()
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
t0 = time.time()
calls = []
results = []
for _ in range(runs):
    ga = StandardGeneticAlgorithm(ga_pop, ga_keep, ga_mut, gap)
    fit = FixedIterationTrainer(ga, 150)
    fitness = fit.train()
    results.append(ef.value(ga.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()  # reset the call counter between runs
print "GA, average results , " + str(
    sum(results) / float(runs)) + ", countones_ga_%d-%d-%d-%d-%d.txt" % (
    N, ga_pop, co_type, ga_keep, ga_mut)
print "GA, average feval calls , " + str(
    sum(calls) / float(runs)) + ", countones_ga_%d-%d-%d-%d-%d.txt" % (
    N, ga_pop, co_type, ga_keep, ga_mut)
t1 = time.time() - t0
print "GA, average time , " + str(
    t1 / float(runs)) + ", countones_ga_%d-%d-%d-%d-%d.txt" % (
    N, ga_pop, co_type, ga_keep, ga_mut)
ef = TravelingSalesmanRouteEvaluationFunction(points) odd = DiscretePermutationDistribution(N) mf = SwapMutation() cf = TravelingSalesmanCrossOver(ef) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) for GA_MUTATION in GA_MUTATION_pool: ga = StandardGeneticAlgorithm(GA_POPULATION, GA_CROSSOVER, GA_MUTATION, gap) fit_ga = FixedIterationTrainer(ga, n_iteration) print("calculating for mutations = %d " % GA_MUTATION) # Training start_ga = time.time() fit_ga.train() end_ga = time.time() # Result extracting last_training_time_ga = end_ga - start_ga ga_training_time[n].append(last_training_time_ga) ga_fitness[n].append(ef.value(ga.getOptimal())) overall_ga_training_time = list_avg(*ga_training_time) overall_ga_fitness = list_avg(*ga_fitness) with open(OUTPUT_FILE, "w") as outFile: for i in range(1): outFile.write(','.join([ "ga_mutations", "ga_fitness",
# RHC for t in range(numTrials): fname = outfile.replace('@ALG@', 'RHC').replace('@N@', str(t + 1)) with open(fname, 'w') as f: f.write('iterations,fitness,time,fevals\n') ef = ContinuousPeaksEvaluationFunction(T) odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, 10) times = [0] for i in range(0, maxIters, 10): start = clock() fit.train() #calls rhc.train() 10 times and returns average elapsed = time.clock() - start times.append(times[-1] + elapsed) fevals = ef.fevals score = ef.value(rhc.getOptimal()) ef.fevals -= 1 st = '{},{},{},{}\n'.format(i, score, times[-1], fevals) print st with open(fname, 'a') as f: f.write(st) # SA for t in range(numTrials): for CE in [0.15, 0.35, 0.55, 0.75, 0.95]: fname = outfile.replace('@ALG@', 'SA{}'.format(CE)).replace('@N@', str(t + 1))
# Continuous Peaks / SA cooling-factor sweep for cycle n: train SA once
# per cooling factor, recording wall time and best fitness into the
# per-cycle lists, then (re)write the CSV header.
# NOTE(review): relies on names defined elsewhere in this file
# (n, T, ranges, SA_TEMPERATURE, SA_COOLING_FACTOR_pool, n_iteration,
# sa_training_time, sa_fitness, list_avg, OUTPUT_FILE) — presumably a
# loop body hoisted from an enclosing cycle loop; confirm context.
print("the %d th cycle" % (n + 1))
ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
for SA_COOLING_FACTOR in SA_COOLING_FACTOR_pool:
    sa = SimulatedAnnealing(SA_TEMPERATURE, SA_COOLING_FACTOR, hcp)
    fit_sa = FixedIterationTrainer(sa, n_iteration)
    print("calculating for cooling rate = %f" % SA_COOLING_FACTOR)
    # Training
    start_sa = time.time()
    fit_sa.train()
    end_sa = time.time()
    # Result extracting
    last_training_time_sa = end_sa - start_sa
    sa_training_time[n].append(last_training_time_sa)
    sa_fitness[n].append(ef.value(sa.getOptimal()))
# averages across all completed cycles
overall_sa_training_time = list_avg(*sa_training_time)
overall_sa_fitness = list_avg(*sa_fitness)
# 'w' truncates: the CSV is rewritten from scratch each cycle
with open(OUTPUT_FILE, "w") as outFile:
    for i in range(1):
        outFile.write(
            ','.join(["sa_cooling_factor", "sa_fitness",
                      "sa_training_time"]) + '\n')
runs : number of runs to average over """ fill = [2] * N ranges = array('i', fill) ef = CountOnesEvaluationFunction() odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = SingleCrossOver() df = DiscreteDependencyTree(.1, ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) t0 = time.time() calls = [] results = [] for _ in range(runs): mimic = MIMIC(samples, tokeep, pop) fit = FixedIterationTrainer(mimic, 100) fitness = fit.train() results.append(ef.value(mimic.getOptimal())) calls.append(ef.getTotalCalls()) ef.clearCount() print "MIMIC, average results, " + str(sum(results)/float(runs)) + ", countones_MIMIC-%d-%d-%d.txt" % (N, samples, tokeep) print "MIMIC, average feval calls , " + str(sum(calls)/float(runs)) + ", countones_MIMIC-%d-%d-%d.txt" % (N, samples, tokeep) t1 = time.time() - t0 print "MIMIC, average time , " + str(t1/float(runs)) + ", countones_MIMIC-%d-%d-%d.txt" % (N, samples, tokeep)
# learn weigths with back propagation network_bp = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer]) bp = BatchBackPropagationTrainer(set, network_bp, measure, RPROPUpdateRule()) cvt = ConvergenceTrainer(bp) cvt.train() print "\nBP training error:", errorRate(network_bp, train) print "BP training confusion matrix:", confusionMatrix(network_bp, train) print " BP test error:", errorRate(network_bp, test) print " BP test confusion matrix:", confusionMatrix(network_bp, test) # learn weights with randomized hill climbing network_rhc = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer]) nnop_rhc = NeuralNetworkOptimizationProblem(set, network_rhc, measure) rhc = RandomizedHillClimbing(nnop_rhc) fit = FixedIterationTrainer(rhc, it_rhc) fit.train() op = rhc.getOptimal(); network_rhc.setWeights(op.getData()) print "\nRHC training error:", errorRate(network_rhc, train) print "RHC training confusion matrix:", confusionMatrix(network_rhc, train) print " RHC test error:", errorRate(network_rhc, test) print " RHC test confusion matrix:", confusionMatrix(network_rhc, test) # learn weights with simulated annealing network_sa = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer]) nnop_sa = NeuralNetworkOptimizationProblem(set, network_sa, measure) sa = SimulatedAnnealing(1E11, 0.95, nnop_sa) fit = FixedIterationTrainer(sa, it_sa) fit.train() op = sa.getOptimal(); network_sa.setWeights(op.getData())
def main(): iterations = 200000 alg = 'all' gaPop = 2000 gaMate = 1500 gaMutate = 250 mimicSamples = 500 mimicToKeep = 100 saTemp = 1E12 saCooling = .999 gaIters = 1000 mimicIters = 1000 run = 0 settings = [] try: opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:i:", ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="]) except: print 'travelingsalesman.py -i <iterations>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'travelingsalesman.py -i <iterations>' sys.exit(1) elif opt == '-i': if arg < 1: print 'Iterations must be greater than 0' sys.exit(2) iterations = int(arg) elif opt == '-a': alg = 'all' elif opt == '-r': alg = 'RHC' elif opt == '-s': alg = 'SA' elif opt == '-g': alg = 'GA' elif opt == '-m': alg = 'MIMIC' elif opt == '--gaPop': if arg < 1: print 'Population must be greater than 0' sys.exit(2) gaPop = int(arg) elif opt == '--gaMate': if arg < 1: print 'Mating must be greater than 0' sys.exit(2) gaMate = int(arg) elif opt == '--gaMutate': if arg < 1: print 'Mutators must be greater than 0' sys.exit(2) gaMutate = int(arg) elif opt == '--mimicSamples': if arg < 1: print 'MIMIC samples must be greater than 0' sys.exit(2) mimicSamples = int(arg) elif opt == '--mimicToKeep': if arg < 1: print 'MIMIC to keep must be greater than 0' sys.exit(2) mimicToKeep = int(arg) elif opt == '--saTemp': saTemp = float(arg) elif opt == '--saCooling': saCooling = float(arg) elif opt == '-n': run = int(arg) elif opt == '--gaIters': if arg < 1: print 'GA Iterations must be greater than 0' sys.exit(2) gaIters = int(arg) elif opt == '--mimicIters': if arg < 1: print 'MIMIC Iterations must be greater than 0' sys.exit(2) mimicIters = int(arg) vars = { 'iterations' : iterations, 'alg' : alg, 'gaPop' : gaPop, 'gaMate' : gaMate, 'gaMutate' : gaMutate, 'mimicSamples' : mimicSamples, 'mimicToKeep' : mimicToKeep, 'saTemp' : saTemp, 'saCooling' : saCooling, 'gaIters' : gaIters, 'mimicIters' : mimicIters, 'run' : 
run } settings = getSettings(alg, settings, vars) if gaPop < gaMate or gaPop < gaMutate or gaMate < gaMutate: pebkac({gaPop: 'total population',gaMate : 'mating population', gaMutate : 'mutating population'}, alg, 'total population', settings) if mimicSamples < mimicToKeep: pebkac({mimicSamples: 'mimic samples', mimicToKeep : 'mimic to keep'}, alg, 'mimic samples', settings) prob = 'Traveling Sales Problem' invDist = {} cities = CityList() N = len(cities) #random = Random() points = [[0 for x in xrange(2)] for x in xrange(N)] for i in range(0, len(points)): coords = cities.getCoords(i) points[i][0] = coords[0] points[i][1] = coords[1] ef = TravelingSalesmanRouteEvaluationFunction(points) odd = DiscretePermutationDistribution(N) nf = SwapNeighbor() mf = SwapMutation() cf = TravelingSalesmanCrossOver(ef) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) rows = [] if alg == 'RHC' or alg == 'all': print '\n----------------------------------' print 'Using Random Hill Climbing' for label, setting in settings: print label + ":" + str(setting) rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, iterations) fit.train() path = [] for x in range(0,N): path.append(rhc.getOptimal().getDiscrete(x)) output(prob, 'RHC', path, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(rhc.getOptimal())) rows.append(row) invDist['RHC'] = ef.value(rhc.getOptimal()) buildFooter(prob, 'RHC', rows, settings) outputFooter(prob, 'RHC', rows, settings) if alg == 'SA' or alg == 'all': print 'Using Simulated Annealing' for label, setting in settings: print label + ":" + str(setting) sa = SimulatedAnnealing(saTemp, saCooling, hcp) fit = FixedIterationTrainer(sa, iterations) fit.train() path = [] for x in range(0,N): path.append(sa.getOptimal().getDiscrete(x)) output(prob, 'SA', path, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(sa.getOptimal())) 
rows.append(row) invDist['SA'] = ef.value(sa.getOptimal()) buildFooter(prob, 'SA', rows, settings) outputFooter(prob, 'SA', rows, settings) if alg == 'GA' or alg == 'all': print '\n----------------------------------' print 'Using Genetic Algorithm' for label, setting in settings: print label + ":" + str(setting) ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap) fit = FixedIterationTrainer(ga, gaIters) fit.train() path = [] for x in range(0,N): path.append(ga.getOptimal().getDiscrete(x)) output(prob, 'GA', path, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(ga.getOptimal())) rows.append(row) invDist['GA'] = ef.value(ga.getOptimal()) buildFooter(prob, 'GA', rows, settings) outputFooter(prob, 'GA', rows, settings) if alg == 'MIMIC' or alg == 'all': print '\n----------------------------------' print 'Using MIMIC' for label, setting in settings: print label + ":" + str(setting) # for mimic we use a sort encoding ef = TravelingSalesmanSortEvaluationFunction(points); fill = [N] * N ranges = array('i', fill) odd = DiscreteUniformDistribution(ranges); df = DiscreteDependencyTree(.1, ranges); pop = GenericProbabilisticOptimizationProblem(ef, odd, df); mimic = MIMIC(mimicSamples, mimicToKeep, pop) fit = FixedIterationTrainer(mimic, mimicIters) fit.train() path = [] optimal = mimic.getOptimal() fill = [0] * optimal.size() ddata = array('d', fill) for i in range(0,len(ddata)): ddata[i] = optimal.getContinuous(i) order = ABAGAILArrays.indices(optimal.size()) ABAGAILArrays.quicksort(ddata, order) output(prob, 'MIMIC', order, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(mimic.getOptimal())) rows.append(row) invDist['MIMIC'] = ef.value(mimic.getOptimal()) buildFooter(prob, 'MIMIC', rows, settings) outputFooter(prob, 'MIMIC', rows, settings) maxn = max(len(key) for key in invDist) maxd = max(len(str(invDist[key])) for key in invDist) print "Results" for result in invDist: print "%-*s 
%s %-*s" % (len('Best Alg') + 2, result, ':', maxd, invDist[result]) if alg == 'all': print "%-*s %s %-*s" % (len('Best Alg') + 2, 'Best Alg', ':', maxd, max(invDist.iterkeys(), key=(lambda key: invDist[key]))) print '----------------------------------'
def main(): # The number of items NUM_ITEMS = 40 # The number of copies each COPIES_EACH = 4 # The maximum weight for a single element MAX_WEIGHT = 50 # The maximum volume for a single element MAX_VOLUME = 50 iterations = 20000 gaIters = 1000 mimicIters = 1000 gaPop = 200 gaMate = 150 gaMutate = 25 mimicSamples = 200 mimicToKeep = 100 saTemp = 100 saCooling = .95 alg = 'all' run = 0 settings = [] try: opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:c:w:v:i:", ["gaIters=", "mimicIters=","gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="]) except: print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>' sys.exit(1) elif opt == '-i': iterations = int(arg) elif opt == '-N': NUM_ITEMS = int(arg) elif opt == '-c': COPIES_EACH = int(arg) elif opt == '-w': MAX_WEIGHT = int(arg) elif opt == '-v': MAX_VOLUME = int(arg) elif opt == '-n': run = int(arg) elif opt == '-r': alg = 'RHC' elif opt == '-s': alg = 'SA' elif opt == '-g': alg = 'GA' elif opt == '-m': alg = 'MIMIC' elif opt == '-a': alg = 'all' elif opt == '--gaPop': gaPop = int(arg) elif opt == '--gaMate': gaMate = int(arg) elif opt == '--gaMutate': gaMutate = int(arg) elif opt == '--mimicSamples': mimicSamples = int(arg) elif opt == '--mimicToKeep': mimicToKeep = int(arg) elif opt == '--saTemp': saTemp = float(arg) elif opt == '--saCooling': saCooling = float(arg) elif opt == '--gaIters': gaIters = int(arg) elif opt == '--mimicIters': mimicIters = int(arg) vars ={ 'NUM_ITEMS' : NUM_ITEMS, 'COPIES_EACH' : COPIES_EACH, 'MAX_WEIGHT' : MAX_WEIGHT, 'MAX_VOLUME' : MAX_VOLUME, 'iterations' : iterations, 'gaIters' : gaIters, 'mimicIters' : mimicIters, 'gaPop' : gaPop, 'gaMate' : gaMate, 'gaMutate' : gaMutate, 'mimicSamples' : mimicSamples, 'mimicToKeep' : mimicToKeep, 'saTemp' : saTemp, 
'saCooling' : saCooling, 'alg' : alg, 'run' : run } settings = getSettings(alg, settings, vars) # Random number generator */ random = Random() # The volume of the knapsack KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4 # create copies fill = [COPIES_EACH] * NUM_ITEMS copies = array('i', fill) # create weights and volumes fill = [0] * NUM_ITEMS weights = array('d', fill) volumes = array('d', fill) for i in range(0, NUM_ITEMS): weights[i] = random.nextDouble() * MAX_WEIGHT volumes[i] = random.nextDouble() * MAX_VOLUME # create range fill = [COPIES_EACH + 1] * NUM_ITEMS ranges = array('i', fill) ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies) odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = UniformCrossOver() df = DiscreteDependencyTree(.1, ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) if alg == 'RHC' or alg == 'all': rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, iterations) fit.train() print "RHC: " + str(ef.value(rhc.getOptimal())) rows = [] row = [] row.append("Evaluation Function Value") row.append(str(ef.value(rhc.getOptimal()))) rows.append(row) output2('Knapsack', 'RHC', rows, settings) rows = [] buildFooter("Knapsack", "RHC", rows, settings) outputFooter("Knapsack", "RHC", rows , settings) if alg == 'SA' or alg == 'all': sa = SimulatedAnnealing(saTemp, saCooling, hcp) fit = FixedIterationTrainer(sa, iterations) fit.train() rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(sa.getOptimal())) rows.append(row) print "SA: " + str(ef.value(sa.getOptimal())) output2('Knapsack', 'SA', rows, settings) rows = [] buildFooter("Knapsack", "SA", rows, settings) outputFooter("Knapsack", "SA", rows, settings) if alg == 'GA' or alg == 'all': ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, 
gap) fit = FixedIterationTrainer(ga, gaIters) fit.train() rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(ga.getOptimal())) rows.append(row) print "GA: " + str(ef.value(ga.getOptimal())) output2('Knapsack', 'GA', rows, settings) buildFooter("Knapsack", "GA", rows, settings) outputFooter("Knapsack", "GA", rows , settings) if alg == 'MIMIC' or alg == 'all': mimic = MIMIC(mimicSamples, mimicToKeep, pop) fit = FixedIterationTrainer(mimic, mimicIters) fit.train() print "MIMIC: " + str(ef.value(mimic.getOptimal())) rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(mimic.getOptimal())) rows.append(row) output2('Knapsack', 'MIMIC', rows, settings) rows = [] buildFooter("Knapsack", "MIMIC", rows, settings) outputFooter("Knapsack", "MIMIC", rows , settings)