def MIMICtest():
    # Escalating MIMIC experiment: retry with ~10% more iterations and
    # samples each time the optimum is missed, until the optimum
    # (value == N) is hit NUM_RIGHT times in a row or the iteration
    # budget passes 500.  Uses module globals: N, NUM_RIGHT, pop, ef,
    # myWriter, runNum.
    # NOTE(review): reconstructed from whitespace-mangled source; the two
    # "*= 1.1" growth updates are read as part of the else branch -- confirm
    # against the original file.
    correct_streak = 0
    mimic_iters = 10
    mimic_samples = 5 * N  # max(1,int(N/10))
    mimic_keep = int(.1 * mimic_samples)
    elapsed = 0
    while correct_streak < NUM_RIGHT and mimic_iters <= 500:
        mimic_keep = int(max(.1 * mimic_samples, 1))
        searcher = MIMIC(int(mimic_samples), int(mimic_keep), pop)
        begun = time.time()
        trainer = FixedIterationTrainer(searcher, int(mimic_iters))
        fitness = trainer.train()
        elapsed = time.time() - begun
        best = ef.value(searcher.getOptimal())
        myWriter.addValue(fitness, "MIMIC_fitness", runNum)
        myWriter.addValue(elapsed, "MIMIC_searchTimes", runNum)
        if best == N:
            correct_streak += 1
        else:
            # Missed the optimum: reset the streak and grow the budget.
            correct_streak = 0
            mimic_iters *= 1.1
            mimic_samples *= 1.1
    myWriter.addValue(elapsed, "MIMIC_times", 0)
    myWriter.addValue(int(mimic_iters), "MIMIC_iters", 0)
    myWriter.addValue(int(mimic_samples), "MIMIC_samples", 0)
    myWriter.addValue(int(mimic_keep), "MIMIC_keep", 0)
    print(str(N) + ": MIMIC: " + str(ef.value(searcher.getOptimal()))
          + " took " + str(elapsed) + " seconds and "
          + str(int(mimic_iters)) + " iterations and "
          + str(int(mimic_samples)) + " samples with keep "
          + str(int(mimic_keep)))
def mimicGATest(): popBegin = 1 popEnd = 101 keepBegin = 1 keepEnd = 90 mutBegin = 1 mutEnd = 90 itersBegin = 1 itersEnd = 200 samples = 10 keep = 2 problemSize = N mimicRange = (problemSize) iters = 1 paramRanges = Vector(8) paramRanges.addElement(popBegin) paramRanges.addElement(popEnd) paramRanges.addElement(keepBegin) paramRanges.addElement(keepEnd) paramRanges.addElement(mutBegin) paramRanges.addElement(mutEnd) paramRanges.addElement(itersBegin) paramRanges.addElement(itersEnd) totalParamSize1 = (popEnd - popBegin +1) + (keepEnd - keepBegin +1) + (mutEnd - mutBegin +1) + (itersEnd - itersBegin +1) allParamValues = range(popBegin, popEnd+1)+range(keepBegin, keepEnd+1)+range(mutBegin, mutEnd+1)+range(itersBegin, itersEnd+1) totalParamSize = len(allParamValues) metaFun = RamysEvalMetafunc(ranges) discreteDist = RamysMimicDistribution(paramRanges) #DiscreteUniformDistribution(problemSize) distFunc = DiscreteDependencyTree(.1, allParamValues) findGA = GenericProbabilisticOptimizationProblem(metaFun, discreteDist, distFunc) mimic = MIMIC(samples, keep, findGA) fit = FixedIterationTrainer(mimic, iters) fit.train() print str(N) + ": MIMIC finds GA : " + str(ef.value(mimic.getOptimal()))
runs : number of runs to average over """ fill = [2] * N ranges = array('i', fill) ef = CountOnesEvaluationFunction() odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = SingleCrossOver() df = DiscreteDependencyTree(.1, ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) t0 = time.time() calls = [] results = [] for _ in range(runs): mimic = MIMIC(samples, tokeep, pop) fit = FixedIterationTrainer(mimic, 100) fitness = fit.train() results.append(ef.value(mimic.getOptimal())) calls.append(ef.getTotalCalls()) ef.clearCount() print "MIMIC, average results, " + str(sum(results)/float(runs)) + ", countones_MIMIC-%d-%d-%d.txt" % (N, samples, tokeep) print "MIMIC, average feval calls , " + str(sum(calls)/float(runs)) + ", countones_MIMIC-%d-%d-%d.txt" % (N, samples, tokeep) t1 = time.time() - t0 print "MIMIC, average time , " + str(t1/float(runs)) + ", countones_MIMIC-%d-%d-%d.txt" % (N, samples, tokeep)
def main(): iterations = 200000 alg = 'all' gaPop = 2000 gaMate = 1500 gaMutate = 250 mimicSamples = 500 mimicToKeep = 100 saTemp = 1E12 saCooling = .999 gaIters = 1000 mimicIters = 1000 run = 0 settings = [] try: opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:i:", ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="]) except: print 'travelingsalesman.py -i <iterations>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'travelingsalesman.py -i <iterations>' sys.exit(1) elif opt == '-i': if arg < 1: print 'Iterations must be greater than 0' sys.exit(2) iterations = int(arg) elif opt == '-a': alg = 'all' elif opt == '-r': alg = 'RHC' elif opt == '-s': alg = 'SA' elif opt == '-g': alg = 'GA' elif opt == '-m': alg = 'MIMIC' elif opt == '--gaPop': if arg < 1: print 'Population must be greater than 0' sys.exit(2) gaPop = int(arg) elif opt == '--gaMate': if arg < 1: print 'Mating must be greater than 0' sys.exit(2) gaMate = int(arg) elif opt == '--gaMutate': if arg < 1: print 'Mutators must be greater than 0' sys.exit(2) gaMutate = int(arg) elif opt == '--mimicSamples': if arg < 1: print 'MIMIC samples must be greater than 0' sys.exit(2) mimicSamples = int(arg) elif opt == '--mimicToKeep': if arg < 1: print 'MIMIC to keep must be greater than 0' sys.exit(2) mimicToKeep = int(arg) elif opt == '--saTemp': saTemp = float(arg) elif opt == '--saCooling': saCooling = float(arg) elif opt == '-n': run = int(arg) elif opt == '--gaIters': if arg < 1: print 'GA Iterations must be greater than 0' sys.exit(2) gaIters = int(arg) elif opt == '--mimicIters': if arg < 1: print 'MIMIC Iterations must be greater than 0' sys.exit(2) mimicIters = int(arg) vars = { 'iterations' : iterations, 'alg' : alg, 'gaPop' : gaPop, 'gaMate' : gaMate, 'gaMutate' : gaMutate, 'mimicSamples' : mimicSamples, 'mimicToKeep' : mimicToKeep, 'saTemp' : saTemp, 'saCooling' : saCooling, 'gaIters' : gaIters, 'mimicIters' : mimicIters, 'run' : 
run } settings = getSettings(alg, settings, vars) if gaPop < gaMate or gaPop < gaMutate or gaMate < gaMutate: pebkac({gaPop: 'total population',gaMate : 'mating population', gaMutate : 'mutating population'}, alg, 'total population', settings) if mimicSamples < mimicToKeep: pebkac({mimicSamples: 'mimic samples', mimicToKeep : 'mimic to keep'}, alg, 'mimic samples', settings) prob = 'Traveling Sales Problem' invDist = {} cities = CityList() N = len(cities) #random = Random() points = [[0 for x in xrange(2)] for x in xrange(N)] for i in range(0, len(points)): coords = cities.getCoords(i) points[i][0] = coords[0] points[i][1] = coords[1] ef = TravelingSalesmanRouteEvaluationFunction(points) odd = DiscretePermutationDistribution(N) nf = SwapNeighbor() mf = SwapMutation() cf = TravelingSalesmanCrossOver(ef) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) rows = [] if alg == 'RHC' or alg == 'all': print '\n----------------------------------' print 'Using Random Hill Climbing' for label, setting in settings: print label + ":" + str(setting) rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, iterations) fit.train() path = [] for x in range(0,N): path.append(rhc.getOptimal().getDiscrete(x)) output(prob, 'RHC', path, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(rhc.getOptimal())) rows.append(row) invDist['RHC'] = ef.value(rhc.getOptimal()) buildFooter(prob, 'RHC', rows, settings) outputFooter(prob, 'RHC', rows, settings) if alg == 'SA' or alg == 'all': print 'Using Simulated Annealing' for label, setting in settings: print label + ":" + str(setting) sa = SimulatedAnnealing(saTemp, saCooling, hcp) fit = FixedIterationTrainer(sa, iterations) fit.train() path = [] for x in range(0,N): path.append(sa.getOptimal().getDiscrete(x)) output(prob, 'SA', path, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(sa.getOptimal())) 
rows.append(row) invDist['SA'] = ef.value(sa.getOptimal()) buildFooter(prob, 'SA', rows, settings) outputFooter(prob, 'SA', rows, settings) if alg == 'GA' or alg == 'all': print '\n----------------------------------' print 'Using Genetic Algorithm' for label, setting in settings: print label + ":" + str(setting) ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap) fit = FixedIterationTrainer(ga, gaIters) fit.train() path = [] for x in range(0,N): path.append(ga.getOptimal().getDiscrete(x)) output(prob, 'GA', path, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(ga.getOptimal())) rows.append(row) invDist['GA'] = ef.value(ga.getOptimal()) buildFooter(prob, 'GA', rows, settings) outputFooter(prob, 'GA', rows, settings) if alg == 'MIMIC' or alg == 'all': print '\n----------------------------------' print 'Using MIMIC' for label, setting in settings: print label + ":" + str(setting) # for mimic we use a sort encoding ef = TravelingSalesmanSortEvaluationFunction(points); fill = [N] * N ranges = array('i', fill) odd = DiscreteUniformDistribution(ranges); df = DiscreteDependencyTree(.1, ranges); pop = GenericProbabilisticOptimizationProblem(ef, odd, df); mimic = MIMIC(mimicSamples, mimicToKeep, pop) fit = FixedIterationTrainer(mimic, mimicIters) fit.train() path = [] optimal = mimic.getOptimal() fill = [0] * optimal.size() ddata = array('d', fill) for i in range(0,len(ddata)): ddata[i] = optimal.getContinuous(i) order = ABAGAILArrays.indices(optimal.size()) ABAGAILArrays.quicksort(ddata, order) output(prob, 'MIMIC', order, points, settings) rows = [] row = [] row.append("Inverse of Distance") row.append(ef.value(mimic.getOptimal())) rows.append(row) invDist['MIMIC'] = ef.value(mimic.getOptimal()) buildFooter(prob, 'MIMIC', rows, settings) outputFooter(prob, 'MIMIC', rows, settings) maxn = max(len(key) for key in invDist) maxd = max(len(str(invDist[key])) for key in invDist) print "Results" for result in invDist: print "%-*s 
%s %-*s" % (len('Best Alg') + 2, result, ':', maxd, invDist[result]) if alg == 'all': print "%-*s %s %-*s" % (len('Best Alg') + 2, 'Best Alg', ':', maxd, max(invDist.iterkeys(), key=(lambda key: invDist[key]))) print '----------------------------------'
def run_knapsack_experiments():
    """Sweep RHC / SA / GA / MIMIC over a random knapsack instance, logging
    (iterations, params..., fitness, wall time) rows to per-algorithm CSV
    files under ./output.
    """
    OUTPUT_DIRECTORY = './output'
    # Random number generator (java.util.Random under Jython).
    random = Random()
    NUM_ITEMS = 40       # the number of items
    COPIES_EACH = 4      # the number of copies each
    MAX_WEIGHT = 50      # the maximum weight for a single element
    MAX_VOLUME = 50      # the maximum volume for a single element
    # The volume of the knapsack: 40% of the total possible volume.
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4
    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)
    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME
    # create range: each position holds 0..COPIES_EACH copies
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)
    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/knapsack_{}_log.csv'

    # Randomized Hill Climber
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iterations,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(200, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score,
                                          elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product(
            [200], [110, 120, 130, 140, 150], [2, 4, 6, 8]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate,
                                          to_mutate, gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(
                it, population_size, to_mate, to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([200], [100],
                                                 [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            # BUG FIX: the keep size was hard-coded to 20 while the CSV row
            # logged `to_keep`; pass the swept `to_keep` value instead so the
            # logged parameter matches what was actually run.
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(
                it, samples, to_keep, m, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
ga = StandardGeneticAlgorithm(500, 100, 10, genetic_problem) t0 = time() iters = 0 score = 0 f.write("starting GA\n") while iters < 30000: ga.train() score = ef.value(ga.getOptimal()) f.write(str(iters) + "," + str(score) + "\n") iters += 1 print "GA: " + str(ef.value( ga.getOptimal())), "time taken", time() - t0, "Iterations", iters mimic = MIMIC(100, 50, probablistic_optimization) score = 0 t0 = time() iters = 0 f.write("starting MIMIC\n") while iters < 100: mimic.train() score = ef.value(mimic.getOptimal()) print iters, score f.write(str(iters) + "," + str(score) + "\n") iters += 1 print "MIMIC: " + str(ef.value( mimic.getOptimal())), "time taken", time() - t0, "Iterations", iters
ga = StandardGeneticAlgorithm(200, 150, 25, gap) fit = FixedIterationTrainer(ga, iteration) start = time.time() fit.train() end = time.time() ga_times.append(end - start) ga_acc.append(ef.value(ga.getOptimal())) print "GA: " + str(ef.value(ga.getOptimal())) mimic = MIMIC(200, 100, pop) fit = FixedIterationTrainer(mimic, iteration) start = time.time() fit.train() end = time.time() mimic_times.append(end - start) mimic_acc.append(ef.value(mimic.getOptimal())) print "MIMIC: " + str(ef.value(mimic.getOptimal())) else: continue metrics = [ rhc_acc, rhc_times, sa_acc, sa_times, ga_acc, ga_times, mimic_acc, mimic_times, ]
ef = FourPeaksEvaluationFunction(T) odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = SingleCrossOver() df = DiscreteDependencyTree(.1, ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, 200000) fit.train() print "RHC: " + str(ef.value(rhc.getOptimal())) sa = SimulatedAnnealing(1E11, .95, hcp) fit = FixedIterationTrainer(sa, 200000) fit.train() print "SA: " + str(ef.value(sa.getOptimal())) ga = StandardGeneticAlgorithm(200, 100, 10, gap) fit = FixedIterationTrainer(ga, 1000) fit.train() print "GA: " + str(ef.value(ga.getOptimal())) mimic = MIMIC(200, 20, pop) fit = FixedIterationTrainer(mimic, 1000) fit.train() print "MIMIC: " + str(ef.value(mimic.getOptimal()))
fit.train() end = time.time() training_time = end - start print "SA: " + str(ef.value(sa.getOptimal())) OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "SA") with open(OUTFILE, 'a+') as f: f.write("%d,%f,%f\n" % (N, training_time, ef.value(sa.getOptimal()))) ga = StandardGeneticAlgorithm(200, 100, 10, gap) fit = FixedIterationTrainer(ga, N) start = time.time() fit.train() end = time.time() training_time = end - start print "GA: " + str(ef.value(ga.getOptimal())) OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "GA") with open(OUTFILE, 'a+') as f: f.write("%d,%f,%f\n" % (N, training_time, ef.value(ga.getOptimal()))) mimic = MIMIC(200, 20, pop) fit = FixedIterationTrainer(mimic, N) start = time.time() fit.train() end = time.time() training_time = end - start print "MIMIC: " + str(ef.value(mimic.getOptimal())) OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "MIMIC") with open(OUTFILE, 'a+') as f: f.write("%d,%f,%f\n" % (N, training_time, ef.value(mimic.getOptimal())))
fit = FixedIterationTrainer(ga, N) start = time.time() fit.train() end = time.time() training_time = end - start print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal())) OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "GA") with open(OUTFILE, 'a+') as f: f.write("%d,%f,%f\n" % (N, training_time, ef.value(ga.getOptimal()))) # for mimic we use a sort encoding ef = TravelingSalesmanSortEvaluationFunction(points); fill = [N] * N ranges = array('i', fill) odd = DiscreteUniformDistribution(ranges); df = DiscreteDependencyTree(.1, ranges); pop = GenericProbabilisticOptimizationProblem(ef, odd, df); mimic = MIMIC(500, 100, pop) fit = FixedIterationTrainer(mimic, N) start = time.time() fit.train() end = time.time() training_time = end - start print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal())) OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "MIMIC") with open(OUTFILE, 'a+') as f: f.write("%d,%f,%f\n" % (N, training_time, ef.value(mimic.getOptimal())))
def main(): N=200 tempDenom = 5 T=N/tempDenom fill = [2] * N ranges = array('i', fill) iterations = 2000 gaIters = 1000 mimicIters = 1000 gaPop = 200 gaMate = 100 gaMutate = 10 mimicSamples = 200 mimicToKeep = 20 saTemp = 1E11 saCooling = .95 alg = 'all' run = 0 settings = [] try: opts, args = getopt.getopt(sys.argv[1:], "ahn:rsgN:m:t:i:", ["gaIters=", "mimicIters=","gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="]) except: print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>' sys.exit(1) elif opt == '-i': iterations = int(arg) elif opt == '-N': N = int(arg) elif opt == '-t': T = float(arg) elif opt == '-d': tempDenom = int(arg) elif opt == '-r': alg = 'RHC' elif opt == '-a': alg = 'all' elif opt == '-s': alg = 'SA' elif opt == '-g': alg = 'GA' elif opt == '-m': alg = 'MIMIC' elif opt == '--gaPop': gaPop = int(arg) elif opt == '--gaMate': gaMate = int(arg) elif opt == '--gaMutate': gaMutate = int(arg) elif opt == '--mimicSamples': mimicSamples = int(arg) elif opt == '--mimicToKeep': mimicToKeep = int(arg) elif opt == '--saTemp': saTemp = float(arg) elif opt == '--saCooling': saCooling = float(arg) elif opt == '--gaIters': gaIters = int(arg) elif opt == '--mimicIters': mimicIters = int(arg) elif opt == '-n': run = int(arg) vars = { 'N':N, 'tempDenom':tempDenom, 'T':T, 'fill':fill, 'ranges':ranges, 'iterations' :iterations, 'gaIters':gaIters, 'mimicIters':mimicIters, 'gaPop' :gaPop, 'gaMate' :gaMate, 'gaMutate' :gaMutate, 'mimicSamples' : mimicSamples, 'mimicToKeep' : mimicToKeep, 'saTemp' : saTemp, 'saCooling' : saCooling, 'alg' : alg, 'run' : run } settings = getSettings(alg, settings, vars) T=N/tempDenom fill = [2] * N ranges = array('i', fill) ef = FourPeaksEvaluationFunction(T) odd = DiscreteUniformDistribution(ranges) nf = 
DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = SingleCrossOver() df = DiscreteDependencyTree(.1, ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) if alg == 'RHC' or alg == 'all': rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, iterations) fit.train() rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(rhc.getOptimal())) rows.append(row) print "RHC: " + str(ef.value(rhc.getOptimal())) output2('4Peaks', 'RHC', rows, settings) rows = [] buildFooter("4Peaks", "RHC", rows, settings), outputFooter("4Peaks", "RHC", rows, settings) if alg == 'SA' or alg == 'all': sa = SimulatedAnnealing(saTemp, saCooling, hcp) fit = FixedIterationTrainer(sa, iterations) fit.train() rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(sa.getOptimal())) rows.append(row) print "SA: " + str(ef.value(sa.getOptimal())) output2('4Peaks', 'SA', rows, settings) rows = [] buildFooter("4Peaks", "SA", rows, settings) outputFooter("4Peaks", "SA", rows, settings) if alg == 'GA' or alg == 'all': ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap) fit = FixedIterationTrainer(ga, gaIters) fit.train() print "GA: " + str(ef.value(ga.getOptimal())) rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(ga.getOptimal())) rows.append(row) output2('4Peaks', 'GA', rows, settings) rows = [] buildFooter("4Peaks", "GA", rows, settings) outputFooter("4Peaks", "GA", rows , settings) if alg == 'MIMIC' or alg == 'all': mimic = MIMIC(mimicSamples, mimicToKeep, pop) fit = FixedIterationTrainer(mimic, mimicIters) fit.train() print "MIMIC: " + str(ef.value(mimic.getOptimal())) rows = [] row = [] row.append("Evaluation Function Value") row.append(ef.value(mimic.getOptimal())) rows.append(row) output2('4Peaks', 'MIMIC', rows, settings) rows = [] buildFooter("4Peaks", "GA", 
rows, settings) outputFooter("4Peaks", "MIMIC", rows, settings)
results = [] for _ in range(runs): ga_pop = N * 5 ga_keep = int(ga_pop * .75) ga_mut = int(ga_pop * .2) ga = StandardGeneticAlgorithm(ga_pop, ga_keep, ga_mut, gap) fit = FixedIterationTrainer(ga, 150) fitness = fit.train() results.append(ef.value(ga.getOptimal())) calls.append(ef.getTotalCalls()) ef.clearCount() print "GA, average results , " + str(sum(results) / float(runs)) print "GA, average feval calls , " + str(sum(calls) / float(runs)) t1 = time.time() - t0 print "GA, average time , " + str(t1 / float(runs)) t0 = time.time() calls = [] results = [] for _ in range(runs): mimic = MIMIC(N / 2, N / 5, pop) fit = FixedIterationTrainer(mimic, 100) fitness = fit.train() results.append(ef.value(mimic.getOptimal())) calls.append(ef.getTotalCalls()) ef.clearCount() print "MIMIC, average results, " + str(sum(results) / float(runs)) print "MIMIC, average feval calls , " + str(sum(calls) / float(runs)) t1 = time.time() - t0 print "MIMIC, average time , " + str(t1 / float(runs))
def knapsackfunc(NUM_ITEMS, iterations): rhcMult = 600 saMult = 600 gaMult = 4 mimicMult = 3 # Random number generator */ random = Random() # The number of items #NUM_ITEMS = 40 # The number of copies each COPIES_EACH = 4 # The maximum weight for a single element MAX_WEIGHT = 50 # The maximum volume for a single element MAX_VOLUME = 50 # The volume of the knapsack KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4 # create copies fill = [COPIES_EACH] * NUM_ITEMS copies = array('i', fill) # create weights and volumes fill = [0] * NUM_ITEMS weights = array('d', fill) volumes = array('d', fill) for i in range(0, NUM_ITEMS): weights[i] = random.nextDouble() * MAX_WEIGHT volumes[i] = random.nextDouble() * MAX_VOLUME # create range fill = [COPIES_EACH + 1] * NUM_ITEMS ranges = array('i', fill) ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies) odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = UniformCrossOver() df = DiscreteDependencyTree(.1, ranges) hcp = GenericHillClimbingProblem(ef, odd, nf) gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) optimalOut = [] timeOut = [] evalsOut = [] for niter in iterations: iterOptimalOut = [NUM_ITEMS, niter] iterTimeOut = [NUM_ITEMS, niter] iterEvals = [NUM_ITEMS, niter] start = time.time() rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, niter*rhcMult) fit.train() end = time.time() rhcOptimal = ef.value(rhc.getOptimal()) rhcTime = end-start print "RHC optimum: " + str(rhcOptimal) print "RHC time: " + str(rhcTime) iterOptimalOut.append(rhcOptimal) iterTimeOut.append(rhcTime) functionEvals = ef.getNumEvals() ef.zeroEvals() iterEvals.append(functionEvals) start = time.time() sa = SimulatedAnnealing(100, .95, hcp) fit = FixedIterationTrainer(sa, niter*saMult) fit.train() end = time.time() saOptimal = ef.value(sa.getOptimal()) saTime = end-start print 
"SA optimum: " + str(saOptimal) print "SA time: " + str(saTime) iterOptimalOut.append(saOptimal) iterTimeOut.append(saTime) functionEvals = ef.getNumEvals() ef.zeroEvals() iterEvals.append(functionEvals) start = time.time() ga = StandardGeneticAlgorithm(200, 150, 25, gap) fit = FixedIterationTrainer(ga, niter*gaMult) fit.train() end = time.time() gaOptimal = ef.value(ga.getOptimal()) gaTime = end - start print "GA optimum: " + str(gaOptimal) print "GA time: " + str(gaTime) iterOptimalOut.append(gaOptimal) iterTimeOut.append(gaTime) functionEvals = ef.getNumEvals() ef.zeroEvals() iterEvals.append(functionEvals) start = time.time() mimic = MIMIC(200, 100, pop) fit = FixedIterationTrainer(mimic, niter*mimicMult) fit.train() end = time.time() mimicOptimal = ef.value(mimic.getOptimal()) mimicTime = end - start print "MIMIC optimum: " + str(mimicOptimal) print "MIMIC time: " + str(mimicTime) iterOptimalOut.append(mimicOptimal) iterTimeOut.append(mimicTime) functionEvals = ef.getNumEvals() ef.zeroEvals() iterEvals.append(functionEvals) optimalOut.append(iterOptimalOut) timeOut.append(iterTimeOut) evalsOut.append(iterEvals) return [optimalOut, timeOut, evalsOut]
start = time.time() ga = StandardGeneticAlgorithm(200, 150, 25, gap) fit = FixedIterationTrainer(ga, iteration) fit.train() end = time.time() ga_time = end - start ga_fit = ef.value(ga.getOptimal()) # print "GA: " + str(ga_fit) start = time.time() mimic = MIMIC(200, 100, pop) fit = FixedIterationTrainer(mimic, iteration / 200) fit.train() end = time.time() mimic_time = end - start mimic_fit = ef.value(mimic.getOptimal()) # print "MIMIC: " + str(mimic_fit) row = [ iteration, rhc_fit, rhc_time, sa_fit, sa_time, ga_fit, ga_time, mimic_fit, mimic_time ] iterdata.append(row) round_end = time.time() with open("knapsack_{}.csv".format(str(n)), 'wb') as resultFile: wr = csv.writer(resultFile, dialect='excel') wr.writerows(iterdata) round_end = time.time() print "Takes total " + str(round_end - round_start) + " secs"
print(str(ef.value(sa.getOptimal()))) end = time.time() times += "\n%0.03f" % (end - start) print(times) times = "" print "GA:" for x in range(20): start = time.time() iterations = (x + 1) * 250 ga = StandardGeneticAlgorithm(20, 20, 0, gap) fit = FixedIterationTrainer(ga, iterations) fit.train() print(str(ef.value(ga.getOptimal()))) end = time.time() times += "\n%0.03f" % (end - start) print(times) times = "" print "MIMIC:" for x in range(20): start = time.time() iterations = (x + 1) * 250 mimic = MIMIC(50, 10, pop) fit = FixedIterationTrainer(mimic, iterations) fit.train() print(str(ef.value(mimic.getOptimal()))) end = time.time() times += "\n%0.03f" % (end - start) print(times)
# Train all four optimizers in lock-step, one iteration at a time, logging
# per-iteration fitness for each to its own CSV.  `rhc` and the problem
# objects (hcp, gap, pop, ef, ITERATIONS) come from earlier in the file.
sa = SimulatedAnnealing(1E11, .95, hcp)
ga = StandardGeneticAlgorithm(200, 100, 10, gap)
mimic = MIMIC(200, 20, pop)
rhc_f = open('out/op/fourpeaks/rhc.csv', 'w')
sa_f = open('out/op/fourpeaks/sa.csv', 'w')
ga_f = open('out/op/fourpeaks/ga.csv', 'w')
mimic_f = open('out/op/fourpeaks/mimic.csv', 'w')
# Same train/log order per iteration as writing each out longhand:
# rhc, sa, ga, mimic.
learners = [(rhc, rhc_f), (sa, sa_f), (ga, ga_f), (mimic, mimic_f)]
for i in range(ITERATIONS):
    for learner, log in learners:
        learner.train()
        log.write('{},{}\n'.format(i, ef.value(learner.getOptimal())))
for _, log in learners:
    log.close()
fit.train() print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal())) print "Route:" path = [] for x in range(0, N): path.append(ga.getOptimal().getDiscrete(x)) print path # for mimic we use a sort encoding ef = TravelingSalesmanSortEvaluationFunction(points) fill = [N] * N ranges = array('i', fill) odd = DiscreteUniformDistribution(ranges) df = DiscreteDependencyTree(.1, ranges) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) mimic = MIMIC(500, 100, pop) fit = FixedIterationTrainer(mimic, 1000) fit.train() print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal())) print "Route:" path = [] optimal = mimic.getOptimal() fill = [0] * optimal.size() ddata = array('d', fill) for i in range(0, len(ddata)): ddata[i] = optimal.getContinuous(i) order = ABAGAILArrays.indices(optimal.size()) ABAGAILArrays.quicksort(ddata, order) print order
for MIMIC_SAMPLES in MIMIC_SAMPLES_pool: mimic = MIMIC(MIMIC_SAMPLES, MIMIC_TO_KEEP, pop) fit_mimic = FixedIterationTrainer(mimic, n_iteration) print("calculating for MIMIC_SAMPLES = %d" % MIMIC_SAMPLES) # Training start_mimic = time.time() fit_mimic.train() end_mimic = time.time() # Result extracting last_training_time_mimic = end_mimic - start_mimic mimic_training_time[n].append(last_training_time_mimic) mimic_fitness[n].append(ef.value(mimic.getOptimal())) overall_mimic_training_time = list_avg(*mimic_training_time) overall_mimic_fitness = list_avg(*mimic_fitness) with open(OUTPUT_FILE, "w") as outFile: for i in range(1): outFile.write(','.join([ "MIMIC_SAMPLES", "overall_mimic_fitness", "overall_mimic_training_time" ]) + '\n') for i in range(len(MIMIC_SAMPLES_pool)): outFile.write(','.join([ str(MIMIC_SAMPLES_pool[i]), str(overall_mimic_fitness[i]), str(overall_mimic_training_time[i])
# print(ef.boardPositions()) print("============================") sa = SimulatedAnnealing(1E1, .1, hcp) fit = FixedIterationTrainer(sa, 200000) fit.train() sa_opt = ef.value(sa.getOptimal()) print "SA: " + str(sa_opt) # print("SA: Board Position: ") # print(ef.boardPositions()) print("============================") ga = StandardGeneticAlgorithm(200, 0, 10, gap) fit = FixedIterationTrainer(ga, 1000) fit.train() ga_opt = ef.value(ga.getOptimal()) print "GA: " + str(ga_opt) # print("GA: Board Position: ") # print(ef.boardPositions()) print("============================") mimic = MIMIC(200, 10, pop) fit = FixedIterationTrainer(mimic, 1000) fit.train() mimic_opt = ef.value(mimic.getOptimal()) print "MIMIC: " + str(mimic_opt) # print("MIMIC: Board Position: ") # print(ef.boardPositions())
# Run all four optimizers and report the best value plus whether a
# conflict remains (ef.foundConflict()); hcp, gap, pop, ef come from
# earlier in the file.
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 20000)
fit.train()
print("RHC: %0.03f" % ef.value(rhc.getOptimal()))
print(ef.foundConflict())
print("============================")
sa = SimulatedAnnealing(1E12, .1, hcp)
fit = FixedIterationTrainer(sa, 20000)
fit.train()
print("SA: %0.03f" % ef.value(sa.getOptimal()))
print(ef.foundConflict())
print("============================")
ga = StandardGeneticAlgorithm(200, 10, 60, gap)
fit = FixedIterationTrainer(ga, 50)
fit.train()
print("GA: %0.03f" % ef.value(ga.getOptimal()))
print(ef.foundConflict())
print("============================")
mimic = MIMIC(200, 100, pop)
fit = FixedIterationTrainer(mimic, 5)
fit.train()
print("MIMIC: %0.03f" % ef.value(mimic.getOptimal()))
print(ef.foundConflict())
# Result handling last_train_time_rhc = end_rhc - start_rhc rhc_train_time[repetition].append(last_train_time_rhc) rhc_accuracy[repetition].append(ef.value(rhc.getOptimal())) last_train_time_sa = end_sa - start_sa sa_train_time[repetition].append(last_train_time_sa) sa_accuracy[repetition].append(ef.value(sa.getOptimal())) last_train_time_ga = end_ga - start_ga ga_train_time[repetition].append(last_train_time_ga) ga_accuracy[repetition].append(ef.value(ga.getOptimal())) last_train_time_mimic = end_mimic - start_mimic mimic_train_time[repetition].append(last_train_time_mimic) mimic_accuracy[repetition].append(ef.value(mimic.getOptimal())) while current_iteration_count <= MAX_ITERATION - ITERATION_STEP: print("Computing for %d iterations" % (current_iteration_count + ITERATION_STEP)) # Trainer declaration fit_rhc = FixedIterationTrainer(rhc, ITERATION_STEP) fit_sa = FixedIterationTrainer(sa, ITERATION_STEP) fit_ga = FixedIterationTrainer(ga, ITERATION_STEP) fit_mimic = FixedIterationTrainer(mimic, ITERATION_STEP) # Fitting start_rhc = time.time() fit_rhc.train() end_rhc = time.time()
fill = [N] * N ranges = array('i', fill) odd = DiscreteUniformDistribution(ranges) df = DiscreteDependencyTree(.1, ranges) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) mimic = MIMIC(500, 100, pop) niters = [50, 200, 500, 800, 1000, 1200, 1500, 2000, 4000, 10000] for iters in niters: start = time.time() fit = FixedIterationTrainer(mimic, iters) # value = 0 # for isample in range(nsample): fit.train() #value += ef.value(mimic.getOptimal()) value = ef.value(mimic.getOptimal()) end = time.time() clock_time = (end - start) #/nsample value = round(value, 2) print "MIMIC " + str(1 / value), iters, clock_time optimal = mimic.getOptimal() fill = [0] * optimal.size() ddata = array('d', fill) path = [] for i in range(0, len(ddata)): ddata[i] = optimal.getContinuous(i) order = ABAGAILArrays.indices(optimal.size()) ABAGAILArrays.quicksort(ddata, order) for x in range(0, N): path.append(order[x])
rhc0 = RandomizedHillClimbing(hcp0) i = 0 max = 0 while (i < timeout/10): rhc0.train() i += 1 max = ef.value(rhc0.getOptimal()) print "rhc0,", i,",", max goal = max pop0 = GenericProbabilisticOptimizationProblem(ef, odd, df) mimic0 = MIMIC(200, 100, pop) i = 0 while ( i< timeout/1000): mimic0.train() i += 1 max = ef.value(mimic0.getOptimal()) print "mimic0,", i,",", max if (max > goal): goal = max gap0 = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) ga0 = StandardGeneticAlgorithm(200, 100, 25, gap0) i = 0 while ( i< timeout/1000): ga0.train() i += 1 max = ef.value(ga0.getOptimal()) print "ga0,", i,",", max if (max > goal): goal = max # run RHC
#MIMIC for t in range(numTrials): for samples, keep, m in product([100, 200], [40, 80], [.4, .8]): fname = outfile.replace('@ALG@', 'MIMIC{}_{}_{}'.format( samples, keep, m)).replace('@N@', str(t + 1)) with open(fname, 'w') as f: f.write('iterations,fitness,time,fevals\n') ef = FlipFlopEvaluationFunction() odd = DiscreteUniformDistribution(ranges) nf = DiscreteChangeOneNeighbor(ranges) mf = DiscreteChangeOneMutation(ranges) cf = SingleCrossOver() gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf) df = DiscreteDependencyTree(m, ranges) pop = GenericProbabilisticOptimizationProblem(ef, odd, df) mimic = MIMIC(samples, keep, pop) fit = FixedIterationTrainer(mimic, 10) times = [0] for i in range(0, maxIters, 10): start = clock() fit.train() elapsed = time.clock() - start times.append(times[-1] + elapsed) fevals = ef.fevals score = ef.value(mimic.getOptimal()) ef.fevals -= 1 st = '{},{},{},{}\n'.format(i, score, times[-1], fevals) print st with open(fname, 'a') as f: f.write(st)
# TSP: finish building the hill-climbing / GA problems with the original
# evaluation function, then switch to a sort-encoded evaluation function
# for MIMIC and run it with externally supplied parameters.
# NOTE(review): ef, odd, nf, mf, points, param, num_iterations and time come
# from earlier in the file.
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
# Rebind ef to the sort-encoding evaluation function for MIMIC.
ef = TravelingSalesmanSortEvaluationFunction(points);
fill = [N] * N
ranges = array('i', fill)
odd = DiscreteUniformDistribution(ranges);
df = DiscreteDependencyTree(.1, ranges);
pop = GenericProbabilisticOptimizationProblem(ef, odd, df);
start = time.time()
# param[0] = samples per iteration, param[1] = number kept.
mimic = MIMIC(param[0], param[1], pop)
fit = FixedIterationTrainer(mimic, num_iterations)
fit.train()
value = str(ef.value(mimic.getOptimal()))
print "MIMIC Inverse of Distance: " + value
end = time.time()
print "Route:"
path = []
# Decode the route: sort the continuous keys; the sorted index order is the
# city visiting order.
optimal = mimic.getOptimal()
fill = [0] * optimal.size()
ddata = array('d', fill)
for i in range(0, len(ddata)):
    ddata[i] = optimal.getContinuous(i)
order = ABAGAILArrays.indices(optimal.size())
ABAGAILArrays.quicksort(ddata, order)
print order
# NOTE(review): end is assigned twice; only this second value (which
# includes route decoding) survives.
end = time.time()
# NOTE(review): this dict literal continues past the visible chunk.
results = { 'num_iterations': num_iterations,
# Print the GA result and its route (direct discrete encoding), then run
# MIMIC on the same TSP instance using a sort encoding.
# NOTE(review): ga, ef, N and points are defined earlier in the file.
print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(ga.getOptimal().getDiscrete(x))
print path

# for mimic we use a sort encoding
ef = TravelingSalesmanSortEvaluationFunction(points)
fill = [N] * N
ranges = array("i", fill)
odd = DiscreteUniformDistribution(ranges)
df = DiscreteDependencyTree(0.1, ranges)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
# 500 samples per iteration, keep the best 100, train for 1000 iterations.
mimic = MIMIC(500, 100, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))
print "Route:"
path = []
# Decode: sort the continuous keys; the sorted index order is the route.
optimal = mimic.getOptimal()
fill = [0] * optimal.size()
ddata = array("d", fill)
for i in range(0, len(ddata)):
    ddata[i] = optimal.getContinuous(i)
order = ABAGAILArrays.indices(optimal.size())
ABAGAILArrays.quicksort(ddata, order)
print order
# Sweep the iteration budget for SA, GA and MIMIC, reporting fitness and
# wall-clock duration for each setting.
# NOTE(review): iters_list, temp, cooling_rate, hcp, gap, pop, ef and time
# are defined earlier in the file.
for iters in iters_list:
    # Fresh SA instance per budget so runs are independent.
    sa = SimulatedAnnealing(temp, cooling_rate, hcp)
    fit = FixedIterationTrainer(sa, iters)
    start = time.time()
    fit.train()
    duration = time.time() - start
    print "Iters: " + str(iters) + ", Temp: " + str(temp) + ", Fitness: " + str(ef.value(sa.getOptimal())), ", Duration: " + str(duration)

print "Standard Genetic Algorithm"
for iters in iters_list:
    # Population of size 200, of which 200 will mate and 0 will mutate.
    ga = StandardGeneticAlgorithm(200, 200, 0, gap)
    fit = FixedIterationTrainer(ga, iters)
    start = time.time()
    fit.train()
    duration = time.time() - start
    print "Iters: " + str(iters) + ", Fitness " + str(ef.value(ga.getOptimal())) + ", Dur: " + str(duration)

print "MIMIC Algorithm"
for iters in iters_list:
    # 50 samples per iteration, keep the best 10.
    mimic = MIMIC(50, 10, pop)
    fit = FixedIterationTrainer(mimic, iters)
    start = time.time()
    fit.train()
    duration = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(ef.value(mimic.getOptimal())) + ", Dur: " + str(duration)
# Continuous Peaks benchmark: run each of the four optimizers once with a
# fixed iteration budget and print the best fitness found.
# NOTE(review): T and ranges are defined earlier in the file.
ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# Randomized hill climbing, 200k iterations.
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

# Simulated annealing: initial temperature 1E11, cooling factor .95.
sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

# Genetic algorithm: population 200, 100 mate, 10 mutate per generation.
ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

# MIMIC: 200 samples per iteration, keep the best 20.
mimic = MIMIC(200, 20, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))
# Continuous Peaks MIMIC timing run: train for `iteration` iterations,
# accumulate fitness and wall-clock time, then log run-averaged values.
# NOTE(review): the += accumulators imply this chunk sits inside loops over
# runs / iteration settings defined outside this view; N, T, iteration,
# runs, mimic_total, mimic_time and output_directory come from there.
ranges = array('i', [2] * N)
fitness = ContinuousPeaksEvaluationFunction(T)
discrete_dist = DiscreteUniformDistribution(ranges)
discrete_neighbor = DiscreteChangeOneNeighbor(ranges)
discrete_mutation = DiscreteChangeOneMutation(ranges)
crossover = SCO()
discrete_dependency = DiscreteDependencyTree(.1, ranges)
genetic_problem = GPOP(fitness, discrete_dist, discrete_dependency)
start = time.clock()
# 200 samples per iteration, keep the best 20.
mimic_problem = MIMIC(200, 20, genetic_problem)
fit = FixedIterationTrainer(mimic_problem, iteration)
fit.train()
end = time.clock()
full_time = end - start
mimic_total += fitness.value(mimic_problem.getOptimal())
mimic_time += full_time
# Averages over all runs at this iteration setting.
mimic_total_avg = mimic_total / runs
mimic_time_avg = mimic_time / runs
data = '{},{},{}\n'.format(iteration, mimic_total_avg, mimic_time_avg)
print(data)
with open(output_directory, 'a') as f:
    f.write(data)

#SA Analysis
cooling_list = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95]
output_directory = "Results/Small/Continuous_Peaks_SA_Cooling.csv"
with open(output_directory, 'w') as f:
    f.write('iterations,fitness,time\n')
def main():
    """Knapsack experiment driver.

    Parses command-line flags to select problem size, optimizer parameters
    and which algorithm(s) to run, builds a random knapsack instance, then
    runs RHC / SA / GA / MIMIC and reports results via output2 /
    buildFooter / outputFooter (defined elsewhere in this file).
    """
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # Optimizer defaults (all overridable from the command line).
    iterations = 20000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 150
    gaMutate = 25
    mimicSamples = 200
    mimicToKeep = 100
    saTemp = 100
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:c:w:v:i:", ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:  # NOTE(review): bare except silently maps every getopt error to usage text
        print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            NUM_ITEMS = int(arg)
        elif opt == '-c':
            COPIES_EACH = int(arg)
        elif opt == '-w':
            MAX_WEIGHT = int(arg)
        elif opt == '-v':
            MAX_VOLUME = int(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)
    # NOTE(review): `vars` shadows the builtin of the same name inside main().
    vars = {
        'NUM_ITEMS': NUM_ITEMS,
        'COPIES_EACH': COPIES_EACH,
        'MAX_WEIGHT': MAX_WEIGHT,
        'MAX_VOLUME': MAX_VOLUME,
        'iterations': iterations,
        'gaIters': gaIters,
        'mimicIters': mimicIters,
        'gaPop': gaPop,
        'gaMate': gaMate,
        'gaMutate': gaMutate,
        'mimicSamples': mimicSamples,
        'mimicToKeep': mimicToKeep,
        'saTemp': saTemp,
        'saCooling': saCooling,
        'alg': alg,
        'run': run
    }
    settings = getSettings(alg, settings, vars)
    # Random number generator */
    random = Random()
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4
    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)
    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME
    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)
    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(str(ef.value(rhc.getOptimal())))
        rows.append(row)
        output2('Knapsack', 'RHC', rows, settings)
        rows = []
        buildFooter("Knapsack", "RHC", rows, settings)
        outputFooter("Knapsack", "RHC", rows, settings)
    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        # NOTE(review): unlike RHC, the value is appended un-stringified here.
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('Knapsack', 'SA', rows, settings)
        rows = []
        buildFooter("Knapsack", "SA", rows, settings)
        outputFooter("Knapsack", "SA", rows, settings)
    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        print "GA: " + str(ef.value(ga.getOptimal()))
        output2('Knapsack', 'GA', rows, settings)
        buildFooter("Knapsack", "GA", rows, settings)
        outputFooter("Knapsack", "GA", rows, settings)
    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('Knapsack', 'MIMIC', rows, settings)
        rows = []
        buildFooter("Knapsack", "MIMIC", rows, settings)
        outputFooter("Knapsack", "MIMIC", rows, settings)
from time import time rhc = RandomizedHillClimbing(hcp) fit = FixedIterationTrainer(rhc, 600000) t0 = time() fit.train() print "RHC: " + str(ef.value(rhc.getOptimal())), "time taken", time() - t0 sa = SimulatedAnnealing(1E11, .95, hcp) fit = FixedIterationTrainer(sa, 600000) t0 = time() fit.train() print "SA: " + str(ef.value(sa.getOptimal())), "time taken", time() - t0 ga = StandardGeneticAlgorithm(200, 100, 10, gap) fit = FixedIterationTrainer(ga, 20000) t0 = time() fit.train() print "GA: " + str(ef.value(ga.getOptimal())), "time taken", time() - t0 mimic = MIMIC(50, 10, pop) fit = FixedIterationTrainer(mimic, 50) t0 = time() fit.train() print "MIMIC: " + str(ef.value(mimic.getOptimal())), "time taken", time() - t0