# create weights and volumes
fill = [0] * NUM_ITEMS
weights = array('d', fill)
volumes = array('d', fill)
for i in range(0, NUM_ITEMS):
    weights[i] = random.nextDouble() * MAX_WEIGHT
    volumes[i] = random.nextDouble() * MAX_VOLUME

# create range
fill = [COPIES_EACH + 1] * NUM_ITEMS
ranges = array('i', fill)

ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

IterNum = 1000
recStep = 10
outFile = "FlipFlop.txt"
open(outFile, 'w').close()  # truncate the output file so each run starts fresh

print "RHC Start"
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainerMod(rhc, IterNum, recStep, outFile)
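# FixedIterationTrainerMod is not defined in this file. The sketch below shows one
# plausible shape for it, assuming it behaves like ABAGAIL's FixedIterationTrainer
# but appends the current fitness to outFile every recStep iterations. The class body,
# the assumption that OptimizationAlgorithm.train() returns the current fitness, and
# the CSV output format are guesses for illustration, not the author's implementation.
class FixedIterationTrainerModSketch:
    def __init__(self, alg, iterations, recStep, outFile):
        self.alg = alg              # an ABAGAIL OptimizationAlgorithm, e.g. RandomizedHillClimbing
        self.iterations = iterations
        self.recStep = recStep
        self.outFile = outFile

    def train(self):
        for i in range(self.iterations):
            # one optimization step; ABAGAIL trainers return the current fitness value
            fitness = self.alg.train()
            if i % self.recStep == 0:
                with open(self.outFile, 'a') as f:
                    f.write(str(i) + "," + str(fitness) + "\n")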
def knapsackfunc(NUM_ITEMS, iterations):

    rhcMult = 600
    saMult = 600
    gaMult = 4
    mimicMult = 3

    # Random number generator
    random = Random()
    # The number of items
    # NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:

        iterOptimalOut = [NUM_ITEMS, niter]
        iterTimeOut = [NUM_ITEMS, niter]
        iterEvals = [NUM_ITEMS, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter*rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(100, .95, hcp)
        fit = FixedIterationTrainer(sa, niter*saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, niter*gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        mimic = MIMIC(200, 100, pop)
        fit = FixedIterationTrainer(mimic, niter*mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
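# Example driver for knapsackfunc (a sketch, not part of the original script): run one
# problem size across a few iteration budgets and dump the returned
# [optimalOut, timeOut, evalsOut] lists to a CSV. The file name and the iteration list
# below are illustrative choices, not values taken from the original code.
iteration_budgets = [10, 50, 100, 500, 1000]
optima, times, evals = knapsackfunc(40, iteration_budgets)
with open("knapsack_results.csv", 'w') as f:
    # each row produced by knapsackfunc is [NUM_ITEMS, niter, RHC, SA, GA, MIMIC]
    f.write("metric,num_items,iterations,RHC,SA,GA,MIMIC\n")
    for label, rows in [("optimum", optima), ("time", times), ("evals", evals)]:
        for row in rows:
            f.write(label + "," + ",".join([str(x) for x in row]) + "\n")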
def run_algorithm_test(weights, volumes, knapsack_volume, copies, ranges,
                       algorithms, output_file_name, trial_number, iterations=False):

    with open(output_file_name, 'w') as f:
        f.write('algorithm,optimal_result,iterations,time,trial\n')

    ef = KnapsackEvaluationFunction(weights, volumes, knapsack_volume, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for trial in range(trial_number):
        if iterations is False:
            for item in algorithms:
                start_time = time.time()
                if item in ['rhc']:
                    optimal_result, run_iters = run_rhc(hcp, ef)
                elif item in ['sa']:
                    optimal_result, run_iters = run_sa(hcp, ef)
                elif item in ['ga']:
                    optimal_result, run_iters = run_ga(gap, ef)
                elif item in ['mimic']:
                    optimal_result, run_iters = run_mimic(pop, ef)
                else:
                    print "The algorithm type {} is not supported.".format(item)
                end_time = time.time()
                time_elapsed = end_time - start_time
                run_output = '{},{},{},{},{}\n'.format(item, optimal_result, run_iters, time_elapsed, trial)
                with open(output_file_name, 'a') as f:
                    f.write(run_output)
        else:
            for iter in iterations:
                for item in algorithms:
                    start_time = time.time()
                    if item in ['rhc']:
                        optimal_result, run_iters = run_rhc(hcp, ef, iter)
                    elif item in ['sa']:
                        optimal_result, run_iters = run_sa(hcp, ef, iter)
                    elif item in ['ga']:
                        optimal_result, run_iters = run_ga(gap, ef, iter)
                    elif item in ['mimic']:
                        optimal_result, run_iters = run_mimic(pop, ef, iter)
                    else:
                        print "The algorithm type {} is not supported.".format(item)
                    end_time = time.time()
                    time_elapsed = end_time - start_time
                    run_output = '{},{},{},{},{}\n'.format(item, optimal_result, run_iters, time_elapsed, trial)
                    with open(output_file_name, 'a') as f:
                        f.write(run_output)
                    print "time elapsed is {}".format(time_elapsed)
    return
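# run_algorithm_test relies on run_rhc, run_sa, run_ga, and run_mimic helpers that are
# not defined in this file. The sketches below show one plausible shape for them,
# assuming each returns a (best fitness, iterations used) pair; the default iteration
# counts and the SA/GA/MIMIC hyperparameters are placeholders, not the author's values.
def run_rhc(hcp, ef, num_iters=20000):
    rhc = RandomizedHillClimbing(hcp)
    FixedIterationTrainer(rhc, num_iters).train()
    return ef.value(rhc.getOptimal()), num_iters

def run_sa(hcp, ef, num_iters=20000):
    sa = SimulatedAnnealing(1E11, .95, hcp)   # starting temperature and cooling rate are placeholders
    FixedIterationTrainer(sa, num_iters).train()
    return ef.value(sa.getOptimal()), num_iters

def run_ga(gap, ef, num_iters=1000):
    ga = StandardGeneticAlgorithm(200, 150, 25, gap)   # population, mates, mutations are placeholders
    FixedIterationTrainer(ga, num_iters).train()
    return ef.value(ga.getOptimal()), num_iters

def run_mimic(pop, ef, num_iters=1000):
    mimic = MIMIC(200, 100, pop)   # samples drawn and samples kept are placeholders
    FixedIterationTrainer(mimic, num_iters).train()
    return ef.value(mimic.getOptimal()), num_iters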
def run_all_2(N=200, T=40, fout=None):
    # fout is expected to be a csv.writer; one summary row is written per algorithm
    # (see the driver sketch below)
    problem = 'fourpeaks'
    # N = 200
    # T = N/10
    maxEpochs = 10**6
    maxTime = 300  # 5 minutes

    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    # mf = SwapMutation()
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    def run_algo(alg, fit, label, difficulty, iters):
        trainTimes = [0.]
        trainTime = []
        scoreChange = [0.]
        stuckCount = 10**3
        prev = 0.
        for epoch in range(0, maxEpochs, 1):
            st = time.clock()
            fit.train()
            et = time.clock()
            trainTimes.append(trainTimes[-1] + (et - st))
            trainTime.append((et - st))
            rollingMean = 10
            avgTime = (math.fsum(trainTime[-rollingMean:]) / float(rollingMean))
            score = ef.value(alg.getOptimal())
            trialData = [problem, difficulty, label, score, epoch, trainTimes[-1], avgTime, iters]
            print(trialData, max(scoreChange))
            # stop once the known four-peaks optimum, (N - T - 1) + N, is reached
            optimum = (difficulty - 1 - T) + difficulty
            if score >= optimum:
                break
            scoreChange.append(abs(score - prev))
            prev = score
            scoreChange = scoreChange[-stuckCount:]
            # stop if the score has not changed over the last stuckCount epochs
            if max(scoreChange) == 0:
                break
            # stop if the time budget is exhausted
            if trainTimes[-1] > maxTime:
                break
        fout.writerow(trialData)

    iters = 1000
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc, fit, 'RHC', N, iters)

    iters = 1000
    startTemp = 1E10
    coolingFactor = .95
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa, fit, 'SA', N, iters)

    iters = 10
    population = 300
    mates = 100
    mutations = 50
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga, fit, 'GA', N, iters)

    iters = 10
    samples = 200
    keep = 20
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic, fit, 'MIMIC', N, iters)
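# Example driver for run_all_2 (a sketch, not part of the original file): since the
# function writes its summary rows through fout.writerow, fout should be a csv.writer
# (or any object with a writerow method). The output file name, header row, and
# problem sizes below are illustrative choices only.
import csv

with open("fourpeaks_results.csv", 'wb') as f:
    fout = csv.writer(f)
    # columns match the trialData list built inside run_algo
    fout.writerow(['problem', 'difficulty', 'algorithm', 'score',
                   'epochs', 'total_time', 'avg_epoch_time', 'iters_per_epoch'])
    for N in [50, 100, 200]:
        run_all_2(N=N, T=N/10, fout=fout)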