Example #1
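All of the snippets on this page are Jython scripts that drive the ABAGAIL optimization library on the same knapsack benchmark, comparing randomized hill climbing (RHC), simulated annealing (SA), a standard genetic algorithm (GA), and MIMIC. Most are clipped mid-file, so they assume a preamble roughly like this sketch (package paths follow ABAGAIL's source layout; adjust for your checkout and put ABAGAIL.jar on the Jython CLASSPATH):

import csv
import itertools
import sys
import time
from array import array

from java.util import Random

from dist import DiscreteDependencyTree, DiscreteUniformDistribution
from opt import DiscreteChangeOneNeighbor, GenericHillClimbingProblem
from opt import RandomizedHillClimbing, SimulatedAnnealing
from opt.example import KnapsackEvaluationFunction
from opt.ga import DiscreteChangeOneMutation, GenericGeneticAlgorithmProblem
from opt.ga import StandardGeneticAlgorithm, UniformCrossOver
from opt.prob import GenericProbabilisticOptimizationProblem, MIMIC
from shared import FixedIterationTrainer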
        ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = UniformCrossOver()
        df = DiscreteDependencyTree(.1, ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
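        # This nine-line setup recurs in every snippet: ef scores a candidate
        # count vector (in ABAGAIL's knapsack the 'weights' act as item values
        # to maximize and 'volumes' as the capacity constraint), odd draws
        # random starting points, nf/mf/cf are the neighbor, mutation, and
        # crossover operators, and df is the dependency tree MIMIC fits.
        # hcp, gap, and pop wrap these pieces for RHC/SA, GA, and MIMIC.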

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, num_iterations)
        fit.train()
        end = time.time()
        value = str(ef.value(rhc.getOptimal()))
        # print "RHC: " + value
        # print "Time -->", end - start

        results = {
            'num_iterations': num_iterations,
            'value': value,
            'time': end - start

        }

        print 'RHC', param, results
        writer.writerow(results)

    csv_file.close()
    print '------'
Example #2
ranges = array('i', fill)

ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 2000000000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))
sys.exit()  # note: exiting here means the SA/GA/MIMIC runs below never execute
sa = SimulatedAnnealing(100, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 150, 25, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 100, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))
Example #3
def main():

    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50

    iterations = 20000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 150
    gaMutate = 25
    mimicSamples = 200
    mimicToKeep = 100
    saTemp = 100
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:c:w:v:i:", ["gaIters=", "mimicIters=","gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except getopt.GetoptError:
        print 'knapsack.py -i <iterations> -N <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'knapsack.py -i <iterations> -N <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            NUM_ITEMS = int(arg)
        elif opt == '-c':
            COPIES_EACH = int(arg)
        elif opt == '-w':
            MAX_WEIGHT = int(arg)
        elif opt == '-v':
            MAX_VOLUME = int(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)
    vars = {
        'NUM_ITEMS': NUM_ITEMS,
        'COPIES_EACH': COPIES_EACH,
        'MAX_WEIGHT': MAX_WEIGHT,
        'MAX_VOLUME': MAX_VOLUME,
        'iterations': iterations,
        'gaIters': gaIters,
        'mimicIters': mimicIters,
        'gaPop': gaPop,
        'gaMate': gaMate,
        'gaMutate': gaMutate,
        'mimicSamples': mimicSamples,
        'mimicToKeep': mimicToKeep,
        'saTemp': saTemp,
        'saCooling': saCooling,
        'alg': alg,
        'run': run
    }

    settings = getSettings(alg, settings, vars)
    # Random number generator
    random = Random()

    # The volume of the knapsack: 40% of the total volume of all item copies
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME


    # create ranges: each item's count is a value in 0..COPIES_EACH
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(str(ef.value(rhc.getOptimal())))
        rows.append(row)
        output2('Knapsack', 'RHC', rows, settings)
        rows = []
        buildFooter("Knapsack", "RHC", rows, settings)
        outputFooter("Knapsack", "RHC", rows , settings)
    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('Knapsack', 'SA', rows, settings)
        rows = []
        buildFooter("Knapsack", "SA", rows, settings)
        outputFooter("Knapsack", "SA", rows, settings)
    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        print "GA: " + str(ef.value(ga.getOptimal()))
        output2('Knapsack', 'GA', rows, settings)
        buildFooter("Knapsack", "GA", rows, settings)
        outputFooter("Knapsack", "GA", rows , settings)
    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('Knapsack', 'MIMIC', rows, settings)
        rows = []
        buildFooter("Knapsack", "MIMIC", rows, settings)
        outputFooter("Knapsack", "MIMIC", rows , settings)
Example #4
# create range
fill = [COPIES_EACH + 1] * N
ranges = array('i', fill)

ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# -- begin problem

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    mimic = MIMIC(samples, tokeep, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    fitness = fit.train()
    results.append(ef.value(mimic.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "MIMIC, average results, " + str(sum(results) / float(runs))
print "MIMIC, average feval calls , " + str(sum(calls) / float(runs))
t1 = time.time() - t0
print "MIMIC, average time , " + str(t1 / float(runs))
Example #5
from time import time
f = open("experiments/results/knapsack_optimal2.txt", "w")

f.write("starting RHC\n")
rhc = RandomizedHillClimbing(hill_climbing_problem)
score = 0
iters = 0
t0 = time()

while iters < 80000:
    score = rhc.train()
    f.write(str(iters) + "," + str(score) +"\n")
    iters += 1


print "RHC: " + str(ef.value(rhc.getOptimal())), "time taken", time() - t0, "Iterations:", iters

f.write("starting SA\n")
sa = SimulatedAnnealing(1E13, .95, hill_climbing_problem)
t0 = time()
iters = 0
score = 0

while iters < 80000:
    score = sa.train()
    f.write(str(iters) + "," + str(score) + "\n")
    iters += 1

print "SA: " + str(ef.value(sa.getOptimal())), "time taken", time() - t0, "Iterations", iters

ga = StandardGeneticAlgorithm(200, 100, 10, genetic_problem)
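Unlike the FixedIterationTrainer runs elsewhere on this page, these loops call train() directly on the algorithm object; in ABAGAIL a single train() call performs one optimization step and returns the fitness of the current hypothesis, which is what makes this per-iteration learning-curve logging possible.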
Example #6
#=======================
# Genetic Algorithm
#=======================
print "Starting Genetic Algorithm Seacrh..."
ga = StandardGeneticAlgorithm(GA_popsize, GA_toMate, GA_toMutate, gap)
ga_iters = []
ga_fitness = []
ga_time = []

for i in maxiters_ga:
    fit = FixedIterationTrainer(ga, i)
    t1 = time.time()
    fit.train()
    t2 = time.time()
    fitness = ef.value(ga.getOptimal())
    time_ms = round(1000 * (t2 - t1), 2)
    ga_fitness.append(fitness)
    ga_time.append(time_ms)
    ga_iters.append(i)
    print "GA fitness using " + str(i) + " fixed iterations: " + str(fitness)
    print "Time taken for GA using fixed iterations: " + str(
        time_ms) + " milliseconds"

print "Finished Genetic Algorithm Seacrh."
print "=" * 100

#"""
#=======================
# MIMIC
#=======================
Example #7
def solveit(oaname, params):
    iterations = 10000
    tryi = 1
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if oaname == 'RHC':
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    if oaname == 'SA':
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    if oaname == 'GA':
        iterations = 1000
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]),
                                      int(params[2]), gap)
    if oaname == 'MMC':
        iterations = 1000
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (
        oaname, ','.join(params), iterations, tryi)
    print "=" * 20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        if i % 10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = int(timeit.default_timer() - starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'score: %.3f' % score
    print 'train time: %d secs' % (int(timeit.default_timer() - starttime))

    scsv = 'kn-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "=" * 20
Example #8
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# -- begin problem

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    fitness = fit.train()
    results.append(ef.value(rhc.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "RHC, average results , " + str(sum(results) / float(runs))
print "RHC, average feval calls , " + str(sum(calls) / float(runs))
t1 = time.time() - t0
print "RHC, average time , " + str(float(t1) / runs)

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, iters)
    fitness = fit.train()
    results.append(ef.value(sa.getOptimal()))
Example #9
sa_acc = []
ga_times = []
ga_acc = []
mimic_times = []
mimic_acc = []

NUMBER_ITERATIONS = 1000
for iteration in xrange(NUMBER_ITERATIONS):
    if iteration % 5 == 0:
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iteration)
        start = time.time()
        fit.train()
        end = time.time()
        rhc_times.append(end - start)
        rhc_acc.append(ef.value(rhc.getOptimal()))
        print "RHC: " + str(ef.value(rhc.getOptimal()))

        sa = SimulatedAnnealing(100, .95, hcp)
        fit = FixedIterationTrainer(sa, iteration)
        start = time.time()
        fit.train()
        end = time.time()
        sa_times.append(end - start)
        sa_acc.append(ef.value(sa.getOptimal()))
        print "SA: " + str(ef.value(sa.getOptimal()))

        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, iteration)
        start = time.time()
        fit.train()
Example #10
    start_sa = time.time()
    fit_sa.train()
    end_sa = time.time()

    start_ga = time.time()
    fit_ga.train()
    end_ga = time.time()

    start_mimic = time.time()
    fit_mimic.train()
    end_mimic = time.time()

    # Result handling
    last_train_time_rhc = end_rhc - start_rhc
    rhc_train_time[repetition].append(last_train_time_rhc)
    rhc_accuracy[repetition].append(ef.value(rhc.getOptimal()))

    last_train_time_sa = end_sa - start_sa
    sa_train_time[repetition].append(last_train_time_sa)
    sa_accuracy[repetition].append(ef.value(sa.getOptimal()))

    last_train_time_ga = end_ga - start_ga
    ga_train_time[repetition].append(last_train_time_ga)
    ga_accuracy[repetition].append(ef.value(ga.getOptimal()))

    last_train_time_mimic = end_mimic - start_mimic
    mimic_train_time[repetition].append(last_train_time_mimic)
    mimic_accuracy[repetition].append(ef.value(mimic.getOptimal()))

    while current_iteration_count <= MAX_ITERATION - ITERATION_STEP:
        print("Computing for %d iterations" %
# create range
fill = [COPIES_EACH + 1] * N
ranges = array('i', fill)

ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# -- begin problem

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    ga = StandardGeneticAlgorithm(ga_pop, ga_keep, ga_mut, gap)
    fit = FixedIterationTrainer(ga, 1000)
    fitness = fit.train()
    results.append(ef.value(ga.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "GA, average results , " + str(sum(results) / float(runs))
print "GA, average feval calls , " + str(sum(calls) / float(runs))
t1 = time.time() - t0
print "GA, average time , " + str(t1 / float(runs))
Example #12
        start_sa = time.time()
        fit_sa.train()
        end_sa = time.time()

        start_ga = time.time()
        fit_ga.train()
        end_ga = time.time()

        start_mimic = time.time()
        fit_mimic.train()
        end_mimic = time.time()

        # Result extracting
        last_training_time_rhc = end_rhc - start_rhc
        rhc_training_time[n].append(last_training_time_rhc)
        rhc_fitness[n].append(ef.value(rhc.getOptimal()))

        last_training_time_sa = end_sa - start_sa
        sa_training_time[n].append(last_training_time_sa)
        sa_fitness[n].append(ef.value(sa.getOptimal()))

        last_training_time_ga = end_ga - start_ga
        ga_training_time[n].append(last_training_time_ga)
        ga_fitness[n].append(ef.value(ga.getOptimal()))

        last_training_time_mimic = end_mimic - start_mimic
        mimic_training_time[n].append(last_training_time_mimic)
        mimic_fitness[n].append(ef.value(mimic.getOptimal()))

overall_rhc_training_time = list_avg(*rhc_training_time)
overall_rhc_fitness = list_avg(*rhc_fitness)
Example #13
def knapsackfunc(NUM_ITEMS, iterations):

	rhcMult = 600
	saMult = 600
	gaMult = 4
	mimicMult = 3

	# Random number generator
	random = Random()
	# The number of items
	#NUM_ITEMS = 40
	# The number of copies each
	COPIES_EACH = 4
	# The maximum weight for a single element
	MAX_WEIGHT = 50
	# The maximum volume for a single element
	MAX_VOLUME = 50
	# The volume of the knapsack 
	KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

	# create copies
	fill = [COPIES_EACH] * NUM_ITEMS
	copies = array('i', fill)

	# create weights and volumes
	fill = [0] * NUM_ITEMS
	weights = array('d', fill)
	volumes = array('d', fill)
	for i in range(0, NUM_ITEMS):
		weights[i] = random.nextDouble() * MAX_WEIGHT
		volumes[i] = random.nextDouble() * MAX_VOLUME


	# create range
	fill = [COPIES_EACH + 1] * NUM_ITEMS
	ranges = array('i', fill)

	ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
	odd = DiscreteUniformDistribution(ranges)
	nf = DiscreteChangeOneNeighbor(ranges)
	mf = DiscreteChangeOneMutation(ranges)
	cf = UniformCrossOver()
	df = DiscreteDependencyTree(.1, ranges)
	hcp = GenericHillClimbingProblem(ef, odd, nf)
	gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
	pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

	optimalOut = []
	timeOut = []
	evalsOut = []

	for niter in iterations:

		iterOptimalOut = [NUM_ITEMS, niter]
		iterTimeOut = [NUM_ITEMS, niter]
		iterEvals = [NUM_ITEMS, niter]



		start = time.time()
		rhc = RandomizedHillClimbing(hcp)
		fit = FixedIterationTrainer(rhc, niter*rhcMult)
		fit.train()
		end = time.time()
		rhcOptimal = ef.value(rhc.getOptimal())
		rhcTime = end-start
		print "RHC optimum: " + str(rhcOptimal)
		print "RHC time: " + str(rhcTime)
		iterOptimalOut.append(rhcOptimal)
		iterTimeOut.append(rhcTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)

		start = time.time()
		sa = SimulatedAnnealing(100, .95, hcp)
		fit = FixedIterationTrainer(sa, niter*saMult)
		fit.train()
		end = time.time()
		saOptimal = ef.value(sa.getOptimal())
		saTime = end-start
		print "SA optimum: " + str(saOptimal)
		print "SA time: " + str(saTime)
		iterOptimalOut.append(saOptimal)
		iterTimeOut.append(saTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)

		start = time.time()
		ga = StandardGeneticAlgorithm(200, 150, 25, gap)
		fit = FixedIterationTrainer(ga, niter*gaMult)
		fit.train()
		end = time.time()
		gaOptimal = ef.value(ga.getOptimal())
		gaTime = end - start
		print "GA optimum: " + str(gaOptimal)
		print "GA time: " + str(gaTime)
		iterOptimalOut.append(gaOptimal)
		iterTimeOut.append(gaTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)


		start = time.time()
		mimic = MIMIC(200, 100, pop)
		fit = FixedIterationTrainer(mimic, niter*mimicMult)
		fit.train()
		end = time.time()
		mimicOptimal = ef.value(mimic.getOptimal())
		mimicTime = end - start
		print "MIMIC optimum: " + str(mimicOptimal)
		print "MIMIC time: " + str(mimicTime)
		iterOptimalOut.append(mimicOptimal)
		iterTimeOut.append(mimicTime)
		functionEvals = ef.getNumEvals()
		ef.zeroEvals()
		iterEvals.append(functionEvals)
		
		optimalOut.append(iterOptimalOut)
		timeOut.append(iterTimeOut)
		evalsOut.append(iterEvals)		
	
	return [optimalOut, timeOut, evalsOut]
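A minimal usage sketch for knapsackfunc: each entry of the iterations list is scaled by the per-algorithm multipliers above (600x for RHC/SA, 4x for GA, 3x for MIMIC), and the three returned lists hold fitness, wall-clock time, and evaluation counts per setting (getNumEvals/zeroEvals again imply a patched evaluation function):

optimal_out, time_out, evals_out = knapsackfunc(40, [10, 50, 100])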
Example #14
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = UniformCrossOver()
        df = DiscreteDependencyTree(.1, ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iteration)
        fit.train()
        end = time.time()
        rhc_time = end - start
        rhc_fit = ef.value(rhc.getOptimal())
        #         print "RHC: " + str(rhc_fit)

        start = time.time()
        sa = SimulatedAnnealing(1e11, .95, hcp)
        fit = FixedIterationTrainer(sa, iteration / 200)
        fit.train()
        end = time.time()
        sa_time = end - start
        sa_fit = ef.value(sa.getOptimal())
        #         print "SA: " + str(sa_fit)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, iteration)
        fit.train()
Example #15
    for MIMIC_TO_KEEP in MIMIC_TO_KEEP_pool:
        mimic = MIMIC(MIMIC_SAMPLES, MIMIC_TO_KEEP, pop)
        fit_mimic = FixedIterationTrainer(mimic, n_iteration)

        print("calculating for MIMIC_TO_KEEP = %d" % MIMIC_TO_KEEP)

        # Training
        start_mimic = time.time()
        fit_mimic.train()
        end_mimic = time.time()

        # Result extracting
        last_training_time_mimic = end_mimic - start_mimic
        mimic_training_time[n].append(last_training_time_mimic)
        mimic_fitness[n].append(ef.value(mimic.getOptimal()))

overall_mimic_training_time = list_avg(*mimic_training_time)
overall_mimic_fitness = list_avg(*mimic_fitness)

with open(OUTPUT_FILE, "w") as outFile:
    for i in range(1):
        outFile.write(','.join([
            "MIMIC_TO_KEEP", "overall_mimic_fitness",
            "overall_mimic_training_time"
        ]) + '\n')
    for i in range(len(MIMIC_TO_KEEP_pool)):
        outFile.write(','.join([
            str(MIMIC_TO_KEEP_pool[i]),
            str(overall_mimic_fitness[i]),
            str(overall_mimic_training_time[i])
        ]) + '\n')
Example #16
ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# -- begin problem

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    mimic = MIMIC(samples, tokeep, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    fitness = fit.train()
    results.append(ef.value(mimic.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "MIMIC, average results, " + str(sum(results)/float(runs))
print "MIMIC, average feval calls , " + str(sum(calls)/float(runs))
t1 = time.time() - t0
print "MIMIC, average time , " + str(t1/float(runs))




#"""
#=======================
# MIMIC
#=======================
print "Starting MIMIC Seacrh..."
mimic = MIMIC(MIMIC_samples, MIMIC_toKeep, pop)
mimic_iters = []
mimic_fitness = []
mimic_time = []

for i in maxiters_mimic:
    fit = FixedIterationTrainer(mimic, i)
    t1 = time.time()
    fit.train()
    t2 = time.time()
    fitness = ef.value(mimic.getOptimal())
    time_ms = round(1000 * (t2 - t1), 2)
    mimic_fitness.append(fitness)
    mimic_time.append(time_ms)
    mimic_iters.append(i)
    print "MIMIC fitness using " + str(i) + " fixed iterations: " + str(
        fitness)
    print "Time taken for MIMIC using fixed iterations: " + str(
        time_ms) + " milliseconds"

print "Finished MIMIC Seacrh."
print "=" * 100
#"""
"""
# Writing RHC performance to a CSV
spamWriter = csv.writer(open('knapsack_rhc.csv', 'w'), delimiter=' ',quotechar='|')
Example #18
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# -- begin problem

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    ga = StandardGeneticAlgorithm(ga_pop, ga_keep, ga_mut, gap)
    fit = FixedIterationTrainer(ga, 1000)
    fitness = fit.train()
    results.append(ef.value(ga.getOptimal()))
    calls.append(ef.getTotalCalls())
    ef.clearCount()
print "GA, average results , " + str(sum(results)/float(runs))
print "GA, average feval calls , " + str(sum(calls)/float(runs))
t1 = time.time() - t0
print "GA, average time , " + str(t1/float(runs))






Example #19
        ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = UniformCrossOver()
        df = DiscreteDependencyTree(.1, ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, 1000)
        fit.train()
        end = time.time()
        value = str(ef.value(ga.getOptimal()))
        # print "GA: " + value
        # print "Time -->", end - start

        results = {
            'num_iterations': num_iterations,
            'value': value,
            'time': end - start

        }

        print 'GA', param, results
        writer.writerow(results)

    csv_file.close()
    print '------'
Example #20
def run_knapsack_experiments():
    OUTPUT_DIRECTORY = './output'
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/knapsack_{}_log.csv'

    # Randomized Hill Climber
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iterations,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(200, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score,
                                          elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product(
        [200], [110, 120, 130, 140, 150], [2, 4, 6, 8]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate,
                                          gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate,
                                                to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([200], [100],
                                                 [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score,
                                                elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
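Note how the MIMIC sweep rebuilds the dependency tree and the probabilistic problem inside the loop: the m parameter is baked into DiscreteDependencyTree at construction time, so a fresh df and pop are needed for every setting, whereas the RHC/SA/GA sweeps can reuse the hcp and gap built once above.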
Example #21
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# -- begin problem

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    fitness = fit.train()
    results.append(ef.value(rhc.getOptimal()))
    calls.append(ef.getTotalCalls())    
    ef.clearCount()
print "RHC, average results , " + str(sum(results)/float(runs))
print "RHC, average feval calls , " + str(sum(calls)/float(runs))
t1 = time.time() - t0
print "RHC, average time , " + str(float(t1)/runs)



t0 = time.time()
calls = []
results = []
for _ in range(runs):
    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, iters)
Example #22
        # fit = FixedIterationTrainer(rhc, num_iterations)
        # fit.train()

        # sa = SimulatedAnnealing(100, .95, hcp)
        # fit = FixedIterationTrainer(sa, 200000)
        # fit.train()

        # ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        # fit = FixedIterationTrainer(ga, 1000)
        # fit.train()
        # print "GA: " + str(ef.value(ga.getOptimal()))

        start = time.time()
        mimic = MIMIC(200, 100, pop)
        fit = FixedIterationTrainer(mimic, 1000)
        fit.train()
        end = time.time()
        value = str(ef.value(mimic.getOptimal()))
        results = {
            'num_iterations': num_iterations,
            'value': value,
            'time': end - start
        }
        print 'MIMIC', param, results
        writer.writerow(results)
        # print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    csv_file.close()
    print '------'
print '***** ***** ***** ***** *****'
Example #23
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, N)
    start = time.time()
    fit.train()
    end = time.time()
    training_time = end - start
    print "RHC: " + str(ef.value(rhc.getOptimal()))
    OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "RHC")
    with open(OUTFILE, 'a+') as f:
        f.write("%d,%f,%f\n" % (N, training_time, ef.value(rhc.getOptimal())))

    sa = SimulatedAnnealing(100, .95, hcp)
    fit = FixedIterationTrainer(sa, N)
    start = time.time()
    fit.train()
    end = time.time()
    training_time = end - start
    print "SA: " + str(ef.value(sa.getOptimal()))
    OUTFILE = "%s%s.csv" % (OUTFILE_BASE, "SA")
    with open(OUTFILE, 'a+') as f:
        f.write("%d,%f,%f\n" % (N, training_time, ef.value(sa.getOptimal())))
Example #24
ranges = array('i', fill)

ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(100, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 150, 25, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 100, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))
Example #25
    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = clock() - start
        times.append(times[-1] + elapsed)
        score = ef.value(rhc.getOptimal())
        st = '{},{},{}\n'.format(i, score, times[-1])
        print st
        with open(fname, 'a') as f:
            f.write(st)

# SA
for t in range(numTrials):
    for CE in [0.15, 0.35, 0.55, 0.75, 0.95]:
        fname = outfile.format('SA{}'.format(CE), str(t + 1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time\n')
        ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME,
                                        copies)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
Example #26
ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

start = time.time()
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "\nRHC: " + str(ef.value(rhc.getOptimal()))
end = time.time()
traintime = end - start
print("RHC results time: %0.03f seconds" % (traintime, ))

start = time.time()
sa = SimulatedAnnealing(100, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "\nSA: " + str(ef.value(sa.getOptimal()))
end = time.time()
traintime = end - start
print("SA results time: %0.03f seconds" % (traintime, ))

start = time.time()
ga = StandardGeneticAlgorithm(200, 150, 25, gap)
Example #27
def run_knapsack():
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            #print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(100, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()

            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            #print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 150, 25, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            #print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            #print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('knapsack.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Example #28
    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    expt = "expt_avg"

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 200000)
    score_RHC.append(train(rhc, "RHC", ef, 200000, "test", expt))
    print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))

    sa = SimulatedAnnealing(1E9, .95, hcp)
    score_SA.append(train(sa, "SA", ef, 200000, "test", expt))
    print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))

    ga = StandardGeneticAlgorithm(300, 80, 5, gap)
    score_GA.append(train(ga, "GA", ef, 40000, "test", expt))
    print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))

    mimic = MIMIC(250, 10, pop)
    score_MIMIC.append(train(mimic, "MIMIC", ef, 4000, "test", expt))
    print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))

print("Final averaged results")
print("RHC= " + str(sum(score_RHC) / len(score_RHC)))
Example #29
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

nsample = 10
niters = [50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000]

#-- R-Hill Climbing
rhc = RandomizedHillClimbing(hcp)
for iters in niters:
    start = time.time()
    fit = FixedIterationTrainer(rhc, iters)
    value = 0
    for isample in range(nsample):
        fit.train()
        value += ef.value(rhc.getOptimal())
    end = time.time()
    clock_time = (end - start) / nsample
    value = round(value / nsample, 2)
    print "RHC " + str(value), iters, clock_time

#-- Simulated Annealing
sa = SimulatedAnnealing(1E11, .95, hcp)
for iters in niters:
    start = time.time()
    fit = FixedIterationTrainer(sa, iters)
    value = 0
    for isample in range(nsample):
        fit.train()
        value += ef.value(sa.getOptimal())
    end = time.time()
Example #30
        # if len(hypers.values()) > 0:
        for exper in itertools.product(*values):

            # here implement hyper params
            rep_times = []
            for r in range(reps):
                # create a new trainer each time
                row = {}
                for key, value in zip(keys, exper):
                    row[key] = value
                print(row)
                fit = factory(row)
                # have to reset func evals per run
                ef.func_evals = 0
                for i in range(0, max_iterations, num_iterations):
                    start = clock()
                    fit.train()
                    stop = clock()
                    rep_times.append(stop - start)
                    func_eval = ef.func_evals
                    fitness = ef.value(fit.trainer.getOptimal())
                    # log
                    line = [name,r,i,fitness,rep_times[-1], func_eval] + list(exper)
                    line = [str(x) for x in line]
                    out.write(','.join(line) + os.linesep)
                

        
        print "Done " + name + " trainer..."