Example 1
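None of the snippets on this page repeats its import header. A typical header for these Jython/ABAGAIL scripts is sketched below; the ABAGAIL module paths match the ones visible in the later examples, and the stdlib imports cover the timing and CSV code used throughout:

import sys
import time
import csv
from array import array

import dist.DiscreteDependencyTree as DiscreteDependencyTree
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.example.FourPeaksEvaluationFunction as FourPeaksEvaluationFunction
import opt.ga.DiscreteChangeOneMutation as DiscreteChangeOneMutation
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.ga.SingleCrossOver as SingleCrossOver
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
import shared.FixedIterationTrainer as FixedIterationTrainer
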
def run_four_peaks_exploringSA():

    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    # Four Peaks: fitness is max(# leading 1s, # trailing 0s), plus a bonus
    # of N when both counts exceed T; the global optimum is 2N - T - 1.
    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)  # uniform initial-state distribution
    nf = DiscreteChangeOneNeighbor(ranges)     # single-bit neighborhood (RHC, SA)
    mf = DiscreteChangeOneMutation(ranges)     # single-bit mutation (GA)
    cf = SingleCrossOver()                     # one-point crossover (GA)
    df = DiscreteDependencyTree(.1, ranges)    # dependency-tree model (MIMIC)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 30000, 35000, 40000, 45000, 50000]
    num_repeats = 5


    all_sa_results = []
    all_sa_times = []


    coolings = [0.15, 0.35, 0.55, 0.75, 0.95]
    for cooling in coolings:
        sa_results = []
        sa_times = []
        for i in iters:
            print(i)
            for j in range(num_repeats):
                start = time.time()
                sa = SimulatedAnnealing(1E11, cooling, hcp)
                fit = FixedIterationTrainer(sa, i)
                fit.train()
                end = time.time()
                sa_results.append(ef.value(sa.getOptimal()))
                sa_times.append(end - start)
                print "SA cooling " + str(cooling) + ": "  + str(ef.value(sa.getOptimal()))
        all_sa_results.append(sa_results)
        all_sa_results.append(sa_times)
   


   

    with open('four_peaks_exploringSA.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for sa_results in all_sa_results:
            writer.writerow(sa_results)
        for sa_times in all_sa_times:
            writer.writerow(sa_times)

    return all_sa_results, all_sa_times
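# A minimal driver (this __main__ guard is an addition, not part of the original snippet):
if __name__ == '__main__':
    all_results, all_times = run_four_peaks_exploringSA()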
Example 2

samples = 200

try:
    tokeep = int(sys.argv[3])
except:
    tokeep = 10
runs = 10

sys.stdout = open("fourpeaks_MIMIC-%d-%d-%d.txt" % (N, samples, tokeep), "w")

#N=200
T = N / 5
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    mimic = MIMIC(samples, tokeep, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    fit.train()
    results.append(ef.value(mimic.getOptimal()))
Example 3

from array import array



"""
Commandline parameter(s):
   none
"""

N=200
T=N/5
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))
Example 4
import opt.prob.MIMIC as MIMIC
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer

from array import array
"""
Commandline parameter(s):
   none
"""

N = 200
T = N / 5
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# ------------RUNNING SAME NUMBER OF ITERATIONS ACROSS ALGORITHMS--------

# Store Metrics
rhc_times = []
rhc_acc = []
sa_times = []
Example 5
    print "Function Evaluations: " + str(ef.getFunctionEvaluations() - iters)
    print "Iters: " + str(iters)
    print "####"


"""
Commandline parameter(s):
   none
"""

N = 200
T = N / 5
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
train(rhc, "RHC", ef, 40000)

sa = SimulatedAnnealing(1E11, .95, hcp)
train(sa, "SA", ef, 50000)
Example 6

if co_type == 2:
    cf = tpf
elif co_type == 3:
    cf = uf
else:
    cf = sf

sys.stdout = open("fourpeaks_ga_%d-%d-%d-%d-%d.txt" % (N, ga_pop, co_type, ga_keep, ga_mut), "w")

runs = 10
# N=200
T = N / 5
fill = [2] * N
ranges = array("i", fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
mf = DiscreteChangeOneMutation(ranges)

# print "Ga settings:\npop:%d\ncrossovertype:%d\ncrossoverrate:%d\nmutationrate:%d\n\n" % (ga_pop,co_type,ga_keep,ga_mut_type)

gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

t0 = time.time()
calls = []
results = []
for _ in range(runs):
    # ga_pop = N*5
    ga = StandardGeneticAlgorithm(ga_pop, ga_keep, ga_mut, gap)
    fit = FixedIterationTrainer(ga, 1000)
    fitness = fit.train()
Example 7
for algo in ["RHC", "MIMIC", "GA", "SA"]:
    with open(OUTFILE_BASE + algo + ".csv", 'w') as f:
        f.write("iterations,training_time,fitness\n")
"""
Commandline parameter(s):
   none
"""

# N=200
for N in xrange(10, 1000):
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, N)
    start = time.time()
    fit.train()
    end = time.time()
    training_time = end - start
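    # The scraped snippet cuts off here; a plausible continuation (hypothetical,
    # mirroring the CSV header written above) would record the row just timed:
    fitness = ef.value(rhc.getOptimal())
    with open(OUTFILE_BASE + "RHC.csv", 'a') as f:
        f.write("{},{},{}\n".format(N, training_time, fitness))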
Example 8
maxiters_mimic = [25*i for i in range(1,21)]

SA_start_temp = 1E9
SA_temp_decay = 0.5

GA_popsize = 20
GA_toMate = 10
GA_mutationPercent = 0.3
GA_toMutate = int(GA_mutationPercent*GA_toMate)

MIMIC_samples = 100
MIMIC_toKeep = 10
#========================

N = 200
T = N / 5
fill = [2] * N
ranges = array('i', fill)
#print ranges

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

#print odd

"""
#=======================
# Random Hill Climbing
#=======================
Example 9
def run_four_peaks():

    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(1E11, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 100, 10, gap)  # population 200, mate 100, mutate 10
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:  # MIMIC is slow; only the shorter iteration counts
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 20, pop)  # 200 samples per iteration, keep the best 20
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('four_peaks.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
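Each row of four_peaks.csv is a flat list with num_repeats values per entry of iters. A short post-processing sketch (plain Python, nothing ABAGAIL-specific) that averages the repeats back out:

import csv

def average_repeats(row, num_repeats=5):
    # collapse each group of num_repeats raw values into its mean
    vals = [float(v) for v in row]
    return [sum(vals[i:i + num_repeats]) / num_repeats
            for i in range(0, len(vals), num_repeats)]

with open('four_peaks.csv') as csvfile:
    averaged = [average_repeats(row) for row in csv.reader(csvfile) if row]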
Example 10
import opt.prob.MIMIC as MIMIC
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer

from array import array
"""
Commandline parameter(s):
   none
"""

N = 200
T = N / 5
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

iterations = range(100, 50000, 100)
rhc_results = []
sa_results = []
ga_results = []

Example 11

import time
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer
import opt.example.KnapsackEvaluationFunction as KnapsackEvaluationFunction
import opt.example.FourPeaksEvaluationFunction as FourPeaksEvaluationFunction
from array import array
"""
Commandline parameter(s):
   none
"""

N = 1000
T = N / 4
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)

initial_distribution = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mutation_function = DiscreteChangeOneMutation(ranges)

cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hill_climbing_problem = GenericHillClimbingProblem(ef, initial_distribution,
                                                   nf)
genetic_problem = GenericGeneticAlgorithmProblem(ef, initial_distribution,
                                                 mutation_function, cf)
probablistic_optimization = GenericProbabilisticOptimizationProblem(
    ef, initial_distribution, df)

from time import time
Example 12
import shared.FixedIterationTrainer as FixedIterationTrainer

from array import array
"""
Commandline parameter(s):
   none
"""

N = 200
T = N / 5
fill = [2] * N
ranges = array('i', fill)

ITERATIONS = 5000

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
sa = SimulatedAnnealing(1E11, .95, hcp)
ga = StandardGeneticAlgorithm(200, 100, 10, gap)
mimic = MIMIC(200, 20, pop)

rhc_f = open('out/op/fourpeaks/rhc.csv', 'w')
Example 13
"""
Commandline parameter(s):
   none
"""

iterations_data = {}
iterations_file = "four_peaks_data.pickle"

if os.path.isfile(iterations_file):
    stdout.write("\nFour Peaks Data found.\n")
    exit(0)

N=200
T=N/10
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)



x = xrange(200, 3200, 200)
optimal_value = {'RHC': [], 'SA': [], 'GA': [], 'MIMIC': []}

Example 14
def main():
    N=200
    tempDenom = 5
    T=N/tempDenom
    fill = [2] * N
    ranges = array('i', fill)
    iterations = 2000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 100
    gaMutate = 10
    mimicSamples = 200
    mimicToKeep = 20
    saTemp = 1E11
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
       opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:t:i:d:", ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
       print 'fourpeaks.py -i <iterations> -N <N> -t <T> -d <tempDenom>'
       sys.exit(2)
    for opt, arg in opts:
       if opt == '-h':
          print 'fourpeaks.py -i <iterations> -N <N> -t <T> -d <tempDenom>'
          sys.exit(1)
       elif opt == '-i':
          iterations = int(arg)
       elif opt == '-N':
          N = int(arg)
       elif opt == '-t':
           T = float(arg)
       elif opt == '-d':
          tempDenom = int(arg)
       elif opt == '-r':
           alg = 'RHC'
       elif opt == '-a':
           alg = 'all'
       elif opt == '-s':
           alg = 'SA'
       elif opt == '-g':
           alg = 'GA'
       elif opt == '-m':
           alg = 'MIMIC'
       elif opt == '--gaPop':
          gaPop = int(arg)
       elif opt == '--gaMate':
          gaMate = int(arg)
       elif opt == '--gaMutate':
          gaMutate = int(arg)
       elif opt == '--mimicSamples':
          mimicSamples = int(arg)
       elif opt == '--mimicToKeep':
          mimicToKeep = int(arg)
       elif opt == '--saTemp':
          saTemp = float(arg)
       elif opt == '--saCooling':
          saCooling = float(arg)
       elif opt == '--gaIters':
          gaIters = int(arg)
       elif opt == '--mimicIters':
          mimicIters = int(arg)
       elif opt == '-n':
           run = int(arg)


    vars = {
        'N':N,
        'tempDenom':tempDenom,
        'T':T,
        'fill':fill,
        'ranges':ranges,
        'iterations' :iterations,
        'gaIters':gaIters,
        'mimicIters':mimicIters,
        'gaPop' :gaPop,
        'gaMate' :gaMate,
        'gaMutate' :gaMutate,
        'mimicSamples' : mimicSamples,
        'mimicToKeep' : mimicToKeep,
        'saTemp' : saTemp,
        'saCooling' : saCooling,
        'alg' : alg,
        'run' : run
    }

    settings = getSettings(alg, settings, vars)

    T=N/tempDenom
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        output2('4Peaks', 'RHC', rows, settings)
        rows = []
        buildFooter("4Peaks", "RHC", rows, settings),
        outputFooter("4Peaks", "RHC", rows,   settings)

    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('4Peaks', 'SA', rows, settings)
        rows = []
        buildFooter("4Peaks", "SA", rows, settings)
        outputFooter("4Peaks", "SA", rows, settings)

    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        print "GA: " + str(ef.value(ga.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'GA', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "GA", rows , settings)

    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'MIMIC', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "MIMIC", rows, settings)
Example 15
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer
import shared.ConvergenceTrainer as ConvergenceTrainer

from array import array
"""
Commandline parameter(s):
   none
"""

N = 200
T = N / 5
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)

hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

times = ""
print "RHC:"
for x in range(20):
    start = time.time()
    iterations = (x + 1) * 2500
Example 16

def run_four_peaks_experiments():
    OUTPUT_DIRECTORY = './output'

    if not os.path.exists(OUTPUT_DIRECTORY):
        os.makedirs(OUTPUT_DIRECTORY)

    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)
    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/four_peaks_{}_log.csv'

    # Randomized Hill Climber
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iterations,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,elapsed_time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(1E11, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevalss
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score,
                                          elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product(
        [200], [25, 50, 75, 100], [10, 20, 30, 40]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate,
                                          gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate,
                                                to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,fitness,time\n')
    for samples, to_keep, m in itertools.product([200], [20],
                                                 [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score,
                                                elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
Example 17
import opt.prob.MIMIC as MIMIC
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer

from array import array
"""
Commandline parameter(s):
   none
"""

N = 200
T = N / 5
fill = [2] * N
ranges = array('i', fill)

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

iters_list = [100, 500, 1000, 2500, 5000, 7500, 10000, 20000]

print "Random Hill Climbing"
rhc = RandomizedHillClimbing(hcp)
for iters in iters_list:
    fit = FixedIterationTrainer(rhc, iters)
    fit.train()
    print str(iters) + ": " + str(ef.value(rhc.getOptimal()))
Example 18
def fourpeaksfunc(N, iterations):

    rhcMult = 200
    saMult = 200
    gaMult = 2
    mimicMult = 1

    optimalOut = []
    timeOut = []
    evalsOut = []

    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for niter in iterations:

        iterOptimalOut = [N, niter]
        iterTimeOut = [N, niter]
        iterEvals = [N, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(1E20, .8, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 100, 10, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        mimic = MIMIC(200, 20, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
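# Usage sketch for fourpeaksfunc; the problem sizes and iteration schedule below
# are illustrative values, not from the original source:
for n in [40, 80, 120]:
    optimal, times, evals = fourpeaksfunc(n, [10, 20, 40])
    print optimal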
Example 19
def run_algorithm_test(T,
                       ranges,
                       algorithms,
                       output_file_name,
                       trial_number,
                       iterations=False):

    with open(output_file_name, 'w') as f:
        f.write('algorithm,optimal_result,iterations,time,trial\n')

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)

    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for trial in range(trial_number):
        if iterations is False:
            for item in algorithms:
                start_time = time.time()
                if item in ['rhc']:
                    optimal_result, run_iters = run_rhc(hcp, ef)
                elif item in ['sa']:
                    optimal_result, run_iters = run_sa(hcp, ef)
                elif item in ['ga']:
                    optimal_result, run_iters = run_ga(gap, ef)
                elif item in ['mimic']:
                    optimal_result, run_iters = run_mimic(pop, ef)
                else:
                    print "The algorithm type {} is not supported.".format(
                        item)
                    continue

                end_time = time.time()
                time_elapsed = end_time - start_time

                run_output = '{},{},{},{},{}\n'.format(item, optimal_result,
                                                       run_iters, time_elapsed,
                                                       trial)
                with open(output_file_name, 'a') as f:
                    f.write(run_output)
        else:
            for iter in iterations:
                for item in algorithms:
                    start_time = time.time()
                    if item in ['rhc']:
                        optimal_result, run_iters = run_rhc(hcp, ef, iter)
                    elif item in ['sa']:
                        optimal_result, run_iters = run_sa(hcp, ef, iter)
                    elif item in ['ga']:
                        optimal_result, run_iters = run_ga(gap, ef, iter)
                    elif item in ['mimic']:
                        optimal_result, run_iters = run_mimic(pop, ef, iter)
                    else:
                        print "The algorithm type {} is not supported.".format(
                            item)
                        continue

                    end_time = time.time()
                    time_elapsed = end_time - start_time

                    run_output = '{},{},{},{},{}\n'.format(
                        item, optimal_result, run_iters, time_elapsed, trial)

                    with open(output_file_name, 'a') as f:
                        f.write(run_output)

    print "time elapsed is {}".format(time_elapsed)

    return
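# Usage sketch for run_algorithm_test, assuming the run_rhc/run_sa/run_ga/run_mimic
# helpers defined elsewhere in the source; N, the trial count, and the iteration
# list are illustrative:
N = 200
fill = [2] * N
run_algorithm_test(N / 5, array('i', fill),
                   ['rhc', 'sa', 'ga', 'mimic'],
                   'four_peaks_trials.csv',
                   trial_number=3,
                   iterations=[1000, 5000, 10000])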
Example 20
def run_all_2(N=200, T=40, fout=None):
    problem = 'fourpeaks'
    # N=200
    # T=N/10
    maxEpochs = 10**6
    maxTime = 300  #5 minutes
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    # mf = SwapMutation()
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    def run_algo(alg, fit, label, difficulty, iters):
        trainTimes = [0.]
        trainTime = []
        scoreChange = [0.]
        stuckCount = 10**3
        prev = 0.
        for epoch in range(0, maxEpochs, 1):

            st = time.clock()
            fit.train()
            et = time.clock()

            trainTimes.append(trainTimes[-1] + (et - st))
            trainTime.append((et - st))
            rollingMean = 10
            avgTime = (math.fsum(trainTime[-rollingMean:]) /
                       float(rollingMean))

            score = ef.value(alg.getOptimal())

            # trialString = '{}-{}-{}-{}'.format(label,score,epoch,trainTimes[-1])
            trialData = [
                problem, difficulty, label, score, epoch, trainTimes[-1],
                avgTime, iters
            ]
            # print(trialData)
            # fout.writerow(trialData)
            # print(trialData)
            print(trialData, max(scoreChange))
            # print(max(scoreChange))
            # Four Peaks global optimum: (N - T - 1) from the run of leading 1s, plus the bonus N
            optimum = (difficulty - 1 - T) + difficulty
            if score >= optimum: break

            scoreChange.append(abs(score - prev))
            prev = score
            scoreChange = scoreChange[-stuckCount:]
            # print(scoreChange)
            if max(scoreChange) == 0: break

            if trainTimes[-1] > maxTime: break

        # print(trialData)
        fout.writerow(trialData)

    iters = 1000
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc, fit, 'RHC', N, iters)

    iters = 1000
    startTemp = 1E10
    coolingFactor = .95
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa, fit, 'SA', N, iters)

    iters = 10
    population = 300
    mates = 100
    mutations = 50
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga, fit, 'GA', N, iters)

    iters = 10
    samples = 200
    keep = 20
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic, fit, 'MIMIC', N, iters)
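# run_all_2 reports through fout, a csv writer; a sketch of driving it
# (the filename and problem sizes are illustrative):
import csv

with open('fourpeaks_results.csv', 'wb') as f:  # 'wb' for the Python 2 csv module
    writer = csv.writer(f)
    for n in [100, 200]:
        run_all_2(N=n, T=n / 5, fout=writer)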