Example #1
def makeProblem(num):
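    # Builds a CountOnes problem of size num and publishes every component
    # as a module-level global. Note: GA_pop is only declared global below,
    # so it is assumed to already be set at module level before this runs.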
    global N
    N = num

    global fill
    fill = [2] * N
    global ranges
    ranges = array('i', fill)
    global ef
    ef = CountOnesEvaluationFunction()
    global odd
    odd = DiscreteUniformDistribution(ranges)
    global nf
    nf = DiscreteChangeOneNeighbor(ranges)
    global mf
    mf = DiscreteChangeOneMutation(ranges)
    global cf
    cf = SingleCrossOver()
    global df
    df = DiscreteDependencyTree(.1, ranges)
    global hcp
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    global gap
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    global pop
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    global GA_pop
    global myWriter, GA_iters, GA_mut, GA_keep
    GA_keep = int(GA_pop * .75)
    GA_mut = int(GA_pop * .15)
Example #2
def mimic_discrete(name, ef, odd, ranges, iter_time, iters_total, iters_step, n_trials, params):
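    # Grid search over MIMIC hyperparameters: params is a sequence of three
    # value lists (samples, keep, m), expanded via itertools.product and
    # repeated for n_trials independent runs.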
    for i_trial in range(n_trials):
        for samples, keep, m in itertools.product(*params):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mimic_instance = MIMIC(samples, keep, pop)
            mimic_trainer = FixedIterationTrainer(mimic_instance, iters_step)
            mimic_state = {'problem': mimic_instance,
                           'trainer': mimic_trainer}
            # wrap into class etc...
            wrapper_mimic = AlgoWrapper(mimic_state,
                                        lambda state: state['trainer'].train(),
                                        lambda state: ef.value(state['problem'].getOptimal()),
                                        lambda state: ef.value(state['problem'].getOptimal())
                                        )
            # build a descriptive run name; leave it empty if no base name was given
            decorated_name = ""
            if name is not None and name != "":
                decorated_name = name + "_samples_" + str(samples) + "_keep_" + str(keep) + "_m_" + str(m)
            timed_trainer = TimedTrainer(decorated_name,
                                         wrapper_mimic,
                                         iter_time,
                                         iters_total,
                                         iters_step,
                                         _param_dict={'name':name,
                                                      'samples':samples,
                                                      'keep':keep,
                                                      'm':m}
                                         )
            timed_trainer.run()
Example #3
def run_mimic(t, samples, keep, m):
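    # One MIMIC trial on the traveling salesman problem; points, maxIters,
    # outfile and base are assumed to be module-level globals. Fitness,
    # wall-clock time and function evaluations are logged every 10 iterations.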
    fill = [N] * N
    ranges = array('i', fill)
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscreteUniformDistribution(ranges)

    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                           str(t + 1))
    base.write_header(fname)
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
Example #4
def run_mimic(t, samples, keep, m):
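    # One MIMIC trial on continuous peaks; ranges, T, outfile, maxIters and
    # base are assumed to be module-level globals. nf, mf, cf and gap are
    # constructed but unused here; only pop feeds the MIMIC run.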
    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                           str(t + 1))
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
Example #5
def solveit(oaname, params):
    N = 60
    T = N / 10
    fill = [2] * N
    ranges = array('i', fill)
    iterations = 10000
    tryi = 1

    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    #  fit = FixedIterationTrainer(rhc, 200000)
    #  fit.train()

    if oaname == 'RHC':
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    if oaname == 'SA':
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    if oaname == 'GA':
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]),
                                      int(params[2]), gap)
    if oaname == 'MMC':
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (
        oaname, ','.join(params), iterations, tryi)
    print "=" * 20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        if i % 10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = float(timeit.default_timer() - starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'score: %.3f' % score
    print 'train time: %.3f secs' % (timeit.default_timer() - starttime)

    scsv = 'cp-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "=" * 20
Example #6
def mimic():
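    # Sweep the dependency-tree parameter m while holding samples and keep
    # fixed; ef, odd, ranges, outfile and perform() come from module scope.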
    for samples, keep, m in product([80], [40], [0.15, 0.35, 0.55, 0.75, 0.95]):
        fname = outfile.replace('XXX','MIMIC{}_{}_{}'.format(samples,keep,m))
        df = DiscreteDependencyTree(m, ranges)
        with open(fname,'w') as f:
            f.write('iterations,fitness,time,fevals\n')
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(samples, keep, pop)
        perform(mimic, fname)
Example #7
def run_four_peaks_exploringSA():

    N=200
    T=N/5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 30000, 35000, 40000, 45000, 50000]
    num_repeats = 5


    all_sa_results = []
    all_sa_times = []


    coolings = [0.15, 0.35, 0.55, 0.75, 0.95]
    for cooling in coolings:
        sa_results = []
        sa_times = []
        for i in iters:
            print(i)
            for j in range(num_repeats):
                start = time.time()
                sa = SimulatedAnnealing(1E11, cooling, hcp)
                fit = FixedIterationTrainer(sa, i)
                fit.train()
                end = time.time()
                sa_results.append(ef.value(sa.getOptimal()))
                sa_times.append(end - start)
                print "SA cooling " + str(cooling) + ": "  + str(ef.value(sa.getOptimal()))
        all_sa_results.append(sa_results)
        all_sa_times.append(sa_times)

    with open('four_peaks_exploringSA.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for sa_results in all_sa_results:
            writer.writerow(sa_results)
        for sa_times in all_sa_times:
            writer.writerow(sa_times)

    return all_sa_results, all_sa_times
Example #8
    def run_experiment(self, opName):
        """Run a MIMIC optimization experiment for a given optimization
        problem.

        Args:
            ef (AbstractEvaluationFunction): Evaluation function.
            ranges (array): Search space ranges.
            op (str): Name of optimization problem.

        """
        outdir = 'results/OPT/{}'.format(opName)  # get results directory
        outfile = 'MIMIC_{}_{}_{}_results.csv'.format(self.s, self.k, self.m)
        fname = get_abspath(outfile, outdir)  # get output filename

        # delete existing results file, if it already exists
        try:
            os.remove(fname)
        except Exception as e:
            print e

        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals,trial\n')

        # start experiment
        for t in range(self.numTrials):
            # initialize optimization problem and training functions
            ranges, ef = self.op.get_ef()
            mimic = None
            df = DiscreteDependencyTree(self.m, ranges)
            odd = DiscreteUniformDistribution(ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mimic = MIMIC(self.s, self.k, pop)
            fit = FixedIterationTrainer(mimic, 10)

            # run experiment and train evaluation function
            start = time.clock()
            for i in range(0, self.maxIters, 10):
                fit.train()
                elapsed = time.clock() - start
                fe = ef.valueCallCount
                score = ef.value(mimic.getOptimal())
                ef.valueCallCount -= 1

                # write results to output file
                s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
                with open(fname, 'a+') as f:
                    f.write(s)
Example #9
def mimicGATest():
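    # Meta-optimization sketch: MIMIC searches over GA hyperparameters
    # (population, keep, mutation, iterations) using the custom
    # RamysEvalMetafunc / RamysMimicDistribution classes defined elsewhere.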

    popBegin = 1
    popEnd = 101
    keepBegin = 1
    keepEnd = 90
    mutBegin = 1
    mutEnd = 90
    itersBegin = 1
    itersEnd = 200

    samples = 10
    keep = 2

    problemSize = N
    iters = 1

    paramRanges = Vector(8)
    paramRanges.addElement(popBegin)
    paramRanges.addElement(popEnd)
    paramRanges.addElement(keepBegin)
    paramRanges.addElement(keepEnd)
    paramRanges.addElement(mutBegin)
    paramRanges.addElement(mutEnd)
    paramRanges.addElement(itersBegin)
    paramRanges.addElement(itersEnd)

    allParamValues = range(popBegin, popEnd + 1) + range(
        keepBegin, keepEnd + 1) + range(mutBegin, mutEnd + 1) + range(
            itersBegin, itersEnd + 1)
    totalParamSize = len(allParamValues)
    metaFun = RamysEvalMetafunc(ranges)
    discreteDist = RamysMimicDistribution(
        paramRanges)  #DiscreteUniformDistribution(problemSize)
    distFunc = DiscreteDependencyTree(.1, allParamValues)
    findGA = GenericProbabilisticOptimizationProblem(metaFun, discreteDist,
                                                     distFunc)
    mimic = MIMIC(samples, keep, findGA)
    fit = FixedIterationTrainer(mimic, iters)
    fit.train()
    print str(N) + ": MIMIC finds GA : " + str(metaFun.value(mimic.getOptimal()))
Example #10
def main():
    """Run this experiment"""
    training_ints = initialize_instances(PATH + "X_train.csv")
    testing_ints = initialize_instances(PATH + "X_test.csv")
    validation_ints = initialize_instances(PATH + "y_train.csv")
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    logistic_sigmoid = LogisticSigmoid()
    data_set = DataSet(training_ints)
    data_set_size = data_set.size()
    print(data_set_size)
    print(type(data_set_size))
    odd = DiscreteUniformDistribution([data_set_size])
    df = DiscreteDependencyTree(.1, [data_set_size])
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], logistic_sigmoid)
    evaluation = NeuralNetworkEvaluationFunction(classification_network,
                                                 data_set, measure)
    pop = GenericProbabilisticOptimizationProblem(evaluation, odd, df)
    oa = MIMIC(data_set_size, int(0.1 * data_set_size), pop)
    train(oa, classification_network, 'MIMIC', training_ints, validation_ints,
          testing_ints, measure)
Example #11
# create range
fill = [COPIES_EACH + 1] * NUM_ITEMS
ranges = array('i', fill)

ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)

initial_distribution = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mutation_function = DiscreteChangeOneMutation(ranges)

cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hill_climbing_problem = GenericHillClimbingProblem(ef, initial_distribution, nf)
genetic_problem = GenericGeneticAlgorithmProblem(ef, initial_distribution, mutation_function, cf)
probablistic_optimization = GenericProbabilisticOptimizationProblem(ef, initial_distribution, df)

from time import time
f = open("experiments/results/knapsack_optimal2.txt", "w")

f.write("starting RHC\n")
rhc = RandomizedHillClimbing(hill_climbing_problem)
score = 0
iters = 0
t0 = time()

while iters < 80000:
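    # ABAGAIL's OptimizationAlgorithm.train() runs one step and returns the
    # fitness of the current solution, so it can be logged directly.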
    score = rhc.train()
    f.write(str(iters) + "," + str(score) +"\n")
    iters += 1
Example #12
def run_all():
    problem = 'knapsack'
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack 
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME


    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    maxEpochs = 400

    columns = ['problem','label','score','epoch','time','avgTrainTime','iterations']
    outFile = open(filename+'_all.csv','wb')
    fout = csv.writer(outFile,delimiter=',')
    fout.writerow(columns)


    def run_algo(alg,fit,label,iters):
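        # Each epoch runs `iters` iterations of the algorithm through the
        # FixedIterationTrainer and logs score, cumulative time and a
        # 10-epoch rolling mean of the per-epoch training time.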
        print(alg)
        trainTimes = [0.]
        trainTime = []
        scores = [0]
        deltaScores = []
        for epoch in range(0,maxEpochs,1):
        
            st = time.clock()
            fit.train()
            et = time.clock()
            
            trainTimes.append(trainTimes[-1]+(et-st))
            trainTime.append((et-st))
            rollingMean = 10
            avgTime = (math.fsum(trainTime[-rollingMean:]) / float(rollingMean))
      
            score = ef.value(alg.getOptimal())
            scores.append(score)
            deltaScores.append(math.fabs(scores[-2] - scores[-1]))
            
            # trialString = '{}-{}-{}-{}'.format(label,score,epoch,trainTimes[-1])
            trialData = [problem,label,score,epoch,trainTimes[-1],avgTime,iters]
            print(trialData)
            fout.writerow(trialData)
            
            
    iters = 10
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc,fit,'RHC',10)


    startTemp = 1E11
    coolingFactor = .95
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa,fit,'SA',10)

    population = 300
    mates = 100
    mutations = 50
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga,fit,'GA',10)
    
    
    samples = 200
    keep = 20
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic,fit,'MIMIC',10)
    
    outFile.close()
Example #13
def run_knapsack_experiments():
    OUTPUT_DIRECTORY = './output'
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/knapsack_{}_log.csv'

    # Randomized Hill Climber
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iterations,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(200, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score,
                                          elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product(
        [200], [110, 120, 130, 140, 150], [2, 4, 6, 8]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate,
                                          gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate,
                                                to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([200], [100],
                                                 [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score,
                                                elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
Example #14
def solveit(oaname, params):
    # set N value.  This is the number of points
    N = 50
    iterations = 1000
    tryi = 1
    random = Random()

    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    if oaname == "RHC":
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    if oaname == "SA":    
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    if oaname == "GA":
        iterations=1000
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]), int(params[2]), gap)
    if oaname == "MMC":
        iterations=1000
        # for mimic we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (oaname, ','.join(params), iterations, tryi)
    print "="*20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        if i%10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = int(timeit.default_timer()-starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'Inverse of Distance [score]: %.3f' % score
    print 'train time: %d secs' % (int(timeit.default_timer()-starttime))

    scsv = 'tsp-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "="*20

    print "Route:"
    if oaname == 'MMC':
        optimal = oa.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0,len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        print order
    else:
        path = []
        for x in range(0,N):
            path.append(oa.getOptimal().getDiscrete(x))
        print path
Example #15
def run_traveling_salesman():
    # set N value.  This is the number of points
    N = 50
    random = Random()

    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(rhc.getOptimal().getDiscrete(x))
            # print path

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(1E12, .999, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(sa.getOptimal().getDiscrete(x))
            # print path

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(ga.getOptimal().getDiscrete(x))
            # print path

    # for mimic we use a sort encoding
    ef = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(500, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()

            mimic_results.append(ef.value(mimic.getOptimal()))
            print "MIMIC Inverse of Distance: " + str(
                ef.value(mimic.getOptimal()))
            # print "Route:"
            # path = []
            # optimal = mimic.getOptimal()
            # fill = [0] * optimal.size()
            # ddata = array('d', fill)
            # for i in range(0,len(ddata)):
            #     ddata[i] = optimal.getContinuous(i)
            # order = ABAGAILArrays.indices(optimal.size())
            # ABAGAILArrays.quicksort(ddata, order)
            # print order
            mimic_times.append(end - start)

    with open('travelingsalesman.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Example #16
def fourpeaksfunc(N, iterations):

    rhcMult = 200
    saMult = 200
    gaMult = 2
    mimicMult = 1

    optimalOut = []
    timeOut = []
    evalsOut = []

    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for niter in iterations:

        iterOptimalOut = [N, niter]
        iterTimeOut = [N, niter]
        iterEvals = [N, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(1E20, .8, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 100, 10, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        mimic = MIMIC(200, 20, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
Example #17
ef = TravelingSalesmanRouteEvaluationFunction(points)
odd = DiscretePermutationDistribution(N)
nf = SwapNeighbor()
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

# # for mimic we use a sort encoding
efm = TravelingSalesmanSortEvaluationFunction(points)
fill = [N] * N
ranges = array('i', fill)
oddm = DiscreteUniformDistribution(ranges)
dfm = DiscreteDependencyTree(.1, ranges)
pop = GenericProbabilisticOptimizationProblem(efm, oddm, dfm)


def merge_two_dicts(x, y):
    z = x.copy()   # start with x's keys and values
    z.update(y)    # modifies z with y's keys and values & returns None
    return z
def rhc_fac(args = {}):
    constant_params = {'hcp':hcp}
    params = merge_two_dicts(args, constant_params)
    print(params)
    rhc = RandomizedHillClimbing(hcp)
    return FixedIterationTrainer(rhc, num_iterations)

def sa_fac(args = {}):
    constant_params = {'hcp':hcp}
Example #18
def travelingsalesmanfunc(N, iterations):

    rhcMult = 1500
    saMult = 1500
    gaMult = 1
    mimicMult = 3

    random = Random()

    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:

        ef = TravelingSalesmanRouteEvaluationFunction(points)
        odd = DiscretePermutationDistribution(N)
        nf = SwapNeighbor()
        mf = SwapMutation()
        cf = TravelingSalesmanCrossOver(ef)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

        iterOptimalOut = [N, niter]
        iterTimeOut = [N, niter]
        iterEvals = [N, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC Inverse of Distance: optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        #print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(rhc.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(1E12, .999, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA Inverse of Distance optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        #print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(sa.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA Inverse of Distance optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        #print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
        print "Route:"
        path = []
        for x in range(0, N):
            path.append(ga.getOptimal().getDiscrete(x))
        print path
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        # for mimic we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

        start = time.time()
        mimic = MIMIC(500, 100, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC Inverse of Distance optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        #print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))
        print "Route:"
        path = []
        optimal = mimic.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0, len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        print order
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
Example #19
def run_four_peaks():

    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(1E11, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 100, 10, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 20, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('four_peaks.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Example #20
def run_knapsack():
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            #print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(100, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()

            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            #print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 150, 25, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            #print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            #print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('knapsack.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Example #21
    # Problem Definition

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    ef2 = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd2 = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef2, odd2, df)

    # Algorithm declaration
    rhc = RandomizedHillClimbing(hcp)
    sa = SimulatedAnnealing(SA_TEMPERATURE, SA_COOLING_FACTOR, hcp)
    ga = StandardGeneticAlgorithm(GA_POPULATION, GA_CROSSOVER, GA_MUTATION,
                                  gap)
    mimic = MIMIC(MIMIC_SAMPLES, MIMIC_TO_KEEP, pop)

    # Trainer declaration
    fit_rhc = FixedIterationTrainer(rhc, current_iteration_count)
    fit_sa = FixedIterationTrainer(sa, current_iteration_count)
    fit_ga = FixedIterationTrainer(ga, current_iteration_count)
    fit_mimic = FixedIterationTrainer(mimic, current_iteration_count)

    print("Computing for %d iterations" % current_iteration_count)
Example #22
N = 1000
maxIters = 3001
numTrials = 5
fill = [2] * N
ranges = array('i', fill)
outfile = OUTPUT_DIRECTORY + '/FLIPFLOP/FLIPFLOP_{}_{}_LOG.csv'
ef = FlipFlopEvaluationFunction()
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# RHC
for t in range(numTrials):
    fname = outfile.format('RHC', str(t + 1))
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals\n')
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
Example #23
def knapsackfunc(NUM_ITEMS, iterations):

    rhcMult = 600
    saMult = 600
    gaMult = 4
    mimicMult = 3

    # Random number generator
    random = Random()
    # The number of items
    #NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    optimalOut = []
    timeOut = []
    evalsOut = []

    for niter in iterations:

        iterOptimalOut = [NUM_ITEMS, niter]
        iterTimeOut = [NUM_ITEMS, niter]
        iterEvals = [NUM_ITEMS, niter]

        start = time.time()
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, niter * rhcMult)
        fit.train()
        end = time.time()
        rhcOptimal = ef.value(rhc.getOptimal())
        rhcTime = end - start
        print "RHC optimum: " + str(rhcOptimal)
        print "RHC time: " + str(rhcTime)
        iterOptimalOut.append(rhcOptimal)
        iterTimeOut.append(rhcTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        sa = SimulatedAnnealing(100, .95, hcp)
        fit = FixedIterationTrainer(sa, niter * saMult)
        fit.train()
        end = time.time()
        saOptimal = ef.value(sa.getOptimal())
        saTime = end - start
        print "SA optimum: " + str(saOptimal)
        print "SA time: " + str(saTime)
        iterOptimalOut.append(saOptimal)
        iterTimeOut.append(saTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        ga = StandardGeneticAlgorithm(200, 150, 25, gap)
        fit = FixedIterationTrainer(ga, niter * gaMult)
        fit.train()
        end = time.time()
        gaOptimal = ef.value(ga.getOptimal())
        gaTime = end - start
        print "GA optimum: " + str(gaOptimal)
        print "GA time: " + str(gaTime)
        iterOptimalOut.append(gaOptimal)
        iterTimeOut.append(gaTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        start = time.time()
        mimic = MIMIC(200, 100, pop)
        fit = FixedIterationTrainer(mimic, niter * mimicMult)
        fit.train()
        end = time.time()
        mimicOptimal = ef.value(mimic.getOptimal())
        mimicTime = end - start
        print "MIMIC optimum: " + str(mimicOptimal)
        print "MIMIC time: " + str(mimicTime)
        iterOptimalOut.append(mimicOptimal)
        iterTimeOut.append(mimicTime)
        functionEvals = ef.getNumEvals()
        ef.zeroEvals()
        iterEvals.append(functionEvals)

        optimalOut.append(iterOptimalOut)
        timeOut.append(iterTimeOut)
        evalsOut.append(iterEvals)

    return [optimalOut, timeOut, evalsOut]
Example #24
def run_all_2(N=40,fout=None):
    maxEpochs = 10**5
    maxTime = 300 #5 minutes
    problem = 'knapsack'
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = N
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack 
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME


    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    

    def run_algo(alg,fit,label,difficulty,iters):
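        # Early-stopping loop: train until the best score has changed by
        # less than 1.0 over the last `stuckCount` epochs or the wall-clock
        # budget (maxTime seconds) is exhausted.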
        trainTimes = [0.]
        trainTime = []
        scoreChange = [0.]
        stuckCount = 10**3
        prev = 0.
        for epoch in range(0,maxEpochs,1):
        
            st = time.clock()
            fit.train()
            et = time.clock()
            
            trainTimes.append(trainTimes[-1]+(et-st))
            trainTime.append((et-st))
            rollingMean = 10
            avgTime = (math.fsum(trainTime[-rollingMean:]) / float(rollingMean))
        
            score = ef.value(alg.getOptimal())
            
            # trialString = '{}-{}-{}-{}'.format(label,score,epoch,trainTimes[-1])
            trialData = [problem,difficulty,label,score,epoch,trainTimes[-1],avgTime,iters]
            # print(trialData)
            # fout.writerow(trialData)
            # print(trialData)
            print(trialData,max(scoreChange))
            # print(max(scoreChange))
            # optimum = (difficulty-1-T) + difficulty
            # if score >= optimum: break
            
            scoreChange.append(abs(score-prev))
            prev = score
            scoreChange = scoreChange[-stuckCount:]
            # print(scoreChange)
            if max(scoreChange) < 1.0: break
            
            if trainTimes[-1] > maxTime: break
            
    
        # print(trialData)
        fout.writerow(trialData)
        
        
    iters = 1000
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc,fit,'RHC',N,iters)

    iters = 1000
    startTemp = 1E10
    coolingFactor = .99
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa,fit,'SA',N,iters)

    iters = 10
    population = 300
    mates = 100
    mutations = 50
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga,fit,'GA',N,iters)
    
    iters = 10
    samples = 200
    keep = 20
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic,fit,'MIMIC',N,iters)
Example #25
def run_count_ones_experiments():
    OUTPUT_DIRECTORY = './output'
    N = 80
    fill = [2] * N
    ranges = array('i', fill)
    ef = CountOnesEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    max_iter = 5000
    outfile = OUTPUT_DIRECTORY + '/count_ones_{}_log.csv'

    # Randomized Hill Climber
    filename = outfile.format('rhc')
    with open(filename, 'w') as f:
        f.write('iteration,fitness,time\n')
    for it in range(0, max_iter, 10):
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, it)
        start_time = time.clock()
        fit.train()
        elapsed_time = time.clock() - start_time
        # fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        data = '{},{},{}\n'.format(it, score, elapsed_time)
        print(data)
        with open(filename, 'a') as f:
            f.write(data)

    # Simulated Annealing
    filename = outfile.format('sa')
    with open(filename, 'w') as f:
        f.write('iteration,cooling_value,fitness,time\n')
    for cooling_value in (.19, .38, .76, .95):
        for it in range(0, max_iter, 10):
            sa = SimulatedAnnealing(100, cooling_value, hcp)
            fit = FixedIterationTrainer(sa, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(sa.getOptimal())
            data = '{},{},{},{}\n'.format(it, cooling_value, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # Genetic Algorithm
    filename = outfile.format('ga')
    with open(filename, 'w') as f:
        f.write('iteration,population_size,to_mate,to_mutate,fitness,time\n')
    for population_size, to_mate, to_mutate in itertools.product([20], [4, 8, 16, 20], [0, 2, 4, 6]):
        for it in range(0, max_iter, 10):
            ga = StandardGeneticAlgorithm(population_size, to_mate, to_mutate, gap)
            fit = FixedIterationTrainer(ga, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(ga.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, population_size, to_mate, to_mutate, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)

    # MIMIC
    filename = outfile.format('mm')
    with open(filename, 'w') as f:
        f.write('iterations,samples,to_keep,m,fitness,time\n')
    for samples, to_keep, m in itertools.product([50], [10], [0.1, 0.3, 0.5, 0.7, 0.9]):
        for it in range(0, 500, 10):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mm = MIMIC(samples, to_keep, pop)
            fit = FixedIterationTrainer(mm, it)
            start_time = time.clock()
            fit.train()
            elapsed_time = time.clock() - start_time
            # fevals = ef.fevals
            score = ef.value(mm.getOptimal())
            data = '{},{},{},{},{},{}\n'.format(it, samples, to_keep, m, score, elapsed_time)
            print(data)
            with open(filename, 'a') as f:
                f.write(data)
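Each sweep in this example repeats one pattern: build a fresh optimizer, train it for a fixed iteration budget, and score the best state found. A minimal sketch of that pattern as a reusable helper; the helper name and the make_algo factory argument are illustrative, not from the original:

# Hypothetical helper capturing the shared sweep pattern above.
# make_algo is any zero-argument factory, e.g. lambda: RandomizedHillClimbing(hcp).
def time_one_run(make_algo, ef, num_iters):
    algo = make_algo()
    fit = FixedIterationTrainer(algo, num_iters)
    start = time.clock()
    fit.train()
    elapsed = time.clock() - start
    return ef.value(algo.getOptimal()), elapsed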
Example #26
import math
import time
from array import array

# ABAGAIL (Jython) imports assumed by this snippet
from dist import DiscreteDependencyTree, DiscreteUniformDistribution
from opt import (DiscreteChangeOneNeighbor, GenericHillClimbingProblem,
                 RandomizedHillClimbing, SimulatedAnnealing)
from opt.example import FlipFlopEvaluationFunction
from opt.ga import (DiscreteChangeOneMutation, GenericGeneticAlgorithmProblem,
                    SingleCrossOver, StandardGeneticAlgorithm)
from opt.prob import GenericProbabilisticOptimizationProblem, MIMIC
from shared import FixedIterationTrainer


def run_all_2(N=200, T=40, fout=None):
    problem = 'flipflop'
    # N = 200
    # T = N / 10
    maxEpochs = 10**5
    maxTime = 300  # 5 minutes
    fill = [2] * N
    ranges = array('i', fill)

    # ef = FourPeaksEvaluationFunction(T)
    ef = FlipFlopEvaluationFunction()

    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    # mf = SwapMutation()
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    def run_algo(alg, fit, label, difficulty, iters):
        trainTimes = [0.]
        trainTime = []
        scoreChange = [0.]
        stuckCount = 10**3
        prev = 0.
        for epoch in range(maxEpochs):
            st = time.clock()
            fit.train()
            et = time.clock()

            trainTimes.append(trainTimes[-1] + (et - st))
            trainTime.append(et - st)
            rollingMean = 10
            avgTime = math.fsum(trainTime[-rollingMean:]) / float(rollingMean)

            score = ef.value(alg.getOptimal())

            trialData = [problem, difficulty, label, score, epoch,
                         trainTimes[-1], avgTime, iters]
            print(trialData)

            # stop once the known optimum (N - 1 for flip-flop) is reached
            optimum = difficulty - 1
            if score >= optimum:
                break

            # stop if the score has not changed over the last `stuckCount` epochs
            scoreChange.append(abs(score - prev))
            prev = score
            scoreChange = scoreChange[-stuckCount:]
            if max(scoreChange) == 0:
                break

            if trainTimes[-1] > maxTime:
                break

        # record only the final row for each algorithm
        if fout is not None:
            fout.writerow(trialData)

    iters = 10
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    run_algo(rhc, fit, 'RHC', N, iters)

    iters = 10
    startTemp = 1E10
    coolingFactor = .99
    sa = SimulatedAnnealing(startTemp, coolingFactor, hcp)
    fit = FixedIterationTrainer(sa, iters)
    run_algo(sa, fit, 'SA', N, iters)

    iters = 10
    population = 300
    mates = 40
    mutations = 10
    ga = StandardGeneticAlgorithm(population, mates, mutations, gap)
    fit = FixedIterationTrainer(ga, iters)
    run_algo(ga, fit, 'GA', N, iters)

    iters = 10
    samples = 200
    keep = 5
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, iters)
    run_algo(mimic, fit, 'MIMIC', N, iters)
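run_algo writes a single summary row per algorithm through fout, so run_all_2 is naturally driven with a csv.writer. A possible driver; the file name and header labels are inferred from trialData above, not from the original:

# Hypothetical driver; assumes Jython 2.x with ABAGAIL on the classpath.
import csv

with open('flipflop_results.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerow(['problem', 'difficulty', 'algorithm', 'score',
                     'epoch', 'total_time', 'avg_time', 'iters'])
    for n in (50, 100, 200):
        run_all_2(N=n, T=n / 10, fout=writer)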
Example #27
import time

# ABAGAIL (Jython) imports assumed by this snippet
from dist import DiscreteDependencyTree, DiscreteUniformDistribution
from opt import DiscreteChangeOneNeighbor, GenericHillClimbingProblem
from opt.example import FourPeaksEvaluationFunction
from opt.ga import (DiscreteChangeOneMutation, GenericGeneticAlgorithmProblem,
                    SingleCrossOver)
from opt.prob import GenericProbabilisticOptimizationProblem

# run_rhc, run_sa, run_ga and run_mimic are helpers defined elsewhere in
# this project; each returns an (optimal_result, run_iters) pair.


def run_algorithm_test(T,
                       ranges,
                       algorithms,
                       output_file_name,
                       trial_number,
                       iterations=False):

    with open(output_file_name, 'w') as f:
        f.write('algorithm,optimal_result,iterations,time,trial\n')

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)

    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    for trial in range(trial_number):
        if iterations is False:
            for item in algorithms:
                start_time = time.time()
                if item in ['rhc']:
                    optimal_result, run_iters = run_rhc(hcp, ef)
                elif item in ['sa']:
                    optimal_result, run_iters = run_sa(hcp, ef)
                elif item in ['ga']:
                    optimal_result, run_iters = run_ga(gap, ef)
                elif item in ['mimic']:
                    optimal_result, run_iters = run_mimic(pop, ef)
                else:
                    print "The algorithm type {} is not supported.".format(
                        item)
                    continue

                end_time = time.time()
                time_elapsed = end_time - start_time

                run_output = '{},{},{},{},{}\n'.format(item, optimal_result,
                                                       run_iters, time_elapsed,
                                                       trial)
                with open(output_file_name, 'a') as f:
                    f.write(run_output)
        else:
            # `iterations` is an iterable of iteration budgets when provided
            for num_iters in iterations:
                for item in algorithms:
                    start_time = time.time()
                    if item in ['rhc']:
                        optimal_result, run_iters = run_rhc(hcp, ef, num_iters)
                    elif item in ['sa']:
                        optimal_result, run_iters = run_sa(hcp, ef, num_iters)
                    elif item in ['ga']:
                        optimal_result, run_iters = run_ga(gap, ef, num_iters)
                    elif item in ['mimic']:
                        optimal_result, run_iters = run_mimic(pop, ef, num_iters)
                    else:
                        print "The algorithm type {} is not supported.".format(
                            item)
                        continue

                    end_time = time.time()
                    time_elapsed = end_time - start_time

                    run_output = '{},{},{},{},{}\n'.format(
                        item, optimal_result, run_iters, time_elapsed, trial)

                    with open(output_file_name, 'a') as f:
                        f.write(run_output)

    print "time elapsed is {}".format(time_elapsed)

    return
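The dispatch above leans on four helpers that this example does not include. A minimal sketch of what run_rhc could look like, given the (problem, ef[, iters]) call signature and the (optimal_result, run_iters) return value used above; the name and default budget are assumptions, not the project's actual helper:

# Hypothetical helper; the real run_rhc is defined elsewhere in the project.
from opt import RandomizedHillClimbing
from shared import FixedIterationTrainer

def run_rhc(hcp, ef, num_iters=5000):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, num_iters)
    fit.train()
    return ef.value(rhc.getOptimal()), num_iters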
Example #28
import csv
import timeit
from array import array

from java.util import Random

# ABAGAIL (Jython) imports assumed by this snippet
from dist import DiscreteDependencyTree, DiscreteUniformDistribution
from opt import (DiscreteChangeOneNeighbor, GenericHillClimbingProblem,
                 RandomizedHillClimbing, SimulatedAnnealing)
from opt.example import KnapsackEvaluationFunction
from opt.ga import (DiscreteChangeOneMutation, GenericGeneticAlgorithmProblem,
                    StandardGeneticAlgorithm, UniformCrossOver)
from opt.prob import GenericProbabilisticOptimizationProblem, MIMIC
from shared import FixedIterationTrainer


def solveit(oaname, params):
    iterations = 10000
    tryi = 1
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if oaname == 'RHC':
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    elif oaname == 'SA':
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    elif oaname == 'GA':
        iterations = 1000
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]),
                                      int(params[2]), gap)
    elif oaname == 'MMC':
        iterations = 1000
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (
        oaname, ','.join(params), iterations, tryi)
    print "=" * 20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        if i % 10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = int(timeit.default_timer() - starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'score: %.3f' % score
    print 'train time: %d secs' % (int(timeit.default_timer() - starttime))

    scsv = 'kn-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "=" * 20