Example #1
def mimic_fac(args={}):
    constant_params = {'op': pop}
    params = merge_two_dicts(args, constant_params)
    mimic = MIMIC(params['samples'],
                  int(params['samples'] * params['tokeep']),
                  params['op'])

    mfit = FixedIterationTrainer(mimic, num_iterations)
    return mfit
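Example #1 relies on a merge_two_dicts helper that is not shown. A minimal sketch, assuming the usual copy-and-update semantics where the second dict wins on duplicate keys:

def merge_two_dicts(a, b):
    # Hypothetical helper for Example #1: new dict with the entries of
    # both arguments; b overrides a on duplicate keys (assumed).
    merged = dict(a)
    merged.update(b)
    return merged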
Example #2
def run_mimic(t, samples, keep, m):
    fill = [N] * N
    ranges = array('i', fill)
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscreteUniformDistribution(ranges)

    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                           str(t + 1))
    base.write_header(fname)
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
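Examples #2 and #3 write results through base.write_header and base.write_to_file, which are not shown. A minimal sketch of a compatible base module, assuming the same CSV columns the other examples write inline:

# base.py (hypothetical sketch)
def write_header(fname):
    # Start a fresh results file with the column layout used throughout.
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals\n')

def write_to_file(fname, line):
    # Append one pre-formatted CSV row.
    with open(fname, 'a') as f:
        f.write(line)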
Example #3
def run_mimic(t, samples, keep, m):
    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                           str(t + 1))
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        elapsed = clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
Example #4
def MIMICtest():
    correctCount = 0
    MIMIC_iters = 10
    MIMIC_samples = 5 * N  #max(1,int(N/10))
    MIMIC_keep = int(.1 * MIMIC_samples)
    t = 0
    while correctCount < NUM_RIGHT and MIMIC_iters <= 500:
        MIMIC_keep = int(max(.1 * MIMIC_samples, 1))
        mimic = MIMIC(int(MIMIC_samples), int(MIMIC_keep), pop)
        start = time.time()
        fit = FixedIterationTrainer(mimic, int(MIMIC_iters))
        fitness = fit.train()
        t = time.time() - start
        v = ef.value(mimic.getOptimal())
        myWriter.addValue(fitness, "MIMIC_fitness", runNum)
        myWriter.addValue(t, "MIMIC_searchTimes", runNum)
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
            MIMIC_iters *= 1.1
            MIMIC_samples *= 1.1
    myWriter.addValue(t, "MIMIC_times", 0)
    myWriter.addValue(int(MIMIC_iters), "MIMIC_iters", 0)
    myWriter.addValue(int(MIMIC_samples), "MIMIC_samples", 0)
    myWriter.addValue(int(MIMIC_keep), "MIMIC_keep", 0)

    print(
        str(N) + ": MIMIC: " + str(ef.value(mimic.getOptimal())) + " took " +
        str(t) + " seconds and " + str(int(MIMIC_iters)) + " iterations and " +
        str(int(MIMIC_samples)) + " samples with keep " + str(int(MIMIC_keep)))
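Example #4 records its measurements through a myWriter object that is not shown. A minimal sketch of a compatible writer, assuming addValue(value, seriesName, run) simply buffers named series per run:

class ResultWriter(object):
    # Hypothetical stand-in for Example #4's myWriter: buffers values
    # under a (series name, run number) key for later export.
    def __init__(self):
        self.series = {}

    def addValue(self, value, name, run):
        self.series.setdefault((name, run), []).append(value)

myWriter = ResultWriter()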
Example #5
def mimic_discrete(name, ef, odd, ranges, iter_time, iters_total, iters_step, n_trials, params):
    for i_trial in range(n_trials):
        for samples, keep, m in itertools.product(*params):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mimic_instance = MIMIC(samples, keep, pop)
            mimic_trainer = FixedIterationTrainer(mimic_instance, iters_step)
            mimic_state = {'problem': mimic_instance,
                           'trainer': mimic_trainer}
            # wrap into class etc...
            wrapper_mimic = AlgoWrapper(mimic_state,
                                        lambda state: state['trainer'].train(),
                                        lambda state: ef.value(state['problem'].getOptimal()),
                                        lambda state: ef.value(state['problem'].getOptimal())
                                        )
            # build the decorated name; leave it empty if no base name was given
            decorated_name = ""
            if name is not None and name != "":
                decorated_name = name + "_samples_" + str(samples) + "_keep_" + str(keep) + "_m_" + str(m)
            timed_trainer = TimedTrainer(decorated_name,
                                         wrapper_mimic,
                                         iter_time,
                                         iters_total,
                                         iters_step,
                                         _param_dict={'name':name,
                                                      'samples':samples,
                                                      'keep':keep,
                                                      'm':m}
                                         )
            timed_trainer.run()
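Example #5 funnels the trainer through AlgoWrapper and TimedTrainer, neither of which is shown. A minimal sketch of an AlgoWrapper matching the call above; the state-dict-plus-callables shape is an assumption:

class AlgoWrapper(object):
    # Hypothetical wrapper matching Example #5's usage: shared state plus
    # callables for one training step, the current score, and the best score.
    def __init__(self, state, train_fn, score_fn, best_fn):
        self.state = state
        self.train_fn = train_fn
        self.score_fn = score_fn
        self.best_fn = best_fn

    def train(self):
        return self.train_fn(self.state)

    def score(self):
        return self.score_fn(self.state)

    def best(self):
        return self.best_fn(self.state)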
Example #6
def MIMICAllRangeExperiment(name, points, problem, sampleRange, keepRange,
                            iterRange, mat):
    currmax = -1
    bestSampleSize = -1
    bestKeep = -1
    bestPath = []
    lastRow = -1
    for jdx, j in enumerate(sampleRange):
        iVec = []
        jVec = []
        for idx, i in enumerate(keepRange):  #percentage to keep
            fitVec = []
            row = idx * len(sampleRange) + jdx
            keep = int(ceil(j * i))  #i is the percentage
            print "samples " + str(j) + " keep " + str(keep)
            if keep < j:  #TODO: no longer needed now that using % keep instead of absolute
                mimic = MIMIC(j, keep, problem)

                if (row == lastRow):
                    print "Error! lastRow == row! "
                else:
                    lastRow = row
                fitVec = IterRangeExperiment(name, mimic, points, iterRange,
                                             mat, row)
            else:
                print "i <= j, skipping row " + str(row)
            iVec.append(i)
            jVec.append(j)
            saveFit(name + "_keep", iVec, idx * len(sampleRange) + jdx, mat)
            saveFit(name + "_sampleSize", jVec,
                    idx * len(sampleRange) + jdx, mat)
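Examples #6 and #8 persist vectors through a saveFit helper that is not shown; from the call sites it stores a labelled vector at a given row of the shared mat structure. A minimal sketch, assuming mat is a nested dict:

def saveFit(label, vec, row, mat):
    # Hypothetical helper: record a labelled vector at row index `row`
    # of the shared results structure (assumed to be a dict of dicts).
    mat.setdefault(label, {})[row] = list(vec)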
Example #7
def mimic():
    for samples, keep, m in product([80], [40],
                                    [0.15, 0.35, 0.55, 0.75, 0.95]):
        fname = outfile.replace('XXX',
                                'MIMIC{}_{}_{}'.format(samples, keep, m))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals\n')
        mimic = MIMIC(samples, keep, pop)
        perform(mimic, fname)
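Examples #7 and #11 delegate the training loop to perform(algo, fname), which is not shown. A minimal sketch consistent with the CSV header written just above and with the inline loops of Examples #18 and #20; the globals ef, maxIters, and clock are assumed, as elsewhere in these snippets:

def perform(algo, fname):
    # Hypothetical runner: train in 10-iteration chunks and append one
    # iterations,fitness,time,fevals row per chunk.
    fit = FixedIterationTrainer(algo, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        times.append(times[-1] + (clock() - start))
        score = ef.value(algo.getOptimal())
        with open(fname, 'a') as f:
            f.write('{},{},{},{}\n'.format(i, score, times[-1], ef.fevals))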
Example #8
def MIMICKeepRangeExperiment(name, points, problem, keepRange, iterRange, mat):
    for idx, i in enumerate(keepRange):
        mimic = MIMIC(1000, i, problem)
        fitVec = IterRangeExperiment(name, mimic, points, iterRange, mat,
                                     idx * len(iterRange))
        row = idx
        saveFit(name + "_fitness", fitVec, idx, mat)
        saveFit(name + "_iterations", iterRange, idx, mat)
    saveFit(name + "_numSamples", keepRange, 0, mat)
Example #9
def run_mimic(pop, ef, iterations=1000):

    mimic = MIMIC(200, 20, pop)
    fit = FixedIterationTrainer(mimic, iterations)
    fit.train()
    optimal_result = str(ef.value(mimic.getOptimal()))
    print "MIMIC: " + optimal_result

    return optimal_result, iterations
Example #10
def solveit(oaname, params):
    N = 60
    T = N / 10
    fill = [2] * N
    ranges = array('i', fill)
    iterations = 10000
    tryi = 1

    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    #  fit = FixedIterationTrainer(rhc, 200000)
    #  fit.train()

    if oaname == 'RHC':
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    if oaname == 'SA':
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    if oaname == 'GA':
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]),
                                      int(params[2]), gap)
    if oaname == 'MMC':
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (
        oaname, ','.join(params), iterations, tryi)
    print "=" * 20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        if i % 10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = float(timeit.default_timer() - starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'score: %.3f' % score
    print 'train time: %.3f secs' % (timeit.default_timer() - starttime)

    scsv = 'cp-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "=" * 20
Example #11
def mimic():
    for samples, keep, m in product([80], [40], [0.15, 0.35, 0.55, 0.75, 0.95]):
        fname = outfile.replace('XXX','MIMIC{}_{}_{}'.format(samples,keep,m))
        df = DiscreteDependencyTree(m, ranges)
        with open(fname,'w') as f:
            f.write('iterations,fitness,time,fevals\n')
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(samples, keep, pop)
        perform(mimic, fname)
Example #12
    def run_experiment(self, opName):
        """Run a MIMIC optimization experiment for a given optimization
        problem.

        Args:
            opName (str): Name of the optimization problem.

        """
        outdir = 'results/OPT/{}'.format(opName)  # get results directory
        outfile = 'MIMIC_{}_{}_{}_results.csv'.format(self.s, self.k, self.m)
        fname = get_abspath(outfile, outdir)  # get output filename

        # delete existing results file, if it already exists
        try:
            os.remove(fname)
        except Exception as e:
            print e

        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals,trial\n')

        # start experiment
        for t in range(self.numTrials):
            # initialize optimization problem and training functions
            ranges, ef = self.op.get_ef()
            mimic = None
            df = DiscreteDependencyTree(self.m, ranges)
            odd = DiscreteUniformDistribution(ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mimic = MIMIC(self.s, self.k, pop)
            fit = FixedIterationTrainer(mimic, 10)

            # run experiment and train evaluation function
            start = time.clock()
            for i in range(0, self.maxIters, 10):
                fit.train()
                elapsed = time.clock() - start
                fe = ef.valueCallCount
                score = ef.value(mimic.getOptimal())
                ef.valueCallCount -= 1

                # write results to output file
                s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
                with open(fname, 'a+') as f:
                    f.write(s)
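Example #12 resolves its output path with get_abspath, which is not shown. A minimal sketch, assuming it joins the filename onto the results directory and creates that directory on demand:

import os

def get_abspath(filename, outdir):
    # Hypothetical helper: absolute path under outdir, creating it if needed.
    path = os.path.abspath(outdir)
    if not os.path.exists(path):
        os.makedirs(path)
    return os.path.join(path, filename)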
Example #13
def mimicGATest():

    popBegin = 1
    popEnd = 101
    keepBegin = 1
    keepEnd = 90
    mutBegin = 1
    mutEnd = 90
    itersBegin = 1
    itersEnd = 200

    samples = 10
    keep = 2

    problemSize = N
    mimicRange = (problemSize)
    iters = 1

    paramRanges = Vector(8)
    paramRanges.addElement(popBegin)
    paramRanges.addElement(popEnd)
    paramRanges.addElement(keepBegin)
    paramRanges.addElement(keepEnd)
    paramRanges.addElement(mutBegin)
    paramRanges.addElement(mutEnd)
    paramRanges.addElement(itersBegin)
    paramRanges.addElement(itersEnd)

    totalParamSize1 = (popEnd - popBegin + 1) + (keepEnd - keepBegin + 1) + (
        mutEnd - mutBegin + 1) + (itersEnd - itersBegin + 1)
    allParamValues = range(popBegin, popEnd + 1) + range(
        keepBegin, keepEnd + 1) + range(mutBegin, mutEnd + 1) + range(
            itersBegin, itersEnd + 1)
    totalParamSize = len(allParamValues)
    metaFun = RamysEvalMetafunc(ranges)
    discreteDist = RamysMimicDistribution(
        paramRanges)  #DiscreteUniformDistribution(problemSize)
    distFunc = DiscreteDependencyTree(.1, allParamValues)
    findGA = GenericProbabilisticOptimizationProblem(metaFun, discreteDist,
                                                     distFunc)
    mimic = MIMIC(samples, keep, findGA)
    fit = FixedIterationTrainer(mimic, iters)
    fit.train()
    print str(N) + ": MIMIC finds GA : " + str(metaFun.value(mimic.getOptimal()))
Example #14
def main():
    """Run this experiment"""
    training_ints = initialize_instances('m_trg.csv')
    testing_ints = initialize_instances('m_test.csv')
    validation_ints = initialize_instances('m_val.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    rule = RPROPUpdateRule()
    oa_names = ["MIMIC"]
    classification_network = factory.createClassificationNetwork([
        INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER
    ], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = MIMIC(200, 100, nnop)
    train(oa, classification_network, 'MIMIC', training_ints, validation_ints,
          testing_ints, measure)
Example #15
def main():
    """Run this experiment"""
    training_ints = initialize_instances(PATH + "X_train.csv")
    testing_ints = initialize_instances(PATH + "X_test.csv")
    validation_ints = initialize_instances(PATH + "y_train.csv")
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    logistic_sigmoid = LogisticSigmoid()
    data_set = DataSet(training_ints)
    data_set_size = data_set.size()
    print(data_set_size)
    print(type(data_set_size))
    odd = DiscreteUniformDistribution([data_set_size])
    df = DiscreteDependencyTree(.1, [data_set_size])
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], logistic_sigmoid)
    evaluation = NeuralNetworkEvaluationFunction(classification_network,
                                                 data_set, measure)
    pop = GenericProbabilisticOptimizationProblem(evaluation, odd, df)
    oa = MIMIC(data_set_size, int(0.1 * data_set_size), pop)
    train(oa, classification_network, 'MIMIC', training_ints, validation_ints,
          testing_ints, measure)
Example #16
def solveit(oaname, params):
    # set N value.  This is the number of points
    N = 50
    iterations = 1000
    tryi = 1
    random = Random()

    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    if oaname == "RHC":
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    if oaname == "SA":    
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    if oaname == "GA":
        iterations=1000
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]), int(params[2]), gap)
    if oaname == "MMC":
        iterations=1000
        # for mimic we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (oaname, ','.join(params), iterations, tryi)
    print "="*20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        if i%10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = int(timeit.default_timer()-starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'Inverse of Distance [score]: %.3f' % score
    print 'train time: %d secs' % (int(timeit.default_timer()-starttime))

    scsv = 'tsp-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "="*20

    print "Route:"
    if oaname == 'MMC':
        optimal = oa.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0,len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        print order
    else:
        path = []
        for x in range(0,N):
            path.append(oa.getOptimal().getDiscrete(x))
        print path
Example #17
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)

# ---------------------------------------------------------------
N_ITERS = 100001

# MIMIC
start = time.time()
fit_hist = []
for n_samples in range(100, 1000, 200):
    print n_samples
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(n_samples, 20, pop)

    fh = FixedIterTrainer(ef, mimic, N_ITERS)
    fit_hist.append(fh)

write_hist_csv(fit_hist, 'fitness_mimic_n_samples')

print time.time() - start, 'seconds'
# # 1553 secs

start = time.time()
fit_hist = []
for theta in [0.05, 0.10, 0.2, 0.3, 0.5]:
    print theta
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(200, int(200 * theta), pop)
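Example #17 calls FixedIterTrainer(ef, mimic, N_ITERS) and write_hist_csv, neither of which is ABAGAIL's stock API (the stock class is FixedIterationTrainer). A minimal sketch of both, assuming the custom trainer returns a per-iteration fitness history:

def FixedIterTrainer(ef, algo, n_iters):
    # Hypothetical trainer: one step per iteration, recording the best
    # score after each so callers can plot a fitness curve.
    hist = []
    for _ in range(n_iters):
        algo.train()
        hist.append(ef.value(algo.getOptimal()))
    return hist

def write_hist_csv(hists, name):
    # Hypothetical writer: one CSV row per recorded history.
    with open(name + '.csv', 'w') as f:
        for hist in hists:
            f.write(','.join(str(v) for v in hist) + '\n')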
Example #18
# MIMIC
for t in range(numTrials):
    for samples, keep, m in product([100], [50], [0.1, 0.3, 0.5, 0.7, 0.9]):
        fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                               str(t + 1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals\n')
        ef = ContinuousPeaksEvaluationFunction(T)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = SingleCrossOver()
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        df = DiscreteDependencyTree(m, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(samples, keep, pop)
        fit = FixedIterationTrainer(mimic, 10)
        times = [0]
        for i in range(0, maxIters, 10):
            start = clock()
            fit.train()
            elapsed = clock() - start
            times.append(times[-1] + elapsed)
            score = ef.value(mimic.getOptimal())
            fevals = ef.getFunctionEvaluations() - 1  # exclude the scoring call above
            st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
            print st
            with open(fname, 'a') as f:
                f.write(st)
Example #19
sa = SimulatedAnnealing(temp, 0.85, hcp)
for iters in iters_list:
    fit = FixedIterationTrainer(sa, iters)
    start = time.time()
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(
        ef.value(sa.getOptimal())) + ", Dur: " + str(dur)

print "Genetic Algorithm"
ga = StandardGeneticAlgorithm(2 * N, 300, 100, gap)
for iters in iters_list:
    fit = FixedIterationTrainer(ga, iters)
    start = time.time()
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(
        ef.value(ga.getOptimal())) + ", Dur: " + str(dur)

print "MIMIC"
# the number of samples to take each iteration
# The number of samples to keep
mimic = MIMIC(250, 25, pop)
for iters in iters_list:
    fit = FixedIterationTrainer(mimic, iters)
    start = time.time()
    fit.train()
    dur = time.time() - start
    print "Iters: " + str(iters) + ", Fitness: " + str(
        ef.value(mimic.getOptimal())) + ", Dur: " + str(dur)
Example #20
# MIMIC
for t in range(numTrials):
    for samples, keep, m in product([100], [50], [0.1, 0.3, 0.5, 0.7, 0.9]):
        fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                               str(t + 1))
        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals\n')
        ef = FlipFlopEvaluationFunction()
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = SingleCrossOver()
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        df = DiscreteDependencyTree(m, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        mimic = MIMIC(samples, keep, pop)
        fit = FixedIterationTrainer(mimic, 10)
        times = [0]
        for i in range(0, maxIters, 10):
            start = clock()
            fit.train()
            elapsed = clock() - start
            times.append(times[-1] + elapsed)
            fevals = ef.fevals
            score = ef.value(mimic.getOptimal())
            ef.fevals -= 1
            st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
            print(st)
            with open(fname, 'a') as f:
                f.write(st)
Example #21
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    ef2 = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd2 = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef2, odd2, df)

    # Algorithm declaration
    rhc = RandomizedHillClimbing(hcp)
    sa = SimulatedAnnealing(SA_TEMPERATURE, SA_COOLING_FACTOR, hcp)
    ga = StandardGeneticAlgorithm(GA_POPULATION, GA_CROSSOVER, GA_MUTATION,
                                  gap)
    mimic = MIMIC(MIMIC_SAMPLES, MIMIC_TO_KEEP, pop)

    # Trainer declaration
    fit_rhc = FixedIterationTrainer(rhc, current_iteration_count)
    fit_sa = FixedIterationTrainer(sa, current_iteration_count)
    fit_ga = FixedIterationTrainer(ga, current_iteration_count)
    fit_mimic = FixedIterationTrainer(mimic, current_iteration_count)

    print("Computing for %d iterations" % current_iteration_count)

    # Fitting
    start_rhc = time.time()
    fit_rhc.train()
    end_rhc = time.time()

    start_sa = time.time()
Example #22
    train(sa, exp_name, "SA", "13", ef, 200000)

    sa = SimulatedAnnealing(1E12, .90, hcp)
    fit = FixedIterationTrainer(sa, 200000)
    train(sa, exp_name, "SA", "14", ef, 200000)

    sa = SimulatedAnnealing(1E12, .80, hcp)
    fit = FixedIterationTrainer(sa, 200000)
    train(sa, exp_name, "SA", "15", ef, 200000)


#### Experiment 2 - Tuning Algo Params for MIMIC ####
if True:
    exp_name = "exp02"

    mimic = MIMIC(500, 25, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    train(mimic, exp_name, "MIMIC", "0", ef, 2000)

    mimic = MIMIC(500, 50, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    train(mimic, exp_name, "MIMIC", "1", ef, 2000)

    mimic = MIMIC(500, 100, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    train(mimic, exp_name, "MIMIC", "2", ef, 2000)

    mimic = MIMIC(200, 10, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    train(mimic, exp_name, "MIMIC", "3", ef, 2000)
Example #23
def run_knapsack():
    # Random number generator
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            #print "RHC: " + str(ef.value(rhc.getOptimal()))

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(100, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()

            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            #print "SA: " + str(ef.value(sa.getOptimal()))

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 150, 25, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            #print "GA: " + str(ef.value(ga.getOptimal()))

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)
            #print "MIMIC: " + str(ef.value(mimic.getOptimal()))

    with open('knapsack.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Example #24
ga = StandardGeneticAlgorithm(200, 100, 10, genetic_problem)
t0 = time()
iters = 0
score = 0

f.write("starting GA\n")
while iters < 5000:
    ga.train()
    score = ef.value(ga.getOptimal())
    f.write(str(iters) + "," + str(score) +"\n")
    iters += 1

print "GA: " + str(ef.value(ga.getOptimal())), "time taken", time() - t0, "Iterations", iters

mimic = MIMIC(200, 100, probablistic_optimization)
score = 0
t0 = time()
iters = 0

f.write("starting MIMIC\n")
while iters < 1000:
    mimic.train()
    score = ef.value(mimic.getOptimal())
    f.write(str(iters) + "," + str(score) +"\n")
    iters += 1

print "MIMIC: " + str(ef.value(mimic.getOptimal())), "time taken", time() - t0, "Iterations", iters


Example #25
ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 20, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))
    path = []
    for x in range(0, N):
        path.append(ga.getOptimal().getDiscrete(x))
    print(path)

# MIMIC
# for mimic we use a sort encoding
for i in range(trials):
    ef = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    mimic = MIMIC(500, 100, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    start = clock()
    fit.train()
    end = clock()
    total_time = end - start
    max_fit = ef.value(mimic.getOptimal())
    time_optimum = [total_time, max_fit]
    mimic_data.append(time_optimum)
    print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))
    print "Route:"
    path = []
    optimal = mimic.getOptimal()
    fill = [0] * optimal.size()
    ddata = array('d', fill)
    for k in range(0, len(ddata)):
        ddata[k] = optimal.getContinuous(k)
    order = ABAGAILArrays.indices(optimal.size())
    ABAGAILArrays.quicksort(ddata, order)
    print order
Example #27
    print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))

    sa = SimulatedAnnealing(1E9, .98, hcp)
    fit = FixedIterationTrainer(sa, 200000)
    score_SA.append(train(sa, "SA", ef, 200000, "test", expt))
    print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))

    ga = StandardGeneticAlgorithm(225, 40, 5, gap)
    fit = FixedIterationTrainer(ga, 1000)
    score_GA.append(train(ga, "GA", ef, 40000, "test", expt))
    print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))

    # for mimic we use a sort encoding
    ef = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    mimic = MIMIC(150, 20, pop)
    fit = FixedIterationTrainer(mimic, 1000)
    score_MIMIC.append(train(mimic, "MIMIC", ef, 4000, "test", expt))
    print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))

print("Final averaged results")
print("RHC= " + str(sum(score_RHC) / len(score_RHC)))
print("SA= " + str(sum(score_SA) / len(score_SA)))
print("GA= " + str(sum(score_GA) / len(score_GA)))
print("MIMIC= " + str(sum(score_MIMIC) / len(score_MIMIC)))
def run_traveling_salesman():
    # set N value.  This is the number of points
    N = 50
    random = Random()

    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(rhc.getOptimal().getDiscrete(x))
            # print path

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(1E12, .999, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(sa.getOptimal().getDiscrete(x))
            # print path

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(ga.getOptimal().getDiscrete(x))
            # print path

    # for mimic we use a sort encoding
    ef = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(500, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()

            mimic_results.append(ef.value(mimic.getOptimal()))
            print "MIMIC Inverse of Distance: " + str(
                ef.value(mimic.getOptimal()))
            # print "Route:"
            # path = []
            # optimal = mimic.getOptimal()
            # fill = [0] * optimal.size()
            # ddata = array('d', fill)
            # for i in range(0,len(ddata)):
            #     ddata[i] = optimal.getContinuous(i)
            # order = ABAGAILArrays.indices(optimal.size())
            # ABAGAILArrays.quicksort(ddata, order)
            # print order
            mimic_times.append(end - start)

    with open('travelingsalesman.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Example #29
ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 2000000000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))
sys.exit()  # exits here, so the SA/GA/MIMIC runs below never execute
sa = SimulatedAnnealing(100, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

ga = StandardGeneticAlgorithm(200, 150, 25, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

mimic = MIMIC(200, 100, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC: " + str(ef.value(mimic.getOptimal()))
    train(ga, exp_name, "GA", "6", ef, 40000)

    ga = StandardGeneticAlgorithm(200, 50, 5, gap)
    train(ga, exp_name, "GA", "7", ef, 40000)

    ga = StandardGeneticAlgorithm(100, 25, 3, gap)
    train(ga, exp_name, "GA", "8", ef, 40000)

    ga = StandardGeneticAlgorithm(50, 12, 2, gap)
    train(ga, exp_name, "GA", "9", ef, 40000)

#### Experiment 3 - Tuning Algo Params for MIMIC ####
if True:
    exp_name = "exp03"

    mimic = MIMIC(300, 150, pop)
    train(mimic, exp_name, "MIMIC", "0", ef, 2000)

    mimic = MIMIC(300, 100, pop)
    train(mimic, exp_name, "MIMIC", "1", ef, 2000)

    mimic = MIMIC(200, 100, pop)
    train(mimic, exp_name, "MIMIC", "2", ef, 2000)

    mimic = MIMIC(200, 50, pop)
    train(mimic, exp_name, "MIMIC", "3", ef, 2000)

    mimic = MIMIC(100, 50, pop)
    train(mimic, exp_name, "MIMIC", "4", ef, 2000)

    mimic = MIMIC(100, 25, pop)