Example #1
def run_sa(t, CE):
    fname = outfile.format('SA{}'.format(CE), str(t + 1))
    with open(fname, 'a+') as f:
        f.seek(0)  # 'a+' opens with the pointer at EOF, so rewind before reading
        content = f.read()
        if "fitness" not in content:
            f.write('iterations,fitness,time,fevals\n')
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    sa = SimulatedAnnealing(1E10, CE, hcp)
    fit = FixedIterationTrainer(sa, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(sa.getOptimal())
        ef.fevals -= 1  # exclude the scoring call above from the evaluation count
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
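The snippet above leans on module-level globals (outfile, ranges, maxIters, base) defined elsewhere in the project. A minimal sketch of that preamble, assuming ABAGAIL is on the classpath and with illustrative values (the fevals counter also comes from a patched evaluation function in the source project, not stock ABAGAIL):

import sys, time
from array import array
sys.path.append('./ABAGAIL.jar')  # assumed jar location
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.example.FlipFlopEvaluationFunction as FlipFlopEvaluationFunction
import shared.FixedIterationTrainer as FixedIterationTrainer
import base  # project-local helper with write_to_file / write_header

N = 1000                            # assumed problem size
maxIters = 3001                     # assumed iteration budget
ranges = array('i', [2] * N)        # FlipFlop: binary string of length N
outfile = 'FLIPFLOP_{}_{}_LOG.csv'  # assumed template: algorithm label, trial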
Example #2
def SA():
    SA_iters = 10
    correctCount = 0
    t = 0
    totalTime = 0
    totalIters = 0
    global sa
    sa = SimulatedAnnealing(1e11, .85, hcp)
    while correctCount < NUM_RIGHT:
        start = time.time()
        fit = FixedIterationTrainer(sa, SA_iters)
        fitness = fit.train()
        t = time.time() - start
        totalTime += t
        totalIters += SA_iters
        myWriter.addValue(fitness, "SA_fitness", runNum)
        myWriter.addValue(t, "SA_searchTimes", runNum)
        v = ef.value(sa.getOptimal())
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
            #SA_iters += 1
    myWriter.addValue(totalTime, "SA_times", 0)
    myWriter.addValue(int(SA_iters), "SA_iters", 0)
    print str(N) + ": SA: " + str(ef.value(sa.getOptimal())) + " took " + str(
        totalTime) + " seconds and " + str(totalIters) + " iterations"
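This and the RHC/MIMIC variants below (Examples #12 and #13) share module-level state that the snippets omit. A minimal sketch, with names taken from the code and values purely illustrative:

N = 80          # problem size; ef.value(...) == N means the optimum was found
NUM_RIGHT = 20  # consecutive optimal evaluations required before stopping
runNum = 0      # trial index passed to the logger
# myWriter is a project-local logger exposing addValue(value, seriesName, run);
# ef and hcp are the evaluation function and hill-climbing problem, built as in
# the other examples.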
Example #3
File: tsp.py Project: yifanguo247/CS7641
def run_mimic(t, samples, keep, m):
    fill = [N] * N
    ranges = array('i', fill)
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscreteUniformDistribution(ranges)

    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                           str(t + 1))
    base.write_header(fname)
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
Example #4
def run_ga(t, pop, mate, mutate):
    fname = outfile.format('GA{}_{}_{}'.format(pop, mate, mutate), str(t + 1))
    with open(fname, 'a+') as f:
        f.seek(0)  # rewind; 'a+' opens at EOF
        content = f.read()
        if "fitness" not in content:
            f.write('iterations,fitness,time,fevals\n')
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    ga = StandardGeneticAlgorithm(pop, mate, mutate, gap)
    fit = FixedIterationTrainer(ga, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(ga.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
Example #5
def sa_generic(name, ef, odd, nf, iter_time, iters_total, iters_step, n_trials, params):
    for i_trial in range(n_trials):
        for t_param, cooling in itertools.product(*params):
            hcp = GenericHillClimbingProblem(ef, odd, nf)
            sa_instance = SimulatedAnnealing(t_param, cooling, hcp)
            sa_trainer = FixedIterationTrainer(sa_instance, iters_step)
            sa_state = {'problem': sa_instance,
                        'trainer': sa_trainer}
            wrapper_sa = AlgoWrapper(sa_state,
                                     lambda state: state['trainer'].train(),
                                     lambda state: ef.value(state['problem'].getOptimal()),
                                     lambda state: ef.value(state['problem'].getOptimal())
                                     )
            # build a decorated run name; leave it empty if no base name was given
            decorated_name = ""
            if name is not None and name != "":
                decorated_name = name + "_t_" + str(t_param) + "_cooling_" + str(cooling)
            timed_trainer = TimedTrainer(decorated_name,
                                         wrapper_sa,
                                         iter_time,
                                         iters_total,
                                         iters_step,
                                         _param_dict={'name':name,
                                                      'temperature':t_param,
                                                      'coolFactor':cooling}
                                         )
            timed_trainer.run()
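AlgoWrapper and TimedTrainer are project-local helpers whose interfaces are only implied by the calls above. A minimal sketch of AlgoWrapper consistent with that usage (the real class may differ):

class AlgoWrapper(object):
    # bundle mutable algorithm state with train/score callbacks
    def __init__(self, state, train_fn, train_score_fn, test_score_fn):
        self.state = state
        self.train_fn = train_fn
        self.train_score_fn = train_score_fn
        self.test_score_fn = test_score_fn

    def train(self):
        return self.train_fn(self.state)

    def train_score(self):
        return self.train_score_fn(self.state)

    def test_score(self):
        return self.test_score_fn(self.state)

On the same reading, TimedTrainer drives wrapper.train() in iters_step chunks until iters_total iterations or iter_time seconds elapse, logging both scores under decorated_name.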
Example #6
def sa_network(name, network, measure, train_set, test_set, acc_func, iter_time, iters_total, iters_step, n_trials, params):
    for i_trial in range(n_trials):
        for t_param, cooling in itertools.product(*params):
            network_optimizer = NeuralNetworkOptimizationProblem(train_set, network, measure)
            sa_instance = SimulatedAnnealing(t_param, cooling, network_optimizer)
            sa_trainer = FixedIterationTrainer(sa_instance, iters_step)
            nn_state = {'network': network,
                        'trainer': sa_trainer}
            wrapper_sa = AlgoWrapper(nn_state,
                                     lambda state: state['trainer'].train(),
                                     lambda state: acc_func(train_set, state['network'], measure),
                                     lambda state: acc_func(test_set, state['network'], measure)
                                     )
            # build a decorated run name; leave it empty if no base name was given
            decorated_name = ""
            if name is not None and name != "":
                decorated_name = name + "_t_" + str(t_param) + "_cooling_" + str(cooling)
            timed_trainer = TimedTrainer(decorated_name,
                                         wrapper_sa,
                                         iter_time,
                                         iters_total,
                                         iters_step,
                                         _param_dict={'name':name,
                                                      'temperature':t_param,
                                                      'coolFactor':cooling}
                                         )
            timed_trainer.run()
Example #7
def ga_network(name, network, measure, train_set, test_set, acc_func, iter_time, iters_total, iters_step, n_trials, params):
    for i_trial in range(n_trials):
        for popsize, toMate, toMutate in itertools.product(*params):
            network_optimizer = NeuralNetworkOptimizationProblem(train_set, network, measure)
            ga_instance = StandardGeneticAlgorithm(popsize, int(popsize * toMate), int(popsize * toMutate), network_optimizer)
            ga_trainer = FixedIterationTrainer(ga_instance, iters_step)
            nn_state = {'network': network,
                        'trainer': ga_trainer}
            wrapper_ga = AlgoWrapper(nn_state,
                                     lambda state: state['trainer'].train(),
                                     lambda state: acc_func(train_set, state['network'], measure),
                                     lambda state: acc_func(test_set, state['network'], measure)
                                     )
            # build a decorated run name; leave it empty if no base name was given
            decorated_name = ""
            if name is not None and name != "":
                decorated_name = name + "_popSize_" + str(popsize) + "_toMate_" + str(toMate) + "_toMutate_" + str(toMutate)
            timed_trainer = TimedTrainer(decorated_name,
                                         wrapper_ga,
                                         iter_time,
                                         iters_total,
                                         iters_step,
                                         _param_dict={'name':name,
                                                      'popSize':popsize,
                                                      'toMate':toMate,
                                                      'toMutate':toMutate}
                                         )
            timed_trainer.run()
Example #8
def mimic_discrete(name, ef, odd, ranges, iter_time, iters_total, iters_step, n_trials, params):
    for i_trial in range(n_trials):
        for samples, keep, m in itertools.product(*params):
            df = DiscreteDependencyTree(m, ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mimic_instance = MIMIC(samples, keep, pop)
            mimic_trainer = FixedIterationTrainer(mimic_instance, iters_step)
            mimic_state = {'problem': mimic_instance,
                           'trainer': mimic_trainer}
            # wrap the trainer and scoring callbacks for TimedTrainer
            wrapper_mimic = AlgoWrapper(mimic_state,
                                        lambda state: state['trainer'].train(),
                                        lambda state: ef.value(state['problem'].getOptimal()),
                                        lambda state: ef.value(state['problem'].getOptimal())
                                        )
            # build a decorated run name; leave it empty if no base name was given
            decorated_name = ""
            if name is not None and name != "":
                decorated_name = name + "_samples_" + str(samples) + "_keep_" + str(keep) + "_m_" + str(m)
            timed_trainer = TimedTrainer(decorated_name,
                                         wrapper_mimic,
                                         iter_time,
                                         iters_total,
                                         iters_step,
                                         _param_dict={'name':name,
                                                      'samples':samples,
                                                      'keep':keep,
                                                      'm':m}
                                         )
            timed_trainer.run()
Example #9
def ga_generic(name, ef, odd, mf, cf, iter_time, iters_total, iters_step, n_trials, params):
    for i_trial in range(n_trials):
        for popsize, toMate, toMutate in itertools.product(*params):
            gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
            ga_instance = StandardGeneticAlgorithm(popsize, int(popsize * toMate), int(popsize * toMutate), gap)
            ga_trainer = FixedIterationTrainer(ga_instance, iters_step)
            ga_state = {'problem': ga_instance,
                        'trainer': ga_trainer}
            wrapper_ga = AlgoWrapper(ga_state,
                                     lambda state: state['trainer'].train(),
                                     lambda state: ef.value(state['problem'].getOptimal()),
                                     lambda state: ef.value(state['problem'].getOptimal())
                                     )
            # build a decorated run name; leave it empty if no base name was given
            decorated_name = ""
            if name is not None and name != "":
                decorated_name = name + "_popSize_" + str(popsize) + "_toMate_" + str(toMate) + "_toMutate_" + str(toMutate)
            timed_trainer = TimedTrainer(decorated_name,
                                         wrapper_ga,
                                         iter_time,
                                         iters_total,
                                         iters_step,
                                         _param_dict={'name':name,
                                                      'popSize':popsize,
                                                      'toMate':toMate,
                                                      'toMutate':toMutate}
                                         )
            timed_trainer.run()
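StandardGeneticAlgorithm takes absolute counts, which is why the fractional toMate/toMutate values are scaled by popsize before construction. A hypothetical sweep (all values illustrative, ef/odd/mf/cf built as in the other examples):

params = ([100, 200],   # population sizes
          [0.5, 0.75],  # fraction of the population to mate
          [0.05, 0.1])  # fraction of the population to mutate
ga_generic('flipflop_ga', ef, odd, mf, cf,
           iter_time=60, iters_total=5000, iters_step=10,
           n_trials=3, params=params)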
Example #10
def run_rhc(t):
    fname = outfile.format('RHC', str(t + 1))
    with open(fname, 'a+') as f:
        f.seek(0)  # rewind; 'a+' opens at EOF
        content = f.read()
        if "fitness" not in content:
            f.write('iterations,fitness,time,fevals\n')
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print fname, st
        base.write_to_file(fname, st)

    return
Example #11
def run_mimic(t, samples, keep, m):
    fname = outfile.format('MIMIC{}_{}_{}'.format(samples, keep, m),
                           str(t + 1))
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(m, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
    mimic = MIMIC(samples, keep, pop)
    fit = FixedIterationTrainer(mimic, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(mimic.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
Example #12
def RHC():
    correctCount = 0
    RHC_iters = 10
    t = 0
    totalTime = 0
    totalIters = 0

    global rhc
    rhc = RandomizedHillClimbing(hcp)
    while correctCount < NUM_RIGHT:
        # print str(correctCount)+  " / 20 correct in RHC w/ iters " + str(RHC_iters)
        fit = FixedIterationTrainer(rhc, RHC_iters)
        start = time.time()
        fitness = fit.train()
        t = time.time() - start
        totalIters += RHC_iters
        totalTime += t
        myWriter.addValue(fitness, "RHC_fitness", runNum)
        myWriter.addValue(t, "RHC_searchTimes", runNum)
        v = ef.value(rhc.getOptimal())
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
            #RHC_iters += 1
    myWriter.addValue(totalTime, "RHC_times", runNum)
    myWriter.addValue(totalIters, "RHC_iters", runNum)
    print str(N) + ": RHC: " + str(ef.value(
        rhc.getOptimal())) + " took " + str(totalTime) + " seconds and " + str(
            totalIters) + " iterations"
Example #13
def MIMICtest():
    correctCount = 0
    MIMIC_iters = 10
    MIMIC_samples = 5 * N  #max(1,int(N/10))
    MIMIC_keep = int(.1 * MIMIC_samples)
    t = 0
    while correctCount < NUM_RIGHT and MIMIC_iters <= 500:
        MIMIC_keep = int(max(.1 * MIMIC_samples, 1))
        mimic = MIMIC(int(MIMIC_samples), int(MIMIC_keep), pop)
        start = time.time()
        fit = FixedIterationTrainer(mimic, int(MIMIC_iters))
        fitness = fit.train()
        t = time.time() - start
        v = ef.value(mimic.getOptimal())
        myWriter.addValue(fitness, "MIMIC_fitness", runNum)
        myWriter.addValue(t, "MIMIC_searchTimes", runNum)
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
            MIMIC_iters *= 1.1
            MIMIC_samples *= 1.1
    myWriter.addValue(t, "MIMIC_times", 0)
    myWriter.addValue(int(MIMIC_iters), "MIMIC_iters", 0)
    myWriter.addValue(int(MIMIC_samples), "MIMIC_samples", 0)
    myWriter.addValue(int(MIMIC_keep), "MIMIC_keep", 0)

    print(
        str(N) + ": MIMIC: " + str(ef.value(mimic.getOptimal())) + " took " +
        str(t) + " seconds and " + str(int(MIMIC_iters)) + " iterations and " +
        str(int(MIMIC_samples)) + " samples with keep " + str(int(MIMIC_keep)))
Example #14
def test_iter(algorithm, arguments, no_loops, no_iter):
    results = []
    for loop in range(no_loops):
        algo_init = algorithm(*arguments)
        fit = FixedIterationTrainer(algo_init, no_iter)
        fit.train()
        results += [ef.value(algo_init.getOptimal())]
    return results
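A hypothetical call, assuming a module-level hcp and ef from an ABAGAIL setup like the ones above:

scores = test_iter(RandomizedHillClimbing, (hcp,), no_loops=10, no_iter=5000)
print 'RHC mean fitness over 10 restarts: ' + str(sum(scores) / len(scores))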
Example #15
def mimic_fac(args={}):
    constant_params = {'op': pop}
    params = merge_two_dicts(args, constant_params)
    mimic = MIMIC(args['samples'], int(args['samples'] * args['tokeep']), pop)

    mfit = FixedIterationTrainer(mimic, num_iterations)
    return mfit
Example #16
def ga_fac(args={}):
    constant_params = {'gap': gap}
    params = merge_two_dicts(args, constant_params)
    ga = StandardGeneticAlgorithm(
        args['populationSize'], int(args['populationSize'] * args['toMate']),
        int(args['populationSize'] * args['toMutate']), gap)
    gfit = FixedIterationTrainer(ga, num_iterations)
    return gfit
Example #17
def run_mimic(pop, ef, iterations=1000):

    mimic = MIMIC(200, 20, pop)
    fit = FixedIterationTrainer(mimic, iterations)
    fit.train()
    optimal_result = str(ef.value(mimic.getOptimal()))
    print "MIMIC: " + optimal_result

    return optimal_result, iterations
Example #18
def run_ga(gap, ef, iterations=1000):

    ga = StandardGeneticAlgorithm(200, 100, 10, gap)
    fit = FixedIterationTrainer(ga, iterations)
    fit.train()
    optimal_result = str(ef.value(ga.getOptimal()))
    print "GA: " + optimal_result

    return optimal_result, iterations
Example #19
def run_sa(hcp, ef, iterations=200000):
    sa = SimulatedAnnealing(1E11, .95, hcp)
    fit = FixedIterationTrainer(sa, iterations)
    fit.train()

    optimal_result = str(ef.value(sa.getOptimal()))
    print "SA: " + optimal_result

    return optimal_result, iterations
Example #20
def sa_fac(args={}):
    constant_params = {'hcp': hcp}
    params = merge_two_dicts(args, constant_params)

    sa = SimulatedAnnealing(args['t'], args['cooling'], hcp)
    # sa = SimulatedAnnealing(**params)

    sfit = FixedIterationTrainer(sa, num_iterations)
    return sfit
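These *_fac factories (Examples #15, #16, and #20) close over module-level problem objects and num_iterations and hand back a ready-to-run trainer. A hypothetical use, noting that ABAGAIL's FixedIterationTrainer.train() returns the mean fitness over its iterations:

num_iterations = 1000
sfit = sa_fac({'t': 1E11, 'cooling': 0.95})
mean_fitness = sfit.train()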
Example #21
def eval_algo(ef, algo, fixed_iter):
    fit = FixedIterationTrainer(algo, fixed_iter)
    start = time.time()
    fit.train()
    end = time.time()
    score = ef.value(algo.getOptimal())
    runtime = end - start
    call_count = 0  # evaluation-call tracking not implemented; returned as a placeholder
    return score, call_count, runtime
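With setup names assumed from the other examples, the helper makes side-by-side comparisons one-liners:

rhc = RandomizedHillClimbing(hcp)
sa = SimulatedAnnealing(1E11, .95, hcp)
for label, algo in [('RHC', rhc), ('SA', sa)]:
    score, calls, runtime = eval_algo(ef, algo, 5000)
    print '%s: fitness=%s time=%.3fs' % (label, score, runtime)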
Example #22
def run_rhc(hcp, ef, iterations=200000):

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iterations)
    fit.train()

    optimal_result = str(ef.value(rhc.getOptimal()))
    print "RHC: " + optimal_result

    return optimal_result, iterations
Example #23
def run_four_peaks_exploringSA():

    N = 200
    T = N / 5
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 30000, 35000, 40000, 45000, 50000]
    num_repeats = 5


    all_sa_results = []
    all_sa_times = []


    coolings = [0.15, 0.35, 0.55, 0.75, 0.95]
    for cooling in coolings:
        sa_results = []
        sa_times = []
        for i in iters:
            print(i)
            for j in range(num_repeats):
                start = time.time()
                sa = SimulatedAnnealing(1E11, cooling, hcp)
                fit = FixedIterationTrainer(sa, i)
                fit.train()
                end = time.time()
                sa_results.append(ef.value(sa.getOptimal()))
                sa_times.append(end - start)
                print "SA cooling " + str(cooling) + ": "  + str(ef.value(sa.getOptimal()))
        all_sa_results.append(sa_results)
        all_sa_times.append(sa_times)

    with open('four_peaks_exploringSA.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for sa_results in all_sa_results:
            writer.writerow(sa_results)
        for sa_times in all_sa_times:
            writer.writerow(sa_times)

    return all_sa_results, all_sa_times
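Called as below, the function both writes four_peaks_exploringSA.csv and returns the raw lists; each cooling rate contributes one row of fitness values and one row of times, with len(iters) * num_repeats entries per row:

all_results, all_times = run_four_peaks_exploringSA()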
Example #24
    def run_experiment(self, opName):
        """Run a genetic algorithms optimization experiment for a given
        optimization problem.

        Args:
            opName (str): Name of the optimization problem.

        """
        outdir = 'results/OPT/{}'.format(opName)  # get results directory
        outfile = 'GA_{}_{}_{}_results.csv'.format(self.p, self.ma, self.mu)
        fname = get_abspath(outfile, outdir)  # get output filename

        # delete existing results file, if it already exists
        try:
            os.remove(fname)
        except Exception as e:
            print e

        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals,trial\n')

        # start experiment
        for t in range(self.numTrials):
            # initialize optimization problem and training functions
            ranges, ef = self.op.get_ef()
            mf = None
            cf = None
            if opName == 'TSP':
                mf = SwapMutation()
                cf = TravelingSalesmanCrossOver(ef)
            else:
                mf = DiscreteChangeOneMutation(ranges)
                cf = SingleCrossOver()
            odd = DiscreteUniformDistribution(ranges)
            gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
            ga = StandardGeneticAlgorithm(self.p, self.ma, self.mu, gap)
            fit = FixedIterationTrainer(ga, 10)

            # run experiment and train evaluation function
            start = time.clock()
            for i in range(0, self.maxIters, 10):
                fit.train()
                elapsed = time.clock() - start
                fe = ef.valueCallCount
                score = ef.value(ga.getOptimal())
                ef.valueCallCount -= 1

                # write results to output file
                s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
                with open(fname, 'a+') as f:
                    f.write(s)
Example #25
def perform(alg, fname):
    fit = FixedIterationTrainer(alg, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        score = ef.value(alg.getOptimal())
        st = '{},{},{}\n'.format(i, score, times[-1])
        # print st
        with open(fname, 'a') as f:
            f.write(st)
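A hypothetical call, assuming module-level ef, odd, nf, and maxIters, with the ABAGAIL classes imported as in the other examples:

hcp = GenericHillClimbingProblem(ef, odd, nf)
perform(RandomizedHillClimbing(hcp), 'rhc_run1.csv')
perform(SimulatedAnnealing(1E11, .95, hcp), 'sa_run1.csv')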
Example #26
    def run_experiment(self, opName):
        """Run a simulated annealing optimization experiment for a given
        optimization problem.

        Args:
            opName (str): Name of the optimization problem.

        """
        outdir = 'results/OPT/{}'.format(opName)  # get results directory
        outfile = 'SA_{}_results.csv'.format(self.cr)
        fname = get_abspath(outfile, outdir)  # get output filename

        # delete existing results file, if it already exists
        try:
            os.remove(fname)
        except Exception as e:
            print e

        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals,trial\n')

        # start experiment
        for t in range(self.numTrials):
            # initialize optimization problem and training functions
            ranges, ef = self.op.get_ef()
            nf = None
            if opName == 'TSP':
                nf = SwapNeighbor()
            else:
                nf = DiscreteChangeOneNeighbor(ranges)
            odd = DiscreteUniformDistribution(ranges)
            hcp = GenericHillClimbingProblem(ef, odd, nf)
            sa = SimulatedAnnealing(1E10, self.cr, hcp)
            fit = FixedIterationTrainer(sa, 10)

            # run experiment and train evaluation function
            start = time.clock()
            for i in range(0, self.maxIters, 10):
                fit.train()
                elapsed = time.clock() - start
                fe = ef.valueCallCount
                score = ef.value(sa.getOptimal())
                ef.valueCallCount -= 1

                # write results to output file
                s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
                with open(fname, 'a+') as f:
                    f.write(s)
Example #27
File: helpers.py Project: rsadek/ABAGAIL
def IterRangeExperiment(name, experiment, points, paramRange, mat, row):
    totalSoFar = 0
    fitVec = []
    timeVec = []
    for idx, i in enumerate(paramRange):
        num = i - totalSoFar
        fit = FixedIterationTrainer(experiment, num)
        totalSoFar += num
        frow = row * len(paramRange) + idx
        fitness, elapsed = TrainAndSave(experiment, points, fit, mat, name, frow)
        fitVec.append(fitness)
        timeVec.append(elapsed)
    saveFit(name + "_fitness", fitVec, row, mat)
    saveFit(name + "_iterations", paramRange, row, mat)
    return fitVec
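The num = i - totalSoFar arithmetic trains the same experiment instance only for the gap between successive checkpoints, so each measurement reflects the cumulative iteration count without restarting the search. A hypothetical call (rhc, points, and mat assumed from the surrounding project):

fitVec = IterRangeExperiment('RHC', rhc, points, [100, 500, 1000], mat, 0)
# trains for 100, then 400, then 500 additional iterations; fitVec[k] is the
# fitness after 100, 500, 1000 total iterations respectively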
Example #28
    def run_experiment(self, opName):
        """Run a MIMIC optimization experiment for a given optimization
        problem.

        Args:
            opName (str): Name of the optimization problem.

        """
        outdir = 'results/OPT/{}'.format(opName)  # get results directory
        outfile = 'MIMIC_{}_{}_{}_results.csv'.format(self.s, self.k, self.m)
        fname = get_abspath(outfile, outdir)  # get output filename

        # delete existing results file, if it already exists
        try:
            os.remove(fname)
        except Exception as e:
            print e

        with open(fname, 'w') as f:
            f.write('iterations,fitness,time,fevals,trial\n')

        # start experiment
        for t in range(self.numTrials):
            # initialize optimization problem and training functions
            ranges, ef = self.op.get_ef()
            df = DiscreteDependencyTree(self.m, ranges)
            odd = DiscreteUniformDistribution(ranges)
            pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
            mimic = MIMIC(self.s, self.k, pop)
            fit = FixedIterationTrainer(mimic, 10)

            # run experiment and train evaluation function
            start = time.clock()
            for i in range(0, self.maxIters, 10):
                fit.train()
                elapsed = time.clock() - start
                fe = ef.valueCallCount
                score = ef.value(mimic.getOptimal())
                ef.valueCallCount -= 1

                # write results to output file
                s = '{},{},{},{},{}\n'.format(i + 10, score, elapsed, fe, t)
                with open(fname, 'a+') as f:
                    f.write(s)
Example #29
def mimicGATest():

    popBegin = 1
    popEnd = 101
    keepBegin = 1
    keepEnd = 90
    mutBegin = 1
    mutEnd = 90
    itersBegin = 1
    itersEnd = 200

    samples = 10
    keep = 2

    problemSize = N
    iters = 1

    paramRanges = Vector(8)
    paramRanges.addElement(popBegin)
    paramRanges.addElement(popEnd)
    paramRanges.addElement(keepBegin)
    paramRanges.addElement(keepEnd)
    paramRanges.addElement(mutBegin)
    paramRanges.addElement(mutEnd)
    paramRanges.addElement(itersBegin)
    paramRanges.addElement(itersEnd)

    allParamValues = range(popBegin, popEnd + 1) + range(
        keepBegin, keepEnd + 1) + range(mutBegin, mutEnd + 1) + range(
            itersBegin, itersEnd + 1)
    totalParamSize = len(allParamValues)
    metaFun = RamysEvalMetafunc(ranges)
    discreteDist = RamysMimicDistribution(
        paramRanges)  #DiscreteUniformDistribution(problemSize)
    distFunc = DiscreteDependencyTree(.1, allParamValues)
    findGA = GenericProbabilisticOptimizationProblem(metaFun, discreteDist,
                                                     distFunc)
    mimic = MIMIC(samples, keep, findGA)
    fit = FixedIterationTrainer(mimic, iters)
    fit.train()
    print str(N) + ": MIMIC finds GA : " + str(ef.value(mimic.getOptimal()))
Example #30
File: tsp.py Project: dm458/abores3
def run_rhc(t):
    fname = outfile.format('RHC', str(t + 1))
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = time.clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return