Пример #1
0
def RHC():
    """Run Randomized Hill Climbing until the optimum is hit NUM_RIGHT times in a row.

    Relies on module-level state: hcp, NUM_RIGHT, ef, N, myWriter, runNum, time.
    Logs per-step fitness and wall time, then cumulative totals, via myWriter.
    Publishes the optimizer through the module-level ``rhc`` global.
    """
    correctCount = 0
    RHC_iters = 10  # iterations performed per training step
    t=0
    totalTime =0
    totalIters = 0

    global rhc
    rhc = RandomizedHillClimbing(hcp)
    # Train until the optimizer reaches the known optimum (value == N)
    # NUM_RIGHT times consecutively; any miss resets the streak to zero.
    while correctCount < NUM_RIGHT:
        # print str(correctCount)+  " / 20 correct in RHC w/ iters " + str(RHC_iters)
        fit = FixedIterationTrainer(rhc, RHC_iters)
        start = time.time()
        fitness = fit.train()
        t = time.time() - start
        totalIters+=RHC_iters
        totalTime += t;
        myWriter.addValue(fitness, "RHC_fitness", runNum)
        myWriter.addValue(t, "RHC_searchTimes",runNum)
        v = ef.value(rhc.getOptimal())
        if v == N:
            correctCount += 1
        else:
            correctCount = 0
            #RHC_iters += 1
    myWriter.addValue(totalTime,"RHC_times",runNum)
    myWriter.addValue(totalIters,"RHC_iters",runNum)
    print str(N) + ": RHC: " + str(ef.value(rhc.getOptimal()))+" took "+str(totalTime)+" seconds and " + str(totalIters) + " iterations"
Пример #2
0
def main():
    """Run algorithms on the abalone dataset.

    Trains one network per optimizer (RHC, SA, GA), then evaluates each on
    the same instances it was trained on, printing timing and accuracy.
    Relies on module-level initialize_instances, train, time, and layer
    size constants.
    """
    instances = initialize_instances()
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(instances)

    networks = []  # BackPropagationNetwork
    nnop = []  # NeuralNetworkOptimizationProblem
    oa = []  # OptimizationAlgorithm
    oa_names = ["RHC", "SA", "GA"]
    results = ""

    # One network + optimization problem per algorithm so each optimizer
    # trains its own, independent set of weights.
    for name in oa_names:
        classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
        networks.append(classification_network)
        nnop.append(NeuralNetworkOptimizationProblem(data_set, classification_network, measure))

    # Optimizer order must match oa_names: RHC, SA, GA.
    oa.append(RandomizedHillClimbing(nnop[0]))
    oa.append(SimulatedAnnealing(1E11, .95, nnop[1]))
    oa.append(StandardGeneticAlgorithm(200, 100, 10, nnop[2]))

    for i, name in enumerate(oa_names):
        start = time.time()
        correct = 0
        incorrect = 0

        train(oa[i], networks[i], oa_names[i], instances, measure)
        end = time.time()
        training_time = end - start

        # Install the best weights found by this optimizer before evaluating.
        optimal_instance = oa[i].getOptimal()
        networks[i].setWeights(optimal_instance.getData())

        # Evaluation uses the training instances (no held-out split here).
        start = time.time()
        for instance in instances:
            networks[i].setInputValues(instance.getData())
            networks[i].run()

            actual = instance.getLabel().getContinuous()
            predicted = networks[i].getOutputValues().get(0)

            # Prediction within 0.5 of the continuous label counts as correct.
            if abs(predicted - actual) < 0.5:
                correct += 1
            else:
                incorrect += 1

        end = time.time()
        testing_time = end - start

        results += "\nResults for %s: \nCorrectly classified %d instances." % (name, correct)
        results += "\nIncorrectly classified %d instances.\nPercent correctly classified: %0.03f%%" % (incorrect, float(correct)/(correct+incorrect)*100.0)
        results += "\nTraining time: %0.03f seconds" % (training_time,)
        results += "\nTesting time: %0.03f seconds\n" % (testing_time,)

    print results
Пример #3
0
def run_rhc(t):
    """Run one Randomized Hill Climbing trial on the TSP instance.

    Args:
        t: Zero-based trial index; used only to name the output file.

    Writes one CSV row per 10-iteration step: iteration, best fitness,
    cumulative elapsed time, evaluation-function call count. Relies on
    module-level outfile, points, odd, nf, maxIters, clock and base.
    """
    fname = outfile.format('RHC', str(t + 1))
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        # Fix: measure the step with the same clock() used for `start`
        # (the original mixed clock() with time.clock()).
        elapsed = clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        # Don't let the scoring call above inflate the running feval count.
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print st
        base.write_to_file(fname, st)
    return
Пример #4
0
def main():
    """Train an RHC-optimized classifier on the Cryo dataset.

    Loads the train/test CSVs, builds a single-hidden-layer logistic
    network from the module-level layer constants, and delegates the
    training loop to the module-level train() helper.
    """
    train_set = initialize_instances('Cryo_train.csv')
    test_set = initialize_instances('Cryo_test.csv')
    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_set)
    activation = LogisticSigmoid()
    update_rule = RPROPUpdateRule()  # constructed but unused downstream
    layer_spec = [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER]
    network = net_factory.createClassificationNetwork(layer_spec, activation)
    problem = NeuralNetworkOptimizationProblem(dataset, network, error_measure)
    optimizer = RandomizedHillClimbing(problem)
    train(optimizer, network, 'RHC', train_set, test_set, error_measure)
Пример #5
0
def run_rhc(t):
    """Run one Randomized Hill Climbing trial on the continuous-peaks problem.

    Args:
        t: Zero-based trial index; used only to name the output file.

    Writes one CSV row per 10-iteration step: iteration, best fitness,
    cumulative elapsed time, evaluation-function call count. Relies on
    module-level outfile, T, ranges, maxIters, clock and base.
    """
    fname = outfile.format('RHC', str(t + 1))
    ef = ContinuousPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    times = [0]
    for i in range(0, maxIters, 10):
        start = clock()
        fit.train()
        # Fix: measure the step with the same clock() used for `start`
        # (the original mixed clock() with time.clock()).
        elapsed = clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        # Don't let the scoring call above inflate the running feval count.
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        # print fname, st
        base.write_to_file(fname, st)

    return
Пример #6
0
def main():
    """Train an RHC-optimized four-hidden-layer tanh network.

    Reads the train/test CSVs from relative paths and delegates the
    training loop to the module-level train() helper.
    """
    train_set = initialize_instances('./../data/x_train_val.csv')
    test_set = initialize_instances('./../data/x_test.csv')
    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_set)
    activation = HyperbolicTangentSigmoid()
    update_rule = RPROPUpdateRule()  # constructed but unused downstream
    layer_spec = [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2,
                  HIDDEN_LAYER3, HIDDEN_LAYER4, OUTPUT_LAYER]
    network = net_factory.createClassificationNetwork(layer_spec, activation)
    problem = NeuralNetworkOptimizationProblem(dataset, network, error_measure)
    optimizer = RandomizedHillClimbing(problem)
    train(optimizer, network, 'RHC', train_set, test_set, error_measure,
          TRAINING_ITERATIONS, OUTFILE)
Пример #7
0
def main():
    """Run algorithms on the adult dataset.

    By default runs RHC, SA, GA, and backprop; a single algorithm name may
    be given as sys.argv[1] to restrict the run. Relies on module-level
    trainX/testX paths, layer constants, and the train() helper.
    """
    train_instances = initialize_instances(trainX)
    test_instances = initialize_instances(testX)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_instances)

    networks = []  # BackPropagationNetwork
    nnop = []  # NeuralNetworkOptimizationProblem
    oa = []  # OptimizationAlgorithm
    oa_names = ["RHC", "SA", "GA", "BP"]
    print(sys.argv)
    # Optional CLI filter: run only the named algorithm.
    # NOTE(review): sys.argv[2] is read into set_num but never used after.
    if len(sys.argv) > 1:
        oa_names = [sys.argv[1]]
        set_num = sys.argv[2]
    # results = ""
    for name in oa_names:
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU())
        networks.append(classification_network)
        if name != "BP":
            nnop.append(
                NeuralNetworkOptimizationProblem(data_set,
                                                 classification_network,
                                                 measure))
        else:
            # Backprop is a trainer rather than an optimization problem,
            # but it is stored in the same nnop list and indexed by name.
            print("adding backprop")
            rule = RPROPUpdateRule()
            nnop.append(
                BatchBackPropagationTrainer(data_set, classification_network,
                                            measure, rule))

    # Pair each optimizer with the nnop entry at its name's index; this
    # holds both for the full four-name list and a single filtered name.
    if "RHC" in oa_names:
        rhc_index = oa_names.index("RHC")
        oa.append(RandomizedHillClimbing(nnop[rhc_index]))
    if "SA" in oa_names:
        sa_index = oa_names.index("SA")
        oa.append(SimulatedAnnealing(1E11, .95, nnop[sa_index]))
    if "GA" in oa_names:
        ga_index = oa_names.index("GA")
        oa.append(StandardGeneticAlgorithm(100, 50, 10, nnop[ga_index]))
    if "BP" in oa_names:
        rule = RPROPUpdateRule()
        bp_index = oa_names.index("BP")
        oa.append(nnop[bp_index])

    for i, name in enumerate(oa_names):
        train(oa[i], networks[i], oa_names[i], train_instances, test_instances,
              measure)
Пример #8
0
def main():
    """Run algorithms on the abalone dataset."""
    train_instances = initialize_instances(TRAIN_FILE)
    test_instances = initialize_instances(TEST_FILE)

    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_instances)

    networks = []  # BackPropagationNetwork
    nnop = []  # NeuralNetworkOptimizationProblem
    oa = []  # OptimizationAlgorithm

    oa_names = ["trial_1", "trial_2", "trial_3", "trail_4", "trial_5"]

    results = ""

    for name in oa_names:
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER_1, HIDDEN_LAYER_1, OUTPUT_LAYER])
        networks.append(classification_network)
        nnop.append(
            NeuralNetworkOptimizationProblem(data_set, classification_network,
                                             measure))

    for num, params in enumerate(oa_names):
        oa.append(RandomizedHillClimbing(nnop[num]))

    result_file = open(WRITE_DIR, "w")

    for i, name in enumerate(oa_names):
        start = time.time()

        train(oa[i], networks[i], oa_names[i], train_instances, measure,
              result_file)
        end = time.time()

        training_time = end - start

        optimal_instance = oa[i].getOptimal()
        networks[i].setWeights(optimal_instance.getData())

        result_file.write("finished_training " + name + " in " +
                          str(training_time))

        test(test_instances, networks[i], name, result_file)

        print "finished training, " + name

    result_file.close()
Пример #9
0
def main():
    """Run TRIALS independent RHC training runs on the spam dataset.

    A single RELU network is shared across trials; each trial wraps it in
    a fresh optimization problem driven by RandomizedHillClimbing and
    delegates to the module-level train() helper.
    """
    train_set = initialize_instances('spam_trg.csv')
    test_set = initialize_instances('spam_test.csv')
    validation_set = initialize_instances('spam_val.csv')
    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_set)
    activation = RELU()
    update_rule = RPROPUpdateRule()  # constructed but unused downstream
    algo_labels = ["RHC"]
    layer_spec = [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER]
    network = net_factory.createClassificationNetwork(layer_spec, activation)
    for _trial in xrange(TRIALS):
        problem = NeuralNetworkOptimizationProblem(dataset, network, error_measure)
        optimizer = RandomizedHillClimbing(problem)
        train(optimizer, network, 'RHC', train_set, validation_set, test_set, error_measure)
Пример #10
0
def main():
    """Train an RHC-optimized three-hidden-layer RELU network.

    File paths and layer sizes come from module-level constants; the
    training loop is delegated to the module-level train() helper.
    """
    train_set = initialize_instances(TRAIN_DATA_FILE)
    test_set = initialize_instances(TEST_DATA_FILE)
    validation_set = initialize_instances(VALIDATE_DATA_FILE)
    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_set)
    activation = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    update_rule = RPROPUpdateRule(0.001, 50, 0.000001)
    algo_labels = ["RHC"]
    layer_spec = [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2,
                  HIDDEN_LAYER3, OUTPUT_LAYER]
    network = net_factory.createClassificationNetwork(layer_spec, activation)
    problem = NeuralNetworkOptimizationProblem(dataset, network, error_measure)
    optimizer = RandomizedHillClimbing(problem)
    train(optimizer, network, 'RHC', train_set, validation_set, test_set,
          error_measure, TRAINING_ITERATIONS, OUTFILE.format('RHC'))
Пример #11
0
def initialize_networks_and_optimization(networks, nnop, oa, instances,
                                         factory=None,
                                         measure=None):
    """Reset and repopulate the networks/problems/optimizers lists in place.

    Args:
        networks: List filled with one classification network per OA_NAMES entry.
        nnop: List filled with the matching optimization problems.
        oa: List filled with RHC, SA, and GA optimizers, in that order.
        instances: Training instances wrapped into a DataSet.
        factory: Optional BackPropagationNetworkFactory; built fresh per call
            when omitted.
        measure: Optional error measure; built fresh per call when omitted.
    """
    # Fix: the originals were default-argument instances, evaluated once at
    # function definition time and silently shared across every call.
    if factory is None:
        factory = BackPropagationNetworkFactory()
    if measure is None:
        measure = SumOfSquaresError()

    del networks[:]
    del nnop[:]
    del oa[:]

    data_set = DataSet(instances)
    for _ in OA_NAMES:
        classification_network = factory.createClassificationNetwork([
            INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], LogisticSigmoid())
        networks.append(classification_network)
        nnop.append(NeuralNetworkOptimizationProblem(data_set, classification_network, measure))

    # Order matters: callers index these as RHC=0, SA=1, GA=2.
    oa.append(RandomizedHillClimbing(nnop[0]))
    oa.append(SimulatedAnnealing(1E11, .95, nnop[1]))
    oa.append(StandardGeneticAlgorithm(200, 100, 10, nnop[2]))
Пример #12
0
    def run_experiment(self, train, test, validation):
        """Run experiment

        Builds the classification network from the configured layer sizes,
        selects the optimization algorithm named by self.oaName ('BP',
        'RHC', 'SA', or 'GA'), then delegates training to self.train().

        Args:
            train (list): List of training instances.
            test (list): List of test instances.
            validation (list): List of validation instances.

        """
        factory = BackPropagationNetworkFactory()  # instantiate main NN class
        params = [
            self.input_layer, self.hidden_layer_one, self.hidden_layer_two,
            self.output_layer
        ]
        self.network = factory.createClassificationNetwork(params)
        dataset = DataSet(train)  # setup training instances dataset
        # Built unconditionally; only the non-BP branches below use it.
        nnop = NeuralNetworkOptimizationProblem(dataset, self.network,
                                                self.measure)
        oa = None

        # get output file name
        outpath = 'results/NN'
        filename = None

        # options for different optimization algorithms
        if self.oaName == 'BP':
            filename = '{}/results.csv'.format(self.oaName)
            rule = RPROPUpdateRule()
            oa = BatchBackPropagationTrainer(dataset, self.network,
                                             self.measure, rule)
        elif self.oaName == 'RHC':
            filename = '{}/results.csv'.format(self.oaName)
            oa = RandomizedHillClimbing(nnop)
        elif self.oaName == 'SA':
            # File name records the temperature/cooling hyper-parameters.
            filename = '{}/results_{}_{}.csv'.format(self.oaName, self.SA_T,
                                                     self.SA_C)
            oa = SimulatedAnnealing(self.SA_T, self.SA_C, nnop)
        elif self.oaName == 'GA':
            # File name records population/mate/mutate hyper-parameters.
            filename = '{}/results_{}_{}_{}.csv'.format(
                self.oaName, self.GA_P, self.GA_MA, self.GA_MU)
            oa = StandardGeneticAlgorithm(self.GA_P, self.GA_MA, self.GA_MU,
                                          nnop)

        # train network
        # NOTE(review): an unrecognized oaName leaves oa and filename as
        # None here — confirm oaName is validated upstream.
        filepath = get_abspath(filename, outpath)
        self.train(oa, train, test, validation, filepath)
Пример #13
0
def main():
    """Train an RHC-optimized network on the cleaned adult dataset.

    Loads train/test files from relative paths and delegates the training
    loop to the module-level train() helper.
    """
    train_set = initialize_instances('./clean_data/adult_train.txt')
    test_set = initialize_instances('./clean_data/adult_test.txt')
    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_set)
    sigmoid_unit = LogisticSigmoid()  # built but not passed to the factory
    update_rule = RPROPUpdateRule()   # constructed but unused downstream
    algo_labels = ["RHC"]
    network = net_factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])  #,logunit)
    problem = NeuralNetworkOptimizationProblem(dataset, network,
                                               error_measure)
    optimizer = RandomizedHillClimbing(problem)
    train(optimizer, network, 'RHC', train_set, test_set, error_measure)
Пример #14
0
def main(ds_name):
    """Run an RHC neural-network experiment for the named problem set.

    Args:
        ds_name: Key passed to get_problemset() selecting the network
            config and the train/validation/test file paths.
    """
    nn_config, train_file, val_file, test_file = get_problemset(ds_name)
    training_ints = initialize_instances(train_file)
    testing_ints = initialize_instances(test_file)
    validation_ints = initialize_instances(val_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    # Fix: the original formatted with `name`, which is undefined here
    # (NameError at runtime); the dataset label is ds_name.
    oa_names = "RHC_{}".format(ds_name)
    classification_network = factory.createClassificationNetwork(nn_config, relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, oa_names, training_ints, validation_ints, testing_ints, measure,
          TRAINING_ITERATIONS, OUTFILE.format(oa_names))
Пример #15
0
def main(layers, training_iterations, test_data_file, train_data_file, validate_data_file, data_name):
    """Train an RHC-optimized network with the given layer spec and data files.

    Args:
        layers: List of layer sizes for the classification network.
        training_iterations: Iteration budget forwarded to base.train().
        test_data_file: Path to the test-set CSV.
        train_data_file: Path to the training-set CSV.
        validate_data_file: Path to the validation-set CSV.
        data_name: Dataset label used to format the output file name.
    """
    train_set = base.initialize_instances(train_data_file)
    test_set = base.initialize_instances(test_data_file)
    validation_set = base.initialize_instances(validate_data_file)
    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_set)
    activation = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    update_rule = RPROPUpdateRule(0.064, 50, 0.000001)
    algo_labels = ["RHC"]
    network = net_factory.createClassificationNetwork(layers, activation)
    problem = NeuralNetworkOptimizationProblem(dataset, network,
                                               error_measure)
    optimizer = RandomizedHillClimbing(problem)
    base.train(optimizer, network, 'RHC', train_set, validation_set, test_set,
               error_measure, training_iterations,
               OUTFILE.format(data_name, 'RHC'))
    return
Пример #16
0
def main():
    """Train an RHC-optimized two-hidden-layer RELU network on Pima data.

    Prints the training-set size, then delegates the training loop to the
    module-level train() helper.
    """

    train_data = initialize_instances('../data/Pima-train.csv')
    test_data = initialize_instances('../data/Pima-test.csv')
    print(len(train_data))

    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_data)
    activation = RELU()
    update_rule = RPROPUpdateRule()  # constructed but unused downstream
    algo_labels = ["RHC"]
    layer_spec = [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER]
    network = net_factory.createClassificationNetwork(layer_spec, activation)
    problem = NeuralNetworkOptimizationProblem(dataset, network,
                                               error_measure)
    optimizer = RandomizedHillClimbing(problem)
    train(optimizer, network, 'RHC', train_data, test_data, error_measure)
def main():
    """Train an RHC-optimized tanh network on the wine-quality dataset.

    NOTE(review): this is a second module-level ``def main()``; it rebinds
    the name and shadows the earlier definition in this file.
    """
    #training_ints = initialize_instances('bCancer_trg.csv')
    #testing_ints = initialize_instances('bCancer_test.csv')
    #validation_ints = initialize_instances('bCancer_val.csv')

    training_ints = initialize_instances('winequality_trg.csv')
    testing_ints = initialize_instances('winequality_test.csv')
    validation_ints = initialize_instances('winequality_val.csv')

    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    hts = HyperbolicTangentSigmoid()
    rule = RPROPUpdateRule()  # constructed but unused afterwards
    oa_names = ["RHC"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], hts)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, 'RHC', training_ints, validation_ints,
          testing_ints, measure)
Пример #18
0
def main():
    """Run TRIALS independent RHC training runs on the 's' dataset.

    Data files are loaded from absolute paths; one logistic network is
    shared across trials, each trial wrapping it in a fresh optimization
    problem before delegating to the module-level train() helper.
    """
    train_set = initialize_instances(
        '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_trg.csv')
    test_set = initialize_instances(
        '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_test.csv')
    validation_set = initialize_instances(
        '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_val.csv')
    net_factory = BackPropagationNetworkFactory()
    error_measure = SumOfSquaresError()
    dataset = DataSet(train_set)
    activation = LogisticSigmoid()
    update_rule = RPROPUpdateRule()  # constructed but unused downstream
    algo_labels = ["RHC"]
    layer_spec = [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2,
                  HIDDEN_LAYER3, OUTPUT_LAYER]
    network = net_factory.createClassificationNetwork(layer_spec, activation)
    for _trial in xrange(TRIALS):
        problem = NeuralNetworkOptimizationProblem(dataset, network,
                                                   error_measure)
        optimizer = RandomizedHillClimbing(problem)
        train(optimizer, network, 'RHC', train_set, validation_set,
              test_set, error_measure)
Пример #19
0
def main():
    """Train an RHC-optimized three-hidden-layer RELU network.

    Loads train/validation/test CSVs from absolute paths and delegates the
    training loop to the module-level train() helper.
    """
    # Fix: removed a leftover pdb.set_trace() that dropped every run into
    # the interactive debugger.
    training_ints = initialize_instances(
        '/Users/lijiang/Desktop/yichuan_HW/Archive/train.csv')
    testing_ints = initialize_instances(
        '/Users/lijiang/Desktop/yichuan_HW/Archive/test.csv')
    validation_ints = initialize_instances(
        '/Users/lijiang/Desktop/yichuan_HW/Archive/validation.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    rule = RPROPUpdateRule()  # constructed but unused afterwards
    oa_names = ["RHC"]
    classification_network = factory.createClassificationNetwork([
        INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER
    ], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, 'RHC', training_ints, validation_ints,
          testing_ints, measure)
Пример #20
0
def Random_hill_climb(out_path, train_inst, test_inst, repeats,
                      training_iterations):
    """Run `repeats` independent RHC training runs, one output file each.

    For every repeat: write a CSV header to a per-run file (the "RHC_"
    prefix in out_path gains a zero-padded run index), rebuild the
    network/problem from scratch, and delegate the loop to train().
    Relies on module-level layer constants and the train() helper.
    """
    for i in range(repeats):
        # Per-repeat output file, e.g. ...RHC_000..., ...RHC_001...
        out_path_ = out_path.replace("RHC_", 'RHC_{}'.format(str(i).zfill(3)))
        with open(out_path_, 'w') as f:
            f.write('{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg',
                                                 'MSE_tst', 'acc_trg',
                                                 'acc_tst', 'elapsed'))
        factory = BackPropagationNetworkFactory()
        measure = SumOfSquaresError()
        data_set = DataSet(train_inst)
        # acti = LogisticSigmoid()
        acti = HyperbolicTangentSigmoid()
        rule = RPROPUpdateRule()  # constructed but unused afterwards
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], acti)
        nnop = NeuralNetworkOptimizationProblem(data_set,
                                                classification_network,
                                                measure)
        oa = RandomizedHillClimbing(nnop)
        train(oa, classification_network, 'RHC', train_inst, test_inst,
              measure, training_iterations, out_path_)
Пример #21
0
def rhc_generic(name, ef, odd, nf, iter_time, iters_total, iters_step, n_trials):
    """Run n_trials timed RHC runs on a generic hill-climbing problem.

    Args:
        name: Run label; None/empty yields an empty decorated name.
        ef: Evaluation function (also scores the current optimum).
        odd: Initial-state distribution.
        nf: Neighbor function.
        iter_time, iters_total, iters_step: Budget parameters forwarded to
            TimedTrainer; iters_step is also the FixedIterationTrainer batch.
        n_trials: Number of independent trials.
    """
    for i_trial in range(n_trials):
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        rhc_instance = RandomizedHillClimbing(hcp)
        rhc_trainer = FixedIterationTrainer(rhc_instance, iters_step)
        rhc_state = {'problem': rhc_instance,
                     'trainer': rhc_trainer}
        # AlgoWrapper presumably takes (state, step_fn, train_score_fn,
        # test_score_fn); both scorers evaluate the current optimum here
        # since there is no held-out set — confirm against AlgoWrapper.
        wrapper_rhc = AlgoWrapper(rhc_state,
                                  lambda state: state['trainer'].train(),
                                  lambda state: ef.value(state['problem'].getOptimal()),
                                  lambda state: ef.value(state['problem'].getOptimal())
                                  )
        # create name and invalidate if super empty
        decorated_name = ""
        if name is not None and name != "":
            decorated_name = name
        timed_trainer = TimedTrainer(decorated_name,
                                     wrapper_rhc,
                                     iter_time,
                                     iters_total,
                                     iters_step,
                                     _param_dict={'name':name}
                                     )
        timed_trainer.run()
Пример #22
0
def rhc_network(name, network, measure, train_set, test_set, acc_func, iter_time, iters_total, iters_step, n_trials):
    """Run n_trials timed RHC runs optimizing a neural network's weights.

    Args:
        name: Run label; None/empty yields an empty decorated name.
        network: Network whose weights are optimized (shared across trials).
        measure: Error measure for the optimization problem and acc_func.
        train_set: Training data set (also what the problem optimizes over).
        test_set: Held-out data set scored by acc_func.
        acc_func: Callable (data_set, network, measure) -> score.
        iter_time, iters_total, iters_step: Budget parameters forwarded to
            TimedTrainer; iters_step is also the FixedIterationTrainer batch.
        n_trials: Number of independent trials.
    """
    for i_trial in range(n_trials):
        network_optimizer = NeuralNetworkOptimizationProblem(train_set, network, measure)
        rhc_instance = RandomizedHillClimbing(network_optimizer)
        rhc_trainer = FixedIterationTrainer(rhc_instance, iters_step)
        nn_state = {'network': network,
                    'trainer': rhc_trainer}
        # The two scorers evaluate acc_func on the train and test sets
        # respectively — confirm against AlgoWrapper's expected signature.
        wrapper_rhc = AlgoWrapper(nn_state,
                                 lambda state: state['trainer'].train(),
                                 lambda state: acc_func(train_set, state['network'], measure),
                                 lambda state: acc_func(test_set, state['network'], measure)
                                 )
        # create name and invalidate if super empty
        decorated_name = ""
        if name is not None and name != "":
            decorated_name = name
        timed_trainer = TimedTrainer(decorated_name,
                                     wrapper_rhc,
                                     iter_time,
                                     iters_total,
                                     iters_step,
                                     _param_dict={'name':name}
                                     )
        timed_trainer.run()
Пример #23
0
def main():

    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50

    iterations = 20000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 150
    gaMutate = 25
    mimicSamples = 200
    mimicToKeep = 100
    saTemp = 100
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:N:c:w:v:i:", ["gaIters=", "mimicIters=","gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
            sys.exit(1)
        elif opt == '-i':
            iterations = int(arg)
        elif opt == '-N':
            NUM_ITEMS = int(arg)
        elif opt == '-c':
            COPIES_EACH = int(arg)
        elif opt == '-w':
            MAX_WEIGHT = int(arg)
        elif opt == '-v':
            MAX_VOLUME = int(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '-a':
            alg = 'all'
        elif opt == '--gaPop':
            gaPop = int(arg)
        elif opt == '--gaMate':
            gaMate = int(arg)
        elif opt == '--gaMutate':
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '--gaIters':
            gaIters = int(arg)
        elif opt == '--mimicIters':
            mimicIters = int(arg)
    vars ={
    'NUM_ITEMS' : NUM_ITEMS,
    'COPIES_EACH' : COPIES_EACH,
    'MAX_WEIGHT' : MAX_WEIGHT,
    'MAX_VOLUME' : MAX_VOLUME,
    'iterations' : iterations,
    'gaIters' : gaIters,
    'mimicIters' : mimicIters,
    'gaPop' : gaPop,
    'gaMate' : gaMate,
    'gaMutate' : gaMutate,
    'mimicSamples' : mimicSamples,
    'mimicToKeep' : mimicToKeep,
    'saTemp' : saTemp,
    'saCooling' : saCooling,
    'alg' : alg,
    'run' : run
    }

    settings = getSettings(alg, settings, vars)
    # Random number generator */
    random = Random()

    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME


    # create range
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(str(ef.value(rhc.getOptimal())))
        rows.append(row)
        output2('Knapsack', 'RHC', rows, settings)
        rows = []
        buildFooter("Knapsack", "RHC", rows, settings)
        outputFooter("Knapsack", "RHC", rows , settings)
    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('Knapsack', 'SA', rows, settings)
        rows = []
        buildFooter("Knapsack", "SA", rows, settings)
        outputFooter("Knapsack", "SA", rows, settings)
    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        print "GA: " + str(ef.value(ga.getOptimal()))
        output2('Knapsack', 'GA', rows, settings)
        buildFooter("Knapsack", "GA", rows, settings)
        outputFooter("Knapsack", "GA", rows , settings)
    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('Knapsack', 'MIMIC', rows, settings)
        rows = []
        buildFooter("Knapsack", "MIMIC", rows, settings)
        outputFooter("Knapsack", "MIMIC", rows , settings)
def run_traveling_salesman():
    # set N value.  This is the number of points
    N = 50
    random = Random()

    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)
            print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(rhc.getOptimal().getDiscrete(x))
            # print path

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(1E12, .999, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)
            print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(sa.getOptimal().getDiscrete(x))
            # print path

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            ga_results.append(ef.value(ga.getOptimal()))
            print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
            ga_times.append(end - start)
            # print "Route:"
            # path = []
            # for x in range(0,N):
            #     path.append(ga.getOptimal().getDiscrete(x))
            # print path

    # for mimic we use a sort encoding
    ef = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    mimic_results = []
    mimic_times = []
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(500, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()

            mimic_results.append(ef.value(mimic.getOptimal()))
            print "MIMIC Inverse of Distance: " + str(
                ef.value(mimic.getOptimal()))
            # print "Route:"
            # path = []
            # optimal = mimic.getOptimal()
            # fill = [0] * optimal.size()
            # ddata = array('d', fill)
            # for i in range(0,len(ddata)):
            #     ddata[i] = optimal.getContinuous(i)
            # order = ABAGAILArrays.indices(optimal.size())
            # ABAGAILArrays.quicksort(ddata, order)
            # print order
            mimic_times.append(end - start)

    with open('travelingsalesman.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Пример #25
0
        T = N / 10
        fill = [2] * N
        ranges = array('i', fill)

        ef = ContinuousPeaksEvaluationFunction(T)
        odd = DiscreteUniformDistribution(ranges)
        nf = DiscreteChangeOneNeighbor(ranges)
        mf = DiscreteChangeOneMutation(ranges)
        cf = SingleCrossOver()
        df = DiscreteDependencyTree(.1, ranges)
        hcp = GenericHillClimbingProblem(ef, odd, nf)
        gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

        if do_rhc:
            rhc = RandomizedHillClimbing(hcp)
            for iter in hc_iter:
                score, call_count, runtime = helpers.eval_algo(ef, rhc, iter)
                rhc_scores.append(score)
                rhc_times.append(runtime)
                sa_params.append([iter, N])
                rhc_results[N][iter]['scores'][trial_num] = score
                rhc_results[N][iter]['runtimes'][trial_num] = runtime
                # print("RHC best score: ", score, " in time: ", runtime, " with iters: ", iter)
            print "\tFinished RHC"

        if do_sa:
            for t in sa_temp:
                for cooling in sa_cooling:
                    sa = SimulatedAnnealing(t, cooling, hcp)
                    for iter in sa_iter:
# Fragment: learn neural-network weights three ways (backprop, RHC, SA) and
# report train/test error plus confusion matrices for each.
# NOTE(review): this excerpt is truncated — 'factory', 'set', 'measure',
# 'inputLayer', 'hiddenLayer', 'outputLayer', 'it_rhc', 'it_sa', 'train',
# 'test', 'errorRate' and 'confusionMatrix' are defined earlier in the
# original script, and the SA section is cut off before its report lines.
it_ga = 100

# learn weights with back propagation
network_bp = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
bp = BatchBackPropagationTrainer(set, network_bp, measure, RPROPUpdateRule())
cvt = ConvergenceTrainer(bp)
cvt.train()
print "\nBP training error:", errorRate(network_bp, train)
print "BP training confusion matrix:", confusionMatrix(network_bp, train)
print "    BP test error:", errorRate(network_bp, test)
print "    BP test confusion matrix:", confusionMatrix(network_bp, test)

# learn weights with randomized hill climbing
network_rhc = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
nnop_rhc = NeuralNetworkOptimizationProblem(set, network_rhc, measure)
rhc = RandomizedHillClimbing(nnop_rhc)
fit = FixedIterationTrainer(rhc, it_rhc)
fit.train()
# copy the best weight vector found back into the network before evaluating
op = rhc.getOptimal();
network_rhc.setWeights(op.getData())
print "\nRHC training error:", errorRate(network_rhc, train)
print "RHC training confusion matrix:", confusionMatrix(network_rhc, train)
print "    RHC test error:", errorRate(network_rhc, test)
print "    RHC test confusion matrix:", confusionMatrix(network_rhc, test)

# learn weights with simulated annealing (start temp 1E11, cooling 0.95)
network_sa = factory.createClassificationNetwork([inputLayer, hiddenLayer, outputLayer])
nnop_sa = NeuralNetworkOptimizationProblem(set, network_sa, measure)
sa = SimulatedAnnealing(1E11, 0.95, nnop_sa)
fit = FixedIterationTrainer(sa, it_sa)
fit.train()
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    expt = "expt_avg"

    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 200000)
    score_RHC.append(train(rhc, "RHC", ef, 200000, "test", expt))
    print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))

    sa = SimulatedAnnealing(1E9, .98, hcp)
    fit = FixedIterationTrainer(sa, 200000)
    score_SA.append(train(sa, "SA", ef, 200000, "test", expt))
    print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))

    ga = StandardGeneticAlgorithm(225, 40, 5, gap)
    fit = FixedIterationTrainer(ga, 1000)
    score_GA.append(train(ga, "GA", ef, 40000, "test", expt))
    print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))

    # for mimic we use a sort encoding
Пример #28
0
def main():
    N=200
    tempDenom = 5
    T=N/tempDenom
    fill = [2] * N
    ranges = array('i', fill)
    iterations = 2000
    gaIters = 1000
    mimicIters = 1000
    gaPop = 200
    gaMate = 100
    gaMutate = 10
    mimicSamples = 200
    mimicToKeep = 20
    saTemp = 1E11
    saCooling = .95
    alg = 'all'
    run = 0
    settings = []

    try:
       opts, args = getopt.getopt(sys.argv[1:], "ahn:rsgN:m:t:i:", ["gaIters=", "mimicIters=","gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
       print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
       sys.exit(2)
    for opt, arg in opts:
       if opt == '-h':
          print 'knapsack.py -i <iterations> -n <NUM_ITEMS> -c <COPIES_EACH> -w <MAX_WEIGHT> -v <MAX_VOLUME>'
          sys.exit(1)
       elif opt == '-i':
          iterations = int(arg)
       elif opt == '-N':
          N = int(arg)
       elif opt == '-t':
           T = float(arg)
       elif opt == '-d':
          tempDenom = int(arg)
       elif opt == '-r':
           alg = 'RHC'
       elif opt == '-a':
           alg = 'all'
       elif opt == '-s':
           alg = 'SA'
       elif opt == '-g':
           alg = 'GA'
       elif opt == '-m':
           alg = 'MIMIC'
       elif opt == '--gaPop':
          gaPop = int(arg)
       elif opt == '--gaMate':
          gaMate = int(arg)
       elif opt == '--gaMutate':
          gaMutate = int(arg)
       elif opt == '--mimicSamples':
          mimicSamples = int(arg)
       elif opt == '--mimicToKeep':
          mimicToKeep = int(arg)
       elif opt == '--saTemp':
          saTemp = float(arg)
       elif opt == '--saCooling':
          saCooling = float(arg)
       elif opt == '--gaIters':
          gaIters = int(arg)
       elif opt == '--mimicIters':
          mimicIters = int(arg)
       elif opt == '-n':
           run = int(arg)


    vars = {
        'N':N,
        'tempDenom':tempDenom,
        'T':T,
        'fill':fill,
        'ranges':ranges,
        'iterations' :iterations,
        'gaIters':gaIters,
        'mimicIters':mimicIters,
        'gaPop' :gaPop,
        'gaMate' :gaMate,
        'gaMutate' :gaMutate,
        'mimicSamples' : mimicSamples,
        'mimicToKeep' : mimicToKeep,
        'saTemp' : saTemp,
        'saCooling' : saCooling,
        'alg' : alg,
        'run' : run
    }

    settings = getSettings(alg, settings, vars)

    T=N/tempDenom
    fill = [2] * N
    ranges = array('i', fill)

    ef = FourPeaksEvaluationFunction(T)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = SingleCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    if alg == 'RHC' or alg == 'all':
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        print "RHC: " + str(ef.value(rhc.getOptimal()))
        output2('4Peaks', 'RHC', rows, settings)
        rows = []
        buildFooter("4Peaks", "RHC", rows, settings),
        outputFooter("4Peaks", "RHC", rows,   settings)

    if alg == 'SA' or alg == 'all':
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        print "SA: " + str(ef.value(sa.getOptimal()))
        output2('4Peaks', 'SA', rows, settings)
        rows = []
        buildFooter("4Peaks", "SA", rows, settings)
        outputFooter("4Peaks", "SA", rows, settings)

    if alg == 'GA' or alg == 'all':
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        print "GA: " + str(ef.value(ga.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'GA', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "GA", rows , settings)

    if alg == 'MIMIC' or alg == 'all':
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        print "MIMIC: " + str(ef.value(mimic.getOptimal()))
        rows = []
        row = []
        row.append("Evaluation Function Value")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        output2('4Peaks', 'MIMIC', rows, settings)
        rows = []
        buildFooter("4Peaks", "GA", rows, settings)
        outputFooter("4Peaks", "MIMIC", rows, settings)
Пример #29
0
def run_knapsack():
    """Benchmark RHC, SA, GA and MIMIC on a random knapsack instance.

    The instance has 40 item types with up to 4 copies each, random
    weights/volumes in [0, 50), and a knapsack volume equal to 40% of the
    total possible volume.  Each algorithm is run at several fixed
    iteration budgets, repeated ``num_repeats`` times; scores and
    wall-clock times are written to ``knapsack.csv`` and returned.

    Returns:
        Tuple of eight lists:
        (rhc_results, rhc_times, sa_results, sa_times,
         ga_results, ga_times, mimic_results, mimic_times)
    """
    # Random number generator */
    random = Random()
    # The number of items
    NUM_ITEMS = 40
    # The number of copies each
    COPIES_EACH = 4
    # The maximum weight for a single element
    MAX_WEIGHT = 50
    # The maximum volume for a single element
    MAX_VOLUME = 50
    # The volume of the knapsack
    KNAPSACK_VOLUME = MAX_VOLUME * NUM_ITEMS * COPIES_EACH * .4

    # create copies
    fill = [COPIES_EACH] * NUM_ITEMS
    copies = array('i', fill)

    # create weights and volumes
    fill = [0] * NUM_ITEMS
    weights = array('d', fill)
    volumes = array('d', fill)
    for i in range(0, NUM_ITEMS):
        weights[i] = random.nextDouble() * MAX_WEIGHT
        volumes[i] = random.nextDouble() * MAX_VOLUME

    # create range (each item count ranges over 0..COPIES_EACH)
    fill = [COPIES_EACH + 1] * NUM_ITEMS
    ranges = array('i', fill)

    ef = KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies)
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    mf = DiscreteChangeOneMutation(ranges)
    cf = UniformCrossOver()
    df = DiscreteDependencyTree(.1, ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

    iters = [50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000]
    num_repeats = 5

    rhc_results = []
    rhc_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            rhc = RandomizedHillClimbing(hcp)
            fit = FixedIterationTrainer(rhc, i)
            fit.train()
            end = time.time()
            rhc_results.append(ef.value(rhc.getOptimal()))
            rhc_times.append(end - start)

    sa_results = []
    sa_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            sa = SimulatedAnnealing(100, .95, hcp)
            fit = FixedIterationTrainer(sa, i)
            fit.train()
            end = time.time()

            sa_results.append(ef.value(sa.getOptimal()))
            sa_times.append(end - start)

    ga_results = []
    ga_times = []
    for i in iters:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            ga = StandardGeneticAlgorithm(200, 150, 25, gap)
            fit = FixedIterationTrainer(ga, i)
            fit.train()
            end = time.time()
            # BUG FIX: previously recorded ef.value(sa.getOptimal()) — the GA
            # results column was actually filled with stale SA scores.
            ga_results.append(ef.value(ga.getOptimal()))
            ga_times.append(end - start)

    mimic_results = []
    mimic_times = []
    # MIMIC is far slower per iteration, so only the six smallest budgets run
    for i in iters[0:6]:
        print(i)
        for j in range(num_repeats):
            start = time.time()
            mimic = MIMIC(200, 100, pop)
            fit = FixedIterationTrainer(mimic, i)
            fit.train()
            end = time.time()
            mimic_results.append(ef.value(mimic.getOptimal()))
            mimic_times.append(end - start)

    # one CSV row per series: scores then times, per algorithm
    with open('knapsack.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(rhc_results)
        writer.writerow(rhc_times)
        writer.writerow(sa_results)
        writer.writerow(sa_times)
        writer.writerow(ga_results)
        writer.writerow(ga_times)
        writer.writerow(mimic_results)
        writer.writerow(mimic_times)

    return rhc_results, rhc_times, sa_results, sa_times, ga_results, ga_times, mimic_results, mimic_times
Пример #30
0
# Continuous Peaks: 60-bit string, reward threshold T = N/10.
N=60
T=N/10
fill = [2] * N
ranges = array('i', fill)

ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# randomized hill climbing for 200k iterations
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

# simulated annealing: start temperature 1E11, cooling factor .95
sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

# GA: population 200, 100 mated, 10 mutated per generation
ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

# NOTE(review): this 'mimic' is constructed but never trained in the visible
# code; the problem objects are then rebuilt (with UniformCrossOver) for the
# averaged-RHC experiment below.
mimic = MIMIC(200, 20, pop)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# -- begin problem

# Average RHC score, evaluation-call count and time over 'runs' repetitions.
# NOTE(review): 'runs' and 'iters' are not defined in this excerpt — they
# come from earlier in the original script.
t0 = time.time()
calls = []
results = []
for _ in range(runs):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, iters)
    fitness = fit.train()
    results.append(ef.value(rhc.getOptimal()))
    calls.append(ef.getTotalCalls())    
    ef.clearCount()
print "RHC, average results , " + str(sum(results)/float(runs))
print "RHC, average feval calls , " + str(sum(calls)/float(runs))
t1 = time.time() - t0
print "RHC, average time , " + str(float(t1)/runs)



t0 = time.time()
calls = []
results = []
def solveit(oaname, params):
    """Run one optimization algorithm on a random 50-city TSP instance.

    oaname: one of "RHC", "SA", "GA", "MMC" (MIMIC).
    params: list of algorithm-specific strings, also embedded in the CSV
            filename: RHC -> [iterations, try-index];
            SA -> [temperature, cooling]; GA -> [population, toMate,
            toMutate]; MMC -> [samples, toKeep].
    Samples (iteration, score, elapsed-seconds) every 10 iterations and
    writes them to tsp-<oaname>-<params>.csv, then prints the best route.
    """
    # set N value.  This is the number of points
    N = 50
    iterations = 1000
    tryi = 1
    random = Random()

    # random city coordinates in the unit square
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        points[i][0] = random.nextDouble()
        points[i][1] = random.nextDouble()

    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    # construct the requested optimizer; RHC additionally overrides the
    # iteration budget and try index from params
    if oaname == "RHC":
        iterations = int(params[0])
        tryi = int(params[1])
        oa = RandomizedHillClimbing(hcp)
    if oaname == "SA":    
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), hcp)
    if oaname == "GA":
        iterations=1000
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]), int(params[2]), gap)
    if oaname == "MMC":
        iterations=1000
        # for mimic we use a sort encoding
        ef = TravelingSalesmanSortEvaluationFunction(points)
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges)
        df = DiscreteDependencyTree(.1, ranges)
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
        oa = MIMIC(int(params[0]), int(params[1]), pop)

    print "Running %s using %s for %d iterations, try %d" % (oaname, ','.join(params), iterations, tryi)
    print "="*20
    starttime = timeit.default_timer()
    output = []
    for i in range(iterations):
        oa.train()
        # sample fitness/time on every 10th iteration (includes i == 0,
        # so 'score' is always bound before the prints below)
        if i%10 == 0:
            optimal = oa.getOptimal()
            score = ef.value(optimal)
            elapsed = int(timeit.default_timer()-starttime)
            output.append([str(i), str(score), str(elapsed)])

    print 'Inverse of Distance [score]: %.3f' % score
    print 'train time: %d secs' % (int(timeit.default_timer()-starttime))

    # dump the sampled learning curve
    scsv = 'tsp-%s-%s.csv' % (oaname, '-'.join(params))
    print "Saving to %s" % (scsv),
    with open(scsv, 'w') as csvf:
        writer = csv.writer(csvf)
        for row in output:
            writer.writerow(row)
    print "saved."
    print "="*20

    # print the best route found; MIMIC's sort encoding is decoded by
    # sorting its continuous values back into a visit order
    print "Route:"
    if oaname == 'MMC':
        optimal = oa.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0,len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        print order
    else:
        path = []
        for x in range(0,N):
            path.append(oa.getOptimal().getDiscrete(x))
        print path
Пример #33
0
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

    ef2 = TravelingSalesmanSortEvaluationFunction(points)
    fill = [N] * N
    ranges = array('i', fill)
    odd2 = DiscreteUniformDistribution(ranges)
    df = DiscreteDependencyTree(.1, ranges)
    pop = GenericProbabilisticOptimizationProblem(ef2, odd2, df)

    # Algorithm declaration
    rhc = RandomizedHillClimbing(hcp)
    sa = SimulatedAnnealing(SA_TEMPERATURE, SA_COOLING_FACTOR, hcp)
    ga = StandardGeneticAlgorithm(GA_POPULATION, GA_CROSSOVER, GA_MUTATION,
                                  gap)
    mimic = MIMIC(MIMIC_SAMPLES, MIMIC_TO_KEEP, pop)

    # Trainer declaration
    fit_rhc = FixedIterationTrainer(rhc, current_iteration_count)
    fit_sa = FixedIterationTrainer(sa, current_iteration_count)
    fit_ga = FixedIterationTrainer(ga, current_iteration_count)
    fit_mimic = FixedIterationTrainer(mimic, current_iteration_count)

    print("Computing for %d iterations" % current_iteration_count)

    # Fitting
    start_rhc = time.time()
# k-coloring: estimate the global optimum by running RHC and MIMIC for a
# long time before the real experiments.
# NOTE(review): 'adj' and 'ranges' are defined earlier in the original
# script; this excerpt is truncated at both ends.
ef = KColorEvaluationFunction(adj)

odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = UniformCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

timeout = 1E6

# first find the global optimum by running for a long time
hcp0 = GenericHillClimbingProblem(ef, odd, nf)
rhc0 = RandomizedHillClimbing(hcp0)
i = 0
# NOTE(review): 'max' shadows the builtin; kept byte-identical here
max = 0
while (i < timeout/10):
    rhc0.train()
    i += 1
    max = ef.value(rhc0.getOptimal())
    print "rhc0,", i,",", max
goal = max
pop0 = GenericProbabilisticOptimizationProblem(ef, odd, df)
# NOTE(review): mimic0 is built on 'pop', not the freshly created 'pop0'
# on the line above — possibly a bug; confirm intent.
mimic0 = MIMIC(200, 100, pop)
i = 0
while ( i< timeout/1000):
    mimic0.train()
    i += 1
    max = ef.value(mimic0.getOptimal())
Пример #35
0
# TSP: repeat each algorithm several times and collect (time, best-fitness)
# pairs for later averaging.
# NOTE(review): 'hcp', 'ef', 'N' and 'clock' are defined earlier in the
# original script; only the hill-climbing section is visible here.
# repeat a few times to get an average?
trials = 10  # more?

# per-algorithm [total_time, best_fitness] accumulators
hill_climbing = []
annealing = []
genetic = []
mimic_data = []

# output CSV paths for each algorithm's trials
hill_climbing_output = "salesman_hill_climbing.csv"
annealing_output = "salesman_annealing.csv"
genetic_output = "salesman_genetic.csv"
mimic_output = "salesman_mimic.csv"

# HILL CLIMBING
for i in range(trials):
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 200000)

    start = clock()
    fit.train()
    end = clock()
    total_time = end - start
    max_fit = ef.value(rhc.getOptimal())
    time_optimum = [total_time, max_fit]
    hill_climbing.append(time_optimum)
    print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
    print "Route:"
    path = []
    for x in range(0, N):
        path.append(rhc.getOptimal().getDiscrete(x))
    print path
Пример #36
0
# Continuous Peaks: 60-bit string, reward threshold T = N/10.
N = 60
T = N / 10
fill = [2] * N
ranges = array('i', fill)

ef = ContinuousPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# randomized hill climbing for 200k iterations
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC: " + str(ef.value(rhc.getOptimal()))

# simulated annealing: start temperature 1E11, cooling factor .95
sa = SimulatedAnnealing(1E11, .95, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA: " + str(ef.value(sa.getOptimal()))

# GA: population 200, 100 mated, 10 mutated per generation
ga = StandardGeneticAlgorithm(200, 100, 10, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA: " + str(ef.value(ga.getOptimal()))

# NOTE(review): the excerpt is cut here — mimic is constructed but the
# training/print code follows in the original script
mimic = MIMIC(200, 20, pop)
Пример #37
0
# TSP: build a random instance, then run RHC and SA for 200k iterations
# each, printing the inverse-distance score and the route found.
# NOTE(review): 'N' is defined earlier in the original script, and the SA
# route-printing loop is cut off at the end of this excerpt.
random = Random()

# random city coordinates in the unit square
points = [[0 for x in xrange(2)] for x in xrange(N)]
for i in range(0, len(points)):
    points[i][0] = random.nextDouble()
    points[i][1] = random.nextDouble()

ef = TravelingSalesmanRouteEvaluationFunction(points)
odd = DiscretePermutationDistribution(N)
nf = SwapNeighbor()
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)

rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
print "Route:"
path = []
for x in range(0, N):
    path.append(rhc.getOptimal().getDiscrete(x))
print path

# simulated annealing: start temperature 1e12, cooling factor 0.999
sa = SimulatedAnnealing(1e12, 0.999, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
print "Route:"
path = []
Пример #38
0
# FlipFlop RHC trials: per trial, train in 10-iteration steps up to
# maxIters, logging iteration, fitness, cumulative time and evaluation
# count to a per-trial CSV.
# NOTE(review): 'ef', 'odd', 'nf', 'mf', 'ranges', 'numTrials', 'outfile',
# 'maxIters' and 'clock' come from earlier in the original script.
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

# RHC
for t in range(numTrials):
    fname = outfile.format('RHC', str(t + 1))
    # write the CSV header, truncating any previous file
    with open(fname, 'w') as f:
        f.write('iterations,fitness,time,fevals\n')
    # fresh problem objects per trial so feval counts start at zero
    ef = FlipFlopEvaluationFunction()
    odd = DiscreteUniformDistribution(ranges)
    nf = DiscreteChangeOneNeighbor(ranges)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    rhc = RandomizedHillClimbing(hcp)
    fit = FixedIterationTrainer(rhc, 10)
    # times[] holds the cumulative elapsed time after each 10-iter step
    times = [0]
    for i in range(0, maxIters, 10):
        # NOTE(review): mixes bare clock() with time.clock() — presumably
        # the same function imported two ways; confirm in the full script
        start = clock()
        fit.train()
        elapsed = time.clock() - start
        times.append(times[-1] + elapsed)
        fevals = ef.fevals
        score = ef.value(rhc.getOptimal())
        # the score evaluation above counted one extra feval; back it out
        ef.fevals -= 1
        st = '{},{},{},{}\n'.format(i, score, times[-1], fevals)
        print(st)
        with open(fname, 'a') as f:
            f.write(st)
Пример #39
0
def main():
    """Run the traveling-salesman experiments selected on the command line.

    Parses getopt flags to choose one algorithm (RHC, SA, GA, MIMIC) or all
    of them, builds the TSP instance from CityList(), runs each selected
    optimizer, writes per-algorithm output via output()/outputFooter(), and
    prints the inverse-of-distance fitness of the best route found.
    """

    # Experiment defaults; each can be overridden by a CLI flag below.
    iterations = 200000
    alg = 'all'
    gaPop = 2000
    gaMate = 1500
    gaMutate = 250
    mimicSamples = 500
    mimicToKeep = 100
    saTemp = 1E12
    saCooling = .999
    gaIters = 1000
    mimicIters = 1000
    run = 0
    settings = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ahrsgmn:i:", ["gaIters=", "mimicIters=", "gaPop=", "gaMate=", "gaMutate=", "mimicSamples=", "mimicToKeep=", "saTemp=", "saCooling="])
    except:
        # Any parse failure prints usage and exits with status 2.
        print 'travelingsalesman.py -i <iterations>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'travelingsalesman.py -i <iterations>'
            sys.exit(1)
        elif opt == '-i':
            # NOTE(review): `arg` is a string here; in Python 2 comparing
            # str to int orders by type, so `arg < 1` is always False and
            # this validation never fires. The same dead check is repeated
            # for every numeric option below — confirm and fix with
            # `int(arg) < 1` if validation is intended.
            if arg < 1:
                print 'Iterations must be greater than 0'
                sys.exit(2)
            iterations = int(arg)
        elif opt == '-a':
            alg = 'all'
        elif opt == '-r':
            alg = 'RHC'
        elif opt == '-s':
            alg = 'SA'
        elif opt == '-g':
            alg = 'GA'
        elif opt == '-m':
            alg = 'MIMIC'
        elif opt == '--gaPop':
            if arg < 1:
                print 'Population must be greater than 0'
                sys.exit(2)
            gaPop = int(arg)
        elif opt == '--gaMate':
            if arg < 1:
                print 'Mating must be greater than 0'
                sys.exit(2)
            gaMate = int(arg)
        elif opt == '--gaMutate':
            if arg < 1:
                print 'Mutators must be greater than 0'
                sys.exit(2)
            gaMutate = int(arg)
        elif opt == '--mimicSamples':
            if arg < 1:
                print 'MIMIC samples must be greater than 0'
                sys.exit(2)
            mimicSamples = int(arg)
        elif opt == '--mimicToKeep':
            if arg < 1:
                print 'MIMIC to keep must be greater than 0'
                sys.exit(2)
            mimicToKeep = int(arg)
        elif opt == '--saTemp':
            saTemp = float(arg)
        elif opt == '--saCooling':
            saCooling = float(arg)
        elif opt == '-n':
            run = int(arg)
        elif opt == '--gaIters':
            if arg < 1:
                print 'GA Iterations must be greater than 0'
                sys.exit(2)
            gaIters = int(arg)
        elif opt == '--mimicIters':
            if arg < 1:
                print 'MIMIC Iterations must be greater than 0'
                sys.exit(2)
            mimicIters = int(arg)

    # Bundle all effective settings for reporting/labelling.
    # NOTE(review): the name `vars` shadows the `vars` builtin.
    vars = {
            'iterations' : iterations,
            'alg' : alg,
            'gaPop' : gaPop,
            'gaMate' : gaMate,
            'gaMutate' : gaMutate,
            'mimicSamples' : mimicSamples,
            'mimicToKeep' : mimicToKeep,
            'saTemp' : saTemp,
            'saCooling' : saCooling,
            'gaIters' : gaIters,
            'mimicIters' : mimicIters,
            'run' : run
            }

    settings = getSettings(alg, settings, vars)
    # Sanity-check GA and MIMIC population relationships; pebkac() reports
    # the misconfiguration to the user.
    if gaPop < gaMate or gaPop < gaMutate or gaMate < gaMutate:
        pebkac({gaPop: 'total population',gaMate : 'mating population', gaMutate : 'mutating population'}, alg, 'total population', settings)
    if mimicSamples < mimicToKeep:
        pebkac({mimicSamples: 'mimic samples', mimicToKeep : 'mimic to keep'}, alg, 'mimic samples', settings)
    prob = 'Traveling Sales Problem'
    invDist = {}
    # Build the city coordinate matrix from the CityList() fixture rather
    # than random points.
    cities = CityList()
    N = len(cities)
    #random = Random()
    points = [[0 for x in xrange(2)] for x in xrange(N)]
    for i in range(0, len(points)):
        coords = cities.getCoords(i)
        points[i][0] = coords[0]
        points[i][1] = coords[1]
    # Shared problem definitions: permutation encoding with swap moves.
    ef = TravelingSalesmanRouteEvaluationFunction(points)
    odd = DiscretePermutationDistribution(N)
    nf = SwapNeighbor()
    mf = SwapMutation()
    cf = TravelingSalesmanCrossOver(ef)
    hcp = GenericHillClimbingProblem(ef, odd, nf)
    gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
    rows = []


    if alg == 'RHC' or alg == 'all':
        print '\n----------------------------------'
        print 'Using Random Hill Climbing'
        for label, setting in settings:
            print label + ":" + str(setting)
        rhc = RandomizedHillClimbing(hcp)
        fit = FixedIterationTrainer(rhc, iterations)
        fit.train()
        # Decode the best permutation into a city-index route.
        path = []
        for x in range(0,N):
            path.append(rhc.getOptimal().getDiscrete(x))
        output(prob, 'RHC', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(rhc.getOptimal()))
        rows.append(row)
        invDist['RHC'] = ef.value(rhc.getOptimal())
        buildFooter(prob, 'RHC', rows, settings)
        outputFooter(prob, 'RHC', rows, settings)


    if alg == 'SA' or alg == 'all':
        print 'Using Simulated Annealing'
        for label, setting in settings:
            print label + ":" + str(setting)
        sa = SimulatedAnnealing(saTemp, saCooling, hcp)
        fit = FixedIterationTrainer(sa, iterations)
        fit.train()
        path = []
        for x in range(0,N):
            path.append(sa.getOptimal().getDiscrete(x))
        output(prob, 'SA', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(sa.getOptimal()))
        rows.append(row)
        invDist['SA'] = ef.value(sa.getOptimal())
        buildFooter(prob, 'SA', rows, settings)
        outputFooter(prob, 'SA', rows, settings)

    if alg == 'GA' or alg == 'all':
        print '\n----------------------------------'
        print 'Using Genetic Algorithm'
        for label, setting in settings:
            print label + ":" + str(setting)
        ga = StandardGeneticAlgorithm(gaPop, gaMate, gaMutate, gap)
        fit = FixedIterationTrainer(ga, gaIters)
        fit.train()
        path = []
        for x in range(0,N):
            path.append(ga.getOptimal().getDiscrete(x))
        output(prob, 'GA', path, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(ga.getOptimal()))
        rows.append(row)
        invDist['GA'] = ef.value(ga.getOptimal())
        buildFooter(prob, 'GA', rows, settings)
        outputFooter(prob, 'GA', rows, settings)

    if alg == 'MIMIC' or alg == 'all':
        print '\n----------------------------------'
        print 'Using MIMIC'
        for label, setting in settings:
            print label + ":" + str(setting)
        # for mimic we use a sort encoding: each city gets a continuous
        # key and the route is the ordering of those keys.
        ef = TravelingSalesmanSortEvaluationFunction(points);
        fill = [N] * N
        ranges = array('i', fill)
        odd = DiscreteUniformDistribution(ranges);
        df = DiscreteDependencyTree(.1, ranges);
        pop = GenericProbabilisticOptimizationProblem(ef, odd, df);
        mimic = MIMIC(mimicSamples, mimicToKeep, pop)
        fit = FixedIterationTrainer(mimic, mimicIters)
        fit.train()
        path = []
        # Recover the route by sorting the continuous keys and keeping
        # the permutation of indices that sorts them.
        optimal = mimic.getOptimal()
        fill = [0] * optimal.size()
        ddata = array('d', fill)
        for i in range(0,len(ddata)):
            ddata[i] = optimal.getContinuous(i)
        order = ABAGAILArrays.indices(optimal.size())
        ABAGAILArrays.quicksort(ddata, order)
        output(prob, 'MIMIC', order, points, settings)
        rows = []
        row = []
        row.append("Inverse of Distance")
        row.append(ef.value(mimic.getOptimal()))
        rows.append(row)
        invDist['MIMIC'] = ef.value(mimic.getOptimal())
        buildFooter(prob, 'MIMIC', rows, settings)
        outputFooter(prob, 'MIMIC', rows, settings)


    # Summary table: larger inverse distance == shorter route, so the best
    # algorithm is the max over invDist.
    maxn = max(len(key) for key in invDist)
    maxd = max(len(str(invDist[key])) for key in invDist)
    print "Results"
    for result in invDist:
        print "%-*s %s %-*s" % (len('Best Alg') + 2, result, ':', maxd, invDist[result])
    if alg == 'all':
        print "%-*s %s %-*s" % (len('Best Alg') + 2, 'Best Alg', ':', maxd, max(invDist.iterkeys(), key=(lambda key: invDist[key])))
    print '----------------------------------'