def nn_back(name):

    instances = read_dataset('./spambase.csv', [1])
    train_set, test_set = train_test_split(instances, 0.3)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], HyperbolicTangentSigmoid())
    bp_instance = BatchBackPropagationTrainer(train_set,
                                              classification_network, measure,
                                              RPROPUpdateRule())

    nn_state = {'network': classification_network, 'trainer': bp_instance}
    wrapper_nn = AlgoWrapper(
        nn_state,
        lambda state: state['trainer'].train(),
        lambda state: classification_error_acc(train_set, state['network'], measure),
        lambda state: classification_error_acc(test_set, state['network'], measure))
    # label the timed trainer with the caller-supplied name
    decorated_name = name
    timed_trainer = TimedTrainer(decorated_name,
                                 wrapper_nn,
                                 200,
                                 4000,
                                 1,
                                 _param_dict={'name': name})
    timed_trainer.run()

    print "NNBP bp done"
Example #2
def main():
    training_ints = initialize_instances('data/bank_train.csv')
    testing_ints = initialize_instances('data/bank_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    ######################### back prop #####################

    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule),
          classification_network,
          'Backprop',
          training_ints, testing_ints, measure,
          './ANN/BP/BACKPROP_LOG.csv',
          2000)

    ######################### simulated annealing #################

    for CE in [0.15, 0.35, 0.55, 0.75, 0.95]:
        for T in [1e8, 1e10, 1e12]:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
            nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
            oFile = "./ANN/SA/%s_%s_LOG.csv" % (CE, T)
            train(SimulatedAnnealing(T, CE, nnop),
                  classification_network,
                  'simulated annealing',
                  training_ints, testing_ints, measure,
                  oFile,
                  2000)
    
    ######################### random hill climbing #################

    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    train(RandomizedHillClimbing(nnop),
          classification_network,
          'RHC',
          training_ints, testing_ints, measure,
          './ANN/RHC/RHC_LOG.csv',
          2000)

    ######################### genetic algorithm #################
    
    for P in [100]:
        for mate in [5, 15, 30]:
            for mutate in [5, 15, 30]:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
                nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
                oFile = "./ANN/GA/%s_%s_%s_LOG.csv" % (P, mate, mutate)
                train(StandardGeneticAlgorithm(P, mate, mutate, nnop),
                      classification_network,
                      'GA',
                      training_ints, testing_ints, measure,
                      oFile,
                      2000)
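# The initialize_instances helper used throughout these examples is defined
# per-script and not shown. A minimal sketch, assuming the csv module and
# ABAGAIL's shared.Instance are imported and the label sits in the last column:
def initialize_instances(infile):
    """Read a CSV file into a list of ABAGAIL Instances."""
    instances = []
    with open(infile, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if not row:
                continue
            instance = Instance([float(value) for value in row[:-1]])
            instance.setLabel(Instance(float(row[-1])))
            instances.append(instance)
    return instances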
Example #3
def main():
    """Run this experiment"""
    training_ints = initialize_instances('titanic_train.csv')
    testing_ints = initialize_instances('titanic_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule),
          classification_network, 'Backprop', training_ints, testing_ints, measure)
Example #4
def main():
    """Run algorithms on the adult dataset."""
    train_instances = initialize_instances(trainX)
    test_instances = initialize_instances(testX)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_instances)

    networks = []  # BackPropagationNetwork
    nnop = []  # NeuralNetworkOptimizationProblem
    oa = []  # OptimizationAlgorithm
    oa_names = ["RHC", "SA", "GA", "BP"]
    print(sys.argv)
    if len(sys.argv) > 1:
        oa_names = [sys.argv[1]]
    if len(sys.argv) > 2:
        set_num = sys.argv[2]
    # results = ""
    for name in oa_names:
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU())
        networks.append(classification_network)
        if name != "BP":
            nnop.append(
                NeuralNetworkOptimizationProblem(data_set,
                                                 classification_network,
                                                 measure))
        else:
            print("adding backprop")
            rule = RPROPUpdateRule()
            nnop.append(
                BatchBackPropagationTrainer(data_set, classification_network,
                                            measure, rule))

    if "RHC" in oa_names:
        rhc_index = oa_names.index("RHC")
        oa.append(RandomizedHillClimbing(nnop[rhc_index]))
    if "SA" in oa_names:
        sa_index = oa_names.index("SA")
        oa.append(SimulatedAnnealing(1E11, .95, nnop[sa_index]))
    if "GA" in oa_names:
        ga_index = oa_names.index("GA")
        oa.append(StandardGeneticAlgorithm(100, 50, 10, nnop[ga_index]))
    if "BP" in oa_names:
        rule = RPROPUpdateRule()
        bp_index = oa_names.index("BP")
        oa.append(nnop[bp_index])

    for i, name in enumerate(oa_names):
        train(oa[i], networks[i], oa_names[i], train_instances, test_instances,
              measure)
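# Each script defines its own train helper. A minimal sketch of the
# six-argument form called above, assuming a module-level TRAINING_ITERATIONS
# constant and ABAGAIL's shared.Instance:
def train(oa, network, oa_name, train_instances, test_instances, measure):
    """Train oa and report training MSE after every iteration."""
    for iteration in xrange(TRAINING_ITERATIONS):
        oa.train()  # one optimizer step (or one backprop batch pass)
        error = 0.0
        for instance in train_instances:
            network.setInputValues(instance.getData())
            network.run()
            output = instance.getLabel()
            output_values = network.getOutputValues()
            example = Instance(output_values, Instance(output_values.get(0)))
            error += measure.value(output, example)
        print '%s iteration %d: train MSE %0.5f' % (
            oa_name, iteration, error / len(train_instances))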
Example #5
def main():
    train_data = initialize_instances(TRAIN_FILE)
    test_data = initialize_instances(TEST_FILE)  # Get data
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_data)
    activation = RELU()
    rule = RPROPUpdateRule()
    oa_name = 'Backprop'
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], activation)
    oa = BatchBackPropagationTrainer(data_set, classification_network, measure,
                                     rule)
    train(oa, classification_network, oa_name, train_data, test_data, measure)
Example #6
def main():
    """Run this experiment"""
    training_ints = initialize_instances('SeasonsStats_trg.csv')
    testing_ints = initialize_instances('SeasonsStats_test.csv')
    validation_ints = initialize_instances('SeasonsStats_val.csv')
    for t in range(TRIALS):
        factory = BackPropagationNetworkFactory()
        measure = SumOfSquaresError()
        data_set = DataSet(training_ints)
        relu = RELU()
        rule = RPROPUpdateRule()
        oa_names = ["Backprop"]
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER], relu)
        train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule),
              classification_network, 'Backprop', training_ints, validation_ints,
              testing_ints, measure)
Example #7
    def run_experiment(self, train, test, validation):
        """Run experiment

        Args:
            train (list): List of training instances.
            test (list): List of test instances.
            validation (list): List of validation instances.

        """
        factory = BackPropagationNetworkFactory()  # instantiate main NN class
        params = [
            self.input_layer, self.hidden_layer_one, self.hidden_layer_two,
            self.output_layer
        ]
        self.network = factory.createClassificationNetwork(params)
        dataset = DataSet(train)  # setup training instances dataset
        nnop = NeuralNetworkOptimizationProblem(dataset, self.network,
                                                self.measure)
        oa = None

        # get output file name
        outpath = 'results/NN'
        filename = None

        # options for different optimization algorithms
        if self.oaName == 'BP':
            filename = '{}/results.csv'.format(self.oaName)
            rule = RPROPUpdateRule()
            oa = BatchBackPropagationTrainer(dataset, self.network,
                                             self.measure, rule)
        elif self.oaName == 'RHC':
            filename = '{}/results.csv'.format(self.oaName)
            oa = RandomizedHillClimbing(nnop)
        elif self.oaName == 'SA':
            filename = '{}/results_{}_{}.csv'.format(self.oaName, self.SA_T,
                                                     self.SA_C)
            oa = SimulatedAnnealing(self.SA_T, self.SA_C, nnop)
        elif self.oaName == 'GA':
            filename = '{}/results_{}_{}_{}.csv'.format(
                self.oaName, self.GA_P, self.GA_MA, self.GA_MU)
            oa = StandardGeneticAlgorithm(self.GA_P, self.GA_MA, self.GA_MU,
                                          nnop)

        # train network
        filepath = get_abspath(filename, outpath)
        self.train(oa, train, test, validation, filepath)
Example #8
def main():
    """Run this experiment"""
    training_ints = initialize_instances(TRAIN_DATA_FILE)
    testing_ints = initialize_instances(TEST_DATA_FILE)
    validation_ints = initialize_instances(VALIDATE_DATA_FILE)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network,
          'Backprop', training_ints, validation_ints, testing_ints, measure, TRAINING_ITERATIONS,
          OUTFILE.format('Backprop'))
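# Note: per the comment above, 50 and 0.000001 are the Java defaults for
# RPROPUpdateRule's maximum and minimum learning rates; only the first
# argument, the initial learning rate (0.064 here), deviates from the default.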
Example #9
def main():
    """Run this experiment"""
    training_ints = initialize_instances('./../data/x_train_val.csv')
    testing_ints = initialize_instances('./../data/x_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = HyperbolicTangentSigmoid()
    rule = RPROPUpdateRule()
    # oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork([
        INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3,
        HIDDEN_LAYER4, OUTPUT_LAYER
    ], acti)
    train(
        BatchBackPropagationTrainer(data_set, classification_network, measure,
                                    rule), classification_network, 'Backprop',
        training_ints, testing_ints, measure, TRAINING_ITERATIONS, OUTFILE)
Example #10
def main(layers, training_iterations, test_data_file, train_data_file,
         validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(layers, relu)
    base.train(
        BatchBackPropagationTrainer(data_set, classification_network, measure,
                                    rule), classification_network, 'Backprop',
        training_ints, validation_ints, testing_ints, measure,
        training_iterations, OUTFILE.format(data_name, 'Backprop'))
    return
Example #11
def main(ds_name):
    """Run this experiment"""
    nn_config, train_file, val_file, test_file = get_problemset(ds_name)
    training_ints = initialize_instances(train_file)
    testing_ints = initialize_instances(test_file)
    validation_ints = initialize_instances(val_file)

    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = "Backprop_{}".format(name)
    classification_network = factory.createClassificationNetwork(
        nn_config, relu)
    train(
        BatchBackPropagationTrainer(data_set, classification_network, measure,
                                    rule), classification_network, oa_names,
        training_ints, validation_ints, testing_ints, measure,
        TRAINING_ITERATIONS, OUTFILE.format(oa_names))
Example #12
def Backpropagation(out_path, train_inst, test_inst, repeats,
                    training_iterations):
    for i in range(repeats):
        out_path_ = out_path.replace("BP_", 'BP_{}'.format(str(i).zfill(3)))
        with open(out_path_, 'w') as f:
            f.write('{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg',
                                                 'MSE_tst', 'acc_trg',
                                                 'acc_tst', 'elapsed'))
        factory = BackPropagationNetworkFactory()
        measure = SumOfSquaresError()
        data_set = DataSet(train_inst)
        # acti = LogisticSigmoid()
        acti = HyperbolicTangentSigmoid()
        rule = RPROPUpdateRule()
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], acti)
        train(
            BatchBackPropagationTrainer(data_set, classification_network,
                                        measure, rule), classification_network,
            'Backprop', train_inst, test_inst, measure, training_iterations,
            out_path_)
Example #13
def main():
    """Run this experiment"""

    training_data = initialize_instances('../data/Pima-train.csv')
    testing_data = initialize_instances('../data/Pima-test.csv')
    print(len(training_data))
    #testing_ints = initialize_instances('m_test.csv')
    #validation_ints = initialize_instances('m_val.csv')

    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_data)
    relu = RELU()
    rule = RPROPUpdateRule()
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    train(
        BatchBackPropagationTrainer(data_set, classification_network, measure,
                                    rule), classification_network, 'Backprop',
        training_data, testing_data, measure)
Example #14
def main(layers, training_iterations, test_data_file, train_data_file, validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(layers, relu)
    with open(OUTFILE.format(data_name, 'Backprop'), 'a+') as f:
        f.seek(0)  # 'a+' opens positioned at end-of-file; rewind before reading
        content = f.read()
        if "MSE_trg" not in content:
            f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
                'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    base.train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network,
               'Backprop', training_ints, validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(data_name, 'Backprop'))
    return
Example #15
def main():
    """Run algorithms on the abalone dataset."""
    train_instances = initialize_instances()
    test_instances = initialize_instances(test=True)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_instances)

    networks = []  # BackPropagationNetwork
    oa = []  # OptimizationAlgorithm
    oa_names = []
    if do_rhc:
        oa_names.append("RHC")
    if do_sa:
        oa_names.append("SA")
    if do_ga:
        oa_names.append("GA")
    if do_bp:
        oa_names.append("BP")
    results = ""

    # For each algo, need to see if we are doing sweeps

    # No need to sweep rhc as there are no parameters
    if do_rhc and not sweep:
        training_iter = TRAINING_ITERATIONS
        if do_fmnist:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
        if do_chess:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
        nnop = NeuralNetworkOptimizationProblem(data_set,
                                                classification_network,
                                                measure)
        oa = RandomizedHillClimbing(nnop)
        name = "RHC"
        train(oa, classification_network, name, train_instances, measure,
              training_iter, test_instances, True)

    if do_sa:
        training_iter = TRAINING_ITERATIONS
        count = 0
        for temp, cooling in product(sa_temp, sa_cooling):
            if do_fmnist:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
            if do_chess:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
            nnop = NeuralNetworkOptimizationProblem(data_set,
                                                    classification_network,
                                                    measure)
            oa = SimulatedAnnealing(temp, cooling, nnop)
            name = "SA_sweep"
            print_head = (count == 0)
            train(oa, classification_network, name, train_instances, measure,
                  training_iter, test_instances, print_head, temp, cooling)
            count += 1

    if do_ga:
        training_iter = GA_TRAINING_ITERATIONS
        count = 0
        for pop, prop_mate, prop_mutate in product(ga_pop, ga_prop_mate,
                                                   ga_prop_mutate):
            if do_fmnist:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
            if do_chess:
                classification_network = factory.createClassificationNetwork(
                    [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
            nnop = NeuralNetworkOptimizationProblem(data_set,
                                                    classification_network,
                                                    measure)
            mate = int(math.floor(pop * prop_mate))
            mutate = int(math.floor(pop * prop_mutate))
            oa = StandardGeneticAlgorithm(pop, mate, mutate, nnop)
            name = "GA_sweep"
                print_head = (count == 0)
            train(oa, classification_network, name, train_instances, measure,
                  training_iter, test_instances, print_head, pop, prop_mate,
                  prop_mutate)
            count += 1

    if do_bp and not sweep:
        training_iter = TRAINING_ITERATIONS
        if do_fmnist:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
        if do_chess:
            classification_network = factory.createClassificationNetwork(
                [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER])
        oa = BatchBackPropagationTrainer(data_set, classification_network,
                                         measure, RPROPUpdateRule())
        name = "BP"
        train(oa, classification_network, name, train_instances, measure,
              training_iter, test_instances, True)
Example #16
def main(trainfile, testfile, validfile, oa_name, i, params):
    print("== [{}] ==".format(oa_name))
    res = {}
    #for i in range(25):
    res[i] = {}
    if i == 9:
        print("Invalid i %d" % (i))
        sys.exit(1)
    print("LABEL: {}".format(i))
    traininstances = initialize_instances(trainfile, i)
    testinstances = initialize_instances(testfile, i)
    validinstances = initialize_instances(validfile, i)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(traininstances)
    rule = RPROPUpdateRule()

    # was networks[]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)

    oa = None
    # was oa = []
    suffix = ""
    if oa_name == "BP":
        oa = BatchBackPropagationTrainer(data_set, classification_network,
                                         measure, rule)
    if oa_name == "RHC":
        oa = RandomizedHillClimbing(nnop)
    if oa_name == "SA":
        suffix = '-' + '-'.join(params)
        oa = SimulatedAnnealing(float(params[0]), float(params[1]), nnop)
    if oa_name == "GA":
        suffix = '-' + '-'.join(params)
        oa = StandardGeneticAlgorithm(int(params[0]), int(params[1]),
                                      int(params[2]), nnop)

    ttvinstances = {
        'train': traininstances,
        'test': testinstances,
        'valid': validinstances
    }
    train_start = timeit.default_timer()
    train(oa, classification_network, oa_name, ttvinstances, measure, i,
          suffix)
    train_end = timeit.default_timer()
    print 'train time: %d secs' % (int(train_end - train_start))

    if oa_name != "BP":
        optimal_instance = oa.getOptimal()
        classification_network.setWeights(optimal_instance.getData())

    ttvinstances = {
        'train': traininstances,
        'valid': validinstances,
        'test': testinstances
    }
    for key, instances in ttvinstances.items():
        query_start = timeit.default_timer()
        tp = 0.
        fp = 0.
        fn = 0.
        tn = 0.
        precision = 0.
        recall = 0.
        f1 = 0.
        print "scoring %s..." % (key)
        for instance in instances:
            classification_network.setInputValues(instance.getData())
            classification_network.run()

            actual = instance.getLabel().getContinuous()
            predicted = classification_network.getOutputValues().get(0)
            #print ('actual = %.3f, predicted = %.3f' % (actual, predicted))
            if actual == 1.:
                if predicted >= 0.5:
                    tp += 1.
                else:
                    fn += 1.
            else:
                if predicted >= 0.5:
                    fp += 1.
                else:
                    tn += 1.

        query_end = timeit.default_timer()
        if tp + fp > 0.:
            precision = tp / (tp + fp)
        if fn + tp > 0.:
            recall = tp / (fn + tp)
        if precision + recall > 0.:
            f1 = 2. * precision * recall / (precision + recall)
        correct = tp + tn
        total = correct + fp + fn
        print "%s f1 = %0.10f" % (key, f1)
        print "%s accuracy = %0.10f" % (key, correct / total)
        print "%s query time: %d secs" % (key, int(query_end - query_start))
Example #17
def run_all():
    dataSource = 'wine'
    INPUT_LAYER = 13
    HIDDEN_LAYER = 100
    OUTPUT_LAYER = 1

    # dataSource = 'wage'
    # INPUT_LAYER = 106
    # HIDDEN_LAYER = 1000
    # OUTPUT_LAYER = 1

    train_data = initialize_instances('data/balanced_' + dataSource +
                                      '_cleaned_train.csv')
    test_data = initialize_instances('data/balanced_' + dataSource +
                                     '_cleaned_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_data)
    update_rule = RPROPUpdateRule()

    alg = 'backprop'
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU())
    oa = BatchBackPropagationTrainer(data_set, classification_network, measure,
                                     update_rule)
    fit = oa
    run(alg, oa, fit, classification_network, measure, train_data, test_data,
        dataSource)

    alg = 'RHC'
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU())
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = RandomizedHillClimbing(nnop)
    iters = 1
    fit = FixedIterationTrainer(oa, iters)
    run(alg, oa, fit, classification_network, measure, train_data, test_data,
        dataSource)

    alg = 'SA'
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU())
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    startTemp = 1E10
    coolingFactor = .8
    oa = SimulatedAnnealing(startTemp, coolingFactor, nnop)
    iters = 1
    fit = FixedIterationTrainer(oa, iters)
    run(alg, oa, fit, classification_network, measure, train_data, test_data,
        dataSource)

    alg = 'GA'
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU())
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    population = 200
    mates = 50
    mutations = 10
    oa = StandardGeneticAlgorithm(population, mates, mutations, nnop)
    iters = 1
    fit = FixedIterationTrainer(oa, iters)
    run(alg, oa, fit, classification_network, measure, train_data, test_data,
        dataSource)
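# The run helper is defined elsewhere in this script. A hypothetical sketch,
# assuming an iteration budget of 1000 and using the weight hand-off shown in
# Example #16 (randomized optimizers keep their best weights in the problem
# instance, not in the network):
def run(alg, oa, fit, network, measure, train_data, test_data, data_source):
    for iteration in xrange(1000):  # assumed budget
        fit.train()  # one FixedIterationTrainer pass (or one backprop batch)
    if alg != 'backprop':
        network.setWeights(oa.getOptimal().getData())
    correct = 0
    for instance in test_data:
        network.setInputValues(instance.getData())
        network.run()
        actual = instance.getLabel().getContinuous()
        predicted = network.getOutputValues().get(0)
        if abs(predicted - actual) < 0.5:
            correct += 1
    print '%s/%s test accuracy: %0.4f' % (data_source, alg,
                                          float(correct) / len(test_data))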