Code example #1
def main():
    """Run algorithms on the adult dataset."""
    train_instances = initialize_instances(trainX)
    test_instances = initialize_instances(testX)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_instances)

    networks = []  # BackPropagationNetwork
    nnop = []  # NeuralNetworkOptimizationProblem
    oa = []  # OptimizationAlgorithm
    oa_names = ["RHC", "SA", "GA", "BP"]
    print(sys.argv)
    if len(sys.argv) > 1:
        oa_names = [sys.argv[1]]
        set_num = sys.argv[2] if len(sys.argv) > 2 else None
    # results = ""
    for name in oa_names:
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], RELU())
        networks.append(classification_network)
        if name != "BP":
            nnop.append(
                NeuralNetworkOptimizationProblem(data_set,
                                                 classification_network,
                                                 measure))
        else:
            print("adding backprop")
            rule = RPROPUpdateRule()
            nnop.append(
                BatchBackPropagationTrainer(data_set, classification_network,
                                            measure, rule))

    if "RHC" in oa_names:
        rhc_index = oa_names.index("RHC")
        oa.append(RandomizedHillClimbing(nnop[rhc_index]))
    if "SA" in oa_names:
        sa_index = oa_names.index("SA")
        oa.append(SimulatedAnnealing(1E11, .95, nnop[sa_index]))
    if "GA" in oa_names:
        ga_index = oa_names.index("GA")
        oa.append(StandardGeneticAlgorithm(100, 50, 10, nnop[ga_index]))
    if "BP" in oa_names:
        rule = RPROPUpdateRule()
        bp_index = oa_names.index("BP")
        oa.append(nnop[bp_index])

    for i, name in enumerate(oa_names):
        train(oa[i], networks[i], oa_names[i], train_instances, test_instances,
              measure)
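Every example in this listing calls an `initialize_instances` helper that is not shown. Below is a minimal sketch of such a helper, following the pattern of the stock ABAGAIL Jython examples; the CSV layout (all feature columns followed by a single 0/1 label column) is an assumption.

import csv

from shared import Instance  # ABAGAIL class; requires ABAGAIL.jar on the Jython classpath


def initialize_instances(infile):
    """Read a CSV file into a list of ABAGAIL Instance objects."""
    instances = []
    with open(infile, "r") as dat:
        reader = csv.reader(dat)
        for row in reader:
            # All columns except the last are features; the last column is the label.
            instance = Instance([float(value) for value in row[:-1]])
            instance.setLabel(Instance(float(row[-1])))
            instances.append(instance)
    return instances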
Code example #2
def main():
    """Run this experiment"""
    with open(
            "/home/ec2-user/CS-7641-assignments/my_assignment2/I_am_NN_RHC_Starting_AT",
            'w') as f:
        f.write('{}'.format(datetime.datetime.now()))
    training_ints = initialize_instances(TRAIN_DATA_FILE)
    testing_ints = initialize_instances(TEST_DATA_FILE)
    validation_ints = initialize_instances(VALIDATE_DATA_FILE)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["RHC"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, 'RHC', training_ints, validation_ints,
          testing_ints, measure, TRAINING_ITERATIONS, OUTFILE.format('RHC'))
    with open(
            "/home/ec2-user/CS-7641-assignments/my_assignment2/I_am_NN_RHC_ending_AT",
            'w') as f:
        f.write('{}'.format(datetime.datetime.now()))
Code example #3
def main():
    """Run this experiment"""
    training_ints = initialize_instances(TRAIN_DATA_FILE)
    testing_ints = initialize_instances(TEST_DATA_FILE)
    validation_ints = initialize_instances(VALIDATE_DATA_FILE)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["RHC"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = RandomizedHillClimbing(nnop)
    train(
        oa,
        classification_network,
        "RHC",
        training_ints,
        validation_ints,
        testing_ints,
        measure,
        TRAINING_ITERATIONS,
        OUTFILE.format("RHC"),
    )
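Examples #2 and #3 also rely on a `train` helper whose exact signature varies from project to project. As a reference point, here is a minimal sketch of such a loop in the style of ABAGAIL's Jython examples: one optimizer step per iteration, then the sum-of-squares error over the training split. The simplified signature, the default iteration count, and the omission of the validation/test splits and the output file are assumptions.

from shared import Instance  # ABAGAIL class


def train(oa, network, oa_name, training_ints, measure, iterations=1000):
    """Minimal training loop: one optimizer step per iteration, then report training error."""
    for iteration in xrange(iterations):
        oa.train()  # one step of RHC / SA / GA / backprop
        error = 0.0
        for instance in training_ints:
            network.setInputValues(instance.getData())
            network.run()
            output_values = network.getOutputValues()
            example = Instance(output_values, Instance(output_values.get(0)))
            error += measure.value(instance.getLabel(), example)
        print("%s iteration %d: error %0.3f" % (oa_name, iteration, error))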
Code example #4
def main(CE):
    oa_name = "SA{}".format(CE)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_train', 'MSE_test', 'acc_train', 'acc_tst', 'elapsed'))

    training_data = initialize_instances('../data/Pima-train.csv')
    testing_data = initialize_instances('../data/Pima-test.csv')
    print(len(training_data))
    #testing_ints = initialize_instances('m_test.csv')
    #validation_ints = initialize_instances('m_val.csv')

    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_data)
    relu = RELU()
    rule = RPROPUpdateRule()
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER],
        relu
    )
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    train(
        oa,
        classification_network,
        oa_name, 
        training_data,
        testing_data,
        measure
    )      
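Example #4 parameterizes `main` by the SA cooling exponent CE. A typical driver simply sweeps a small grid of cooling rates; the values below are illustrative (they mirror the grid used in example #14), not taken from this project.

if __name__ == '__main__':
    # Each call writes its own SA<CE> log file via OUTFILE.
    for ce in [0.15, 0.35, 0.55, 0.75, 0.95]:
        main(ce)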
Code example #5
def main(P, mate, mutate, layers, training_iterations, test_data_file, train_data_file, validate_data_file):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    # relu = RELU()
    relu = LogisticSigmoid()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    with open(OUTFILE.format(oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                                                            'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst',
                                                            'elapsed'))
    classification_network = factory.createClassificationNetwork(
        layers, relu)
    nnop = NeuralNetworkOptimizationProblem(
        data_set, classification_network, measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    base.train(oa, classification_network, oa_name, training_ints, validation_ints, testing_ints, measure,
               training_iterations, OUTFILE.format(oa_name))
    return
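Example #5 leaves the GA population settings, layer sizes, iteration count, and data files to the caller. A hypothetical invocation could look like the following; every concrete value and path here is an assumption for illustration only (note the test/train/validate argument order of `main`).

if __name__ == '__main__':
    layers = [30, 16, 16, 1]  # input, two hidden, output node counts (placeholder sizes)
    for P in [50, 100]:
        for mate in [10, 20]:
            for mutate in [10, 20]:
                main(P, mate, mutate, layers, 2000,
                     'data/test.csv', 'data/train.csv', 'data/validate.csv')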
Code example #6
def main(CE, layers, training_iterations, test_data_file, train_data_file,
         validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_name = "SA_{}".format(CE)
    classification_network = factory.createClassificationNetwork(layers, relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    with open(OUTFILE.format(data_name, oa_name), 'a+') as f:
        f.seek(0)  # 'a+' may leave the read position at EOF; rewind before checking for the header
        content = f.read()
        if "MSE_trg" not in content:
            f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
                'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    base.train(oa, classification_network, oa_name, training_ints,
               validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(data_name, oa_name))
    return
Code example #7
def main(P, mate, mutate):
    """Run this experiment"""
    training_ints = initialize_instances(
        '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_trg.csv')
    testing_ints = initialize_instances(
        '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_test.csv')
    validation_ints = initialize_instances(
        '/Users/Sean/School/GeorgiaTech/CS7641/Assignment2/s_val.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    sig = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "GA_%s_%s_%s" % (P, mate, mutate)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('%s,%s,%s,%s,%s,%s,%s,%s\n' %
                ('iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                 'acc_val', 'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork([
        INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER
    ], sig)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    for trial in xrange(TRIALS):
        oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
        train(oa, classification_network, oa_name, training_ints,
              validation_ints, testing_ints, measure)
Code example #8
def main(CE):
    """Run this experiment"""
    training_ints = initialize_instances('SeasonsStats_trg.csv')
    testing_ints = initialize_instances('SeasonsStats_test.csv')
    validation_ints = initialize_instances('SeasonsStats_val.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    rule = RPROPUpdateRule()
    oa_name = "SA{}".format(CE)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg',
                                                   'MSE_val', 'MSE_tst',
                                                   'acc_trg', 'acc_val',
                                                   'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork([
        INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER
    ], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    for trial in range(TRIALS):
        oa = SimulatedAnnealing(1E10, CE, nnop)
        train(oa, classification_network, oa_name, training_ints,
              validation_ints, testing_ints, measure)
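Several of these scripts (example #8 included) log per-iteration MSE and accuracy columns for each data split. Below is a sketch of the kind of helper that computes both numbers for one split, thresholding the single network output at 0.5 for 0/1 labels; the helper name and the single-output assumption are assumptions, while the ABAGAIL calls follow the usual Jython example pattern.

from shared import Instance  # ABAGAIL class


def error_and_accuracy(network, instances, measure):
    """Return (mean error, accuracy) of the trained network on one data split."""
    error, correct = 0.0, 0
    for instance in instances:
        network.setInputValues(instance.getData())
        network.run()
        actual = instance.getLabel().getContinuous()
        predicted = network.getOutputValues().get(0)
        if abs(predicted - actual) < 0.5:
            correct += 1
        output_values = network.getOutputValues()
        example = Instance(output_values, Instance(output_values.get(0)))
        error += measure.value(instance.getLabel(), example)
    n = float(len(instances))
    return error / n, correct / n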
Code example #9
def main(P, mate, mutate):
    """Run this experiment"""
    training_ints = initialize_instances('./../data/x_train_val.csv')
    testing_ints = initialize_instances('./../data/x_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = HyperbolicTangentSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    FILE = OUTFILE.replace('XXX', oa_name)
    with open(FILE, 'w') as f:
        f.write('{},{},{},{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg',
                                                      'MSE_tst', 'acc_trg',
                                                      'acc_tst', 'f1_trg',
                                                      'f1_tst', 'train_time',
                                                      'pred_time'))
    classification_network = factory.createClassificationNetwork([
        INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3,
        HIDDEN_LAYER4, OUTPUT_LAYER
    ], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    train(oa, classification_network, oa_name, training_ints, testing_ints,
          measure, TRAINING_ITERATIONS, FILE)
Code example #10
File: NN_GA.py  Project: orcas333/cs7641
def main(output_filename):
    """Run this experiment"""
    training_ints = initialize_instances('out_digits_train.csv')
    testing_ints = initialize_instances('out_digits_test.csv')
    validation_ints = initialize_instances('out_digits_test.csv')

    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)

    relu = RELU()
    rule = RPROPUpdateRule()

    with open(output_filename, 'w') as f:
        f.write('{},{},{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst',
            'acc_trg', 'acc_val', 'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    # oa = SimulatedAnnealing(1E10, 0.95, nnop)
    # oa = RandomizedHillClimbing(nnop)

    P = 50
    mate = 20
    mutate = 20
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)

    train(oa, classification_network, output_filename, training_ints,
          validation_ints, testing_ints, measure)
Code example #11
def main(CE):
    """Run this experiment"""
    training_ints = initialize_instances(TRAIN_DATA_FILE)
    testing_ints = initialize_instances(TEST_DATA_FILE)
    validation_ints = initialize_instances(VALIDATE_DATA_FILE)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_name = "SA_{}".format(CE)
    with open(OUTFILE.format(oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg', 'acc_val',
            'acc_tst', 'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    print(CE)
    print(nnop)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    train(oa, classification_network, oa_name, training_ints, validation_ints,
          testing_ints, measure, TRAINING_ITERATIONS, OUTFILE.format(oa_name))
Code example #12
File: nn_spambase.py  Project: eigendreams/cs7641_hw2
def nn_back(name):

    instances = read_dataset('./spambase.csv', [1])
    train_set, test_set = train_test_split(instances, 0.3)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER], HyperbolicTangentSigmoid())
    bp_instance = BatchBackPropagationTrainer(train_set,
                                              classification_network, measure,
                                              RPROPUpdateRule())

    nn_state = {'network': classification_network, 'trainer': bp_instance}
    wrapper_nn = AlgoWrapper(
        nn_state, lambda state: state['trainer'].train(),
        lambda state: classification_error_acc(train_set, state[
            'network'], measure), lambda state: classification_error_acc(
                test_set, state['network'], measure))
    # create name and invalidate if super empty
    decorated_name = name
    timed_trainer = TimedTrainer(decorated_name,
                                 wrapper_nn,
                                 200,
                                 4000,
                                 1,
                                 _param_dict={'name': name})
    timed_trainer.run()

    print "NNBP bp done"
Code example #13
def main(P, mate, mutate):
    #training_ints = initialize_instances('bCancer_trg.csv')
    #testing_ints = initialize_instances('bCancer_test.csv')
    #validation_ints = initialize_instances('bCancer_val.csv')

    training_ints = initialize_instances('winequality_trg.csv')
    testing_ints = initialize_instances('winequality_test.csv')
    validation_ints = initialize_instances('winequality_val.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    hts = HyperbolicTangentSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg',
                                                   'MSE_val', 'MSE_tst',
                                                   'acc_trg', 'acc_val',
                                                   'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], hts)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    train(oa, classification_network, oa_name, training_ints, validation_ints,
          testing_ints, measure)
Code example #14
def main():
    training_ints = initialize_instances('data/bank_train.csv')
    testing_ints = initialize_instances('data/bank_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    ######################### back prop #####################

    classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule),
          classification_network,
          'Backprop',
          training_ints, testing_ints, measure,
          './ANN/BP/BACKPROP_LOG.csv',
          2000)

    ######################### simulated annealing #################

    for CE in [0.15, 0.35, 0.55, 0.75, 0.95]:
        for T in [1e8, 1e10, 1e12]:
            classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
            nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
            oFile = "./ANN/SA/%s_%s_LOG.csv" % (CE, T)
            train(SimulatedAnnealing(T, CE, nnop),
                  classification_network,
                  'simulated annealing',
                  training_ints, testing_ints, measure,
                  oFile,
                  2000)

    ######################### random hill climbing #################

    classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER],acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    train(RandomizedHillClimbing(nnop), 
        classification_network, 
        'RHC', 
        training_ints, testing_ints, measure,
        './ANN/RHC/RHC_LOG.csv',
        2000)

    ######################### genetic algorithm #################
    
    for P in [100]:
        for mate in [5, 15, 30]:
            for mutate in [5,15,30]:
                classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER],acti)
                nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
                oFile = "./ANN/GA/%s_%s_%s_LOG.csv"%(P, mate, mutate)
                train(StandardGeneticAlgorithm(P, mate, mutate, nnop), 
                    classification_network, 
                    'GA', 
                    training_ints, testing_ints, measure,
                    oFile,
                    2000)
Code example #15
File: ANN_back.py  Project: mcgarrah/CS7641-1
def main():
    """Run this experiment"""
    training_ints = initialize_instances('titanic_train.csv')
    testing_ints = initialize_instances('titanic_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule),
          classification_network, 'Backprop', training_ints, testing_ints, measure)
Code example #16
def main():
    """Run this experiment"""
    training_ints = initialize_instances('./../data/x_train_val.csv')
    testing_ints = initialize_instances('./../data/x_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = HyperbolicTangentSigmoid()
    rule = RPROPUpdateRule()
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, HIDDEN_LAYER4, OUTPUT_LAYER], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, 'RHC', training_ints, testing_ints, measure,
          TRAINING_ITERATIONS, OUTFILE)
Code example #17
def main():
    """Run this experiment"""
    training_ints = initialize_instances('Cryo_train.csv')
    testing_ints = initialize_instances('Cryo_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER],acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, 'RHC', training_ints, testing_ints, measure)
Code example #18
def main():
    train_data = initialize_instances(TRAIN_FILE)
    test_data = initialize_instances(TEST_FILE)  # Get data
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(train_data)
    activation = RELU()
    rule = RPROPUpdateRule()
    oa_name = 'Backprop'
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], activation)
    oa = BatchBackPropagationTrainer(data_set, classification_network, measure,
                                     rule)
    train(oa, classification_network, oa_name, train_data, test_data, measure)
Code example #19
def main():
    """Run this experiment"""
    training_ints = initialize_instances('SeasonsStats_trg.csv')
    testing_ints = initialize_instances('SeasonsStats_test.csv')
    validation_ints = initialize_instances('SeasonsStats_val.csv')
    for t in range(TRIALS):
        factory = BackPropagationNetworkFactory()
        measure = SumOfSquaresError()
        data_set = DataSet(training_ints)
        relu = RELU()
        rule = RPROPUpdateRule()
        oa_names = ["Backprop"]
        classification_network = factory.createClassificationNetwork(
            [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER], relu)
        train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule),
              classification_network, 'Backprop', training_ints, validation_ints,
              testing_ints, measure)
Code example #20
def main():
    """Run this experiment"""
    training_ints = initialize_instances('spam_trg.csv')
    testing_ints = initialize_instances('spam_test.csv')
    validation_ints = initialize_instances('spam_val.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    rule = RPROPUpdateRule()
    oa_names = ["RHC"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    for trial in xrange(TRIALS):
        oa = RandomizedHillClimbing(
            NeuralNetworkOptimizationProblem(data_set, classification_network, measure))
        train(oa, classification_network, 'RHC', training_ints, validation_ints,
              testing_ints, measure)
Code example #21
def main(P,mate,mutate):
    """Run this experiment"""
    training_ints = initialize_instances('spambase_train.csv')
    testing_ints = initialize_instances('spambase_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "GA_{}_{}_{}".format(P,mate,mutate)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_tst', 'acc_trg', 'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    train(oa, classification_network, oa_name, training_ints, testing_ints, measure)
Code example #22
def main(T, CE):
    """Run this experiment"""
    training_ints = initialize_instances('./../data/wine_train.csv')
    testing_ints = initialize_instances('./../data/wine_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "SA_{}_{}".format(T, CE)
    with open(OUTFILE, 'w') as f:
        f.write('{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_tst', 'acc_trg', 'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], acti)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = SimulatedAnnealing(T, CE, nnop)
    train(oa, classification_network, oa_name, training_ints, testing_ints, measure,
          TRAINING_ITERATIONS, OUTFILE)
Code example #23
    def run_experiment(self, train, test, validation):
        """Run experiment

        Args:
            train (list): List of training instances.
            test (list): List of test instances.
            validation (list): List of validation instances.

        """
        factory = BackPropagationNetworkFactory()  # instantiate main NN class
        params = [
            self.input_layer, self.hidden_layer_one, self.hidden_layer_two,
            self.output_layer
        ]
        self.network = factory.createClassificationNetwork(params)
        dataset = DataSet(train)  # setup training instances dataset
        nnop = NeuralNetworkOptimizationProblem(dataset, self.network,
                                                self.measure)
        oa = None

        # get output file name
        outpath = 'results/NN'
        filename = None

        # options for different optimization algorithms
        if self.oaName == 'BP':
            filename = '{}/results.csv'.format(self.oaName)
            rule = RPROPUpdateRule()
            oa = BatchBackPropagationTrainer(dataset, self.network,
                                             self.measure, rule)
        elif self.oaName == 'RHC':
            filename = '{}/results.csv'.format(self.oaName)
            oa = RandomizedHillClimbing(nnop)
        elif self.oaName == 'SA':
            filename = '{}/results_{}_{}.csv'.format(self.oaName, self.SA_T,
                                                     self.SA_C)
            oa = SimulatedAnnealing(self.SA_T, self.SA_C, nnop)
        elif self.oaName == 'GA':
            filename = '{}/results_{}_{}_{}.csv'.format(
                self.oaName, self.GA_P, self.GA_MA, self.GA_MU)
            oa = StandardGeneticAlgorithm(self.GA_P, self.GA_MA, self.GA_MU,
                                          nnop)

        # train network
        filepath = get_abspath(filename, outpath)
        self.train(oa, train, test, validation, filepath)
Code example #24
def main():
    """Run this experiment"""
    training_ints = initialize_instances('./clean_data/adult_train.txt')
    testing_ints = initialize_instances('./clean_data/adult_test.txt')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    logunit = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_names = ["RHC"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])  #,logunit)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, 'RHC', training_ints, testing_ints,
          measure)
Code example #25
def main(CE):
    """Run this experiment"""
    training_ints = initialize_instances(PATH + "X_train.csv")
    testing_ints = initialize_instances(PATH + "X_test.csv")
    validation_ints = initialize_instances(PATH + "y_train.csv")
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    logistic_sigmoid = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "SA{}".format(CE)
    with open(OUTFILE.replace('XXX', oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{}\n'.format(
            'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst',
            'acc_trg', 'acc_val', 'acc_tst', 'elapsed'))
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, OUTPUT_LAYER], logistic_sigmoid)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    train(oa, classification_network, oa_name, training_ints, validation_ints,
          testing_ints, measure)
Code example #26
def main():
    """Run this experiment"""
    training_ints = initialize_instances(TRAIN_DATA_FILE)
    testing_ints = initialize_instances(TEST_DATA_FILE)
    validation_ints = initialize_instances(VALIDATE_DATA_FILE)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, OUTPUT_LAYER], relu)
    train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network,
          'Backprop', training_ints, validation_ints, testing_ints, measure, TRAINING_ITERATIONS,
          OUTFILE.format('Backprop'))
Code example #27
def main(CE):
    """Run this experiment"""
    training_ints = initialize_instances('./clean_data/adult_train.txt')
    testing_ints = initialize_instances('./clean_data/adult_test.txt')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    logunit = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "\nSA_cooling: %0.02f\n" % (CE)
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    train(oa, classification_network, oa_name, training_ints, testing_ints,
          measure)
Code example #28
def main(P, mate, mutate):
    """Run this experiment"""
    training_ints = initialize_instances('./clean_data/adult_train.txt')
    testing_ints = initialize_instances('./clean_data/adult_test.txt')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    logunit = LogisticSigmoid()
    rule = RPROPUpdateRule()
    oa_name = "\nGA_tuning: %d , %d, %d\n" % (P, mate, mutate)
    classification_network = factory.createClassificationNetwork(
        [INPUT_LAYER, HIDDEN_LAYER, OUTPUT_LAYER])
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    train(oa, classification_network, oa_name, training_ints, testing_ints,
          measure)
Code example #29
File: NN-RHC.py  Project: alfredzj/machine-learning
def main(ds_name):
    """Run this experiment"""
    nn_config, train_file, val_file, test_file = get_problemset(ds_name)
    training_ints = initialize_instances(train_file)
    testing_ints = initialize_instances(test_file)
    validation_ints = initialize_instances(val_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = "RHC_{}".format(ds_name)
    classification_network = factory.createClassificationNetwork(nn_config, relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
    oa = RandomizedHillClimbing(nnop)
    train(oa, classification_network, oa_names, training_ints, validation_ints, testing_ints, measure,
          TRAINING_ITERATIONS, OUTFILE.format(oa_names))
Code example #30
def main():
    """Run this experiment"""
    training_ints = initialize_instances('./../data/x_train_val.csv')
    testing_ints = initialize_instances('./../data/x_test.csv')
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    acti = HyperbolicTangentSigmoid()
    rule = RPROPUpdateRule()
    # oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork([
        INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3,
        HIDDEN_LAYER4, OUTPUT_LAYER
    ], acti)
    train(
        BatchBackPropagationTrainer(data_set, classification_network, measure,
                                    rule), classification_network, 'Backprop',
        training_ints, testing_ints, measure, TRAINING_ITERATIONS, OUTFILE)