Example #1
def main(P, mate, mutate, layers, training_iterations, test_data_file, train_data_file, validate_data_file):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    # relu = RELU()  # a RELU activation is left commented out; this variant uses a logistic sigmoid
    activation = LogisticSigmoid()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java;
    # rule is unused in this GA experiment and kept only for parity with the backprop variants
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    with open(OUTFILE.format(oa_name), 'w') as f:
        f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format('iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                                                            'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst',
                                                            'elapsed'))
    classification_network = factory.createClassificationNetwork(layers, activation)
    nnop = NeuralNetworkOptimizationProblem(
        data_set, classification_network, measure)
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    base.train(oa, classification_network, oa_name, training_ints, validation_ints, testing_ints, measure,
               training_iterations, OUTFILE.format(oa_name))
    return
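A minimal driver for this GA variant might look like the sketch below, assuming the module-level imports the snippet itself relies on (base and the ABAGAIL classes) are already in place. OUTFILE, the network topology, the iteration count, the file paths, and the GA settings are illustrative assumptions, not values from the original project.
# Hypothetical driver for the GA experiment above; every literal is an assumption.
OUTFILE = './NN_OUTPUT/{}_LOG.csv'  # assumed log template with one slot for oa_name

if __name__ == '__main__':
    main(P=50, mate=10, mutate=10,               # assumed GA population and operator counts
         layers=[100, 50, 50, 2],                # assumed input/hidden/output layer sizes
         training_iterations=5001,
         test_data_file='data/test.csv',
         train_data_file='data/train.csv',
         validate_data_file='data/validate.csv')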
Example #2
def main(CE, layers, training_iterations, test_data_file, train_data_file,
         validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_name = "SA_{}".format(CE)
    classification_network = factory.createClassificationNetwork(layers, relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
    oa = SimulatedAnnealing(1E10, CE, nnop)
    with open(OUTFILE.format(data_name, oa_name), 'a+') as f:
        f.seek(0)  # 'a+' positions the stream at EOF; rewind so the header check sees existing rows
        content = f.read()
        if "MSE_trg" not in content:
            f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
                'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    base.train(oa, classification_network, oa_name, training_ints,
               validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(data_name, oa_name))
    return
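Since the SA variant exposes a single cooling parameter CE, a sweep could drive it as sketched below; the CE values, topology, file paths, data_name, and the two-slot OUTFILE template are assumptions.
# Hypothetical cooling-rate sweep for the SA experiment above; all literals are assumptions.
OUTFILE = './NN_OUTPUT/{}_{}_LOG.csv'  # assumed template filled with (data_name, oa_name)

if __name__ == '__main__':
    for CE in [0.15, 0.35, 0.55, 0.70, 0.95]:    # assumed cooling exponents
        main(CE,
             layers=[100, 50, 50, 2],
             training_iterations=5001,
             test_data_file='data/test.csv',
             train_data_file='data/train.csv',
             validate_data_file='data/validate.csv',
             data_name='credit')                 # hypothetical dataset name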
Example #3
def main(layers, training_iterations, test_data_file, train_data_file, validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["RHC"]  # neither rule nor oa_names is used by the RHC variant below
    classification_network = factory.createClassificationNetwork(layers, relu)
    nnop = NeuralNetworkOptimizationProblem(
        data_set, classification_network, measure)
    oa = RandomizedHillClimbing(nnop)
    base.train(oa, classification_network, 'RHC', training_ints, validation_ints, testing_ints, measure,
               training_iterations, OUTFILE.format(data_name, 'RHC'))
    return
Example #4
def main(layers, training_iterations, test_data_file, train_data_file,
         validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(layers, relu)
    base.train(
        BatchBackPropagationTrainer(data_set, classification_network, measure,
                                    rule), classification_network, 'Backprop',
        training_ints, validation_ints, testing_ints, measure,
        training_iterations, OUTFILE.format(data_name, 'Backprop'))
    return
Example #5
def main(layers, training_iterations, test_data_file, train_data_file, validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    oa_names = ["Backprop"]
    classification_network = factory.createClassificationNetwork(layers, relu)
    with open(OUTFILE.format(data_name, 'Backprop'), 'a+') as f:
        f.seek(0)  # rewind; 'a+' opens positioned at the end of the file
        content = f.read()
        if "MSE_trg" not in content:
            f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
                'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    base.train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network,
               'Backprop', training_ints, validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(data_name, 'Backprop'))
    return
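Examples 2 and 5 guard against writing the CSV header twice, and that check only works when the file is read from the beginning, which is what the seek(0) added above provides. The same guard could be factored into a small helper; this is a minimal sketch that assumes the column names used throughout these examples.
# Minimal sketch of a reusable header guard; assumes the CSV columns used above.
HEADER = ('iteration,MSE_trg,MSE_val,MSE_tst,acc_trg,acc_val,acc_tst,'
          'f1_trg,f1_val,f1_tst,elapsed\n')

def ensure_header(path):
    """Append the CSV header to path only if it is not already present."""
    with open(path, 'a+') as f:
        f.seek(0)                       # 'a+' opens at EOF; rewind before reading
        if 'MSE_trg' not in f.read():
            f.write(HEADER)             # writes still append at EOF in append mode

# Usage (hypothetical): ensure_header(OUTFILE.format(data_name, 'Backprop'))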