Example #1
weight_init_functions = [
    None
] + [weight_init_function_random] * n_hidden_layers + [
    weight_init_function_random
]
learning_rate_functions = [
    None
] + [learning_rate_function] * n_hidden_layers + [learning_rate_function]

results = []
df_weights = DataFrame([])
df_netinputs = DataFrame([])

for seed_value in range(n_trials):
    print "seed = ", seed_value,
    random.seed(seed_value)

    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios,
                        weight_init_functions, learning_rate_functions)
    network.set_neurons_drop_out_probabilities(neurons_drop_out_probabilities)
    experiment_set_selected_weights(network)

    ####
    # if disable_2nd_output_neuron:
    #     second_output_neuron = network.layers[-1].neurons[1]
    #     second_output_neuron.activation_function = ConstantOutput()
    ####

    print "\n\nNet BEFORE Training\n", network

    data_collector = NetworkDataCollector(network, data_collection_interval)

    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set, error_criterion,
                                            max_epochs, data_collector)  # sop call
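A note on the dropout call above: neurons_drop_out_probabilities is defined
outside this excerpt. A minimal sketch of a plausible shape, mirroring the
other per-layer lists (the 0.5 values are illustrative assumptions, not from
the source):

# Assumed per-layer dropout list: none on input/output, 50% on hidden layers.
neurons_drop_out_probabilities = [0.0] + [0.5] * n_hidden_layers + [0.0]
network.set_neurons_drop_out_probabilities(neurons_drop_out_probabilities)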
Example #2
# specify neuron transforms, weight initialization, and learning rate functions... per layer
neurons_ios = [None] + [hidden_neurons_io_function] * n_hidden_layers + [output_neurons_io_function]
weight_init_functions = [None] + [ weight_init_function_random ]*n_hidden_layers + [ weight_init_function_random ]
learning_rate_functions = [None] + [ learning_rate_function ]*n_hidden_layers + [ learning_rate_function ]
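# For example, with n_hidden_layers = 2 each list above expands to
# [None, f, f, f]: one entry per layer, where index 0 is the input layer
# (no transform, init, or learning-rate function of its own), indices 1-2
# are the hidden layers, and index 3 is the output layer.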

results = []
df_weights = DataFrame([])
df_netinputs = DataFrame([])

for seed_value in range(n_trials):
    print "seed = ", seed_value,
    random.seed(seed_value)
        
    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios, weight_init_functions, learning_rate_functions)
    network.set_neurons_drop_out_probabilities(neurons_drop_out_probabilities)
    experiment_set_selected_weights(network)

    ####
    # if disable_2nd_output_neuron:
    #     second_output_neuron = network.layers[-1].neurons[1]
    #     second_output_neuron.activation_function = ConstantOutput()
    ####

    print "\n\nNet BEFORE Training\n", network

    data_collector = NetworkDataCollector(network, data_collection_interval)
    
    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set, error_criterion, max_epochs, data_collector)  # sop call
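Each trial reseeds Python's global RNG before constructing the network, so the
random weight initialization is reproducible per seed, and results collects one
epochs-to-criterion count (epoch_and_MSE[0]) per trial. A minimal sketch of a
post-loop summary, assuming the loop above has completed:

# Summarize epochs-to-criterion across all seeds.
mean_epochs = sum(results) / float(len(results))
print "mean epochs over", len(results), "trials =", mean_epochs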
Example #3
nonmon = STDNonMonotonicIOFunction()

# specify neuron transforms, weight initialization, and learning rate functions... per layer
neurons_ios = [None] + [nonmon] * n_hidden_layers + [linear]
weight_init_functions = [None] + [ weight_init_function_random ]*n_hidden_layers + [ weight_init_function_random ]
learning_rate_functions = [None] + [ learning_rate_function ]*n_hidden_layers + [ learning_rate_function ]

results = []
dfs_concatenated = DataFrame([])

for seed_value in range(n_trials):
    print "seed = ", seed_value,
    random.seed(seed_value)
        
    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios, weight_init_functions, learning_rate_functions)
    experiment_set_selected_weights(network)
    print "\n\nNet BEFORE Training\n", network
    
    data_collection_interval = 1000
    data_collector = NetworkDataCollector(network, data_collection_interval)
    
    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set, error_criterion, max_epochs, data_collector)  # sop call
    # epoch_and_MSE = network.backpropagation(training_set, 0.0000001, max_epochs, data_collector)
    results.append(epoch_and_MSE[0])

    #print "\n\nNet After Training\n", network

    # save the network
    network.save_to_file( "trained_configuration.pkl" )
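Example #6 below shows the matching loader commented out; a minimal round-trip
sketch using the same calls:

# Restore the pickled network and inspect it.
network = NeuralNet.load_from_file("trained_configuration.pkl")
print "\n\nNet AFTER reload\n", network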
Example #4
# specify neuron transforms, weight initialization, and learning rate functions... per layer
neurons_ios = [None] + [hidden_neurons_io_function] * n_hidden_layers + [output_neurons_io_function]
weight_init_functions = [None] + [ weight_init_function_random ]*n_hidden_layers + [ weight_init_function_random ]
learning_rate_functions = [None] + [ learning_rate_function ]*n_hidden_layers + [ learning_rate_function ]

results = []
df_weights = DataFrame([])
df_netinputs = DataFrame([])

for seed_value in range(n_trials):
    print "seed = ", seed_value,
    random.seed(seed_value)
        
    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios, weight_init_functions, learning_rate_functions)
    experiment_set_selected_weights(network)

    ####
    if disable_2nd_output_neuron:
        second_output_neuron = network.layers[-1].neurons[1]
        second_output_neuron.activation_function = ConstantOutput()
    ####

    print "\n\nNet BEFORE Training\n", network

    data_collector = NetworkDataCollector(network, data_collection_interval)
    
    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set, error_criterion, max_epochs, data_collector)  # sop call
    # epoch_and_MSE = network.backpropagation(training_set, 0.0000001, max_epochs, data_collector)
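The ablation above clamps the second output neuron by swapping its activation
function for ConstantOutput(). The same pattern generalizes to any unit; a
minimal sketch (layer and neuron indices are illustrative):

# Clamp an arbitrary neuron, as the example does for the second output neuron.
def disable_neuron(network, layer_index, neuron_index):
    neuron = network.layers[layer_index].neurons[neuron_index]
    neuron.activation_function = ConstantOutput()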
Example #5
weight_init_functions = [
    weight_init_function_random
] + [weight_init_function_random] * n_hidden_layers + [
    weight_init_function_random
]
learning_rate_functions = [
    learning_rate_function
] + [learning_rate_function] * n_hidden_layers + [learning_rate_function]

results = []

for seed_value in range(10):
    print "seed = ", seed_value,
    random.seed(seed_value)

    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios,
                        weight_init_functions, learning_rate_functions)

    print "\n\nNetwork State just after creation\n", network

    experimental_weight_setting_function(network)

    data_collection_interval = 1000
    data_collector = NetworkDataCollector(network, data_collection_interval)

    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set, 0.01, 3000,
                                            data_collector)
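    # (By position, 0.01 is the error criterion -- error_limit in the keyword
    # form used in Example #7 -- and 3000 is the maximum number of epochs.)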
    results.append(epoch_and_MSE[0])

    # save the network
    network.save_to_file("trained_configuration.pkl")
Example #6
n_hidden_layers = len(n_neurons_for_each_layer) - 2
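# E.g. n_neurons_for_each_layer = [2, 3, 3, 1] yields n_hidden_layers = 2:
# the input and output layers are excluded from the hidden-layer count.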

# specify neuron transforms, weight initialization, and learning rate functions... per layer eg: [ hidden_layer_1, hidden_layer_2, output_layer ]
an_io_transform = SigmoidIO()
neurons_ios = [None] + [ an_io_transform ]*n_hidden_layers + [ an_io_transform ] # [ SigmoidIO() ] #
weight_init_functions = [ None ] + [ weight_init_function_random ]*n_hidden_layers + [ weight_init_function_random ]
learning_rate_functions = [ None ] + [ learning_rate_function ]*n_hidden_layers + [ learning_rate_function ]

results = []

for seed_value in range(10):
    print "seed = ", seed_value,
    random.seed(seed_value)
        
    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios, weight_init_functions, learning_rate_functions)

    #print "\n\nNetwork State just after creation\n", network
    
    data_collection_interval = 1000
    data_collector = NetworkDataCollector(network, data_collection_interval)
    
    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set, 0.01, 3000, data_collector)
    results.append(epoch_and_MSE[0])

    # save the network
    network.save_to_file( "trained_configuration.pkl" )
    # load a stored network
    # network = NeuralNet.load_from_file( "trained_configuration.pkl" )

Example #7
# specify neuron transforms, weight initialization, and learning rate functions... per layer
an_io_transform = STDNonMonotonicIOFunction()
neurons_ios = [an_io_transform] + [ an_io_transform ]*n_hidden_layers + [ an_io_transform ] # [ SigmoidIO() ] #
weight_init_functions = [ weight_init_function_random ] + [ weight_init_function_random ]*n_hidden_layers + [ weight_init_function_random ]
learning_rate_functions = [ learning_rate_function ] + [ learning_rate_function ]*n_hidden_layers + [ learning_rate_function ]

results = []
dfs_concatenated = DataFrame([])

for seed_value in range(10):
    print "seed = ", seed_value,
    random.seed(seed_value)
        
    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios, weight_init_functions, learning_rate_functions)

    print "\n\nNetwork State just after creation\n", network

    data_collector = NetworkDataCollector(network, data_collection_interval=1000)
    
    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set, error_limit=0.0000001, max_epochs=6000, data_collector=data_collector)

    results.append(epoch_and_MSE[0])

    dfs_concatenated = intermediate_post_process(seed_value, data_collector, dfs_concatenated)

    # print out the result
    for example_number, example in enumerate(training_set):
        inputs_for_training_example = example.features
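Example #7 is cut off inside its reporting loop. A hedged sketch of how such a
loop could conclude; network.calculate_output is a hypothetical method name,
not confirmed by this excerpt:

        # Hypothetical (method name assumed): run a forward pass and print it.
        # outputs = network.calculate_output(inputs_for_training_example)
        # print example_number, inputs_for_training_example, outputs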
Example #8
weight_init_functions = [
    weight_init_function_random
] + [weight_init_function_random] * n_hidden_layers + [
    weight_init_function_random
]
learning_rate_functions = [
    learning_rate_function
] + [learning_rate_function] * n_hidden_layers + [learning_rate_function]

results = []
dfs_concatenated = DataFrame([])

for seed_value in range(10):
    print "seed = ", seed_value,
    random.seed(seed_value)

    # initialize the neural network
    network = NeuralNet(n_neurons_for_each_layer, neurons_ios,
                        weight_init_functions, learning_rate_functions)

    print "\n\nNetwork State just after creation\n", network

    data_collector = NetworkDataCollector(network,
                                          data_collection_interval=1000)

    # start training on test set one
    epoch_and_MSE = network.backpropagation(training_set,
                                            error_limit=0.0000001,
                                            max_epochs=6000,
                                            data_collector=data_collector)

    results.append(epoch_and_MSE[0])

    dfs_concatenated = intermediate_post_process(seed_value, data_collector,
                                                 dfs_concatenated)
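Once the trial loop finishes, the accumulated DataFrame can be persisted with
standard pandas I/O (the filename is illustrative):

# Write the per-seed diagnostics out for later analysis.
dfs_concatenated.to_csv("trial_diagnostics.csv")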