Example #1
import numpy
import nn

# Reading the data inputs. The filename below is an assumption; see the 'extract_features.py'
# script for how the features are extracted and saved.
data_inputs = numpy.load("dataset_features.npy")

# Keeping only the features whose standard deviation exceeds 50.
features_STDs = numpy.std(a=data_inputs, axis=0)
data_inputs = data_inputs[:, features_STDs > 50]

# Reading the data outputs. Check the 'extract_features.py' script for extracting the features & preparing the outputs of the dataset.
data_outputs = numpy.load("outputs.npy")

# The number of inputs (i.e. feature vector length) per sample
num_inputs = data_inputs.shape[1]
# Number of outputs per sample
num_outputs = 4

HL1_neurons = 150
HL2_neurons = 60

# Building the network architecture.
input_layer = nn.InputLayer(num_inputs)
hidden_layer1 = nn.DenseLayer(num_neurons=HL1_neurons,
                              previous_layer=input_layer,
                              activation_function="relu")
hidden_layer2 = nn.DenseLayer(num_neurons=HL2_neurons,
                              previous_layer=hidden_layer1,
                              activation_function="relu")
output_layer = nn.DenseLayer(num_neurons=num_outputs,
                             previous_layer=hidden_layer2,
                             activation_function="sigmoid")

# Training the network.
nn.train_network(num_epochs=10,
                 last_layer=output_layer,
                 data_inputs=data_inputs,
                 data_outputs=data_outputs,
                 learning_rate=0.01)  # learning_rate is an assumed value; adjust as needed
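The feature-filtering step at the top of this example is plain NumPy boolean masking; here is a minimal standalone sketch of that idea on toy data (the array values below are illustrative, not from the real dataset):

import numpy

# Toy feature matrix: 4 samples x 3 features (illustrative values only).
toy_inputs = numpy.array([[0.0, 10.0, 500.0],
                          [0.1, 90.0, 100.0],
                          [0.0, 20.0, 900.0],
                          [0.1, 80.0, 300.0]])

# Per-feature standard deviation (axis=0 gives one value per column).
stds = numpy.std(a=toy_inputs, axis=0)

# The boolean mask keeps only the columns whose STD exceeds the threshold.
filtered = toy_inputs[:, stds > 50]

print(stds)            # approximately [0.05, 35.36, 295.8]
print(filtered.shape)  # (4, 1): only the third column passes the 50 threshold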
Example #2
def create_network(num_neurons_input,
                   num_neurons_output,
                   num_neurons_hidden_layers=[],
                   output_activation="relu",
                   hidden_activations="relu",
                   parameters_validated=False):
    """
    Creates a neural network as a linked list between the input, hidden, and output layers, where the layer at index N (the last/output layer) references the layer at index N-1 (a hidden layer) through its previous_layer attribute. The input layer does not reference any other layer because it is the end of the chain formed by the previous_layer links.

    In addition to the parameters_validated parameter, this function accepts the same parameters passed to the constructor of the gann.GANN class, except for the num_solutions parameter, because create_network() builds only a single network.

    num_neurons_input: Number of neurons in the input layer.
    num_neurons_output: Number of neurons in the output layer.
    num_neurons_hidden_layers=[]: A list holding the number of neurons in the hidden layer(s). If empty [], no hidden layers are used. For each int value it holds, a hidden layer is created with that number of neurons. For example, num_neurons_hidden_layers=[10] creates a single hidden layer with 10 neurons, and num_neurons_hidden_layers=[10, 5] creates 2 hidden layers with 10 neurons in the first and 5 neurons in the second.
    output_activation="relu": The name of the activation function of the output layer which defaults to "relu".
    hidden_activations="relu": The name(s) of the activation function(s) of the hidden layer(s). It defaults to "relu". If passed as a string, this means the specified activation function will be used across all the hidden layers. If passed as a list, then it must has the same length as the length of the num_neurons_hidden_layers list. An exception is raised if there lengths are different. When hidden_activations is a list, a one-to-one mapping between the num_neurons_hidden_layers and hidden_activations lists occurs.
    parameters_validated=False: If False, then the parameters are not validated and a call to the validate_network_parameters() function is made.

    Returns the reference to the last layer in the network architecture, which is the output layer. From that reference, all network layers can be fetched.
    """

    # When parameters_validated is False, then the parameters are not yet validated and a call to validate_network_parameters() is required.
    if not parameters_validated:
        # Validating the passed parameters before creating the network.
        hidden_activations = validate_network_parameters(
            num_neurons_input=num_neurons_input,
            num_neurons_output=num_neurons_output,
            num_neurons_hidden_layers=num_neurons_hidden_layers,
            output_activation=output_activation,
            hidden_activations=hidden_activations)

    # Creating the input layer as an instance of the nn.InputLayer class.
    input_layer = nn.InputLayer(num_neurons_input)

    if len(num_neurons_hidden_layers) > 0:
        # If there are hidden layers, then the first hidden layer is connected to the input layer.
        hidden_layer = nn.DenseLayer(
            num_neurons=num_neurons_hidden_layers.pop(0),
            previous_layer=input_layer,
            activation_function=hidden_activations.pop(0))
        # For the other hidden layers, each hidden layer is connected to its preceding hidden layer.
        for hidden_layer_idx in range(len(num_neurons_hidden_layers)):
            hidden_layer = nn.DenseLayer(
                num_neurons=num_neurons_hidden_layers.pop(0),
                previous_layer=hidden_layer,
                activation_function=hidden_activations.pop(0))

        # The last hidden layer is connected to the output layer.
        # The output layer is created as an instance of the nn.DenseLayer class.
        output_layer = nn.DenseLayer(num_neurons=num_neurons_output,
                                     previous_layer=hidden_layer,
                                     activation_function=output_activation)

    # If there are no hidden layers, then the output layer is connected directly to the input layer.
    else:
        # The output layer is created as an instance of the nn.DenseLayer class.
        output_layer = nn.DenseLayer(num_neurons=num_neurons_output,
                                     previous_layer=input_layer,
                                     activation_function=output_activation)

    # Returning the reference to the last layer in the network architecture, which is the output layer. From that reference, all network layers can be fetched.
    return output_layer
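As a quick illustration of the interface documented above, here is a minimal usage sketch of create_network(); the layer sizes and activation names below are arbitrary illustrative choices, not values taken from the other examples:

# Two hidden layers (10 and 5 neurons), ReLU hidden activations, sigmoid output.
# Note: create_network() pops from the lists it receives, so pass copies if you
# need to reuse num_neurons_hidden_layers or hidden_activations afterwards.
last_layer = create_network(num_neurons_input=20,
                            num_neurons_output=2,
                            num_neurons_hidden_layers=[10, 5],
                            output_activation="sigmoid",
                            hidden_activations=["relu", "relu"],
                            parameters_validated=False)

# Walk the linked list back from the output layer to inspect the architecture.
layer = last_layer
while layer is not None:
    print(type(layer).__name__)
    layer = getattr(layer, "previous_layer", None)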
Example #3
import nn
from data.arrow import display, train_data, test_data

#display(train_data[0][0])

# define model
input_layer = [nn.InputLayer(inputs=25)]
encoder = [
    nn.LeakyReLULayer(inputs=25, outputs=16, alpha=0.01),
    nn.LeakyReLULayer(inputs=16, outputs=4, alpha=0.01),
    nn.LeakyReLULayer(inputs=4, outputs=2, alpha=0.01),
]
decoder = [
    nn.LeakyReLULayer(inputs=2, outputs=4, alpha=0.01),
    nn.LeakyReLULayer(inputs=4, outputs=16, alpha=0.01),
    nn.LeakyReLULayer(inputs=16, outputs=25, alpha=0.01),
    nn.DropoutLayer(inputs=25, probability=0.1)  # input size must match the 25 outputs of the preceding layer
]
model = nn.NeuralNetwork(input_layer + encoder + decoder)

# autoencoder training pairs: each input is also its own target
_train_data = [(x, x) for x, _ in train_data]
model.fit(_train_data, learning_rate=0.03, threshold=1e-5, epochs=300000)

# test
for x, y in test_data:
    print('---------\ninput:')
    display(x)
    print('\nprediction:')
    display(model.predict(x))
    print('')
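A small follow-up sketch for quantifying reconstruction quality, assuming model.predict(x) returns a sequence of the same length as x (the display() calls above suggest it does); the reconstruction_error() helper below is not part of the nn module:

# Mean squared reconstruction error over a dataset (assumes predict() returns
# a sequence the same length as x).
def reconstruction_error(model, data):
    total = 0.0
    for x, _ in data:
        x_hat = model.predict(x)
        total += sum((a - b) ** 2 for a, b in zip(x, x_hat)) / len(x)
    return total / len(data)

print('mean reconstruction MSE:', reconstruction_error(model, test_data))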
Example #4
import nn
from data.arrow import train_data, test_data

# define model
model = nn.NeuralNetwork([
    nn.InputLayer(inputs=25),
    nn.LeakyReLULayer(inputs=25, outputs=25, alpha=0.01),
    nn.LeakyReLULayer(inputs=25, outputs=4, alpha=0.01),
    nn.SigmoidLayer(inputs=4, outputs=4),
])

# train the model
model.fit(train_data, learning_rate=0.01, threshold=5e-6, epochs=200000)

# test
for x, y in test_data:
    print(x, y, model.predict(x))
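Assuming the four sigmoid outputs are class scores and y is a one-hot target of length 4 (an assumption; the snippet only prints the raw values), a rough accuracy check could compare argmax positions:

# Assumes y is a one-hot vector of length 4 and predict() returns 4 scores.
def argmax(values):
    return max(range(len(values)), key=lambda i: values[i])

correct = sum(1 for x, y in test_data
              if argmax(model.predict(x)) == argmax(y))
print('accuracy:', correct / len(test_data))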