Example 1
import data_loader_nordic_runes as dat
import nn_framework.activation as activation
import nn_framework.framework as framework
import nn_framework.error_fun as error_fun
import nn_framework.layer as layer
from autoencoder_viz import Printer

# Sizes of the hidden layers between the input and output layers.
N_NODES = [31, 22, 13]

training_set, evaluation_set = dat.get_data_sets()

# Pull one sample to find the input dimensions and pixel count, and to
# set up the visualizer.
sample = next(training_set())
input_value_range = (0, 1)
n_pixels = sample.shape[0] * sample.shape[1]
printer = Printer(input_shape=sample.shape)

# The input and output layers each get one node per pixel. Build one Dense
# layer for every pair of adjacent layer sizes.
n_nodes = [n_pixels] + N_NODES + [n_pixels]
model = []
for i_layer in range(len(n_nodes) - 1):
    model.append(
        layer.Dense(n_nodes[i_layer], n_nodes[i_layer + 1], activation.tanh))

autoencoder = framework.ANN(
    model=model,
    error_fun=error_fun.abs,
    printer=printer,
    expected_range=input_value_range,
)
autoencoder.train(training_set)
autoencoder.evaluate(evaluation_set)
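All of the examples here call get_data_sets() and then next(training_set()), so the loader is expected to return two no-argument callables that each produce a generator of 2-D pixel arrays. Below is a minimal stand-in sketch with that interface, handy for running the snippets without the original nordic runes data; the 7 x 7 random arrays are an assumption, not the real data set.

import numpy as np


def get_data_sets():
    # Stand-in for data_loader_nordic_runes.get_data_sets() (assumed interface).
    # It returns two no-argument callables; calling one returns a generator
    # that yields 2-D arrays of pixel values in [0, 1], matching how the
    # examples use next(training_set()).
    examples = [np.random.rand(7, 7), np.random.rand(7, 7)]  # made-up images

    def training_set():
        while True:
            yield examples[np.random.randint(len(examples))]

    def evaluation_set():
        while True:
            yield examples[np.random.randint(len(examples))]

    return training_set, evaluation_set


training_set, evaluation_set = get_data_sets()
sample = next(training_set())  # a 7 x 7 array in this sketch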
Example 2
# Imports mirror Example 1; the exact modules inside nn_framework that provide
# Dense, RangeNormalization, Difference, L1, L2, and Limit are assumed here.
import data_loader_nordic_runes as dat
import nn_framework.activation as activation
import nn_framework.error_fun as error_fun
import nn_framework.framework as framework
from nn_framework.layer import Dense, RangeNormalization, Difference
from nn_framework.regularization import L1, L2, Limit
from autoencoder_viz import Printer

training_set, evaluation_set = dat.get_data_sets()

# Pull one sample to get the input dimensions for the visualizer
# and the size of the output layer.
sample = next(training_set())
n_pixels = sample.shape[0] * sample.shape[1]
printer = Printer(input_shape=sample.shape)

N_NODES = [24]
n_nodes = N_NODES + [n_pixels]
# dropout_rates = [.2, .5]
model = []

# Normalize the raw inputs, using the training set to find their range.
model.append(RangeNormalization(training_set))

for i_layer in range(len(n_nodes)):
    new_layer = Dense(
        n_nodes[i_layer],
        activation.tanh,
        previous_layer=model[-1],
        # dropout_rate=dropout_rates[i_layer],
    )
    new_layer.add_regularizer(L1())
    # new_layer.add_regularizer(L2())
    new_layer.add_regularizer(Limit(4.0))
    model.append(new_layer)

# Compare the final layer's output with the normalized input so the error
# measures how well the input was reconstructed.
model.append(Difference(model[-1], model[0]))

autoencoder = framework.ANN(
    model=model,
    error_fun=error_fun.sqr,
    printer=printer,
)
autoencoder.train(training_set)
autoencoder.evaluate(evaluation_set)
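Example 2 attaches an L1 regularizer and a Limit regularizer to each Dense layer. The snippet does not show how the framework applies them, but the underlying techniques are standard: L1 pulls every weight a constant step toward zero, and Limit clips weight magnitudes to a ceiling. The sketch below illustrates both under those assumptions; the constructors and the update method are illustrative, not the framework's actual regularization API.

import numpy as np


class L1:
    # Nudge every weight toward zero by a constant step on each update.
    def __init__(self, regularization_amount=1e-2):
        self.regularization_amount = regularization_amount

    def update(self, weights):
        return weights - self.regularization_amount * np.sign(weights)


class Limit:
    # Clip the weights so that none exceeds the limit in magnitude.
    def __init__(self, weight_limit=1.0):
        self.weight_limit = weight_limit

    def update(self, weights):
        return np.clip(weights, -self.weight_limit, self.weight_limit)


weights = np.array([[3.2, -0.4], [-5.1, 0.0]])
weights = L1(regularization_amount=.05).update(weights)
weights = Limit(4.0).update(weights)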
Example 3
import numpy as np

import data_loader_nordic_runes as dl  # assumed: the same loader used in the other examples
from nn_framework import activation
from nn_framework import framework
from nn_framework import layer
# This is the highest-level script. It imports from everywhere else, so you can
# start here and drill down into any of the packages or functions it calls.
# At a high level it creates the training/evaluation data set generators, gets
# the dimensions of a data sample, sets the number of outputs, and builds an
# architecture, which is a blueprint of the number of nodes in each layer.
# It then creates one Dense layer for each step in that architecture and
# collects them in a list called layers. Finally it creates the high-level
# framework.ANN object, supplies the previously created list of layers, and
# calls that model's functions.

# Using the data loader module, load in some sample data.
# Remember that training_set and evaluation_set are generator functions:
# calling one returns a fresh generator.
training_set, evaluation_set = dl.get_data_sets()

# Work out how many nodes the input and output layers need, then build the
# architecture, which lists the number of nodes in each layer of the network.
input_nodes = len(np.reshape(next(training_set()), -1))
output_nodes = input_nodes

HL_nodes = [6, 3, 9]

architecture = [input_nodes] + HL_nodes + [output_nodes]
layers = []
for layer_index in range(len(architecture) - 1):
    layers.append(
        layer.Dense(num_inputs=architecture[layer_index],
                    num_outputs=architecture[layer_index + 1],
                    activation_func=activation.tanh))
# Pass in the actual range of the pixel values so the framework can
# normalize them to the range it wants.
actual_pixel_range = {'low': 0, 'high': 1}

# Call our framework module from the nn_framework package and instantiate a high level ANN object
autoencoder = framework.ANN(layers=layers,
                            actual_pixel_range=actual_pixel_range)
autoencoder.train(training_set)
autoencoder.evaluate(evaluation_set)
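The comments in Example 3 describe a Dense layer built from num_inputs, num_outputs, and an activation function. Its forward pass is an affine transform followed by the activation. The sketch below illustrates that calculation with a bias input appended; the class and attribute names are illustrative and not taken from nn_framework.layer.

import numpy as np


class TinyDense:
    # Illustrative fully connected layer: output = activation(x @ weights),
    # with a constant bias input appended to x.
    def __init__(self, num_inputs, num_outputs, activation_func=np.tanh):
        # Small random initial weights; one extra row for the bias input.
        self.weights = np.random.uniform(
            -1, 1, size=(num_inputs + 1, num_outputs)) / num_inputs
        self.activation_func = activation_func

    def forward(self, inputs):
        x = np.concatenate([np.ravel(inputs), [1.0]])  # flatten, add bias
        return self.activation_func(x @ self.weights)


hidden = TinyDense(num_inputs=49, num_outputs=24)
activity = hidden.forward(np.random.rand(7, 7))  # 49 pixels -> 24 node outputs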
Example 4
import os

import data_loader_nordic_runes as dat
import nn_framework.activation as activation
import nn_framework.error_fun as error_fun
import nn_framework.framework as framework
import nn_framework.layer as layer
import autoencoder_viz as viz

input_pixel_range = [0, 1]  # range of values of input pixels
normalized_pixel_range = [-.5, .5]  # range the input pixels get rescaled into
n_hidden_nodes = [24]

training_set, evaluation_set = dat.get_data_sets()

sample = next(training_set())
n_pixels = sample.shape[0] * sample.shape[1]
n_nodes = [n_pixels] + n_hidden_nodes + [n_pixels]

model = []
for i_layer in range(len(n_nodes) - 1):
    model.append(
        layer.Dense(n_nodes[i_layer], n_nodes[i_layer + 1], activation.tanh))

printer = viz.Printer(input_shape=sample.shape)
# Create the directory for the visualization images if it isn't there yet.
os.makedirs('nn_images', exist_ok=True)

autoencoder = framework.ANN(model=model,
                            error_function=error_fun.sqr,
                            printer=printer,
                            normalized_pixel_range=normalized_pixel_range,
                            input_pixel_range=input_pixel_range)
autoencoder.train(training_set)
autoencoder.evaluate(evaluation_set)
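Example 4 hands the ANN both input_pixel_range and normalized_pixel_range, which implies a linear rescaling of pixel values from the first range into the second before they reach the first layer. A minimal sketch of that rescaling follows; the function name and where it would be called are assumptions, and only the two ranges come from the example.

import numpy as np


def normalize_pixels(pixels, input_pixel_range=(0, 1), normalized_pixel_range=(-.5, .5)):
    # Linearly map values from input_pixel_range into normalized_pixel_range.
    in_low, in_high = input_pixel_range
    out_low, out_high = normalized_pixel_range
    scaled = (pixels - in_low) / (in_high - in_low)
    return scaled * (out_high - out_low) + out_low


print(normalize_pixels(np.array([0.0, 0.5, 1.0])))  # [-0.5  0.   0.5]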