Example #1
    def populate(self, in_size, out_layer, hidden_layers=None):
        """
        initializes the layers according to the size parameters
        @in_size input size of the first layer
        @out_layer descriptor (size, fn, fn_deriv) for the output layer
        @hidden_layers list of descriptors for the hidden layers
        """
        # copy so the caller's list (and the shared default) is never mutated
        layers = list(hidden_layers) if hidden_layers else []
        layers.append(out_layer)

        self.training_layers.append(CachingNeuralLayer(
            in_size=in_size,  # the first layer takes the raw network input
            out_size=layers[0]["size"],
            activation_fn=layers[0]["fn"],
            activation_fn_deriv=layers[0]["fn_deriv"]
        ))
        # create one more layer for each remaining layer descriptor
        for i in range(1, len(layers)):
            self.training_layers.append(CachingNeuralLayer(
                in_size=self.training_layers[-1].size,  # input size equals the output size of the preceding layer
                out_size=layers[i]["size"],
                activation_fn=layers[i]["fn"],
                activation_fn_deriv=layers[i]["fn_deriv"]
            ))
        Logger.log(str(len(self.training_layers)) + " layers created.")
        # mark network as populated
        self.populated = True
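
The CachingNeuralLayer class used by populate is not shown on this page. The sketch below is an assumption reconstructed from how it is used above (the constructor arguments and the size attribute populate reads to chain layers); it is not the original implementation. For example, with in_size=2, one hidden layer of size 3, and an output layer of size 1 (as in the script below), populate creates a 2-to-3 layer followed by a 3-to-1 layer.

import numpy

class CachingNeuralLayer:
    """Minimal sketch (assumed interface, not the original class): a fully
    connected layer that caches its last input/output for backpropagation."""

    def __init__(self, in_size, out_size, activation_fn, activation_fn_deriv):
        self.in_size = in_size
        self.size = out_size  # populate() reads .size to chain layers
        self.activation_fn = activation_fn
        self.activation_fn_deriv = activation_fn_deriv
        self.weights = numpy.random.uniform(-0.1, 0.1, (out_size, in_size))
        self.biases = numpy.zeros((out_size, 1))
        self.last_input = None
        self.last_output = None

    def feed(self, data):
        # data: column vector of shape (in_size, 1); a None activation
        # is treated as the identity (linear output layer)
        self.last_input = data
        raw = self.weights.dot(data) + self.biases
        self.last_output = self.activation_fn(raw) if self.activation_fn else raw
        return self.last_output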
import numpy

from neural_network.util import Logger
from neural_network.NeuralLayer import NeuralLayer as Layer
from multi_layer_perceptron.MLPNetwork import MLPNetwork

Logger.DEBUG = False

data_in = numpy.array([[[0], [0]], [[0], [1]], [[1], [0]], [[1], [1]]])
data_out_xor = numpy.array([[0], [1], [1], [0]])

data_out_and = numpy.array([[0], [0], [0], [1]])

data_out_or = numpy.array([[0], [1], [1], [1]])

data_out = data_out_xor
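
# Illustrative sanity check (not part of the original example): each XOR
# target is the exclusive-or of the two input bits in the matching row.
for (a, b), target in zip(data_in, data_out_xor):
    assert int(a[0]) ^ int(b[0]) == int(target[0])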

Logger.log("data_shape: " + str(numpy.shape(data_in)))
Logger.log("data[0]_shape: " + str(numpy.shape(data_in[0])))

mlp = MLPNetwork()

mlp.populate(
        in_size=2,
        out_layer={
            "size": 1,
            "fn": None,
            "fn_deriv": None
        },
        hidden_layers=[{
            "size": 3,
            "fn": Layer.activation_sigmoid,
            "fn_deriv": Layer.activation_sigmoid_deriv