def test_neuralNetwork_addLayer():
    """Check NeuralNetwork.addLayer / addOutputLayer bookkeeping.

    Verifies that every added layer produces a weight matrix, bias vector,
    and Activation object of the expected shape/kind, that the two ways of
    adding an output layer (``addOutputLayer()`` and ``addLayer(output=True)``)
    agree, and that a fully assembled network can be evaluated end to end.
    """
    inputs = 6
    outputs = 4
    layers = 3
    neurons = 87
    activations = 'sigmoid'
    nn = NeuralNetwork(inputs=inputs,
                       outputs=outputs,
                       layers=layers,
                       neurons=neurons,
                       activations=activations)

    # First added layer connects the input vector to `neurons` units and
    # inherits the network-wide default activation ('sigmoid').
    nn.addLayer()
    assert nn.weights[-1].shape == (inputs, neurons)
    assert nn.biases[-1].shape == (neurons, 1)
    assert type(nn.act[-1]) is Activation
    assert nn.act[-1].function == nn.act[-1]._sigmoid

    # A subsequent layer may override both its width and its activation.
    new_neurons = 10
    new_activations = 'relu'
    nn.addLayer(neurons=new_neurons, activations=new_activations)
    assert nn.weights[-1].shape == (neurons, new_neurons)
    assert nn.biases[-1].shape == (new_neurons, 1)
    assert type(nn.act[-1]) is Activation
    assert nn.act[-1].function == nn.act[-1]._relu

    # Independent copies so each output-layer variant starts from the same
    # hidden-layer stack.
    nn_copy1 = copy.deepcopy(nn)
    nn_copy2 = copy.deepcopy(nn)
    nn_copy3 = copy.deepcopy(nn)
    nn_copy4 = copy.deepcopy(nn)

    # Default output layer via the dedicated method.
    nn_copy1.addOutputLayer()
    assert nn_copy1.weights[-1].shape == (new_neurons, outputs)
    assert nn_copy1.biases[-1].shape == (outputs, 1)

    # Same result via addLayer(output=True).
    nn_copy2.addLayer(output=True)
    assert nn_copy2.weights[-1].shape == (new_neurons, outputs)
    assert nn_copy2.biases[-1].shape == (outputs, 1)

    # Overriding the output width is expected to warn; silence it here since
    # only the resulting shapes are under test.
    new_outputs = 24
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        nn_copy3.addOutputLayer(outputs=new_outputs)
    assert nn_copy3.weights[-1].shape == (new_neurons, new_outputs)
    assert nn_copy3.biases[-1].shape == (new_outputs, 1)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        nn_copy4.addLayer(outputs=new_outputs, output=True)
    assert nn_copy4.weights[-1].shape == (new_neurons, new_outputs)
    assert nn_copy4.biases[-1].shape == (new_outputs, 1)

    # Now that we constructed an entire network, check that the matrices line
    # up so that the network can be evaluated
    nn = copy.deepcopy(nn_copy1)
    x = np.random.uniform(-1.0, 1.0, size=(inputs, 1))
    assert nn(x).shape == (outputs, 1)
    assert nn.network(x).shape == (outputs, 1)

    nn = copy.deepcopy(nn_copy3)
    assert nn(x).shape == (new_outputs, 1)
    assert nn.network(x).shape == (new_outputs, 1)