Example #1
# Imports needed to run the examples below; approx is assumed to be pytest.approx,
# and neural_network is the module under test.
import numpy as np
from pytest import approx

import neural_network


def test_output1():
    """If a single-layer neural network has homogeneous inputs and a
    homogeneous weight matrix, all the outputs should be the same."""
    n_inputs = 10
    n_outputs = 5
    weights = np.ones((n_inputs + 1, n_outputs))  # Adding an extra weight for the bias

    nn = neural_network.SimpleNeuralNetworkExecutable([ weights ], neural_network.sigmoid)

    x = [1]*n_inputs
    result = nn(x)
    assert(len(result) == n_outputs)
    for o in result:
        assert(approx(result[0]) == o)
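The neural_network module itself is not shown in these examples. Purely as an illustration of how the tests use it, a minimal sketch consistent with the calls above might look like the following; the class name, sigmoid, relu, and the append-a-1-for-the-bias convention are inferred from the tests, and everything else is an assumption rather than the project's actual implementation.

# Hypothetical sketch only -- not the project's real implementation.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def relu(z):
    return np.maximum(0.0, z)

class SimpleNeuralNetworkExecutable:
    """Callable feed-forward network: one weight matrix per layer, each of
    shape (n_inputs + 1, n_outputs), with a bias input of 1 appended to
    every layer's input (the convention the tests imply)."""

    def __init__(self, weight_matrices, activation):
        self.weight_matrices = weight_matrices
        self.activation = activation

    def __call__(self, x):
        a = np.asarray(x, dtype=float)
        for w in self.weight_matrices:
            a = self.activation(np.append(a, 1.0) @ w)  # append bias, apply layer
        return a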
Example #2
def test_output5():
    """If we give each ReLu neuron in a single-layer network a 
    negative input, their output should be zero."""
    n_inputs = 10
    n_outputs = 5
    # Random weights all < 0, adding an extra weight for the bias
    weights = -np.random.uniform(0, 1, (n_inputs + 1, n_outputs)) 

    nn = neural_network.SimpleNeuralNetworkExecutable([ weights ], neural_network.relu)

    x = [1]*n_inputs
    result = nn(x)
    assert(len(result) == n_outputs)
    for o in result:
        assert(approx(0.0) == o)
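The property under test can be checked independently of the neural_network module: with an all-ones input (plus a bias input of 1), each pre-activation is the column sum of non-positive weights, and ReLU maps any non-positive value to zero. A standalone sketch of that check, assuming the same bias convention as above:

import numpy as np

rng = np.random.default_rng(0)
weights = -rng.uniform(0, 1, (11, 5))          # same shape as in the test
pre_activation = np.ones(11) @ weights         # all-ones input plus bias input
outputs = np.maximum(0.0, pre_activation)      # ReLU clamps non-positive values to zero
assert np.allclose(outputs, 0.0)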
Example #3
def test_output4():
    """If we give each sigmoid neuron in a single-layer network a very 
    negative input, their output should be approximately zero."""
    n_inputs = 10
    n_outputs = 5
    # Random weights all <= -100, adding an extra weight for the bias
    weights = -100*np.random.uniform(1, 2, (n_inputs + 1, n_outputs))  

    nn = neural_network.SimpleNeuralNetworkExecutable([ weights ], neural_network.sigmoid)

    x = [1]*n_inputs
    result = nn(x)
    assert(len(result) == n_outputs)
    for o in result:
        assert(approx(0.0) == o)
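Numerically, each pre-activation here is a sum of eleven weights that are all at most -100, so it is at most -1100, and the logistic sigmoid of such a value evaluates to zero in double precision (np.exp of the negated argument overflows to inf, with only a RuntimeWarning). A quick check of that arithmetic:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

print(sigmoid(-1100.0))   # 0.0 (np.exp(1100.0) overflows to inf, giving 1/inf == 0.0)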
Example #4
def test_output6():
    """If we give each ReLu neuron in a single-layer network a 
    positive input, with network inputs of all ones, their
    output should equal the sum of all the weights."""
    n_inputs = 10
    n_outputs = 5
    # Random weights all > 0, adding an extra weight for the bias
    weights = np.random.uniform(0, 1, (n_inputs + 1, n_outputs)) 

    nn = neural_network.SimpleNeuralNetworkExecutable([ weights ], neural_network.relu)

    x = [1]*n_inputs
    result = nn(x)
    assert(len(result) == n_outputs)
    for i, o in enumerate(result):
        expected = np.sum(weights[:,i])
        assert(approx(expected) == o)
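The expected value in this test follows directly from the all-ones input: with the bias input also set to 1, the pre-activation of output i is exactly the sum of column i of the weight matrix, and since every weight is positive, ReLU leaves that sum unchanged. A standalone check of that identity, again assuming the bias convention above:

import numpy as np

rng = np.random.default_rng(0)
weights = rng.uniform(0, 1, (11, 5))
pre_activation = np.ones(11) @ weights          # dot with all ones = column sums
assert np.allclose(pre_activation, weights.sum(axis=0))
# Positive column sums pass through ReLU unchanged, giving the expected outputs.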
Example #5
def test_output2():
    """If a multi-layer neural network has homogeneous inputs and
    homogeneous weight matrices, all the outputs should be the same."""
    n_inputs = 10
    n_hidden1 = 5
    n_hidden2 = 3
    n_outputs = 5
    # Three layers, three matrices
    weights = [ np.ones((n_inputs + 1, n_hidden1)),  # Adding an extra weight for the bias
                3*np.ones((n_hidden1 + 1, n_hidden2)),
                2*np.ones((n_hidden2 + 1, n_outputs)) ]

    nn = neural_network.SimpleNeuralNetworkExecutable(weights, neural_network.sigmoid)

    x = [1]*n_inputs
    result = nn(x)
    assert(len(result) == n_outputs)
    for o in result:
        assert(approx(result[0]) == o)
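The symmetry argument behind this test can be reproduced by hand: because every weight matrix is constant and the input is all ones, every neuron within a layer receives the same pre-activation, so each layer's activations are identical and the final outputs all match. A sketch of that forward pass, assuming the same append-a-1-for-the-bias convention as above:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

a = np.ones(10)                                   # homogeneous network input
layers = [np.ones((11, 5)), 3 * np.ones((6, 3)), 2 * np.ones((4, 5))]
for w in layers:
    a = sigmoid(np.append(a, 1.0) @ w)            # append bias input, apply layer
print(a)                                          # five identical values, by symmetry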