Example #1
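The examples below appear to be tests and scripts for volrpynn running on PyNN's NEST backend, shown without their shared preamble. A plausible preamble is sketched here; the aliases `np`, `pynn`, and `v` match the code, but the exact module paths and the `pynn.setup()` call are assumptions:

import numpy as np
import pyNN.nest as pynn
import volrpynn.nest as v

pynn.setup()  # initialise the NEST backend before building populations
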
def test_nest_dense_normalisation():
    p1 = pynn.Population(12, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(10, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l = v.Dense(p1, p2, v.ReLU(), weights=1)
    m = v.Model(l)
    out = m.predict(np.ones(12) * 12, 50)
    assert np.allclose(np.ones(10), out, atol=0.1)
Example #2
def test_nest_model_linear_scaling():
    p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count, weights=1)
    m = v.Model(l1)
    xs = np.array([12, 12])
    out = m.predict(xs, 50)
    assert np.allclose([38, 38], out)
Example #3
def test_nest_model_predict_inactive():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count)
    m = v.Model(l)
    out = m.predict(np.array([0, 0]), 10)
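    # With zero input the network should remain nearly silent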
    assert len(out) == 2
    assert out.sum() < 10
Example #4
def test_nest_model_predict_active():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count, weights=1)
    m = v.Model(l)
    out = m.predict(np.array([1, 1]), 1000)
    assert len(out) == 2
    assert abs(out[0] - out[1]) <= 10  # expect approximately the same spike count
Example #5
def test_nest_create_input_populations():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l = v.Dense(p1, p2, v.ReLU())
    m = v.Model(l)
    assert len(m.input_populations) == 2
    inp = np.array([1, 0.2])
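    # set_input encodes the input vector as per-neuron offset currents,
    # as the assertions below verify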
    m.set_input(inp)
    assert m.input_populations[0].get('i_offset') == 1
    assert m.input_populations[1].get('i_offset') == 0.2
Example #6
def test_nest_model_spike_normalisation():
    p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(4, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p3 = pynn.Population(4, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l1 = v.Dense(p1, p2, decoder=v.spike_rate(50), weights=1)
    l2 = v.Dense(p2, p3, decoder=v.spike_rate(50), weights=1)
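    # spike_rate(50) presumably decodes spike counts into rates over the
    # 50 ms simulation window, keeping the decoded outputs within (0, 1)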
    m = v.Model(l1, l2)
    m.predict([12, 12], 50)
    for rate in np.concatenate((l1.get_output(), l2.get_output())):
        assert rate > 0
        assert rate < 1
Example #7
def test_nest_input_projection():
    p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l = v.Dense(p1, p2, v.ReLU(), weights=1)
    m = v.Model(l)
    t = v.LinearTranslation()
    assert np.allclose(l.get_weights(), np.ones((2, 2)))
    assert m.input_projection[0].weight == t.weights(1, 1)
    assert m.input_projection[1].weight == t.weights(1, 1)
    m.predict([1, 1], 50)
    spiketrains = l.output
    assert abs(len(spiketrains[0]) - len(spiketrains[1])) <= 20
Example #8
def test_nest_model_backwards():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(3, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_linear, weights=1)
    m = v.Model(l1)
    xs = np.array([12, 12])
    spikes = m.predict(xs, 50)

    def step(w, wg, b, bg):
        # Optimiser callback: receives (weights, weight gradients, biases,
        # bias gradients) and returns the updated (weights, biases)
        return w - wg, b - bg

    m.backward([0, 1, 1], step)  # plain gradient step, no learning rate
    expected_weights = np.array([[1, 0, 0], [1, 0, 0]])
    assert np.allclose(l1.get_weights(), expected_weights, atol=0.2)
Example #9
def test_nest_model_backwards_reset():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_normalised, weights=1)
    m = v.Model(l1)
    xs1 = np.array([10, 10])
    ys1 = np.array([0, 10])  # unused: the error signals below are passed inline
    xs2 = np.array([10, 10])
    ys2 = np.array([0, 10])  # unused
    # First pass
    target1 = m.predict(xs1, 50)
    m.backward([0, 1], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[1, 0], [1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)
    # Second pass
    target2 = m.predict(xs2, 50)
    m.backward([1, 0], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[-1, 0], [-1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)
Example #10
def test_nest_dense_numerical_gradient():
    # Test idea from https://github.com/stephencwelch/Neural-Networks-Demystified/blob/master/partSix.py
    # Simple power function and its derivative (shown for reference; not used below)
    f = lambda x: x**2
    fd = lambda x: 2 * x
    e = 1e-4  # step size for the central-difference estimate

    weights1 = np.ones((2, 3)).ravel()
    weights2 = np.ones((3, 1)).ravel()

    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(3, pynn.IF_cond_exp())
    p3 = pynn.Population(1, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.Sigmoid(), decoder=lambda x: x)
    l2 = v.Dense(p2, p3, v.Sigmoid(), decoder=lambda x: x)
    m = v.Model(l1, l2)
    error = v.SumSquared()

    def forward_pass(xs):
        "Pure-NumPy sigmoid forward pass (no simulation), caching layer inputs and outputs"
        l1.input_cache = xs
        l1.output = l2.input_cache = v.Sigmoid()(np.matmul(xs, l1.weights))
        l2.output = v.Sigmoid()(np.matmul(l2.input_cache, l2.weights))
        return l2.output

    def compute_numerical_gradient(xs, ys):
        "Estimates the gradient of every weight in both layers via central differences"
        weights1 = l1.get_weights().ravel() # 1D
        weights2 = l2.get_weights().ravel()
        weights = np.concatenate((weights1, weights2))
        gradients = np.zeros(weights.shape)

        def initialise_with_distortion(index, delta):
            distortion = np.copy(weights)
            distortion[index] = distortion[index] + delta
            l1.set_weights(distortion[:len(weights1)].reshape(l1.weights.shape))
            l2.set_weights(distortion[len(weights1):].reshape(l2.weights.shape))
            forward_pass(xs)

        # Central differences: nudge each weight by +e and -e; since both
        # errors are negated, error2 - error1 equals E(w+e) - E(w-e)
        for index in range(len(weights)):
            initialise_with_distortion(index, e)
            error1 = -error(l2.output, ys)
            initialise_with_distortion(index, -e)
            error2 = -error(l2.output, ys)
            gradients[index] = (error2 - error1) / (2 * e)
        
        # Reset weights
        l1.set_weights(weights1.reshape(2, 3))
        l2.set_weights(weights2.reshape(3, 1))

        return gradients

    def compute_gradients(xs, ys):
        class GradientOptimiser:
            counter = 2  # m.backward visits the layers in reverse order
            gradients1 = None
            gradients2 = None
            def __call__(self, w, wg, b, bg):
                # Capture each layer's weight gradients without updating anything
                if self.counter > 1:
                    self.gradients2 = wg  # the last layer (l2) is visited first
                else:
                    self.gradients1 = wg
                self.counter -= 1
                return (w, b)
        output = forward_pass(xs)
        optimiser = GradientOptimiser()
        m.backward(error.prime(l2.output, ys), optimiser)
        return np.concatenate((optimiser.gradients1.ravel(), optimiser.gradients2.ravel()))

    # Shift inputs by their column maxima and scale targets into [0, 1]
    xs = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
    xs = xs - np.amax(xs, axis=0)
    ys = np.array(([75], [82], [93]), dtype=float)
    ys = ys / 100

    # Calculate numerical gradients
    numerical_gradients = compute_numerical_gradient(xs, ys)
    # Calculate 'normal' gradients
    gradients = compute_gradients(xs, ys)
    # Calculate the ratio between the difference and the sum of vector norms
    ratio = np.linalg.norm(gradients - numerical_gradients) /\
               np.linalg.norm(gradients + numerical_gradients)
    assert ratio < 1e-07
     
Example #11
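This script references populations `p1` and `p3` that are defined earlier in the original file. A plausible reconstruction is sketched below; the sizes are inferred from the weight shapes in the layers, and `v.DEFAULT_NEURON_PARAMETERS` stands in for the full parameter dict as an assumption:

p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
p3 = pynn.Population(4, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))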
p5 = pynn.Population(
    2,
    pynn.IF_cond_exp(
        **{
            "tau_syn_I": 5,
            "tau_refrac": 0,
            "v_thresh": -50,
            "v_rest": -65,
            "tau_syn_E": 5,
            "v_reset": -65,
            "tau_m": 20,
            "e_rev_I": -70,
            "i_offset": 0,
            "cm": 1,
            "e_rev_E": 0
        }))
layer0 = v.Dense(p1,
                 p3,
                 weights=np.random.normal(1.0, 1.0, (2, 4)),
                 biases=0.0)
layer1 = v.Dense(p3,
                 p5,
                 weights=np.random.normal(1.0, 1.0, (4, 2)),
                 biases=0.0)
l_decode = v.Decode(p5)
model = v.Model(layer0, layer1, l_decode)

optimiser = v.GradientDescentOptimiser(0.1, simulation_time=50.0)
if __name__ == "__main__":
    v.Main(model).train(optimiser)
Example #12
            "cm": 1,
            "e_rev_E": 0
        }))
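# The population definitions are elided from this excerpt; the ones below
# are assumptions, with sizes inferred from the weight shapes used by the
# layers that follow.
p1 = pynn.Population(100, pynn.IF_cond_exp(**params))  # input
p3 = pynn.Population(20, pynn.IF_cond_exp(**params))
p5 = pynn.Population(20, pynn.IF_cond_exp(**params))   # first Replicate branch
p7 = pynn.Population(10, pynn.IF_cond_exp(**params))
p9 = pynn.Population(20, pynn.IF_cond_exp(**params))   # second Replicate branch
p11 = pynn.Population(10, pynn.IF_cond_exp(**params))
p13 = pynn.Population(20, pynn.IF_cond_exp(**params))  # Merge target
p15 = pynn.Population(10, pynn.IF_cond_exp(**params))  # decoded output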
layer0 = v.Dense(p1,
                 p3,
                 weights=np.random.normal(1.0, 1.0, (100, 20)),
                 biases=0.0)
layer3 = v.Replicate(p3, (p5, p9),
                     weights=(np.random.normal(1.0, 1.0, (20, 20)),
                              np.random.normal(1.0, 1.0, (20, 20))),
                     biases=0.0)
layer1 = v.Dense(p5,
                 p7,
                 weights=np.random.normal(1.0, 1.0, (20, 10)),
                 biases=0.0)
layer2 = v.Dense(p9,
                 p11,
                 weights=np.random.normal(1.0, 1.0, (20, 10)),
                 biases=0.0)
layer4 = v.Merge((p7, p11), p13)
layer5 = v.Dense(p13,
                 p15,
                 weights=np.random.normal(1.0, 1.0, (20, 10)),
                 biases=0.0)
l_decode = v.Decode(p15)
model = v.Model(layer0, layer1, layer2, layer3, layer4, layer5, l_decode)

optimiser = v.GradientDescentOptimiser(0.1, simulation_time=50.0)
if __name__ == "__main__":
    v.Main(model).train(optimiser)