Example #1
# Imports assumed by all snippets below (module paths are a best guess
# from the pynn.* and v.* usage; the original snippets omit them):
import numpy as np
import pyNN.nest as pynn
import volrpynn.nest as v
def test_nest_input_projection():
    p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l = v.Dense(p1, p2, v.ReLU(), weights=1)
    m = v.Model(l)
    t = v.LinearTranslation()
    assert np.allclose(l.get_weights(), np.ones((2, 2)))
    # The input projection should carry the translated unit weight
    assert m.input_projection[0].weight == t.weights(1, 1)
    assert m.input_projection[1].weight == t.weights(1, 1)
    m.predict([1, 1], 50)
    # Identical inputs and weights: the two neurons should spike similarly
    spiketrains = l.output
    assert abs(len(spiketrains[0]) - len(spiketrains[1])) <= 20
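The assertions compare simulator weights against t.weights(...) rather than the raw model weights, presumably because weights on the NEST side live in a different unit domain than the model's. A hypothetical stand-in with the same call shape (weights plus fan-in) as v.LinearTranslation().weights; the scale factor is invented purely for illustration:

# Illustrative only: mirrors the call shape of t.weights(w, n_in),
# not volrpynn's actual translation coefficients.
def translate_weights(w, n_in, scale=0.067):
    return scale * w / n_in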
Example #2
def test_nest_dense_projection():
    p1 = pynn.Population(12, pynn.SpikeSourcePoisson(rate=10))
    p2 = pynn.Population(10, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights=1)
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    assert len(spiketrains) == 10
    avg_len = np.array(list(map(len, spiketrains))).mean()
    # With uniform weights and Poisson input, all neurons should be
    # roughly equally active
    for train in spiketrains:
        assert abs(len(train) - avg_len) <= 1
Example #3
def test_nest_dense_restore():
    p1 = pynn.Population(12, pynn.IF_cond_exp())
    p2 = pynn.Population(10, pynn.IF_cond_exp())
    d = v.Dense(p1, p2, v.ReLU(), weights=2)
    d.set_weights(-1)
    t = v.LinearTranslation()
    assert np.array_equal(d.projection.get('weight', format='array'),
                          t.weights(np.ones((12, 10)) * -1, 12))
    d.projection.set(weight=1)  # Simulate reset()
    assert np.array_equal(d.projection.get('weight', format='array'),
                          np.ones((12, 10)))
    # restore_weights should re-apply the cached layer weights
    d.restore_weights()
    assert np.array_equal(d.projection.get('weight', format='array'),
                          t.weights(np.ones((12, 10)) * -1, 12))
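The restore step works because the layer keeps its own copy of the weights, independent of simulator state. A minimal sketch of that cache-and-restore idiom, assuming nothing about volrpynn's internals (the class name is hypothetical; projection.set follows the PyNN call used above):

class CachedProjection:
    def __init__(self, projection):
        self.projection = projection
        self._cache = None

    def set_weights(self, w):
        self._cache = w                # remember the intended weights
        self.projection.set(weight=w)  # push them into the simulator

    def restore_weights(self):
        # Re-apply the cached weights, e.g. after the simulator was reset
        self.projection.set(weight=self._cache)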
Example #4
def test_nest_model_backwards():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(3, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_linear, weights=1)
    m = v.Model(l1)
    xs = np.array([12, 12])
    m.predict(xs, 50)

    def optimise(w, g, b, bg):
        # Plain gradient step without a learning rate
        return w - g, b - bg

    m.backward([0, 1, 1], optimise)
    expected_weights = np.array([[1, 0, 0], [1, 0, 0]])
    assert np.allclose(l1.get_weights(), expected_weights, atol=0.2)
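Optimisers handed to backward() are plain callables taking (weights, weight gradient, biases, bias gradient) and returning the updated (weights, biases). A sketch of an SGD-style step with that signature; the lr parameter is an illustrative addition, not part of the tests above:

# Same signature the tests pass to backward(); lr is hypothetical.
def sgd_step(w, g, b, bg, lr=0.1):
    return w - lr * g, b - lr * bg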
Example #5
def test_nest_model_backwards_reset():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_normalised, weights=1)
    m = v.Model(l1)
    xs1 = np.array([10, 10])
    xs2 = np.array([10, 10])
    # First pass
    m.predict(xs1, 50)
    m.backward([0, 1], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[1, 0], [1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)
    # Second pass: must start from the updated weights,
    # not from stale state left over after the first pass
    m.predict(xs2, 50)
    m.backward([1, 0], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[-1, 0], [-1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)
Example #6
def test_nest_dense_numerical_gradient():
    # Test idea from https://github.com/stephencwelch/Neural-Networks-Demystified/blob/master/partSix.py
    e = 1e-4  # perturbation for the central-difference estimate

    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(3, pynn.IF_cond_exp())
    p3 = pynn.Population(1, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.Sigmoid(), decoder=lambda x: x)
    l2 = v.Dense(p2, p3, v.Sigmoid(), decoder=lambda x: x)
    m = v.Model(l1, l2)
    error = v.SumSquared()

    def forward_pass(xs):
        """Analytical sigmoid forward pass (bypasses the simulator)."""
        l1.input_cache = xs
        l1.output = l2.input_cache = v.Sigmoid()(np.matmul(xs, l1.weights))
        l2.output = v.Sigmoid()(np.matmul(l2.input_cache, l2.weights))
        return l2.output

    def compute_numerical_gradient(xs, ys):
        """Compute numerical gradients for the weights of both layers."""
        weights1 = l1.get_weights().ravel() # 1D
        weights2 = l2.get_weights().ravel()
        weights = np.concatenate((weights1, weights2))
        gradients = np.zeros(weights.shape)

        def initialise_with_distortion(index, delta):
            distortion = np.copy(weights)
            distortion[index] = distortion[index] + delta
            l1.set_weights(distortion[:len(weights1)].reshape(l1.weights.shape))
            l2.set_weights(distortion[len(weights1):].reshape(l2.weights.shape))
            forward_pass(xs)

        # Central difference: gradient ≈ (J(w + e) - J(w - e)) / (2 * e)
        for index in range(len(weights)):
            initialise_with_distortion(index, e)
            error1 = -error(l2.output, ys)
            initialise_with_distortion(index, -e)
            error2 = -error(l2.output, ys)
            gradients[index] = (error2 - error1) / (2 * e)
        
        # Reset weights
        l1.set_weights(weights1.reshape(2, 3))
        l2.set_weights(weights2.reshape(3, 1))

        return gradients

    def compute_gradients(xs, ys):
        class GradientOptimiser:
            # backward() traverses layers from output to input, so the
            # first call receives layer 2's gradients
            counter = 2
            gradients1 = None
            gradients2 = None

            def __call__(self, w, wg, b, bg):
                if self.counter > 1:
                    self.gradients2 = wg
                else:
                    self.gradients1 = wg
                self.counter -= 1
                return (w, b)

        forward_pass(xs)
        optimiser = GradientOptimiser()
        m.backward(error.prime(l2.output, ys), optimiser)
        return np.concatenate((optimiser.gradients1.ravel(),
                               optimiser.gradients2.ravel()))

    # Shift inputs so each feature peaks at zero; scale targets into [0, 1]
    xs = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
    xs = xs - np.amax(xs, axis=0)
    ys = np.array(([75], [82], [93]), dtype=float)
    ys = ys / 100

    # Calculate numerical gradients
    numerical_gradients = compute_numerical_gradient(xs, ys)
    # Calculate 'normal' gradients
    gradients = compute_gradients(xs, ys)
    # Ratio of the norm of the difference to the norm of the sum
    ratio = np.linalg.norm(gradients - numerical_gradients) /\
            np.linalg.norm(gradients + numerical_gradients)
    assert ratio < 1e-07
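The pass criterion is the standard relative-difference check: the norm of the difference between the backprop and numerical gradients, divided by the norm of their sum. As a standalone helper (a sketch; the function name is ours):

import numpy as np

def gradient_check_ratio(analytic, numeric):
    # Ratios near 1e-8 mean the backward pass matches the
    # central-difference estimate; the test above demands < 1e-7.
    return np.linalg.norm(analytic - numeric) / np.linalg.norm(analytic + numeric)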
Example #7
p5 = pynn.Population(
    2,
    pynn.IF_cond_exp(
        **{
            "tau_syn_I": 5,
            "tau_refrac": 0,
            "v_thresh": -50,
            "v_rest": -65,
            "tau_syn_E": 5,
            "v_reset": -65,
            "tau_m": 20,
            "e_rev_I": -70,
            "i_offset": 0,
            "cm": 1,
            "e_rev_E": 0
        }))
layer0 = v.Dense(p1,
                 p3,
                 weights=np.random.normal(1.0, 1.0, (2, 4)),
                 biases=0.0)
layer1 = v.Dense(p3,
                 p5,
                 weights=np.random.normal(1.0, 1.0, (4, 2)),
                 biases=0.0)
l_decode = v.Decode(p5)
model = v.Model(layer0, layer1, l_decode)

optimiser = v.GradientDescentOptimiser(0.1, simulation_time=50.0)
if __name__ == "__main__":
    v.Main(model).train(optimiser)
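Example #7 references p1 and p3 without showing their definitions. Population sizes consistent with the (2, 4) and (4, 2) weight shapes would be 2 and 4 neurons; a hypothetical reconstruction:

# Hypothetical, inferred from the weight shapes of layer0 and layer1;
# the original snippet defines these elsewhere.
p1 = pynn.Population(2, pynn.IF_cond_exp())
p3 = pynn.Population(4, pynn.IF_cond_exp())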
Example #8
    pynn.IF_cond_exp(
        **{
            "tau_syn_I": 5,
            "tau_refrac": 0,
            "v_thresh": -50,
            "v_rest": -65,
            "tau_syn_E": 5,
            "v_reset": -65,
            "tau_m": 20,
            "e_rev_I": -70,
            "i_offset": 0,
            "cm": 1,
            "e_rev_E": 0
        }))
layer0 = v.Dense(p1,
                 p3,
                 weights=np.random.normal(1.0, 1.0, (100, 20)),
                 biases=0.0)
layer3 = v.Replicate(p3, (p5, p9),
                     weights=(np.random.normal(1.0, 1.0, (20, 20)),
                              np.random.normal(1.0, 1.0, (20, 20))),
                     biases=0.0)
layer1 = v.Dense(p5,
                 p7,
                 weights=np.random.normal(1.0, 1.0, (20, 10)),
                 biases=0.0)
layer2 = v.Dense(p9,
                 p11,
                 weights=np.random.normal(1.0, 1.0, (20, 10)),
                 biases=0.0)
layer4 = v.Merge((p7, p11), p13)
layer5 = v.Dense(p13,
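Example #8 likewise uses populations whose definitions are not shown (the opening pynn.IF_cond_exp parameter block belongs to one of them). Sizes consistent with the weight shapes above would be, hypothetically:

# Hypothetical, inferred from the weight shapes above:
p1 = pynn.Population(100, pynn.IF_cond_exp())   # layer0 input (100, 20)
p3 = pynn.Population(20, pynn.IF_cond_exp())    # shared hidden layer
p5 = pynn.Population(20, pynn.IF_cond_exp())    # Replicate branch 1 (20, 20)
p9 = pynn.Population(20, pynn.IF_cond_exp())    # Replicate branch 2 (20, 20)
p7 = pynn.Population(10, pynn.IF_cond_exp())    # layer1 output (20, 10)
p11 = pynn.Population(10, pynn.IF_cond_exp())   # layer2 output (20, 10)
p13 = pynn.Population(20, pynn.IF_cond_exp())   # Merge of p7 and p11 (10 + 10)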