Example #1
File: test_neurons.py  Project: nengo/nengo
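These snippets are lifted from nengo's test suite without the module's import header. A plausible set of imports for the examples below, assuming the identifiers sit where recent nengo releases keep them (the rng, allclose, generic, nl_nodirect, and NonDirectNeuronType parameters are pytest fixtures supplied by nengo's own conftest, not imports):

import numpy as np
import pytest

from nengo.exceptions import ValidationError
from nengo.neurons import NeuronType, Sigmoid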
def test_current(rng):
    neuron_type = NeuronType()
    n_neurons = 20
    gain = rng.rand(n_neurons)
    bias = rng.rand(n_neurons)

    # 3 samples
    x = rng.rand(3)
    current = neuron_type.current(x, gain, bias)
    assert np.allclose(current, gain * x.reshape(-1, 1) + bias)
    assert current.shape == (3, n_neurons)

    # 10 samples, different values for each neuron
    x = rng.rand(10, n_neurons)
    current = neuron_type.current(x, gain, bias)
    assert np.allclose(current, gain * x + bias)
    assert current.shape == (10, n_neurons)

    with pytest.raises(ValidationError):
        # Incorrect second dimension
        x = rng.rand(10, 2)
        current = neuron_type.current(x, gain, bias)

    with pytest.raises(ValidationError):
        # Too many dimensions
        x = rng.rand(10, n_neurons, 1)
        current = neuron_type.current(x, gain, bias)
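For reference, the broadcasting that the assertions above encode can be reproduced with plain NumPy. This is only a sketch of the expected arithmetic, not nengo's implementation of NeuronType.current:

import numpy as np

rng = np.random.RandomState(0)
n_neurons = 20
gain = rng.rand(n_neurons)
bias = rng.rand(n_neurons)

# A 1-D input of shape (n_samples,) holds one scalar per sample, so it is
# reshaped into a column and broadcast across all neurons.
x = rng.rand(3)
current = gain * x.reshape(-1, 1) + bias    # shape (3, n_neurons)

# A 2-D input of shape (n_samples, n_neurons) already carries one value per
# neuron, so the affine map gain * x + bias applies elementwise.
x = rng.rand(10, n_neurons)
current = gain * x + bias                   # shape (10, n_neurons)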
Example #2
def test_gain_bias(rng, NonDirectNeuronType, generic, allclose):
    if NonDirectNeuronType == Sigmoid and generic:
        # the generic method doesn't work with sigmoid neurons (because they're
        # always positive). that's not a failure, because the sigmoid neurons
        # never need to use the generic method normally, so we'll just skip
        # it for this test.
        return

    n = 100
    max_rates = rng.uniform(300, 400, size=n)
    intercepts = rng.uniform(-0.5, 0.5, size=n)
    neuron_type = NonDirectNeuronType()
    tolerance = 0.1 if generic else 1e-8

    if generic:
        gain, bias = NeuronType.gain_bias(neuron_type, max_rates, intercepts)
    else:
        gain, bias = neuron_type.gain_bias(max_rates, intercepts)

    assert allclose(neuron_type.rates(1, gain, bias),
                    max_rates,
                    atol=tolerance)

    if NonDirectNeuronType == Sigmoid:
        threshold = 0.5 / neuron_type.tau_ref
    else:
        threshold = 0

    x = (intercepts - tolerance)[np.newaxis, :]
    assert np.all(neuron_type.rates(x, gain, bias) <= threshold)
    x = (intercepts + tolerance)[np.newaxis, :]
    assert np.all(neuron_type.rates(x, gain, bias) > threshold)

    if generic:
        max_rates0, intercepts0 = NeuronType.max_rates_intercepts(
            neuron_type, gain, bias)
    else:
        max_rates0, intercepts0 = neuron_type.max_rates_intercepts(gain, bias)

    assert allclose(max_rates, max_rates0, atol=tolerance)
    assert allclose(intercepts, intercepts0, atol=tolerance)
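The round trip the test checks (max_rates, intercepts → gain, bias → max_rates, intercepts) is easiest to see for a rectified-linear rate model, where it has a closed form. A minimal sketch under that assumption; it is not the solver any particular nengo NeuronType uses:

import numpy as np

def relu_rates(x, gain, bias):
    # Rectified-linear rate model: fire at gain * x + bias once that is positive.
    return np.maximum(gain * x + bias, 0.0)

rng = np.random.RandomState(0)
n = 100
max_rates = rng.uniform(300, 400, size=n)
intercepts = rng.uniform(-0.5, 0.5, size=n)

# Solve rates(intercepts) == 0 and rates(1) == max_rates for gain and bias:
#   gain * e + bias == 0        =>  bias = -gain * e
#   gain * 1 + bias == r_max    =>  gain = r_max / (1 - e)
gain = max_rates / (1 - intercepts)
bias = -gain * intercepts

assert np.allclose(relu_rates(1, gain, bias), max_rates)
assert np.all(relu_rates(intercepts - 1e-3, gain, bias) == 0)
assert np.all(relu_rates(intercepts + 1e-3, gain, bias) > 0)

Neuron types without such a closed form go through the generic NeuronType.gain_bias path, which appears to solve for gain and bias numerically; that is presumably why the test relaxes the tolerance from 1e-8 to 0.1 when generic is set.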
Example #3
def test_current(rng, allclose):
    neuron_type = NeuronType()
    n_neurons = 20
    gain = rng.rand(n_neurons)
    bias = rng.rand(n_neurons)

    # 3 samples
    x = rng.rand(3)
    current = neuron_type.current(x, gain, bias)
    assert allclose(current, gain * x.reshape(-1, 1) + bias)
    assert current.shape == (3, n_neurons)

    # 10 samples, different values for each neuron
    x = rng.rand(10, n_neurons)
    current = neuron_type.current(x, gain, bias)
    assert allclose(current, gain * x + bias)
    assert current.shape == (10, n_neurons)

    with pytest.raises(ValidationError):
        # Incorrect second dimension
        x = rng.rand(10, 2)
        current = neuron_type.current(x, gain, bias)

    with pytest.raises(ValidationError):
        # Too many dimensions
        x = rng.rand(10, n_neurons, 1)
        current = neuron_type.current(x, gain, bias)
Example #4
File: test_neurons.py  Project: nengo/nengo
def test_gain_bias(rng, nl_nodirect, generic):
    if nl_nodirect == Sigmoid and generic:
        # the generic method doesn't work with sigmoid neurons (because they're
        # always positive). that's not a failure, because the sigmoid neurons
        # never need to use the generic method normally, so we'll just skip
        # it for this test.
        return

    n = 100
    max_rates = rng.uniform(300, 400, size=n)
    intercepts = rng.uniform(-0.5, 0.5, size=n)
    nl = nl_nodirect()
    tolerance = 0.1 if generic else 1e-8

    if generic:
        gain, bias = NeuronType.gain_bias(nl, max_rates, intercepts)
    else:
        gain, bias = nl.gain_bias(max_rates, intercepts)

    assert np.allclose(nl.rates(1, gain, bias), max_rates, atol=tolerance)

    if nl_nodirect == Sigmoid:
        threshold = 0.5 / nl.tau_ref
    else:
        threshold = 0

    x = (intercepts - tolerance)[np.newaxis, :]
    assert np.all(nl.rates(x, gain, bias) <= threshold)
    x = (intercepts + tolerance)[np.newaxis, :]
    assert np.all(nl.rates(x, gain, bias) > threshold)

    if generic:
        max_rates0, intercepts0 = (
            NeuronType.max_rates_intercepts(nl, gain, bias))
    else:
        max_rates0, intercepts0 = nl.max_rates_intercepts(gain, bias)

    assert np.allclose(max_rates, max_rates0, atol=tolerance)
    assert np.allclose(intercepts, intercepts0, atol=tolerance)
Example #5
def test_gain_bias(rng, nl_nodirect, generic):
    if nl_nodirect == Sigmoid and generic:
        # the generic method doesn't work with sigmoid neurons (because they're
        # always positive). that's not a failure, because the sigmoid neurons
        # never need to use the generic method normally, so we'll just skip
        # it for this test.
        return

    n = 100
    max_rates = rng.uniform(300, 400, size=n)
    intercepts = rng.uniform(-0.5, 0.5, size=n)
    nl = nl_nodirect()
    tolerance = 0.1 if generic else 1e-8

    if generic:
        gain, bias = NeuronType.gain_bias(nl, max_rates, intercepts)
    else:
        gain, bias = nl.gain_bias(max_rates, intercepts)

    assert np.allclose(nl.rates(np.ones(n), gain, bias),
                       max_rates,
                       atol=tolerance)

    if nl_nodirect == Sigmoid:
        threshold = 0.5 / nl.tau_ref
    else:
        threshold = 0

    assert np.all(nl.rates(intercepts - tolerance, gain, bias) <= threshold)
    assert np.all(nl.rates(intercepts + tolerance, gain, bias) > threshold)

    if generic:
        max_rates0, intercepts0 = (NeuronType.max_rates_intercepts(
            nl, gain, bias))
    else:
        max_rates0, intercepts0 = nl.max_rates_intercepts(gain, bias)

    assert np.allclose(max_rates, max_rates0, atol=tolerance)
    assert np.allclose(intercepts, intercepts0, atol=tolerance)