Example #1
def test_eval_points(Simulator, nl_nodirect, plt, seed, rng):
    n = 100
    d = 5
    filter = 0.08

    eval_points = np.logspace(np.log10(300), np.log10(5000), 11)
    eval_points = np.round(eval_points).astype('int')
    max_points = eval_points.max()
    n_trials = 1

    rmses = np.nan * np.zeros((len(eval_points), n_trials))
    for j in range(n_trials):
        points = rng.normal(size=(max_points, d))
        points *= (rng.uniform(size=max_points)
                   / norm(points, axis=-1))[:, None]

        rng_j = np.random.RandomState(348 + j)
        seed = 903824 + j

        # generate random input in unit hypersphere
        x = rng_j.normal(size=d)
        x *= rng_j.uniform() / norm(x)

        for i, n_points in enumerate(eval_points):
            model = nengo.Network(seed=seed)
            with model:
                model.config[nengo.Ensemble].neuron_type = nl_nodirect()
                u = nengo.Node(output=x)
                a = nengo.Ensemble(n * d,
                                   dimensions=d,
                                   eval_points=points[:n_points])
                nengo.Connection(u, a, synapse=0)
                up = nengo.Probe(u)
                ap = nengo.Probe(a)

            with Timer() as timer:
                sim = Simulator(model)
            sim.run(10 * filter)

            t = sim.trange()
            xt = nengo.synapses.filtfilt(sim.data[up], filter, dt=sim.dt)
            yt = nengo.synapses.filtfilt(sim.data[ap], filter, dt=sim.dt)
            t0 = 5 * filter
            t1 = 7 * filter
            tmask = (t > t0) & (t < t1)

            rmses[i, j] = rms(yt[tmask] - xt[tmask])
            print("done %d (%d) in %0.3f s" % (n_points, j, timer.duration))

    # subtract out mean for each model
    rmses_norm = rmses - rmses.mean(0, keepdims=True)

    mean = rmses_norm.mean(1)
    low = rmses_norm.min(1)
    high = rmses_norm.max(1)
    plt.semilogx(eval_points, mean, 'k-')
    plt.semilogx(eval_points, high, 'r-')
    plt.semilogx(eval_points, low, 'b-')
    plt.xlim([eval_points[0], eval_points[-1]])
    plt.xticks(eval_points, eval_points)
Example #2
File: idmp.py Project: tbekolay/phd
def similarity(v1, v2):
    # v1 and v2 are vectors
    eps = np.nextafter(0, 1)  # smallest float above zero
    dot = np.dot(v1, v2)
    dot /= max(npext.norm(v1), eps)
    dot /= max(npext.norm(v2), eps)
    return dot
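
A minimal usage sketch for this cosine-similarity helper (assuming npext is nengo.utils.numpy, which provides the axis-aware norm used above):

import numpy as np
import nengo.utils.numpy as npext  # assumed import; provides norm()

a = np.array([1.0, 0.0, 0.0])
b = np.array([1.0, 1.0, 0.0])
print(similarity(a, b))            # ~0.7071, the cosine of 45 degrees
print(similarity(a, np.zeros(3)))  # 0.0; the eps guard avoids division by zero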
Example #3
def cd_encoders_biases(n_encoders, trainX, trainY, rng=np.random, mask=None,
                       norm_min=0.05, norm_tries=10):
    """Constrained difference (CD) method for encoders from data [1]_.

    Parameters
    ==========
    n_encoders : int
        Number of encoders to generate.
    trainX : (n_samples, n_dimensions) array-like
        Training features.
    trainY : (n_samples,) array-like
        Training labels.

    Returns
    =======
    encoders : (n_encoders, n_dimensions) array
        Generated encoders.
    biases : (n_encoders,) array
        Generated biases. These are biases assuming `f = G[E * X + b]`,
        and are therefore more like Nengo's `intercepts`.

    References
    ==========
    .. [1] McDonnell, M. D., Tissera, M. D., Vladusich, T., Van Schaik, A.,
       Tapson, J., & Schwenker, F. (2015). Fast, simple and accurate
       handwritten digit classification by training shallow neural network
       classifiers with the "Extreme learning machine" algorithm. PLoS ONE,
       10(8), 1-20. doi:10.1371/journal.pone.0134254
    """
    assert trainX.shape[0] == trainY.size
    trainX = trainX.reshape(trainX.shape[0], -1)
    trainY = trainY.ravel()
    d = trainX.shape[1]
    classes = np.unique(trainY)
    assert mask is None or mask.shape == (n_encoders, d)

    inds = [(trainY == label).nonzero()[0] for label in classes]
    train_norm = npext.norm(trainX, axis=1).mean()

    encoders = np.zeros((n_encoders, d))
    biases = np.zeros(n_encoders)
    for k in range(n_encoders):
        for _ in range(norm_tries):
            i, j = rng.choice(len(classes), size=2, replace=False)
            a, b = trainX[rng.choice(inds[i])], trainX[rng.choice(inds[j])]
            dab = a - b
            if mask is not None:
                dab *= mask[k]
            ndab = npext.norm(dab)**2
            if ndab >= norm_min * train_norm:
                break
        else:
            raise ValueError("Cannot find valid encoder")

        encoders[k] = (2. / ndab) * dab
        biases[k] = np.dot(a + b, dab) / ndab

    return encoders, biases
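
A hypothetical call on synthetic two-class data, just to illustrate the expected shapes (the sample counts and seed below are made up):

import numpy as np

rng = np.random.RandomState(0)
trainX = rng.normal(size=(100, 10))   # 100 samples, 10 features
trainY = rng.randint(0, 2, size=100)  # two class labels
encoders, biases = cd_encoders_biases(20, trainX, trainY, rng=rng)
assert encoders.shape == (20, 10) and biases.shape == (20,)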
Example #4
def test_sqrt_beta(n, m, rng):
    num_samples = 1000
    num_bins = 5

    vectors = rng.randn(num_samples, n + m)
    vectors /= npext.norm(vectors, axis=1, keepdims=True)
    expectation, _ = np.histogram(npext.norm(vectors[:, :m], axis=1), bins=num_bins)

    dist = SqrtBeta(n, m)
    samples = dist.sample(num_samples, 1, rng=rng)
    histogram, _ = np.histogram(samples, bins=num_bins)

    assert np.all(np.abs(np.asfarray(histogram - expectation) / num_samples) < 0.16)
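
The test leans on a standard fact: if v is uniform on the unit (n+m-1)-sphere, the squared norm of its first m coordinates follows Beta(m/2, n/2), so the norm itself follows SqrtBeta(n, m). A standalone check of that identity (assumes scipy is available; not part of the test):

import numpy as np
from scipy.stats import beta

n, m = 7, 3
rng = np.random.RandomState(0)
v = rng.randn(100000, n + m)
v /= np.linalg.norm(v, axis=1, keepdims=True)
sq = np.sum(v[:, :m] ** 2, axis=1)
for x in (0.1, 0.3, 0.5):
    # empirical CDF of the squared subvector norm vs. Beta(m/2, n/2)
    print(np.mean(sq <= x), beta(m / 2.0, n / 2.0).cdf(x))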
Example #5
def test_sqrt_beta(n, m, rng):
    num_samples = 250
    num_bins = 5

    vectors = rng.randn(num_samples, n + m)
    vectors /= npext.norm(vectors, axis=1, keepdims=True)
    expectation, _ = np.histogram(
        npext.norm(vectors[:, :m], axis=1), bins=num_bins)

    dist = dists.SqrtBeta(n, m)
    samples = dist.sample(num_samples, 1, rng=rng)
    hist, _ = np.histogram(samples, bins=num_bins)

    assert np.all(np.abs(np.asfarray(hist - expectation) / num_samples) < 0.16)
Example #6
    def test_derivative(f, df):

        get_network = lambda **kwargs: Network(
            weights, f=f, df=df, biases=None, noise=0, **kwargs)

        bp_learner = BPLearner(get_network(),
                               squared_cost,
                               rms_error,
                               eta=eta,
                               alpha=alpha,
                               name='BP')
        bp_learner.weight_norms = []

        fas_learner = FASkipLearner(get_network(),
                                    squared_cost,
                                    rms_error,
                                    eta=eta,
                                    alpha=alpha,
                                    name='DFA')
        fas_learner.Bs = directBs

        learners = [bp_learner, fas_learner]
        for learner in learners:
            learner.train(1, batch_fn)

        for learner in learners:
            print(", ".join("||W%d|| = %0.3f" % (i, norm(w))
                            for i, w in enumerate(learner.network.weights)))

        return learners
Example #7
def test_eval_points_scaling(Simulator, sample, radius, seed, rng, scale):
    eval_points = UniformHypersphere()
    if sample:
        eval_points = eval_points.sample(500, 3, rng=rng)

    model = nengo.Network(seed=seed)
    with model:
        a = nengo.Ensemble(
            1,
            3,
            encoders=np.ones((1, 3)),
            intercepts=nengo.dists.Choice([-1]),
            radius=radius,
        )
        b = nengo.Ensemble(1, 3)
        con = nengo.Connection(a,
                               b,
                               eval_points=eval_points,
                               scale_eval_points=scale)

    with Simulator(model) as sim:
        dists = npext.norm(sim.data[con].eval_points, axis=1)
    limit = radius if scale else 1.0
    assert np.all(dists <= limit)
    assert np.any(dists >= 0.9 * limit)
Example #8
def test_sqrt_beta(n, m):
    np.random.seed(33)

    num_samples = 250
    num_bins = 5

    vectors = np.random.randn(num_samples, n + m)
    vectors /= npext.norm(vectors, axis=1, keepdims=True)
    expectation, _ = np.histogram(npext.norm(vectors[:, :m], axis=1),
                                  bins=num_bins)

    dist = dists.SqrtBeta(n, m)
    samples = dist.sample(num_samples, 1)
    hist, _ = np.histogram(samples, bins=num_bins)

    assert np.all(np.abs(np.asfarray(hist - expectation) / num_samples) < 0.16)
Example #9
def test_hypersphere_surface(dimensions, rng):
    n = 150 * dimensions
    dist = dists.UniformHypersphere(surface=True)
    samples = dist.sample(n, dimensions, rng=rng)
    assert samples.shape == (n, dimensions)
    assert np.allclose(npext.norm(samples, axis=1), 1)
    assert np.allclose(np.mean(samples, axis=0), 0, atol=0.25 / dimensions)
Example #10
def test_hypersphere_surface(cls, dimensions, rng, allclose):
    n = 200 * dimensions
    dist = cls(surface=True)
    samples = dist.sample(n, dimensions, rng=rng)
    assert samples.shape == (n, dimensions)
    assert allclose(npext.norm(samples, axis=1), 1)
    assert allclose(np.mean(samples, axis=0), 0, atol=0.25 / dimensions)
Example #11
def test_hypersphere_volume(min_magnitude, d, rng):
    n = 150 * d
    dist = dists.UniformHypersphere(min_magnitude=min_magnitude)
    samples = dist.sample(n, d, rng=rng)
    assert samples.shape == (n, d)
    assert np.allclose(np.mean(samples, axis=0), 0, atol=0.1)
    assert np.all(npext.norm(samples, axis=1) >= min_magnitude)
Example #12
def test_state_norm(plt):
    # Choose a filter, timestep, and number of simulation timesteps
    sys = Alpha(0.1)
    dt = 0.000001
    length = 2000000

    # Modify the state-space to read out the state vector
    A, B, C, D = sys2ss(sys)
    old_C = C
    C = np.eye(len(A))
    D = np.zeros((len(A), B.shape[1]))

    response = np.empty((length, len(C)))
    for i in range(len(C)):
        # Simulate the state vector
        response[:, i] = impulse((A, B, C[i, :], D[i, :]), dt, length)

    # Check that the power of each state equals the H2-norm of each state
    # The analog case is the same after scaling since dt is approx 0.
    actual = norm(response, axis=0) * dt
    assert np.allclose(actual, state_norm(cont2discrete(sys, dt)))
    assert np.allclose(actual, state_norm(sys) * np.sqrt(dt))

    plt.figure()
    plt.plot(response[:, 0], label="$x_0$")
    plt.plot(response[:, 1], label="$x_1$")
    plt.plot(np.dot(response, old_C.T), label="$y$")
    plt.legend()
Example #13
def similarity(data, vocab, normalize=False):
    """Return the similarity between simulation data and Semantic Pointers.

    Computes the dot products between all Semantic Pointers in the Vocabulary
    and the simulation data for each timestep. If ``normalize=True``,
    normalizes all vectors to compute the cosine similarity.

    Parameters
    ----------
    data: (D,) or (T, D) array_like
        The *D*-dimensional data for *T* timesteps used for comparison.
    vocab: Vocabulary or array_like
        Vocabulary (or list of vectors) used to calculate the similarity
        values.
    normalize : bool, optional
        Whether to normalize all vectors, to compute the cosine similarity.
    """

    if isinstance(data, SemanticPointer):
        data = data.v

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        if isinstance(next(iter(vocab)), SemanticPointer):
            vocab = [p.v for p in vocab]
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary" %
                              (type(vocab).__name__),
                              attr='vocab')

    dots = np.dot(vectors, data.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data.T, axis=0, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        if len(dots.shape) == 1:
            vnorm = np.squeeze(vnorm)

        dots /= dnorm
        dots /= vnorm

    return dots.T
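
A small shape sketch for this function, passing a plain list of vectors in place of a Vocabulary (the dimensions here are made up):

import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(50, 16)        # T=50 timesteps of D=16 data
vocab = list(rng.randn(4, 16))  # four vocabulary vectors
s = similarity(data, vocab, normalize=True)
assert s.shape == (50, 4)       # one cosine value per timestep and vector
assert np.all(np.abs(s) <= 1 + 1e-12)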
Example #14
def ciw_encoders(
    n_encoders,
    trainX,
    trainY,
    rng=np.random,
    normalize_data=True,
    normalize_encoders=True,
):
    """Computed Input Weights (CIW) method for encoders from data.

    Parameters
    ----------
    n_encoders : int
        Number of encoders to generate.
    trainX : (n_samples, n_dimensions) array-like
        Training features.
    trainY : (n_samples,) array-like
        Training labels.

    Returns
    -------
    encoders : (n_encoders, n_dimensions) array
        Generated encoders.

    References
    ----------
    .. [1] McDonnell, M. D., Tissera, M. D., Vladusich, T., Van Schaik, A.,
       Tapson, J., & Schwenker, F. (2015). Fast, simple and accurate
       handwritten digit classification by training shallow neural network
       classifiers with the "Extreme learning machine" algorithm. PLoS ONE,
       10(8), 1-20. doi:10.1371/journal.pone.0134254
    """
    assert trainX.shape[0] == trainY.size
    trainX = trainX.reshape((trainX.shape[0], -1))
    trainY = trainY.ravel()
    classes = np.unique(trainY)

    assert n_encoders % len(classes) == 0
    n_enc_per_class = n_encoders // len(classes)  # integer count, used as an array size below

    # normalize
    if normalize_data:
        trainX = (trainX - trainX.mean()) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / (trainX.std(axis=0) + 1e-8)

    # generate
    encoders = []
    for label in classes:
        X = trainX[trainY == label]
        plusminus = rng.choice([-1, 1], size=(X.shape[0], n_enc_per_class))
        samples = np.dot(plusminus.T, X)
        encoders.append(samples)

    encoders = np.vstack(encoders)
    if normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    return encoders
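
As with the CD method above, a hypothetical call on synthetic data to show the contract: n_encoders must divide evenly among the classes, and the returned rows are unit length (the values below are made up):

import numpy as np

rng = np.random.RandomState(0)
trainX = rng.normal(size=(90, 8))
trainY = np.repeat([0, 1, 2], 30)                # three balanced classes
enc = ciw_encoders(30, trainX, trainY, rng=rng)  # 30 % 3 == 0
assert enc.shape == (30, 8)
assert np.allclose(np.linalg.norm(enc, axis=1), 1)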
Example #15
def test_hypersphere_surface(dimensions):
    n = 100 * dimensions
    dist = dists.UniformHypersphere(surface=True)
    samples = dist.sample(n, dimensions, np.random.RandomState(1))
    assert samples.shape == (n, dimensions)
    assert np.allclose(npext.norm(samples, axis=1), 1)
    assert np.allclose(
        np.mean(samples, axis=0), np.zeros(dimensions), atol=0.1)
Example #16
def test_sphere(d, rng):
    n = 200
    x = sphere.sample(n, d, rng)
    assert x.shape == (n, d)
    assert np.allclose(norm(x, axis=1), 1)

    f = _furthest(x)
    assert (f > 1.5).all()
Example #17
def test_hypersphere_surface(dimensions):
    n = 100 * dimensions
    dist = dists.UniformHypersphere(dimensions, surface=True)
    samples = dist.sample(n, np.random.RandomState(1))
    assert samples.shape == (n, dimensions)
    assert np.allclose(npext.norm(samples, axis=1), 1)
    assert np.allclose(
        np.mean(samples, axis=0), np.zeros(dimensions), atol=0.1)
Example #18
def error_layers_plots(dt, t, learners):
    vsynapse = Alpha(0.01, default_dt=dt)

    for learner in [l for l in learners if 'els' in l]:
        plt.figure()
        plt.subplot(211)
        dind = 0

        e = vsynapse.filtfilt(learner['e'])
        els = [vsynapse.filtfilt(el) for el in learner['els']]
        plt.plot(t, e[:, dind])
        [plt.plot(t, el[:, dind]) for el in els]

        plt.subplot(212)
        plt.plot(t, norm(e, axis=1))
        [plt.plot(t, norm(el, axis=1)) for el in els]

    plt.show()
Example #19
def build_ensemble(model, ens):

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up encoders
    if isinstance(ens.neuron_type, nengo.Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders,
                               ens.n_neurons,
                               ens.dimensions,
                               rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng,
                                                      model.intercept_limit)

    group = CxGroup(ens.n_neurons, label='%s' % ens)
    group.bias[:] = bias
    model.build(ens.neuron_type, ens.neurons, group)

    # set default filter just in case no other filter gets set
    group.configure_default_filter(model.inter_tau, dt=model.dt)

    if ens.noise is not None:
        raise NotImplementedError("Ensemble noise not implemented")

    # Scale the encoders
    if isinstance(ens.neuron_type, nengo.Direct):
        raise NotImplementedError("Direct neurons not implemented")
        # scaled_encoders = encoders
    else:
        # to keep scaling reasonable, we don't include the radius
        # scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]
        scaled_encoders = encoders * gain[:, np.newaxis]

    model.add_group(group)

    model.objs[ens]['in'] = group
    model.objs[ens]['out'] = group
    model.objs[ens.neurons]['in'] = group
    model.objs[ens.neurons]['out'] = group
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
Example #20
def test_ball(d, rng):
    n = 200
    x = ball.sample(n, d, rng)
    assert x.shape == (n, d)

    dist = norm(x, axis=1)
    assert (dist <= 1).all()

    f = _furthest(x)
    assert (f > dist + 0.5).all()
Example #21
def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If ``normalize=True``, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: Vocabulary or array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values.
    normalize : bool, optional (Default: False)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif npext.is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary" %
                              (type(vocab).__name__),
                              attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
Example #22
def ciw_encoders(n_encoders, trainX, trainY, rng=np.random,
                 normalize_data=True, normalize_encoders=True):
    """Computed Input Weights (CIW) method for encoders from data [1]_.

    Parameters
    ==========
    n_encoders : int
        Number of encoders to generate.
    trainX : (n_samples, n_dimensions) array-like
        Training features.
    trainY : (n_samples,) array-like
        Training labels.

    Returns
    =======
    encoders : (n_encoders, n_dimensions) array
        Generated encoders.

    References
    ==========
    .. [1] McDonnell, M. D., Tissera, M. D., Vladusich, T., Van Schaik, A.,
       Tapson, J., & Schwenker, F. (2015). Fast, simple and accurate
       handwritten digit classification by training shallow neural network
       classifiers with the "Extreme learning machine" algorithm. PLoS ONE,
       10(8), 1-20. doi:10.1371/journal.pone.0134254
    """
    assert trainX.shape[0] == trainY.size
    trainX = trainX.reshape(trainX.shape[0], -1)
    trainY = trainY.ravel()
    classes = np.unique(trainY)

    assert n_encoders % len(classes) == 0
    n_enc_per_class = n_encoders // len(classes)  # integer count, used as an array size below

    # normalize
    if normalize_data:
        trainX = (trainX - trainX.mean()) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / (trainX.std(axis=0) + 1e-8)

    # generate
    encoders = []
    for label in classes:
        X = trainX[trainY == label]
        plusminus = rng.choice([-1, 1], size=(X.shape[0], n_enc_per_class))
        samples = np.dot(plusminus.T, X)
        encoders.append(samples)

    encoders = np.vstack(encoders)
    if normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    return encoders
Example #23
File: utils.py Project: hunse/phd
def initial_w(shape,
              kind='',
              normkind=None,
              offset=0.0,
              scale=1.0,
              rng=np.random):
    assert len(shape) == 2
    kind = kind.lower()
    if kind == 'zeros':
        w = np.zeros(shape)
    elif kind == 'identity':
        min_shape = min(shape)
        ceili = lambda s: int(np.ceil(float(s) / min_shape))
        w = np.tile(np.eye(min_shape),
                    (ceili(shape[0]), ceili(shape[1])))[:shape[0], :shape[1]]
        w -= w.mean()
    elif kind == 'binary':
        w = 2. * rng.randint(0, 2, size=shape) - 1.
    elif kind == 'uniform':
        w = rng.uniform(-1, 1, size=shape)
    elif kind == 'norm_uniform':
        w = rng.uniform(-1, 1, size=shape)
        w /= norm(w)
    elif kind in ['normal', 'gaussian']:
        w = rng.normal(size=shape)
    elif kind in ['norm_normal', 'norm_gaussian']:
        w = rng.normal(size=shape)
        w /= norm(w)
    elif kind == 'ortho':
        # w = orthogonalize(rng.uniform(-1, 1, size=shape))
        w = orthogonalize(rng.normal(size=shape))
    elif kind == 'orthonorm':
        # w = orthogonalize(rng.uniform(-1, 1, size=shape))
        w = orthogonalize(rng.normal(size=shape))
        w /= norm(w)
    else:
        raise ValueError("Unrecognized kind %r" % kind)

    # normalize (let X ~ N(0, 1))
    if normkind == 'left':  # then WX ~ N(0, 1)
        w /= norm(w, axis=1, keepdims=True)
    elif normkind == 'right':  # then XW ~ N(0, 1)
        w /= norm(w, axis=0, keepdims=True)
    elif normkind == 'leftmean':  # then WX ~ N(0, 1)
        w /= norm(w, axis=1).mean()
    elif normkind == 'rightmean':  # then XW ~ N(0, 1)
        w /= norm(w, axis=0).mean()
    elif normkind:
        raise ValueError("Unrecognized norm kind %r" % normkind)

    if scale != 1.0:
        w *= scale
    if offset != 0.0:
        w += offset
    return w
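
A brief sketch of how the kind and normkind options compose (assuming norm is the axis-aware norm from nengo.utils.numpy; the shape is arbitrary):

import numpy as np

rng = np.random.RandomState(0)
w = initial_w((100, 50), kind='normal', normkind='rightmean', rng=rng)
# 'rightmean' rescales so the mean column norm is 1, keeping XW ~ N(0, 1)
print(w.shape, norm(w, axis=0).mean())  # (100, 50) 1.0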
Example #24
def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If `normalize=True`, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: spa.Vocabulary, array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values
    normalize : boolean (optional)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary"
                              % (vocab.__class__.__name__), attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
Example #25
def test_eval_points_scaling(Simulator, sample, radius, seed, rng):
    eval_points = UniformHypersphere()
    if sample:
        eval_points = eval_points.sample(500, 3, rng=rng)

    model = nengo.Network(seed=seed)
    with model:
        a = nengo.Ensemble(1, 3, eval_points=eval_points, radius=radius)

    with Simulator(model) as sim:
        dists = npext.norm(sim.data[a].eval_points, axis=1)
        assert np.all(dists <= radius)
        assert np.any(dists >= 0.9 * radius)
Example #26
def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens)
        )

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        scaled_encoders=scaled_encoders,
        max_rates=max_rates,
        intercepts=intercepts,
        gain=gain,
        bias=bias
    )

    # Create the object which will handle simulation of the LIF ensemble.  This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation.  The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
Example #27
def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders /= norm(encoders, axis=-1, keepdims=True)

    model = nengo.Network(label="_test_encoders")
    with model:
        ens = nengo.Ensemble(neurons=nengo.LIF(n_neurons),
                             dimensions=n_dimensions,
                             encoders=encoders,
                             label="A")
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Example #28
def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders /= norm(encoders, axis=-1, keepdims=True)

    args = {'label': 'A',
            'neurons': nengo.LIF(n_neurons),
            'dimensions': n_dimensions}

    model = nengo.Model('_test_encoders')
    ens = nengo.Ensemble(encoders=encoders, **args)
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Example #29
def test_sphere(d, ntm, rng):
    sphere = ScatteredHypersphere(surface=True, base=ntm)

    n = 1000
    x = sphere.sample(n, d, rng)
    assert x.shape == (n, d)
    assert abs(np.mean(x)) < 0.1 / d

    _compare_samples(x, UniformHypersphere(surface=True).sample(n, d, rng))

    assert np.allclose(norm(x, axis=1), 1)

    f = _furthest(x)
    assert (f > 1.5).all()
Example #30
    def sample(self, n, d=None, rng=np.random):
        assert d == self.n_dimensions

        if self.weighting == 'dimensions':
            # Sample directions uniformly in the full d-dimensional space,
            # then use the norm of each distribution's block of coordinates
            # as its weight, so each distribution is weighted in proportion
            # to the number of dimensions it occupies.
            samples = rng.randn(n, d)
            samples /= npext.norm(samples, axis=1, keepdims=True)
            weights = np.zeros((n, len(self.distributions)))
            start = 0
            for i, end in enumerate(np.cumsum(self.dimensions)):
                weights[:, i] = npext.norm(samples[:, start:end],
                                           axis=1,
                                           keepdims=False)
                start = end
        elif self.weighting == 'distributions':
            samples = rng.randn(n, len(self.distributions))
            samples /= npext.norm(samples, axis=1, keepdims=True)
            weights = samples
        else:
            # use the original weighting method if nothing is specified
            data = [
                dist.sample(n, d=self.dimensions[i], rng=rng) * self.weights[i]
                for i, dist in enumerate(self.distributions)
            ]

            return np.hstack(data)

        data = [
            dist.sample(n, d=self.dimensions[i], rng=rng) *
            weights[:, i].reshape((n, 1))
            for i, dist in enumerate(self.distributions)
        ]

        return np.hstack(data)
Example #31
def test_encoders(RefSimulator, dimensions, seed, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, dimensions))
        encoders = npext.array(encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    model = nengo.Network(label="_test_encoders", seed=seed)
    with model:
        ens = nengo.Ensemble(n_neurons=n_neurons,
                             dimensions=dimensions,
                             encoders=encoders,
                             label="A")

    with RefSimulator(model) as sim:
        assert np.allclose(encoders, sim.data[ens].encoders)
Example #32
def test_encoders(RefSimulator, dimensions, seed, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, dimensions))
        encoders = npext.array(encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    model = nengo.Network(label="_test_encoders", seed=seed)
    with model:
        ens = nengo.Ensemble(n_neurons=n_neurons,
                             dimensions=dimensions,
                             encoders=encoders,
                             label="A")
    sim = RefSimulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Example #33
def test_ball(d, ntm, rng):
    ball = ScatteredHypersphere(surface=False, base=ntm)

    n = 1000
    x = ball.sample(n, d, rng)
    assert x.shape == (n, d)
    assert abs(np.mean(x)) < 0.1 / d

    _compare_samples(x, UniformHypersphere(surface=False).sample(n, d, rng))

    dist = norm(x, axis=1)
    assert (dist <= 1).all()

    f = _furthest(x)
    assert (f > dist + 0.5).all()
Example #34
    def sample(self, n, d, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValueError("Dimensions must be a positive integer")

        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)

        if self.surface:
            return samples

        # Generate magnitudes for the vectors from a uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in d-space and not all bunched up at the centre of the sphere.
        samples *= rng.rand(n, 1)**(1.0 / d)

        return samples
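
The (1 / d) exponent is inverse-transform sampling: for a uniform distribution over the d-ball, the CDF of the radius is F(r) = r**d, so r = u**(1 / d) with u ~ U(0, 1). A standalone numerical check (not part of the class):

import numpy as np

d, n = 3, 100000
rng = np.random.RandomState(0)
r = rng.rand(n) ** (1.0 / d)
print(np.mean(r <= 0.5))  # ~0.5**3 = 0.125, as F(r) = r**d predicts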
Example #35
def test_leech_kissing(tmpdir):
    cache_file = os.path.join(str(tmpdir), "leech_kissing.npy")
    x = leech_kissing(cache_file)

    assert x.shape == (196560, 24)
    assert np.allclose(norm(x, axis=1, keepdims=True), 1)
    assert len(set(map(tuple, x))) == len(x)  # no duplicates
    assert os.path.exists(cache_file)

    x_cached = leech_kissing(cache_file)
    assert np.allclose(x, x_cached)

    s = np.random.choice(len(x), 1000, replace=False)
    # The only vector in x with dot product > 0.5 with a given vector in s
    # is that vector itself, so each count is exactly 1 (per vector in s)
    assert np.all(np.sum(np.dot(x, x[s].T) > 0.5, axis=0) == 1)
Example #36
    def sample(self, n, d, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValueError("Dimensions must be a positive integer")

        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)

        if self.surface:
            return samples

        # Generate magnitudes for the vectors from a uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in d-space and not all bunched up at the centre of the sphere.
        samples *= rng.rand(n, 1) ** (1.0 / d)

        return samples
Example #37
def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens))

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      scaled_encoders=scaled_encoders,
                                      max_rates=max_rates,
                                      intercepts=intercepts,
                                      gain=gain,
                                      bias=bias)

    # Create the object which will handle simulation of the LIF ensemble.  This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation.  The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
Example #38
def test_eval_points_scaling(Simulator, sample, radius, seed, rng, scale):
    eval_points = UniformHypersphere()
    if sample:
        eval_points = eval_points.sample(500, 3, rng=rng)

    model = nengo.Network(seed=seed)
    with model:
        a = nengo.Ensemble(1, 3, radius=radius)
        b = nengo.Ensemble(1, 3)
        con = nengo.Connection(a, b, eval_points=eval_points,
                               scale_eval_points=scale)

    sim = Simulator(model)
    dists = npext.norm(sim.data[con].eval_points, axis=1)
    limit = radius if scale else 1.0
    assert np.all(dists <= limit)
    assert np.any(dists >= 0.9 * limit)
Example #39
def test_hypersphere_volume(cls, min_magnitude, d, rng, allclose):
    n = 250 * d
    dist = cls(min_magnitude=min_magnitude)
    samples = dist.sample(n, d, rng=rng)
    assert samples.shape == (n, d)
    assert allclose(np.mean(samples, axis=0), 0, atol=0.1)

    norms = npext.norm(samples, axis=1)
    assert np.all(norms >= min_magnitude)
    assert np.all(norms <= 1)

    # probability of not finding a point in [min_magnitude, r_tol_min], [r_tol_max, 1]
    q = 1e-5
    r_min_d = min_magnitude ** d
    r_tol_min = (r_min_d + (1 - r_min_d) * (1 - q ** (1 / n))) ** (1 / d)
    assert norms.min() <= r_tol_min
    r_tol_max = (1 - (1 - r_min_d) * (1 - q ** (1 / n))) ** (1 / d)
    assert norms.max() >= r_tol_max
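
The tolerance bounds follow from order statistics: for this distribution the d-th power of the norm is uniform on [min_magnitude**d, 1], so F(r) = (r**d - min_magnitude**d) / (1 - min_magnitude**d). Requiring that all n samples exceed r_tol_min with probability q gives (1 - F(r_tol_min))**n = q, which rearranges to the expression above; r_tol_max is the mirror-image bound from F(r_tol_max)**n = q. A quick sketch of the underlying inverse transform (parameters made up):

import numpy as np

d, m = 2, 0.5
rng = np.random.RandomState(0)
u = rng.rand(100000)
r = (m ** d + (1 - m ** d) * u) ** (1.0 / d)  # norm**d uniform on [m**d, 1]
print(r.min(), r.max())  # ~0.5 and ~1.0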
Example #40
def test_state_norm(plt):
    # Choose a filter, timestep, and number of simulation timesteps
    sys = Alpha(0.1)
    dt = 0.000001
    length = 2000000
    assert np.allclose(dt * length, 2.0)

    # Check that the power of each state equals the H2-norm of each state
    # The analog case is the same after scaling since dt is approx 0.
    response = sys.X.impulse(length, dt)
    actual = norm(response, axis=0) * dt
    assert np.allclose(actual, state_norm(cont2discrete(sys, dt)))
    assert np.allclose(actual, state_norm(sys) * np.sqrt(dt))

    step = int(0.002 / dt)
    plt.figure()
    plt.plot(response[::step, 0], label="$x_0$")
    plt.plot(response[::step, 1], label="$x_1$")
    plt.plot(np.dot(response[::step], sys.C.T), label="$y$")
    plt.legend()
Example #41
def trials_error_plot(prestime, t, ystar, learners):
    pdt = 0.01
    vsynapse = Alpha(0.02, default_dt=pdt)

    plt.figure()
    dinds = slice(0, 2)

    plt.subplot(211)
    plt.plot(t, ystar[:, dinds])
    for learner in learners:
        y = vsynapse.filtfilt(learner['y'][:, dinds])
        plt.plot(t, y)
    plt.ylabel('outputs')

    plt.subplot(212)
    esynapse = Alpha(5 * prestime, default_dt=pdt)
    for learner in learners:
        e = norm(esynapse.filtfilt(learner['e']), axis=1)
        plt.plot(t, e)
    plt.ylabel('errors')
Example #42
def test_derivative(f, df):
    get_network = lambda **kwargs: Network(
        weights, f=f, df=df, biases=None, noise=0, **kwargs)

    bp_learner = BPLearner(get_network(),
                           cost,
                           error,
                           eta=eta,
                           alpha=alpha,
                           name='BP')
    bp_learner.weight_norms = []

    # fa_learner = FALearner(
    #     get_network(), squared_cost, rms_error, eta=eta, alpha=alpha)
    # fa_learner.Bs = [initial_w((j, i), kind='ortho', scale=2)
    #                  for i, j in zip(dhids, dhids[1:] + [dout])]
    # fa_learner.bp_angles = []
    # # fa_learner.pbp_angles = []

    fas_learner = FASkipLearner(get_network(),
                                cost,
                                error,
                                eta=eta,
                                alpha=alpha,
                                name='FA')
    genB = lambda shape: initial_w(
        shape, kind='ortho', normkind='rightmean', scale=0.2)
    fas_learner.Bs = [genB((dout, dhid)) for dhid in dhids]

    # learners = [bp_learner, fa_learner]
    learners = [bp_learner, fas_learner]
    for learner in learners:
        learner.train(epochs, batch_fn, test_set=test_set)

    for learner in learners:
        print(", ".join("||W%d|| = %0.3f" % (i, norm(w))
                        for i, w in enumerate(learner.network.weights)))

    return learners
Example #43
    def make_pool(self, ens):
        if isinstance(ens.encoders, Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=self.rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        if self.config[ens].compact:
            p = pool.CompactPool(ens.n_neurons)
        elif self.config[ens].fixed:
            p = pool.FixedPool(ens.n_neurons,
                               bits_soma=self.config[ens].fixed_bits_soma,
                               bits_syn=self.config[ens].fixed_bits_syn)
        else:
            p = pool.StdPool(ens.n_neurons)
        intercepts = nengo.builder.sample(ens.intercepts, ens.n_neurons,
                                          rng=self.rng)
        max_rates = nengo.builder.sample(ens.max_rates, ens.n_neurons,
                                         rng=self.rng)
        gain, bias = self.find_gain_bias(p.soma, intercepts, max_rates)
        p.set_bias(bias)
        print('bias', p.get_bias())

        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

        self.pools[ens] = p

        self.model.params[ens] = BuiltEnsemble(intercepts=intercepts,
                                               max_rates=max_rates,
                                               gain=gain,
                                               bias=bias,
                                               encoders=encoders,
                                               scaled_encoders=scaled_encoders,
                                               eval_points=None,
                                               )
Example #44
def build_ensemble(model, ens):
    """Builds an `.Ensemble` object into a model.

    A brief summary of what happens in the ensemble build process, in order:

    1. Generate evaluation points and encoders.
    2. Normalize encoders to unit length.
    3. Determine bias and gain.
    4. Create neuron input signal.
    5. Add operator for injecting bias.
    6. Call build function for neuron type.
    7. Scale encoders by gain and radius.
    8. Add operators for multiplying decoded input signal by encoders and
       incrementing the result in the neuron input signal.
    9. Call build function for injected noise.

    Some of these steps may be altered or omitted depending on the parameters
    of the ensemble, in particular the neuron type. For example, most steps are
    omitted for the `.Direct` neuron type.

    Parameters
    ----------
    model : Model
        The model to build into.
    ens : Ensemble
        The ensemble to build.

    Notes
    -----
    Sets ``model.params[ens]`` to a `.BuiltEnsemble` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = get_samples(
            ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.sig[ens.neurons]['bias'] = Signal(
            bias, name="%s.bias" % ens, readonly=True)
        model.add_op(Copy(model.sig[ens.neurons]['bias'],
                          model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens, readonly=True)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
Example #45
def sorted_neurons(ensemble, sim, iterations=100, seed=None):
    '''Sort neurons in an ensemble by encoder and intercept.

    Parameters
    ----------
    ensemble: nengo.Ensemble
        The population of neurons to be sorted.
        The ensemble must have its encoders specified.

    iterations: int
        The number of times to iterate during the sort.

    seed: float
        A random number seed.

    Returns
    -------
    indices: ndarray
        An array with sorted indices into the neurons in the ensemble

    Examples
    --------

    You can use this to generate an array of sorted indices for plotting. This
    can be done after collecting the data. E.g.

    >>> indices = sorted_neurons(ensemble, sim)
    >>> plt.figure()
    >>> rasterplot(sim.data['My neurons.spikes'][:, indices])

    Algorithm
    ---------

    The algorithm is for each encoder in the initial set, randomly
    pick another encoder and check to see if swapping those two
    encoders would reduce the average difference between the
    encoders and their neighbours.  Difference is measured as the
    dot product.  Each encoder has four neighbours (N, S, E, W),
    except for the ones on the edges which have fewer (no wrapping).
    This algorithm is repeated `iterations` times, so a total of
    `iterations*N` swaps are considered.
    '''

    # Normalize all the encoders
    encoders = np.array(sim.data[ensemble].encoders)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Make an array with the starting order of the neurons
    N = encoders.shape[0]
    indices = np.arange(N)
    rng = np.random.RandomState(seed)

    for k in range(iterations):
        target = rng.randint(0, N, N)  # pick random swap targets
        for i in range(N):
            j = target[i]
            if i != j:  # if not swapping with yourself
                # compute the similarity score as-is (unswapped)
                sim1 = (_similarity(encoders, i, N)
                        + _similarity(encoders, j, N))
                # swap the encoder
                encoders[[i, j], :] = encoders[[j, i], :]
                indices[[i, j]] = indices[[j, i]]
                # compute the similarity score after swapping
                sim2 = (_similarity(encoders, i, N)
                        + _similarity(encoders, j, N))

                # if we were better unswapped
                if sim1 > sim2:
                    # swap them back
                    encoders[[i, j], :] = encoders[[j, i], :]
                    indices[[i, j]] = indices[[j, i]]

    return indices
Example #46
    def from_object(cls, ens, out_conns, dt, rng):
        assert isinstance(ens.neuron_type, nengo.neurons.LIF)
        assert isinstance(ens, nengo.Ensemble)

        if ens.seed is None:
            rng = np.random.RandomState(rng.tomaxint())
        else:
            rng = np.random.RandomState(ens.seed)

        # Generate evaluation points
        if isinstance(ens.eval_points, dists.Distribution):
            n_points = ens.n_eval_points
            if n_points is None:
                n_points = nengo.utils.builder.default_n_eval_points(
                    ens.n_neurons, ens.dimensions)
            eval_points = ens.eval_points.sample(n_points, ens.dimensions, rng)
            eval_points *= ens.radius
        else:
            if (ens.eval_points is not None and
                    ens.eval_points.shape[0] != ens.n_eval_points):
                warnings.warn("Number of eval points doesn't match "
                              "n_eval_points.  Ignoring n_eval_points.")
            eval_points = np.array(ens.eval_points, dtype=np.float64)

        # Determine max_rates and intercepts
        if isinstance(ens.max_rates, dists.Distribution):
            max_rates = ens.max_rates.sample(ens.n_neurons, rng=rng)
        else:
            max_rates = np.array(ens.max_rates)
        if isinstance(ens.intercepts, dists.Distribution):
            intercepts = ens.intercepts.sample(ens.n_neurons, rng=rng)
        else:
            intercepts = np.array(ens.intercepts)

        # Generate gains, bias
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

        # Generate encoders
        if isinstance(ens.encoders, dists.Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        # Generate decoders for outgoing connections
        decoders = list()
        tfses = utils.connections.OutgoingEnsembleConnections(out_conns)

        def build_decoder(function, evals, solver):
            """Internal function for building a single decoder."""
            if evals is None:
                evals = npext.array(eval_points, min_dims=2)
            else:
                evals = npext.array(evals, min_dims=2)

            assert solver is None or not solver.weights

            x = np.dot(evals, encoders.T / ens.radius)
            activities = ens.neuron_type.rates(x, gain, bias)

            if function is None:
                targets = evals
            else:
                (value, _) = checked_call(function, evals[0])
                function_size = np.asarray(value).size
                targets = np.zeros((len(evals), function_size))

                for i, ep in enumerate(evals):
                    targets[i] = function(ep)

            if solver is None:
                solver = nengo.solvers.LstsqL2()

            return solver(activities, targets, rng=rng)[0]

        decoder_builder = utils.decoders.DecoderBuilder(build_decoder)

        # Build each of the decoders in turn
        for tfse in tfses.transforms_functions:
            decoders.append(decoder_builder.get_transformed_decoder(
                tfse.function, tfse.transform, tfse.eval_points, tfse.solver))

        # Build list of learning rule, connection-index tuples
        learning_rules = list()
        for c in tfses:
            for l in utils.connections.get_learning_rules(c):
                learning_rules.append((l, tfses[c]))

        # By default compress all decoders
        decoders_to_compress = [True for d in decoders]

        # Turn off compression for all decoders associated with learning rules
        for l in learning_rules:
            decoders_to_compress[l[1]] = False

        # Compress and merge the decoders
        (decoder_headers, decoders) =\
            utils.decoders.get_combined_compressed_decoders(
                decoders, compress=decoders_to_compress)
        decoders /= dt

        return cls(ens.n_neurons, gain, bias, encoders, decoders,
                   ens.neuron_type.tau_rc, ens.neuron_type.tau_ref,
                   eval_points, decoder_headers, learning_rules)
Example #47
for images in [train_images, test_images]:
    images[:] = 2 * images - 1  # normalize to -1 to 1

# --- set up network parameters
n_vis = train_images.shape[1]
n_hid = 500
rng = np.random

if 1:
    rf_shape = (9, 9)
    mask = create_mask(n_hid, (28, 28), rf_shape)
    weights = rng.normal(size=(n_hid, n_vis)) * mask

weights = weights.T
mask = mask.T
weights /= norm(weights, axis=0, keepdims=True)

neurons = nengo.LIF()
gain, bias = neurons.gain_bias(200, -0.5)

def encode(x):
    return neurons.rates(np.dot(x, weights), gain, bias)

# --- determine initial decoders
x = train_images[:1000]
decoders, _ = nengo.decoders.LstsqL2()(encode(x), x)

# x = train_images[:1000]
# A = encode(x)
# dshape = (n_hid, n_vis)
Example #48
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Generate eval points
    if isinstance(ens.eval_points, Distribution):
        n_points = ens.n_eval_points
        if n_points is None:
            n_points = default_n_eval_points(ens.n_neurons, ens.dimensions)
        eval_points = ens.eval_points.sample(n_points, ens.dimensions, rng)
        # eval_points should be in the ensemble's representational range
        eval_points *= ens.radius
    else:
        if (ens.n_eval_points is not None
                and ens.eval_points.shape[0] != ens.n_eval_points):
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        eval_points = np.array(ens.eval_points, dtype=np.float64)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Determine max_rates and intercepts
    max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.add_op(Copy(src=Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
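The `Copy` and `DotInc` operators registered above implement the standard NEF encoding each timestep. A minimal NumPy restatement of that arithmetic (illustrative only, not builder code):

import numpy as np

def neuron_input(x, encoders, gain, bias, radius=1.0):
    # scaled_encoders = encoders * gain / radius, so the input current is
    # J = gain * dot(e, x) / radius + bias for each neuron
    scaled_encoders = encoders * (gain / radius)[:, np.newaxis]
    return np.dot(scaled_encoders, x) + bias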
Ejemplo n.º 53
0
def test_eval_points(Simulator, nl_nodirect, plt, seed, rng, logger):
    n = 100
    d = 5
    filter = 0.08

    eval_points = np.logspace(np.log10(300), np.log10(5000), 11)
    eval_points = np.round(eval_points).astype('int')
    max_points = eval_points.max()
    n_trials = 1

    rmses = np.nan * np.zeros((len(eval_points), n_trials))
    for j in range(n_trials):
        points = rng.normal(size=(max_points, d))
        points *= (rng.uniform(size=max_points)
                   / norm(points, axis=-1))[:, None]
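        # note: scaling by U(0, 1) / ||p|| makes the radii uniform on [0, 1],
        # which concentrates samples near the origin in high dimensions; a
        # volume-uniform ball sample would use U(0, 1) ** (1 / d) instead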

        rng_j = np.random.RandomState(348 + j)
        seed = 903824 + j

        # generate random input in unit hypersphere
        x = rng_j.normal(size=d)
        x *= rng_j.uniform() / norm(x)

        for i, n_points in enumerate(eval_points):
            model = nengo.Network(seed=seed)
            with model:
                model.config[nengo.Ensemble].neuron_type = nl_nodirect()
                u = nengo.Node(output=x)
                a = nengo.Ensemble(n * d, dimensions=d,
                                   eval_points=points[:n_points])
                nengo.Connection(u, a, synapse=0)
                up = nengo.Probe(u)
                ap = nengo.Probe(a)

            with Timer() as timer:
                sim = Simulator(model)
            sim.run(10 * filter)

            t = sim.trange()
            xt = nengo.synapses.filtfilt(sim.data[up], filter, dt=sim.dt)
            yt = nengo.synapses.filtfilt(sim.data[ap], filter, dt=sim.dt)
            t0 = 5 * filter
            t1 = 7 * filter
            tmask = (t > t0) & (t < t1)

            rmses[i, j] = rms(yt[tmask] - xt[tmask])
            logger.info('trial %d', j)
            logger.info('  n_points: %d', n_points)
            logger.info('  duration: %0.3f s', timer.duration)

    # subtract out mean for each model
    rmses_norm = rmses - rmses.mean(0, keepdims=True)

    mean = rmses_norm.mean(1)
    low = rmses_norm.min(1)
    high = rmses_norm.max(1)
    plt.semilogx(eval_points, mean, 'k-')
    plt.semilogx(eval_points, high, 'r-')
    plt.semilogx(eval_points, low, 'b-')
    plt.xlim([eval_points[0], eval_points[-1]])
    plt.xticks(eval_points, eval_points)
Ejemplo n.º 54
0
    def make_ensemble(self, ens):
        """Build an Ensemble."""
        self.model.ensembles.append(ens)

        if isinstance(ens.neuron_type, nengo.Direct):
            self.make_direct_ensemble(ens)
            return

        p = self.make_pool(ens)

        if isinstance(ens.encoders, Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=self.rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        intercepts = nengo.builder.ensemble.sample(ens.intercepts,
                                                   ens.n_neurons, rng=self.rng)
        max_rates = nengo.builder.ensemble.sample(ens.max_rates,
                                                  ens.n_neurons, rng=self.rng)

        if ens.gain is not None and ens.bias is not None:
            gain = nengo.builder.ensemble.sample(ens.gain, ens.n_neurons,
                                                 rng=self.rng)
            bias = nengo.builder.ensemble.sample(ens.bias, ens.n_neurons,
                                                 rng=self.rng)
        elif ens.gain is not None or ens.bias is not None:
            raise NotImplementedError("gain or bias set for %s, but not both. "
                                      "Solving for one given the other is not "
                                      "implemented yet." % ens)
        else:
            gain, bias = neuron_tuning.find_gain_bias(p, intercepts, max_rates)

        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

        self.model.pools[ens] = p

        self.model.decoders[ens] = {}

        if isinstance(ens.eval_points, Distribution):
            n_points = ens.n_eval_points
            if n_points is None:
                n_points = nengo.utils.builder.default_n_eval_points(
                    ens.n_neurons, ens.dimensions)
            eval_points = ens.eval_points.sample(n_points, ens.dimensions,
                                                 self.rng)
            eval_points *= ens.radius
        else:
            if (ens.n_eval_points is not None
                    and ens.eval_points.shape[0] != ens.n_eval_points):
                warnings.warn("Number of eval_points doesn't match "
                              "n_eval_points. Ignoring n_eval_points.")
            eval_points = np.array(ens.eval_points, dtype=np.float64)

            eval_points *= ens.radius

        J = gain * np.dot(eval_points, encoders.T / ens.radius) + bias
        activity = self.compute_activity(ens, J)

        self.model.params[ens] = BuiltEnsemble(intercepts=intercepts,
                                               max_rates=max_rates,
                                               gain=gain,
                                               bias=bias,
                                               encoders=encoders,
                                               scaled_encoders=scaled_encoders,
                                               eval_points=eval_points,
                                               activity=activity,
                                               neurons=ens.neurons,
                                               radius=ens.radius)
        self.model.outputs[ens] = np.zeros(ens.n_neurons, dtype=float)
        self.model.input_filters[ens] = {}
        self.model.input_filters[ens.neurons] = {}
Ejemplo n.º 55
0
def create_encoders(neurons, dimensions, mean, variance):
    """Sample Gaussian encoders and normalize each row to unit length."""
    encoders = variance * np.random.randn(neurons, dimensions) + mean
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    return encoders
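A quick illustrative check of the helper above (the argument values are arbitrary): whatever the sampling mean and variance, every returned row is unit-length.

encoders = create_encoders(100, 3, mean=0.5, variance=2.0)
assert np.allclose(npext.norm(encoders, axis=1), 1.0)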
Ejemplo n.º 56
0
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = sample(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        bias_sig = Signal(bias, name="%s.bias" % ens, readonly=True)
        model.add_op(Copy(src=bias_sig, dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens, readonly=True)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
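`get_gain_bias` ultimately defers to the neuron model, and for LIF neurons the inversion has a closed form: pick gain and bias so the neuron starts firing at x = intercept and fires at max_rate when x = 1. The sketch below mirrors what `nengo.LIF.gain_bias` computes; treat the exact expressions as a reconstruction under the standard LIF rate equation, not as the canonical source.

import numpy as np

def lif_gain_bias(max_rates, intercepts, tau_rc=0.02, tau_ref=0.002):
    # invert r = 1 / (tau_ref - tau_rc * log(1 - 1 / J)) at J = gain + bias
    J_max = 1.0 / (1 - np.exp((tau_ref - 1.0 / max_rates) / tau_rc))
    gain = (1 - J_max) / (intercepts - 1.0)  # so that J = 1 at x = intercept
    bias = 1 - gain * intercepts
    return gain, bias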
Ejemplo n.º 57
0
def test_eval_points(Simulator, nl_nodirect):
    rng = np.random.RandomState(0)
    n = 100
    d = 5
    filter = 0.08
    dt = 1e-3

    eval_points = np.logspace(np.log10(300), np.log10(5000), 11)
    eval_points = np.round(eval_points).astype('int')
    max_points = eval_points.max()
    n_trials = 1

    rmses = np.nan * np.zeros((len(eval_points), n_trials))
    for j in range(n_trials):
        points = rng.normal(size=(max_points, d))
        points *= (rng.uniform(size=max_points)
                   / norm(points, axis=-1))[:, None]

        rng_j = np.random.RandomState(348 + j)
        seed = 903824 + j

        # generate random input in unit hypersphere
        x = rng_j.normal(size=d)
        x *= rng_j.uniform() / norm(x)

        for i, n_points in enumerate(eval_points):
            model = nengo.Network(
                'test_eval_points(%d,%d)' % (i, j), seed=seed)
            with model:
                u = nengo.Node(output=x)
                a = nengo.Ensemble(nl_nodirect(n * d), d,
                                   eval_points=points[:n_points])
                nengo.Connection(u, a, synapse=0)
                up = nengo.Probe(u)
                ap = nengo.Probe(a)

            with Timer() as timer:
                sim = Simulator(model, dt=dt)
            sim.run(10 * filter)

            t = sim.trange()
            xt = filtfilt(sim.data[up], filter / dt)
            yt = filtfilt(sim.data[ap], filter / dt)
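            # this older filtfilt takes the time constant in timesteps
            # (hence filter / dt); the newer nengo.synapses.filtfilt used
            # earlier takes seconds together with an explicit dt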
            t0 = 5 * filter
            t1 = 7 * filter
            tmask = (t > t0) & (t < t1)

            rmses[i, j] = rms(yt[tmask] - xt[tmask])
            print("done %d (%d) in %0.3f s" % (n_points, j, timer.duration))

    # subtract out mean for each model
    rmses_norm = rmses - rmses.mean(0, keepdims=True)

    with Plotter(Simulator, nl_nodirect) as plt:
        mean = rmses_norm.mean(1)
        low = rmses_norm.min(1)
        high = rmses_norm.max(1)
        plt.semilogx(eval_points, mean, 'k-')
        plt.semilogx(eval_points, high, 'r-')
        plt.semilogx(eval_points, low, 'b-')
        plt.xlim([eval_points[0], eval_points[-1]])
        plt.xticks(eval_points, eval_points)
        plt.savefig('test_decoders.test_eval_points.pdf')
        plt.close()
Ejemplo n.º 58
0
    def build_ensemble(self, ens):
        # Create random number generator
        seed = self.next_seed() if ens.seed is None else ens.seed
        rng = np.random.RandomState(seed)

        # Generate eval points
        if ens.eval_points is None or is_integer(ens.eval_points):
            eval_points = self.generate_eval_points(
                ens=ens, n_points=ens.eval_points, rng=rng)
        else:
            eval_points = npext.array(
                ens.eval_points, dtype=np.float64, min_dims=2)

        # Set up signal
        self.model.sig_in[ens] = Signal(np.zeros(ens.dimensions),
                                        name="%s.signal" % ens.label)
        self.model.operators.append(Reset(self.model.sig_in[ens]))

        # Set up encoders
        if ens.encoders is None:
            if isinstance(ens.neurons, nengo.Direct):
                encoders = np.identity(ens.dimensions)
            else:
                sphere = dists.UniformHypersphere(ens.dimensions, surface=True)
                encoders = sphere.sample(ens.neurons.n_neurons, rng=rng)
        else:
            encoders = np.array(ens.encoders, dtype=np.float64)
            enc_shape = (ens.neurons.n_neurons, ens.dimensions)
            if encoders.shape != enc_shape:
                raise ShapeMismatch(
                    "Encoder shape is %s. Should be (n_neurons, dimensions); "
                    "in this case %s." % (encoders.shape, enc_shape))
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        # Determine max_rates and intercepts
        if isinstance(ens.max_rates, dists.Distribution):
            max_rates = ens.max_rates.sample(
                ens.neurons.n_neurons, rng=rng)
        else:
            max_rates = np.array(ens.max_rates)
        if isinstance(ens.intercepts, dists.Distribution):
            intercepts = ens.intercepts.sample(
                ens.neurons.n_neurons, rng=rng)
        else:
            intercepts = np.array(ens.intercepts)

        # Build the neurons
        if isinstance(ens.neurons, nengo.Direct):
            bn = self.build(ens.neurons, ens.dimensions)
        else:
            bn = self.build(ens.neurons, max_rates, intercepts)

        # Scale the encoders
        if isinstance(ens.neurons, nengo.Direct):
            scaled_encoders = encoders
        else:
            scaled_encoders = encoders * (bn.gain / ens.radius)[:, np.newaxis]

        # Create output signal, using built Neurons
        self.model.operators.append(DotInc(
            Signal(scaled_encoders, name="%s.scaled_encoders" % ens.label),
            self.model.sig_in[ens],
            self.model.sig_in[ens.neurons],
            tag="%s encoding" % ens.label))

        # Output is neural output
        self.model.sig_out[ens] = self.model.sig_out[ens.neurons]

        for probe in ens.probes["decoded_output"]:
            self.build(probe, dimensions=ens.dimensions)
        for probe in ens.probes["spikes"] + ens.probes["voltages"]:
            self.build(probe, dimensions=ens.neurons.n_neurons)

        return BuiltEnsemble(eval_points=eval_points,
                             encoders=encoders,
                             intercepts=intercepts,
                             max_rates=max_rates,
                             scaled_encoders=scaled_encoders)
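`UniformHypersphere(..., surface=True)` can be pictured as normalized Gaussian sampling: an isotropic Gaussian is rotationally symmetric, so normalizing each draw lands uniformly on the unit sphere. A minimal sketch of that equivalent behavior (not the distribution's actual implementation):

import numpy as np

def sample_unit_sphere(n_samples, dimensions, rng=np.random):
    # directions of isotropic Gaussian samples are uniform on the sphere
    samples = rng.normal(size=(n_samples, dimensions))
    return samples / np.linalg.norm(samples, axis=1, keepdims=True)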
Ejemplo n.º 59
0
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = sample(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
        max_rates, intercepts = None, None  # TODO: determine from gain & bias
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
        intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.add_op(Copy(src=Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
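The `TODO` above (recovering max_rates and intercepts from a user-supplied gain and bias) has a closed form for LIF neurons. A hedged sketch of the inverse computation, assuming the standard LIF rate equation r(J) = 1 / (tau_ref - tau_rc * log(1 - 1 / J)):

import numpy as np

def lif_max_rates_intercepts(gain, bias, tau_rc=0.02, tau_ref=0.002):
    # the neuron starts firing where J = gain * x + bias crosses 1 ...
    intercepts = (1 - bias) / gain
    # ... and fires fastest at x = 1, where J = gain + bias
    max_rates = 1.0 / (tau_ref - tau_rc * np.log1p(-1.0 / (gain + bias)))
    return max_rates, intercepts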