def test_meshgrid_nd(allclose):
    a = [0, 0, 1]
    b = [1, 2, 3]
    c = [23, 42]
    expected = [
        np.array(
            [
                [[0, 0], [0, 0], [0, 0]],
                [[0, 0], [0, 0], [0, 0]],
                [[1, 1], [1, 1], [1, 1]],
            ]
        ),
        np.array(
            [
                [[1, 1], [2, 2], [3, 3]],
                [[1, 1], [2, 2], [3, 3]],
                [[1, 1], [2, 2], [3, 3]],
            ]
        ),
        np.array(
            [
                [[23, 42], [23, 42], [23, 42]],
                [[23, 42], [23, 42], [23, 42]],
                [[23, 42], [23, 42], [23, 42]],
            ]
        ),
    ]
    actual = meshgrid_nd(a, b, c)
    assert allclose(expected, actual)
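
# Cross-check sketch (not part of the original test suite): ``meshgrid_nd`` is
# assumed to mirror ``np.meshgrid(..., indexing='ij')``, as referenced in the
# ``tuning_curves`` docstring below, so the expected arrays above can be
# reproduced with numpy directly.
def test_meshgrid_nd_matches_numpy_ij(allclose):
    a = [0, 0, 1]
    b = [1, 2, 3]
    c = [23, 42]
    # np.meshgrid with indexing='ij' yields three arrays of shape (3, 3, 2)
    expected = np.meshgrid(a, b, c, indexing="ij")
    assert allclose(expected, meshgrid_nd(a, b, c))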
def tuning_curves(ens, sim, inputs=None):
    """Calculates the tuning curves of an ensemble.

    That is, the neuron responses as a function of the vector represented by
    the ensemble.

    For 1-dimensional ensembles, the unpacked return value of this function
    can be passed directly to :func:`matplotlib.pyplot.plot`.

    Parameters
    ----------
    ens : nengo.Ensemble
        Ensemble to calculate the tuning curves of.
    sim : nengo.Simulator
        Simulator providing information about the built ensemble. (An unbuilt
        ensemble does not have tuning curves assigned to it.)
    inputs : sequence of ndarray, optional
        The inputs at which the tuning curves will be evaluated. For each of
        the `D` ensemble dimensions, one array of dimensionality `D` is
        needed. The output of :func:`numpy.meshgrid` with ``indexing='ij'``
        is in the right format.

    Returns
    -------
    inputs : sequence of ndarray
        The passed or auto-generated `inputs`.
    activities : ndarray
        The activities of the individual neurons given the `inputs`. For
        ensembles with 1 dimension, the rows correspond to the `inputs` and
        the columns to individual neurons. For ensembles with > 1 dimension,
        the last dimension enumerates the neurons and the remaining
        dimensions map to `inputs`.

    See Also
    --------
    response_curves
    """
    if inputs is None:
        inputs = np.linspace(-ens.radius, ens.radius)
        if ens.dimensions > 1:
            inputs = npext.meshgrid_nd(*(ens.dimensions * [inputs]))
        else:
            inputs = [inputs]
    inputs = np.asarray(inputs).T

    # Evaluate the neuron model at every input point: project the inputs
    # (scaled to unit radius) onto the encoders and apply the rate equation.
    flattened = np.reshape(inputs, (-1, ens.dimensions))
    # Divide out of place; np.reshape may return a view into `inputs`, and an
    # in-place division would then also rescale the returned `inputs`.
    x = np.dot(flattened / ens.radius, sim.data[ens].encoders.T)
    activities = ens.neuron_type.rates(
        x, sim.data[ens].gain, sim.data[ens].bias)
    activities = np.reshape(activities, inputs[..., 0].shape + (-1,))
    return inputs, activities
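
# Minimal usage sketch for ``tuning_curves`` (the toy network and parameter
# values below are illustrative assumptions, not part of the original source).
if __name__ == "__main__":  # pragma: no cover - illustrative example only
    import matplotlib.pyplot as plt

    import nengo

    with nengo.Network() as model:
        ens = nengo.Ensemble(n_neurons=20, dimensions=1)

    with nengo.Simulator(model) as sim:
        # For a 1-D ensemble, `inputs` has shape (50, 1) and `activities`
        # has shape (50, n_neurons), so they can be plotted directly.
        inputs, activities = tuning_curves(ens, sim)

    plt.plot(inputs, activities)  # one curve per neuron
    plt.xlabel("Represented value")
    plt.ylabel("Firing rate [Hz]")
    plt.show()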