Example #1
    def model_cascade(self, in_arr, pt_list, layers, use_jit):
        """Horsager model cascade

        Parameters
        ----------
        in_arr : array-like
            A 2D array specifying the effective current values
            at a particular spatial location (pixel); one value
            per retinal layer and electrode.
            Dimensions: <#layers x #electrodes>
        pt_list : list
            List of pulse train 'data' containers.
            Dimensions: <#electrodes x #time points>
        layers : list
            List of retinal layers to simulate.
            Choose from:
            - 'OFL': optic fiber layer
            - 'GCL': ganglion cell layer
        use_jit : bool
            If True, applies just-in-time (JIT) compilation to
            expensive computations for additional speed-up
            (requires Numba).
        """
        if 'INL' in layers:
            raise ValueError("The Nanduri2012 model does not support an inner "
                             "nuclear layer.")

        # Although the paper says to use cathodic-first pulses, the results
        # can only be reproduced with what we now call anodic-first. So flip
        # the sign of the stimulus here:
        stim = -self.calc_layer_current(in_arr, pt_list, layers)

        # R1 convolves the entire stimulus (both positive and negative parts)
        r1 = self.tsample * utils.conv(
            stim, self.gamma1, mode='full', method='sparse')[:stim.size]

        # It's possible that the original model accumulated charge on the
        # anodic phase. It probably does not matter much (the timing differs
        # slightly, but the data are not accurate enough to favor one over
        # the other), so do what makes the most sense: accumulate on the
        # cathodic phase.
        ca = self.tsample * np.cumsum(np.maximum(0, -stim))
        ca = self.tsample * utils.conv(
            ca, self.gamma2, mode='full', method='fft')[:stim.size]
        r2 = r1 - self.epsilon * ca

        # Then half-rectify and pass through the power-nonlinearity
        r3 = np.maximum(0.0, r2)**self.beta

        # Then convolve with slow gamma
        r4 = self.tsample * utils.conv(
            r3, self.gamma3, mode='full', method='fft')[:stim.size]

        return utils.TimeSeries(self.tsample, r4)
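
A minimal, standalone NumPy sketch of the same cascade on a toy 1-D stimulus.
The `toy_gamma` helper, the time constants, and the `eps`/`beta` values are
illustrative assumptions, not the library's `utils.gamma` or the fitted model
parameters:

import numpy as np

def toy_gamma(n, tau, tsample, dur=0.5):
    """Hypothetical stand-in for utils.gamma: normalized n-stage gamma kernel."""
    t = np.arange(0, dur, tsample)
    y = t ** (n - 1) * np.exp(-t / tau)
    return y / (np.sum(y) * tsample)  # unit area, so tsample*conv approximates the integral

tsample = 1e-4                            # assumed sampling step (s)
t = np.arange(0, 0.5, tsample)
stim = np.zeros_like(t)
stim[::1000] = -20.0                      # cathodic phase of a toy biphasic pulse
stim[5::1000] = 20.0                      # anodic phase, 0.5 ms later

gamma1 = toy_gamma(1, 0.5e-3, tsample)    # fast-response kernel
gamma2 = toy_gamma(1, 50e-3, tsample)     # charge-accumulation kernel
gamma3 = toy_gamma(3, 25e-3, tsample)     # slow-response kernel
eps, beta = 2.0, 3.0                      # illustrative, not the fitted values

stim = -stim                              # flip to anodic-first, as in the method above
r1 = tsample * np.convolve(stim, gamma1, mode='full')[:stim.size]
ca = tsample * np.cumsum(np.maximum(0, -stim))
ca = tsample * np.convolve(ca, gamma2, mode='full')[:stim.size]
r2 = r1 - eps * ca
r3 = np.maximum(0.0, r2) ** beta
r4 = tsample * np.convolve(r3, gamma3, mode='full')[:stim.size]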
Example #2
    def model_cascade(self, in_arr, pt_list, layers, use_jit):
        """Nanduri model cascade

        Parameters
        ----------
        in_arr : array-like
            A 2D array specifying the effective current values
            at a particular spatial location (pixel); one value
            per retinal layer and electrode.
            Dimensions: <#layers x #electrodes>
        pt_list : list
            List of pulse train 'data' containers.
            Dimensions: <#electrodes x #time points>
        layers : list
            List of retinal layers to simulate.
            Choose from:
            - 'OFL': optic fiber layer
            - 'GCL': ganglion cell layer
        use_jit : bool
            If True, applies just-in-time (JIT) compilation to
            expensive computations for additional speed-up
            (requires Numba).
        """
        if 'INL' in layers:
            raise ValueError("The Nanduri2012 model does not support an inner "
                             "nuclear layer.")

        # `b1` contains a scaled PulseTrain per layer for this particular
        # pixel: Use as input to model cascade
        b1 = self.calc_layer_current(in_arr, pt_list, layers)

        # Fast response
        b2 = self.tsample * utils.conv(
            b1, self.gamma1, mode='full', method='sparse',
            use_jit=use_jit)[:b1.size]

        # Charge accumulation
        ca = self.tsample * np.cumsum(np.maximum(0, b1))
        ca = self.tsample * utils.conv(
            ca, self.gamma2, mode='full', method='fft')[:b1.size]
        b3 = np.maximum(0, b2 - self.eps * ca)

        # Stationary nonlinearity
        sigmoid = ss.expit((b3.max() - self.shift) / self.slope)
        b4 = b3 * sigmoid * self.asymptote

        # Slow response
        b5 = self.tsample * utils.conv(
            b4, self.gamma3, mode='full', method='fft')[:b1.size]

        return utils.TimeSeries(self.tsample, b5)
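
The stationary nonlinearity is the least obvious step. A small sketch with
illustrative parameter values (assuming `ss` refers to `scipy.special`, as the
use of `expit` suggests):

import numpy as np
import scipy.special as ss

b3 = np.maximum(0, np.random.randn(1000))  # toy output of the previous stage
asymptote, slope, shift = 10.0, 3.0, 15.0  # illustrative, not the fitted values

# The sigmoid is a single scalar evaluated at the peak of b3, so this stage
# rescales the whole trace rather than compressing it pointwise.
sigmoid = ss.expit((b3.max() - shift) / slope)
b4 = b3 * sigmoid * asymptote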
Example #3
    def slow_response(self, stim):
        """Slow response function

        Convolve a stimulus `stim` with a low-pass filter (3-stage gamma)
        with time constant self.tau_slow.
        This is Box 5 in Nanduri et al. (2012).

        Parameters
        ----------
        stim : array
           Temporal signal to process, stim(r,t) in Nanduri et al. (2012)

        Returns
        -------
        Slow response, b5(r,t) in Nanduri et al. (2012).

        Notes
        -----
        This is by far the most computationally involved part of the perceptual
        sensitivity model.
        Conversion to a TimeSeries object is avoided for the sake of speed.
        """
        # No need to zero-pad: fftconvolve already takes care of optimal
        # kernel/data size
        conv = utils.conv(stim, self.gamma_slow, method='fft', mode='full')

        # Cut off the tail of the convolution to make the output signal match
        # the dimensions of the input signal.
        return self.scale_slow * self.tsample * conv[:stim.shape[-1]]
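
The same operation can be reproduced with SciPy alone. The kernel construction
and the `tau_slow`/`scale_slow` values below are assumptions for illustration;
the library builds `self.gamma_slow` elsewhere:

import numpy as np
from scipy.signal import fftconvolve

tsample = 5e-6                        # assumed sampling step (s)
tau_slow = 25e-3                      # illustrative time constant (s)
scale_slow = 1.0                      # illustrative scaling factor

# 3-stage gamma kernel, normalized to unit area
t = np.arange(0, 8 * tau_slow, tsample)
gamma_slow = t ** 2 * np.exp(-t / tau_slow)
gamma_slow /= np.sum(gamma_slow) * tsample

stim = np.zeros(int(0.5 / tsample))   # toy input signal
stim[::20000] = 1.0

conv = fftconvolve(stim, gamma_slow, mode='full')
slow = scale_slow * tsample * conv[:stim.shape[-1]]  # trim tail to input length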
Example #4
    def fast_response(self, stim, gamma, method, use_jit=True):
        """Fast response function

        Convolve a stimulus `stim` with a temporal low-pass filter `gamma`.

        Parameters
        ----------
        stim : array
           Temporal signal to process, stim(r,t) in Nanduri et al. (2012).
        gamma : array
           Temporal low-pass filter (kernel) to convolve the stimulus with.
        method : str
           Convolution method to use: 'sparse' for utils.sparseconv or
           'fft' for FFT-based convolution.
        use_jit : bool, optional
           If True (default), use numba just-in-time compilation.

        Returns
        -------
        Fast response, b2(r,t) in Nanduri et al. (2012).

        Notes
        -----
        The function utils.sparseconv can be much faster than np.convolve and
        signal.fftconvolve if `stim` is sparse and much longer than the
        convolution kernel.
        The output is not converted to a TimeSeries object for the sake of
        speed.
        """
        conv = utils.conv(stim,
                          gamma,
                          mode='full',
                          method=method,
                          use_jit=use_jit)

        # Cut off the tail of the convolution to make the output signal
        # match the dimensions of the input signal.
        return self.tsample * conv[:stim.shape[-1]]
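
To see why a sparse convolution pays off here, the following simplified sketch
(not the actual utils.sparseconv implementation) scales its work with the
number of nonzero samples rather than with the full signal length:

import numpy as np

def sparseconv_full(stim, kernel):
    """Simplified illustration of the idea behind utils.sparseconv:
    add a shifted, scaled copy of the kernel at every nonzero sample."""
    out = np.zeros(stim.size + kernel.size - 1)
    for idx in np.flatnonzero(stim):
        out[idx:idx + kernel.size] += stim[idx] * kernel
    return out

stim = np.zeros(100000)
stim[::10000] = 1.0                       # very sparse pulse train
kernel = np.exp(-np.arange(500) / 100.0)

np.testing.assert_almost_equal(sparseconv_full(stim, kernel),
                               np.convolve(stim, kernel, mode='full'))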
Example #5
    def charge_accumulation(self, ecm):
        """Calculates the charge accumulation

        Charge accumulation is calculated on the effective input current
        `ecm`, as opposed to the output of the fast response stage.

        Parameters
        ----------
        ecm : array-like
            A 2D array specifying the effective current values at a particular
            spatial location (pixel); one value per retinal layer, averaged
            over all electrodes through that pixel.
            Dimensions: <#layers x #time points>
        """
        ca = np.zeros_like(ecm)

        for i in range(ca.shape[0]):
            summed = self.tsample * np.cumsum(np.abs(ecm[i, :]))
            conved = self.tsample * utils.conv(
                summed, self.gamma_ca, mode='full', method='fft')
            ca[i, :] = self.scale_ca * conved[:ecm.shape[-1]]
        return ca
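
The same per-layer loop can be exercised with plain NumPy. The toy `ecm`, the
`gamma_ca` kernel, and `scale_ca` below are assumptions for illustration only:

import numpy as np

tsample = 1e-4
t = np.arange(0, 0.5, tsample)
ecm = np.zeros((2, t.size))           # toy <#layers x #time points> input
ecm[:, ::1000] = -20.0                # cathodic phase
ecm[:, 5::1000] = 20.0                # anodic phase

tau_ca = 50e-3                        # illustrative time constant (s)
gamma_ca = np.exp(-t / tau_ca)
gamma_ca /= np.sum(gamma_ca) * tsample
scale_ca = 1.0                        # illustrative scaling factor

ca = np.zeros_like(ecm)
for i in range(ca.shape[0]):
    # both phases contribute, because charge accumulates with |current|
    summed = tsample * np.cumsum(np.abs(ecm[i, :]))
    conved = tsample * np.convolve(summed, gamma_ca, mode='full')
    ca[i, :] = scale_ca * conved[:ecm.shape[-1]]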
Example #6
def test_conv():
    reload(utils)
    # time vector for stimulus (long)
    stim_dur = 0.5  # seconds
    tsample = 0.001 / 1000
    t = np.arange(0, stim_dur, tsample)

    # stimulus (1 kHz biphasic pulse train with anodic and cathodic pulses)
    stim = np.zeros_like(t)
    stim[::1000] = 1
    stim[100::1000] = -1

    # kernel
    _, gg = utils.gamma(1, 0.005, tsample)

    # make sure utils.conv returns the same result as np.convolve
    # for both methods ('fft', 'sparse') and all modes
    methods = ["fft", "sparse"]
    modes = ["full", "valid", "same"]
    for mode in modes:
        # np.convolve
        npconv = np.convolve(stim, gg, mode=mode)

        for method in methods:
            conv = utils.conv(stim, gg, mode=mode, method=method)

            npt.assert_equal(conv.shape, npconv.shape)
            npt.assert_almost_equal(conv, npconv)

    with pytest.raises(ValueError):
        utils.conv(gg, stim, mode="invalid")
    with pytest.raises(ValueError):
        utils.conv(gg, stim, method="invalid")

    with mock.patch.dict("sys.modules", {"numba": {}}):
        with pytest.raises(ImportError):
            reload(utils)
            utils.conv(stim, gg, mode='full', method='sparse', use_jit=True)
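
The last block of the test patches sys.modules so that numba looks unavailable
and the sparse method with use_jit=True has to fail. The mechanism can be seen
in isolation with a tiny sketch (independent of the utils module above):

from unittest import mock

with mock.patch.dict("sys.modules", {"numba": {}}):
    try:
        from numba import jit        # resolves against the patched dict
    except ImportError:
        print("numba appears unavailable while the patch is active")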