Example #1
def extract_nsl_spectrogram(sig, Fs, cfs):
    '''Implements a version of the "wav2aud" function in the NSL toolbox.
    Uses Brian hears to chain most of the computations to be done online.

    This is effectively what it does:
        1. Gammatone filterbank at provided cfs (erbspace recommended)
        2. Half-wave rectification
        3. Low-pass filtering at 2 kHz
        4. First-order derivative across frequencies (basically just
            taking the diff of successive frequencies to sharpen output)
        5. Half-wave rectification #2
        6. An exponentially-decaying average, with time constant chosen
            to be similar to that reported in the NSL toolbox (8ms)

    INPUTS
    --------
    sig : array
        The auditory signal to extract the spectrogram from. Should be
        time x feats, or 1-D.
    Fs : float, int
        The sampling rate of the signal
    cfs : list of floats, ints
        The center frequencies that we'll use for initial filtering.

    OUTPUTS
    --------
    out : array, [tpts, len(cfs)]
        The auditory spectrogram of the signal
    '''
    Fs = float(Fs) * Hz
    snd = hears.Sound(sig, samplerate=Fs)

    # Cochlear model
    snd_filt = hears.Gammatone(snd, cfs)

    # Hair cell stages
    clp = lambda x: np.clip(x, 0, np.inf)
    snd_hwr = hears.FunctionFilterbank(snd_filt, clp)
    snd_lpf = hears.LowPass(snd_hwr, 2000)

    # Lateral inhibitory network
    rands = lambda x: sigp.roll_and_subtract(x, hwr=True)
    snd_lin = hears.FunctionFilterbank(snd_lpf, rands)

    # Initial processing
    out = snd_lin.process()

    # Time integration.
    # The time constant is ~8 ms, which we approximate with a 12 ms
    # half-life (converted to samples below).
    half_pt = (12. / 1000) * Fs
    # Note: pd.stats.moments.ewma was removed from pandas; DataFrame.ewm()
    # is the current equivalent.
    out = pd.DataFrame(out).ewm(halflife=float(half_pt)).mean().values
    return out
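For context, a hypothetical usage sketch follows. The WAV filename is made up, scipy is assumed to be available for reading it, and the module-level dependencies of extract_nsl_spectrogram (Brian hears, numpy, pandas, the sigp helper module) are assumed to be importable; the docstring recommends ERB-spaced center frequencies, approximated here with np.logspace.

import numpy as np
from scipy.io import wavfile

Fs, sig = wavfile.read('speech.wav')                     # mono waveform (assumed)
cfs = np.logspace(np.log10(100.), np.log10(8000.), 32)   # 32 center freqs in Hz
spec = extract_nsl_spectrogram(sig.astype(float), Fs, cfs)
# spec has shape (n_times, len(cfs)): time on axis 0, frequency on axis 1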
Example #2
def rectify(filterbank, scale=3):
    """Half wave rectify and scale."""
    def _bm2ihc(x, scale=scale):
        return scale * np.clip(x, 0, np.inf)

    ihc = bh.FunctionFilterbank(filterbank, _bm2ihc)
    ihc.cached_buffer_end = 0  # Fails if we don't do this...
    return ihc
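A hypothetical usage sketch, assuming the `bh` used above is Brian hears (imported here as brian2hears) and that a short noise burst stands in for a real stimulus:

import numpy as np
from brian2 import Hz, ms
import brian2hears as bh

sound = bh.whitenoise(200 * ms)                 # 200 ms of white noise
cfs = bh.erbspace(100 * Hz, 8000 * Hz, 32)      # 32 ERB-spaced center frequencies
fb = bh.Gammatone(sound, cfs)                   # cochlear filterbank
ihc = rectify(fb, scale=3)                      # half-wave rectify and scale
out = ihc.process()                             # array, shape (n_samples, 32)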
Example #3
def compress(filterbank, scale=3):
    """Half wave rectify and compress with a 1/3 power law."""
    def _bm2ihc(x, scale=scale):
        return scale * np.clip(x, 0, np.inf)**(1. / 3.)

    ihc = bh.FunctionFilterbank(filterbank, _bm2ihc)
    ihc.cached_buffer_end = 0  # Fails if we don't do this...
    return ihc
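To see what the 1/3 power law does relative to plain rectification, here is a small NumPy-only illustration of the two nonlinearities applied to the same samples (the values are arbitrary):

import numpy as np

x = np.array([-0.5, 0.0, 0.001, 0.125, 1.0])
rectified = 3 * np.clip(x, 0, np.inf)                  # rectify(): linear scaling
compressed = 3 * np.clip(x, 0, np.inf) ** (1. / 3.)    # compress(): cube-root law
# rectified  -> [0.     0.     0.003  0.375  3.   ]
# compressed -> [0.     0.     0.3    1.5    3.   ]
# The cube root boosts small amplitudes relative to large ones.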
def gammatone(sound, freqs, dt, b=1.019):
    """Gammatone filterbank processed in buffers of `dt` seconds, followed by
    half-wave rectification and 1/3-power-law compression."""
    duration = int(dt * sound.samplerate)  # buffer length in samples
    fb = bh.Gammatone(sound, freqs, b=b)
    fb.buffersize = duration
    ihc = bh.FunctionFilterbank(
        fb, (lambda x: 3 * np.clip(x, 0, np.inf)**(1. / 3.)))
    ihc.cached_buffer_end = 0
    return ihc
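The inline lambda here is the same nonlinearity as compress() in Example #3 with scale=3; a hypothetical refactor reusing that helper (and the same `bh` module) could read:

def gammatone_compressed(sound, freqs, dt, b=1.019):
    """Gammatone filterbank in buffers of `dt` seconds, then the cube-root
    compression from compress()."""
    # Assumes `bh` and compress() from the examples above are in scope.
    duration = int(dt * sound.samplerate)  # buffer length in samples
    fb = bh.Gammatone(sound, freqs, b=b)
    fb.buffersize = duration
    return compress(fb, scale=3)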
Example #5
def spectrogram_nsl(sig, sfreq, cfs, comp_kind='exp', comp_fac=3):
    '''Extract a cochlear / mid-brain spectrogram.

    Implements a version of the "wav2aud" function in the NSL toolbox.
    Uses Brian hears to chain most of the computations to be done online.

    This is effectively what it does:
        1. Gammatone filterbank at provided cfs (erbspace recommended)
        2. Half-wave rectification
        3. Low-pass filtering at 2 kHz
        4. First-order derivative across frequencies (basically just
            taking the diff of successive frequencies to sharpen output)
        5. Half-wave rectification #2
        6. An exponentially-decaying average, with time constant chosen
            to be similar to that reported in the NSL toolbox (8ms)

    Parameters
    ----------
    sig : numpy array, shape (n_times,)
        The auditory waveform
    sfreq : int
        The sampling frequency of the sound waveform
    cfs : array, shape (n_freqs,)
        The center frequencies to be extracted
    comp_kind : string
        The kind of compression to use. See `compress_signal`
    comp_fac : int
        The compression factor to pass to `compress_signal`.

    Returns
    -------
    spec : array, shape (n_frequencies, n_times)
        The extracted auditory spectrogram.
    '''
    sfreq = float(sfreq)*Hz
    snd = hears.Sound(sig, samplerate=sfreq)

    # ---- Cochlear model
    print('Pulling frequencies with cochlear model')
    snd_filt = hears.Gammatone(snd, cfs)

    # ---- Hair cell stages
    # Halfwave Rectify
    print('Half-wave rectification')
    clp = lambda x: np.clip(x, 0, np.inf)
    snd_hwr = hears.FunctionFilterbank(snd_filt, clp)

    # Non-linear compression
    print('Non-linear compression and low-pass filter')
    comp = lambda x: compress_signal(x, comp_kind, comp_fac)
    snd_cmp = hears.FunctionFilterbank(snd_hwr, comp)

    # Lowpass filter
    snd_lpf = hears.LowPass(snd_cmp, 2000)

    # ---- Lateral inhibitory network
    print('Lateral inhibitory network')
    rands = lambda x: roll_and_subtract(x, hwr=True)
    snd_lin = hears.FunctionFilterbank(snd_lpf, rands)

    # Initial processing
    out = snd_lin.process()

    # Time integration.
    print('leaky integration')
    for i in range(out.shape[1]):
        out[:, i] = leaky_integrate(out[:, i], time_const=8,
                                    sfreq=float(sfreq))
    return out.T
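Neither roll_and_subtract nor leaky_integrate is shown in this example. Minimal sketches consistent with steps 4-6 of the docstring (an across-frequency first difference with optional half-wave rectification, and an exponentially decaying average with an 8 ms time constant) might look like the following; the actual helpers in the source project may differ.

import numpy as np

def roll_and_subtract(x, hwr=False):
    """Difference each frequency channel against its neighbor (step 4),
    optionally half-wave rectifying the result (step 5)."""
    out = x - np.roll(x, 1, axis=-1)
    out[..., 0] = x[..., 0]            # lowest channel has no neighbor below
    if hwr:
        out = np.clip(out, 0, np.inf)
    return out

def leaky_integrate(x, time_const=8., sfreq=44100.):
    """First-order leaky integrator (step 6): an exponentially decaying
    average with `time_const` given in milliseconds."""
    x = np.asarray(x, dtype=float)
    alpha = 1. - np.exp(-1. / (sfreq * time_const / 1000.))
    out = np.zeros_like(x)
    acc = 0.
    for i, xi in enumerate(x):
        acc += alpha * (xi - acc)
        out[i] = acc
    return out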