Example #1
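All of these examples share the same dependencies. A minimal preamble might look like the sketch below; `utils` (providing `nextpow2`) and `features` are project-local modules, and `spectrogram`, `gaussianWdw2d` and the `load_params()`/`load_auditory_params()`/`load_static_params()` helpers are assumed to be defined elsewhere in the same package:

import math

import numpy as np
from scipy import signal

# Assumed project-local imports (not standard libraries):
# from . import utils, features
# from .helpers import spectrogram, gaussianWdw2d, load_params, \
#     load_auditory_params, load_static_params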
def mps(wavtemp,
        audio_fs=44100,
        window_size=256,
        frame_step=128,
        duration=0.25,
        duration_cut_decay=0.05,
        resampling_fs=16000,
        offset=0):
    spectrogram_ = np.transpose(
        spectrogram(wavtemp, audio_fs, window_size, frame_step, duration,
                    duration_cut_decay, resampling_fs, offset))
    N, M = spectrogram_.shape
    # zero-pad both axes to twice the next power of two for the 2-D FFT
    N2 = 2 * 2**utils.nextpow2(N)
    M2 = 2 * 2**utils.nextpow2(M)
    Y = np.zeros((N2, M2), dtype=complex)  # np.complex_ was removed in NumPy 2.0
    # first Fourier transform (w.r.t. the frequency axis)
    for n in range(N):
        Y[n, :] = np.fft.fft(spectrogram_[n, :], M2)
    # second Fourier transform (w.r.t. the temporal axis)
    for m in range(M2):
        Y[:, m] = np.fft.fft(Y[:N, m], N2)
    # magnitude of the non-redundant half-plane
    mps_ = np.absolute(Y[:, :M2 // 2])
    return mps_
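A quick smoke test for this variant, assuming the package-local spectrogram() helper above; the test tone is illustrative only:

fs = 44100
t = np.arange(int(0.25 * fs)) / fs
tone = np.sin(2 * np.pi * 440 * t)  # 440 Hz tone, 0.25 s
mps_image = mps(tone, audio_fs=fs)  # modulation power spectrum (magnitude)
print(mps_image.shape)              # (N2, M2 // 2) after zero-padding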
Example #2
def mps(wavtemp,
        audio_fs=44100,
        duration=0.25,
        duration_cut_decay=0.05,
        resampling_fs=16000,
        sr_time=250,
        offset=0):
    # Pre-processing (zero-padding, raised-cosine fade-out, normalisation,
    # resampling) and the NSL-style auditory front end are handled inside
    # spectrogram(); the former load_static_params() values are exposed as
    # keyword arguments instead.
    auditory_spectrogram_ = spectrogram(wavtemp, audio_fs, duration,
                                        duration_cut_decay, resampling_fs,
                                        sr_time, offset)
    strf_args = {
        'num_channels': 128,
        'num_ch_oct': 24,
        'sr_time': sr_time,
        'nfft_rate': 2 * 2**utils.nextpow2(auditory_spectrogram_.shape[0]),
        'nfft_scale': 2 * 2**utils.nextpow2(auditory_spectrogram_.shape[1]),
        'KIND': 2
    }
    # Spectro-temporal modulation analysis, based on
    # Hemery & Aucouturier (2015), Frontiers in Computational Neuroscience
    mod_scale, phase_scale, _, _ = features.spectrum2scaletime(
        auditory_spectrogram_, **strf_args)
    mps_, phase_scale_rate, _, _ = features.scaletime2scalerate(
        mod_scale * np.exp(1j * phase_scale), **strf_args)
    return mps_
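A hedged call sketch for the auditory-model variant (same assumptions about the spectrogram() and features helpers; the noise input is arbitrary):

rng = np.random.default_rng(0)
noise = rng.standard_normal(11025)  # 0.25 s at 44.1 kHz
mps_image = mps(noise, audio_fs=44100, sr_time=250)  # scale-rate magnitude map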
Example #3
def mps(wavtemp, fs):
    auditory_params = load_auditory_params()
    new_fs = auditory_params['newFs']
    durationCut = auditory_params['durationCut']
    durationRCosDecay = auditory_params['durationRCosDecay']
    sr_time = auditory_params['sr_time']

    wavtemp = np.r_[wavtemp, np.zeros(new_fs)]

    # truncate to durationCut seconds and apply a raised-cosine fade-out
    if wavtemp.shape[0] > math.floor(durationCut * fs):
        wavtemp = wavtemp[:int(durationCut * fs)]
        wavtemp[wavtemp.shape[0] - int(fs * durationRCosDecay):] = wavtemp[
            wavtemp.shape[0] - int(
                fs * durationRCosDecay):] * utils.raised_cosine(
                    np.arange(int(fs * durationRCosDecay)), 0,
                    int(fs * durationRCosDecay))

    wavtemp = (wavtemp / 1.01) / (np.max(wavtemp) + np.finfo(float).eps)
    wavtemp = signal.resample(wavtemp, int(wavtemp.shape[0] / fs * new_fs))

    waveform2auditoryspectrogram_args = {
        'frame_length': 1000 / sr_time,  # frame length in ms
        'time_constant': 8,
        'compression_factor': -2,
        'octave_shift': math.log2(new_fs / new_fs),  # log2(1) = 0, no shift
        'filt': 'p',
        'VERB': 0
    }
    stft = features.waveform2auditoryspectrogram(
        wavtemp, **waveform2auditoryspectrogram_args)

    strf_args = {
        'num_channels': 128,
        'num_ch_oct': 24,
        'sr_time': sr_time,
        'nfft_rate': 2 * 2**utils.nextpow2(stft.shape[0]),
        'nfft_scale': 2 * 2**utils.nextpow2(stft.shape[1]),
        'KIND': 2
    }
    # Spectro-temporal modulation analysis, based on
    # Hemery & Aucouturier (2015), Frontiers in Computational Neuroscience
    mod_scale, phase_scale, _, _ = features.spectrum2scaletime(
        stft, **strf_args)

    # Scales vs. Time => Scales vs. Rates

    repres, phase_scale_rate, _, _ = features.scaletime2scalerate(
        mod_scale * np.exp(1j * phase_scale), **strf_args)
    # repres = repres[:, :int(repres.shape[1] / 2)]
    return repres
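Assuming the parameter file behind load_auditory_params() is in place, usage reduces to passing a waveform and its sample rate:

fs = 44100
wav = np.random.default_rng(1).standard_normal(fs)  # 1 s of noise
repres = mps(wav, fs)  # scale x rate modulation representation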
Example #4
def mps(wavtemp, fs):
    # load analysis parameters
    params = load_params()
    new_fs = params['newFs']
    windowSize = params['windowSize']
    frameStep = params['frameStep']
    durationCut = params['durationCut']
    durationRCosDecay = params['durationRCosDecay']

    wavtemp = np.r_[wavtemp, np.zeros(16000)]

    # truncate to durationCut seconds and apply a raised-cosine fade-out
    if wavtemp.shape[0] > math.floor(durationCut * fs):
        wavtemp = wavtemp[:int(durationCut * fs)]
        wavtemp[wavtemp.shape[0] - int(fs * durationRCosDecay):] = wavtemp[
            wavtemp.shape[0] - int(
                fs * durationRCosDecay):] * utils.raised_cosine(
                    np.arange(int(fs * durationRCosDecay)), 0,
                    int(fs * durationRCosDecay))
    wavtemp = (wavtemp / 1.01) / (np.max(wavtemp) + np.finfo(float).eps)
    wavtemp = signal.resample(wavtemp, int(wavtemp.shape[0] / fs * new_fs))
    wavtemp = np.r_[np.zeros(1000), wavtemp, np.zeros(1000)]

    spectrogram__ = features.complexSpectrogram(wavtemp, windowSize, frameStep)
    repres = np.transpose(
        np.abs(spectrogram__[:int(spectrogram__.shape[0] / 2), :]))

    N = repres.shape[0]
    M = repres.shape[1]
    # zero-padding along the spectral and temporal axes
    N1 = 2**utils.nextpow2(repres.shape[0])
    N2 = 2 * N1
    M1 = 2**utils.nextpow2(repres.shape[1])
    M2 = 2 * M1

    Y = np.zeros((N2, M2), dtype=complex)  # np.complex_ was removed in NumPy 2.0

    # first Fourier transform (w.r.t. the frequency axis)
    for n in range(N):
        R1 = np.fft.fft(repres[n, :], M2)
        Y[n, :] = R1[:M2]

    # second Fourier transform (w.r.t. the temporal axis)
    for m in range(M2):
        R1 = np.fft.fft(Y[:N, m], N2)
        Y[:, m] = R1

    # magnitude of the non-redundant half-plane
    repres = np.absolute(Y[:, :int(Y.shape[1] / 2)])

    return repres
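The call pattern is the same, here assuming load_params() supplies newFs, windowSize, frameStep and the duration settings:

fs = 44100
wav = np.random.default_rng(2).standard_normal(fs)
repres = mps(wav, fs)
print(repres.shape)  # zero-padded modulation power spectrum, (N2, M2 // 2)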
Example #5
def strf(wavtemp, fs):

    auditory_params = load_auditory_params()
    scales = auditory_params['scales']
    rates = auditory_params['rates']
    durationCut = auditory_params['durationCut']
    durationRCosDecay = auditory_params['durationRCosDecay']
    new_fs = auditory_params['newFs']
    sr_time = auditory_params['sr_time']

    wavtemp = np.r_[wavtemp, np.zeros(new_fs)]

    # truncate to durationCut seconds with a raised-cosine fade-out;
    # otherwise zero-pad up to durationCut seconds (see the else branch)
    if wavtemp.shape[0] > math.floor(durationCut * fs):
        wavtemp = wavtemp[:int(durationCut * fs)]
        wavtemp[wavtemp.shape[0] - int(fs * durationRCosDecay):] = wavtemp[
            wavtemp.shape[0] - int(
                fs * durationRCosDecay):] * utils.raised_cosine(
                    np.arange(int(fs * durationRCosDecay)), 0,
                    int(fs * durationRCosDecay))
    else:
        wavtemp = np.r_[wavtemp,
                        np.zeros(
                            np.abs(len(wavtemp) - int(durationCut * fs)) + 10)]

    wavtemp = (wavtemp / 1.01) / (np.max(wavtemp) + np.finfo(float).eps)
    wavtemp = signal.resample(wavtemp, int(wavtemp.shape[0] / fs * new_fs))

    # Peripheral auditory model (from the NSL toolbox); the NSL defaults are
    # 128 channels, 24 channels/octave, and a 125 Hz frame rate (sr_time).

    waveform2auditoryspectrogram_args = {
        'frame_length': 1000 / sr_time,  # frame length in ms
        'time_constant': 8,
        'compression_factor': -2,
        'octave_shift': math.log2(new_fs / new_fs),  # log2(1) = 0, no shift
        'filt': 'p',
        'VERB': 0
    }
    # compression_factor semantics:  0 -> y = (x > 0) (boolean, full
    # compression); -1 -> y = max(x, 0) (half-wave rectifier); -2 -> y = x
    # (linear, used here)
    stft = features.waveform2auditoryspectrogram(
        wavtemp, **waveform2auditoryspectrogram_args)
    
    strf_args = {
        'num_channels': 128,
        'num_ch_oct': 24,
        'sr_time': sr_time,
        'nfft_rate': 2 * 2**utils.nextpow2(stft.shape[0]),
        'nfft_scale': 2 * 2**utils.nextpow2(stft.shape[1]),
        'KIND': 2
    }
    # Spectro-temporal modulation analysis, based on
    # Hemery & Aucouturier (2015), Frontiers in Computational Neuroscience
    mod_scale, phase_scale, _, _ = features.spectrum2scaletime(
        stft, **strf_args)

    # Scales vs. Time => Scales vs. Rates
    scale_rate, phase_scale_rate, _, _ = features.scaletime2scalerate(
        mod_scale * np.exp(1j * phase_scale), **strf_args)
    cortical_rep = features.scalerate2cortical(
        stft, scale_rate, phase_scale_rate, scales, rates, **strf_args)
    return cortical_rep
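A sketch of running the full cortical model, assuming load_auditory_params() provides the scales and rates grids expected by scalerate2cortical():

fs = 44100
wav = np.random.default_rng(3).standard_normal(fs)
cortical_rep = strf(wav, fs)  # 4-D spectro-temporal receptive-field output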
Example #6
def strf(wavtemp,
         audio_fs=44100,
         window_size=256,
         frame_step=128,
         duration=0.25,
         duration_cut_decay=0.05,
         resampling_fs=16000,
         offset=0):
    spectrogram_ = np.transpose(
        spectrogram(wavtemp, audio_fs, window_size, frame_step, duration,
                    duration_cut_decay, resampling_fs, offset))
    N, M = spectrogram_.shape
    # zero-pad both axes to twice the next power of two for the 2-D FFT
    N2 = 2 * 2**utils.nextpow2(N)
    M2 = 2 * 2**utils.nextpow2(M)
    Y = np.zeros((N2, M2), dtype=complex)  # np.complex_ was removed in NumPy 2.0
    # first Fourier transform (w.r.t. the frequency axis)
    for n in range(N):
        Y[n, :] = np.fft.fft(spectrogram_[n, :], M2)
    # second Fourier transform (w.r.t. the temporal axis)
    for m in range(M2):
        Y[:, m] = np.fft.fft(Y[:N, m], N2)
    # magnitude of the non-redundant half-plane
    mps_ = np.absolute(Y[:, :M2 // 2])
    maxRate = resampling_fs / frame_step / 2  # max rate value
    maxScale = window_size / (resampling_fs * 1e-3) / 2  # max scale value
    ratesVector = np.linspace(-maxRate + 5, maxRate - 5, num=22)
    deltaRates = ratesVector[1] - ratesVector[0]
    scalesVector = np.linspace(0, maxScale - 5, num=11)
    deltaScales = scalesVector[2] - scalesVector[1]

    overlapRate = .75
    overlapScale = .75
    stdRate = deltaRates / 2 * (overlapRate + 1)
    stdScale = deltaScales / 2 * (overlapScale + 1)

    maxRatePoints = int(len(mps_) / 2)
    maxScalePoints = mps_.shape[1]
    stdRatePoints = maxRatePoints * stdRate / maxRate
    stdScalePoints = maxScalePoints * stdScale / maxScale
    strf_ = np.zeros((N, M, len(scalesVector), len(ratesVector)))
    for iRate in range(len(ratesVector)):
        rateCenter = ratesVector[iRate]
        # rate center in points; negative rates map onto the upper
        # (conjugate-symmetric) half of the FFT axis
        if rateCenter <= 0:
            rateCenterPoint = maxRatePoints * (2 - np.abs(rateCenter) / maxRate)
        else:
            rateCenterPoint = maxRatePoints * np.abs(rateCenter) / maxRate

        for iScale in range(len(scalesVector)):
            scaleCenter = scalesVector[iScale]
            # scale center (in points)
            scaleCenterPoint = maxScalePoints * np.abs(scaleCenter) / maxScale
            filterPoint = gaussianWdw2d(
                rateCenterPoint, stdRatePoints, scaleCenterPoint,
                stdScalePoints,
                np.linspace(1, 2 * maxRatePoints, num=2 * maxRatePoints),
                np.linspace(1, maxScalePoints, num=maxScalePoints))
            MPS_filtered = mps_ * filterPoint
            MPS_quadrantPoint = np.c_[MPS_filtered, np.fliplr(MPS_filtered)]
            stftRec = np.fft.ifft(np.transpose(np.fft.ifft(MPS_quadrantPoint)))
            ll = len(stftRec)
            stftRec = np.transpose(np.r_[stftRec[:M, :N],
                                         stftRec[ll - M:ll, :N]])
            # !! taking the magnitude (not the real part)
            strf_[:, :, iScale,
                  iRate] = np.abs(stftRec[:, :int(stftRec.shape[1] / 2)])

    return strf_
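A minimal sketch for this Gaussian-filterbank variant; the output shape follows directly from the code above (11 scale centers, 22 rate centers):

fs = 44100
t = np.arange(int(0.25 * fs)) / fs
wav = np.sin(2 * np.pi * 220 * t)
strf_rep = strf(wav, audio_fs=fs)
print(strf_rep.shape)  # (N, M, 11, 22): spectrogram axes x scales x rates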
Example #7
def strf(wavtemp,
         audio_fs=44100,
         duration=0.25,
         duration_cut_decay=0.05,
         resampling_fs=16000,
         sr_time=250,
         offset=0):
    auditory_params = load_static_params()
    scales = auditory_params['scales']
    rates = auditory_params['rates']
    # As in the mps() variant above, pre-processing (padding, fade-out,
    # normalisation, resampling) and the NSL-style peripheral auditory model
    # are handled inside spectrogram(); only the scales and rates grids are
    # read from the static parameters here.
    auditory_spectrogram_ = spectrogram(wavtemp, audio_fs, duration,
                                        duration_cut_decay, resampling_fs,
                                        sr_time, offset)
    strf_args = {
        'num_channels': 128,
        'num_ch_oct': 24,
        'sr_time': sr_time,
        'nfft_rate': 2 * 2**utils.nextpow2(auditory_spectrogram_.shape[0]),
        'nfft_scale': 2 * 2**utils.nextpow2(auditory_spectrogram_.shape[1]),
        'KIND': 2
    }
    # Spectro-temporal modulation analysis, based on
    # Hemery & Aucouturier (2015), Frontiers in Computational Neuroscience
    mod_scale, phase_scale, _, _ = features.spectrum2scaletime(
        auditory_spectrogram_, **strf_args)

    # Scales vs. Time => Scales vs. Rates
    scale_rate, phase_scale_rate, _, _ = features.scaletime2scalerate(
        mod_scale * np.exp(1j * phase_scale), **strf_args)
    strf_ = features.scalerate2cortical(auditory_spectrogram_, scale_rate,
                                        phase_scale_rate, scales, rates,
                                        **strf_args)
    return strf_
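As above, a hedged usage sketch; only the scales/rates grids come from load_static_params(), everything else is a keyword argument:

fs = 44100
wav = np.random.default_rng(4).standard_normal(fs)
strf_rep = strf(wav, audio_fs=fs, sr_time=250)  # 4-D cortical representation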
Example #8
def strf(wavtemp, fs):
    # load analysis parameters
    params = load_params()
    new_fs = params['newFs']
    windowSize = params['windowSize']
    frameStep = params['frameStep']
    durationCut = params['durationCut']
    durationRCosDecay = params['durationRCosDecay']

    wavtemp = np.r_[wavtemp, np.zeros(16000)]

    # truncate to durationCut seconds and apply a raised-cosine fade-out
    if wavtemp.shape[0] > math.floor(durationCut * fs):
        wavtemp = wavtemp[:int(durationCut * fs)]
        wavtemp[wavtemp.shape[0] - int(fs * durationRCosDecay):] = wavtemp[
            wavtemp.shape[0] - int(
                fs * durationRCosDecay):] * utils.raised_cosine(
                    np.arange(int(fs * durationRCosDecay)), 0,
                    int(fs * durationRCosDecay))
    wavtemp = (wavtemp / 1.01) / (np.max(wavtemp) + np.finfo(float).eps)
    wavtemp = signal.resample(wavtemp, int(wavtemp.shape[0] / fs * new_fs))
    wavtemp = np.r_[np.zeros(1000), wavtemp, np.zeros(1000)]

    spectrogram__ = features.complexSpectrogram(wavtemp, windowSize, frameStep)
    repres = np.transpose(
        np.abs(spectrogram__[:int(spectrogram__.shape[0] / 2), :]))
    N = repres.shape[0]
    M = repres.shape[1]
    # zero-padding along the spectral and temporal axes
    N1 = 2**utils.nextpow2(repres.shape[0])
    N2 = 2 * N1
    M1 = 2**utils.nextpow2(repres.shape[1])
    M2 = 2 * M1

    Y = np.zeros((N2, M2))  # real-valued: this variant keeps only magnitudes

    # first Fourier transform (w.r.t. the frequency axis); the magnitude is
    # taken here, so phase is discarded before the second transform
    for n in range(N):
        R1 = np.abs(np.fft.fft(repres[n, :], M2))
        Y[n, :] = R1[:M2]

    # second Fourier transform (w.r.t. the temporal axis)
    for m in range(M2):
        R1 = np.abs(np.fft.fft(Y[:N, m], N2))
        Y[:, m] = R1

    MPS_repres = np.abs(Y[:, :int(Y.shape[1] / 2)])

    # Fourier-domain STRF. The rate/scale bounds refer to the resampled
    # signal, so new_fs is used rather than the original fs (cf. the
    # resampling_fs-based variant above).
    maxRate = new_fs / frameStep / 2  # max rate value
    maxScale = windowSize / (new_fs * 1e-3) / 2  # max scale value
    ratesVector = np.linspace(-maxRate + 5, maxRate - 5, num=22)
    deltaRates = ratesVector[1] - ratesVector[0]
    scalesVector = np.linspace(0, maxScale - 5, num=11)
    deltaScales = scalesVector[2] - scalesVector[1]

    overlapRate = .75
    overlapScale = .75
    stdRate = deltaRates / 2 * (overlapRate + 1)
    stdScale = deltaScales / 2 * (overlapScale + 1)

    maxRatePoints = int(len(MPS_repres) / 2)
    maxScalePoints = MPS_repres.shape[1]
    stdRatePoints = maxRatePoints * stdRate / maxRate
    stdScalePoints = maxScalePoints * stdScale / maxScale

    STRF_repres = np.zeros((N, M, len(ratesVector), len(scalesVector)))
    for iRate in range(len(ratesVector)):
        rateCenter = ratesVector[iRate]
        # rate center in points; negative rates map onto the upper
        # (conjugate-symmetric) half of the FFT axis
        if rateCenter <= 0:
            rateCenterPoint = maxRatePoints * (2 - np.abs(rateCenter) / maxRate)
        else:
            rateCenterPoint = maxRatePoints * np.abs(rateCenter) / maxRate

        for iScale in range(len(scalesVector)):
            scaleCenter = scalesVector[iScale]
            # scale center (in points)
            scaleCenterPoint = maxScalePoints * np.abs(scaleCenter) / maxScale
            filterPoint = gaussianWdw2d(
                rateCenterPoint, stdRatePoints, scaleCenterPoint,
                stdScalePoints,
                np.linspace(1, 2 * maxRatePoints, num=2 * maxRatePoints),
                np.linspace(1, maxScalePoints, num=maxScalePoints))

            MPS_filtered = MPS_repres * filterPoint
            MPS_quadrantPoint = np.c_[MPS_filtered, np.fliplr(MPS_filtered)]
            stftRec = np.fft.ifft(np.transpose(np.fft.ifft(MPS_quadrantPoint)))
            ll = len(stftRec)
            stftRec = np.transpose(
                np.r_[stftRec[:M, :N], stftRec[ll - M:ll, :N]])
            # !! taking the magnitude (not the real part)
            STRF_repres[:, :, iRate, iScale] = np.abs(
                stftRec[:, :int(stftRec.shape[1] / 2)])

    return STRF_repres
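A final sketch for this variant; note its axis order is (rates, scales), the reverse of the filterbank strf() above:

fs = 44100
wav = np.random.default_rng(5).standard_normal(fs)
STRF = strf(wav, fs)
print(STRF.shape)  # (N, M, 22, 11): spectrogram axes x 22 rates x 11 scales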