Example 1
def resample(s, up, down, axis=0, fc='nn', **kwargs):
    r"""
    Resample a signal from rate "down" to rate "up"

    Parameters
    ----------
    s : array_like
        The data to be resampled.
    up : int
        The upsampling factor.
    down : int
        The downsampling factor.
    axis : int, optional
        The axis of `s` that is resampled. Default is 0.
    As : float, optional
        Stopband attenuation in dB.
    N : float, optional
        Filter order (length of the impulse response in samples).
    df : float, optional
        Transition band width, normalized to Nyquist (fs/2).
    beta : float, optional
        The beta parameter of the Kaiser window.

    Returns
    -------
    resampled_x : array
        The resampled array.

    Notes
    -----
    The function keeps a global cache of filters, since they are
    determined entirely by up, down, fc, beta, and N.  If a filter
    has previously been used it is looked up instead of being
    recomputed.
    """

    # from design parameters, find the generative parameters
    N, beta, As = disambiguate_params(**kwargs)

    # check if a resampling filter with the chosen parameters already exists
    params = (up, down, fc, beta, N)
    if params in _precomputed_filters:
        # if so, use it.
        filt = _precomputed_filters[params]
    else:
        # if not, generate filter, store it, use it
        filt = compute_filt(up, down, fc, beta=beta, N=N)
        _precomputed_filters[params] = filt

    return sig.resample_poly(s, up, down, window=np.array(filt), axis=axis)
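
A minimal usage sketch (assuming the module-level _precomputed_filters cache and the disambiguate_params/compute_filt helpers shown above are available; the design values are illustrative):

import numpy as np

x = np.random.randn(48000)
# resample by 2/3 (e.g. 48 kHz -> 32 kHz), requesting 60 dB stopband attenuation
y = resample(x, up=2, down=3, As=60)
# a second call with the same parameters reuses the cached filter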
Example 2
def resample_to_24000_hz(samples, input_rate):
    
    case = _24000_HZ_SPECIAL_CASES.get(float(input_rate))

    if case is not None:
        # input rate is a special case for which we can resample
        # efficiently using a polyphase filter
        
        up, down, filter_ = case
        # print('Resampling from {} Hz to 24000 Hz...'.format(input_rate))
        return signal.resample_poly(samples, up, down, window=filter_)
    
    else:
        return resampy.resample(samples, input_rate, 24000)
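
A sketch of how the _24000_HZ_SPECIAL_CASES table could be built (hypothetical entries; the real filters were presumably designed offline). Each key is an input rate and each value the (up, down, filter) triple unpacked above:

from scipy import signal

_24000_HZ_SPECIAL_CASES = {
    # 22050 Hz -> 24000 Hz: the ratio 24000/22050 reduces to 160/147
    22050.0: (160, 147, signal.firwin(2001, 1 / 160, window=('kaiser', 5.0))),
    # 32000 Hz -> 24000 Hz: the ratio 24000/32000 reduces to 3/4
    32000.0: (3, 4, signal.firwin(2001, 1 / 4, window=('kaiser', 5.0))),
}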
Example 3
def _resample_nnresample2(s, up, down, beta=5.0, L=16001, axis=0):
    # type: (np.ndarray, int, int, float, int, int) -> np.ndarray
    """
    Taken from https://github.com/jthiem/nnresample

    Resample a signal from rate "down" to rate "up"
    
    Parameters
    ----------
    s : array_like
        The data to be resampled.
    up : int
        The upsampling factor.
    down : int
        The downsampling factor.
    beta : float
        Beta factor for Kaiser window.  Determines tradeoff between
        stopband attenuation and transition band width
    L : int
        FIR filter order.  Determines stopband attenuation.  The higher
        the better, at the cost of complexity.
    axis : int, optional
        The axis of `x` that is resampled. Default is 0.
        
    Returns
    -------
    resampled_x : array
        The resampled array.
        
    Notes
    -----
    The function keeps a global cache of filters, since they are
    determined entirely by up, down, beta, and L.  If a filter
    has previously been used it is looked up instead of being
    recomputed.
    """       
    # check if a resampling filter with the chosen parameters already exists
    params = (up, down, beta, L)
    if params in _precomputed_filters:
        # if so, use it.
        filt = _precomputed_filters[params]
    else:
        # if not, generate filter, store it, use it
        filt = _nnresample_compute_filt(up, down, beta, L)
        _precomputed_filters[params] = filt
    return sig.resample_poly(s, up, down, window=np.array(filt), axis=axis)
Example 4
def resample_poly(samples, input_rate, output_rate, N):
    
    # Ensure that `input_rate` and `output_rate` are integers, since `math.gcd`
    # rejects floats.
    input_rate = int(input_rate)
    output_rate = int(output_rate)
    
    gcd = math.gcd(input_rate, output_rate)
    up = output_rate // gcd
    down = input_rate // gcd
    
    # The following filter design code is from the `resample_poly` function
    # of `scipy.signal.signaltools`, but with the fixed factor of 10 in the
    # `half_len` calculation replaced by `N`.
    max_rate = max(up, down)
    f_c = 1. / max_rate  # cutoff of FIR filter (rel. to Nyquist)
    half_len = N * max_rate  # reasonable cutoff for our sinc-like function
    h = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0))
    
    return signal.resample_poly(samples, up, down, window=h)
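
A hedged usage sketch (N trades filter length against transition sharpness; N=10 reproduces scipy's default design):

import numpy as np

x = np.random.randn(44100)
y = resample_poly(x, input_rate=44100, output_rate=24000, N=10)  # 44.1 kHz -> 24 kHz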
Example 5
    def __init__(self, timeaxis, timecourse, padvalue=30.0, upsampleratio=100, doplot=False, debug=False,
                 method='univariate'):
        self.upsampleratio = upsampleratio
        self.padvalue = padvalue
        self.initstep = timeaxis[1] - timeaxis[0]
        self.initstart = timeaxis[0]
        self.initend = timeaxis[-1]
        self.hiresstep = self.initstep / np.float64(self.upsampleratio)
        self.hires_x = np.arange(timeaxis[0] - self.padvalue, self.initstep * len(timeaxis) + self.padvalue,
                                 self.hiresstep)
        self.hiresstart = self.hires_x[0]
        self.hiresend = self.hires_x[-1]
        if method == 'poly':
            self.hires_y = 0.0 * self.hires_x
            self.hires_y[int(self.padvalue // self.hiresstep) + 1:-(int(self.padvalue // self.hiresstep) + 1)] = \
                signal.resample_poly(timecourse, int(self.upsampleratio * 10), 10)
        elif method == 'fourier':
            self.hires_y = 0.0 * self.hires_x
            self.hires_y[int(self.padvalue // self.hiresstep) + 1:-(int(self.padvalue // self.hiresstep) + 1)] = \
                signal.resample(timecourse, self.upsampleratio * len(timeaxis))
        else:
            self.hires_y = doresample(timeaxis, timecourse, self.hires_x, method=method)
        self.hires_y[:int(self.padvalue // self.hiresstep)] = self.hires_y[int(self.padvalue // self.hiresstep)]
        self.hires_y[-int(self.padvalue // self.hiresstep):] = self.hires_y[-int(self.padvalue // self.hiresstep)]
        if debug:
            print('fastresampler __init__:')
            print('    padvalue:', self.padvalue)
            print('    initstep, hiresstep:', self.initstep, self.hiresstep)
            print('    initial axis limits:', self.initstart, self.initend)
            print('    hires axis limits:', self.hiresstart, self.hiresend)

        # self.hires_y[:int(self.padvalue // self.hiresstep)] = 0.0
        # self.hires_y[-int(self.padvalue // self.hiresstep):] = 0.0
        if doplot:
            fig = pl.figure()
            ax = fig.add_subplot(111)
            ax.set_title('fastresampler initial timecourses')
            pl.plot(timeaxis, timecourse, self.hires_x, self.hires_y)
            pl.legend(('input', 'hires'))
            pl.show()
Example 6
def compute_irasa(sig, fs=None, f_range=(1, 30), hset=None, **spectrum_kwargs):
    """Separate the aperiodic and periodic components using the IRASA method.

    Parameters
    ----------
    sig : 1d array
        Time series.
    fs : float
        The sampling frequency of sig.
    f_range : tuple or None
        Frequency range.
    hset : 1d array
        Resampling factors used in IRASA calculation.
        If not provided, defaults to values from 1.1 to 1.9 with an increment of 0.05.
    spectrum_kwargs : dict
        Optional keywords arguments that are passed to `compute_spectrum`.

    Returns
    -------
    freqs : 1d array
        Frequency vector.
    psd_aperiodic : 1d array
        The aperiodic component of the power spectrum.
    psd_periodic : 1d array
        The periodic component of the power spectrum.

    Notes
    -----
    Irregular-Resampling Auto-Spectral Analysis (IRASA) is described in Wen & Liu (2016).
    Briefly, it aims to separate 1/f and periodic components by resampling time series, and
    computing power spectra, effectively averaging away any activity that is frequency specific.

    References
    ----------
    Wen, H., & Liu, Z. (2016). Separating Fractal and Oscillatory Components in the Power Spectrum
    of Neurophysiological Signal. Brain Topography, 29(1), 13–26. DOI: 10.1007/s10548-015-0448-0
    """

    # Check & get the resampling factors, with rounding to avoid floating point precision errors
    hset = np.arange(1.1, 1.95, 0.05) if hset is None else hset
    hset = np.round(hset, 4)

    # The `nperseg` input needs to be set to lock in the size of the FFTs
    if 'nperseg' not in spectrum_kwargs:
        spectrum_kwargs['nperseg'] = int(4 * fs)

    # Calculate the original spectrum across the whole signal
    freqs, psd = compute_spectrum(sig, fs, **spectrum_kwargs)

    # Do the IRASA resampling procedure
    psds = np.zeros((len(hset), *psd.shape))
    for ind, h_val in enumerate(hset):

        # Get the up-sampling / down-sampling (h, 1/h) factors as integers
        rat = fractions.Fraction(str(h_val))
        up, dn = rat.numerator, rat.denominator

        # Resample signal
        sig_up = signal.resample_poly(sig, up, dn, axis=-1)
        sig_dn = signal.resample_poly(sig, dn, up, axis=-1)

        # Calculate the power spectrum using the same params as original
        freqs_up, psd_up = compute_spectrum(sig_up, h_val * fs,
                                            **spectrum_kwargs)
        freqs_dn, psd_dn = compute_spectrum(sig_dn, fs / h_val,
                                            **spectrum_kwargs)

        # Geometric mean of h and 1/h
        psds[ind, :] = np.sqrt(psd_up * psd_dn)

    # Now we take the median resampled spectra, as an estimate of the aperiodic component
    psd_aperiodic = np.median(psds, axis=0)

    # Subtract aperiodic from original, to get the periodic component
    psd_periodic = psd - psd_aperiodic

    # Restrict spectrum to requested range
    if f_range:
        psds = np.array([psd_aperiodic, psd_periodic])
        freqs, (psd_aperiodic,
                psd_periodic) = trim_spectrum(freqs, psds, f_range)

    return freqs, psd_aperiodic, psd_periodic
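
A minimal usage sketch, assuming a synthetic signal: cumulatively summed white noise gives a 1/f^2-like aperiodic background, and a 10 Hz sinusoid supplies the periodic component:

import numpy as np

fs = 500
times = np.arange(0, 10, 1 / fs)
sig_demo = np.cumsum(np.random.randn(len(times)))   # ~1/f^2 background
sig_demo += 0.5 * np.sin(2 * np.pi * 10 * times)    # periodic component
freqs, psd_ap, psd_pe = compute_irasa(sig_demo, fs=fs, f_range=(1, 30))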
Example 7
def demo_rsHRF(input_file,
               mask_file,
               output_dir,
               para,
               p_jobs,
               file_type=".nii",
               mode="bids",
               wiener=False,
               temporal_mask=[]):
    # book-keeping w.r.t. parameter values
    if 'localK' not in para or para['localK'] is None:
        if para['TR'] <= 2:
            para['localK'] = 1
        else:
            para['localK'] = 2
    # creating the output-directory if not already present
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # for four-dimensional input
    if mode != 'time-series':
        name = input_file.split('/')[-1].split('.')[0]
        v1 = spm_dep.spm.spm_vol(input_file)
        if mask_file is not None:
            mask_name = mask_file.split('/')[-1].split('.')[0]
            v = spm_dep.spm.spm_vol(mask_file)
            if file_type == ".nii" or file_type == ".nii.gz":
                brain = spm_dep.spm.spm_read_vols(v)
            else:
                brain = v.agg_data().flatten(order='F')
            if  ((file_type == ".nii" or file_type == ".nii.gz") and \
                    v1.header.get_data_shape()[:-1] != v.header.get_data_shape()) or \
                ((file_type == ".gii" or file_type == ".gii.gz") and \
                    v1.agg_data().shape[0]!= v.agg_data().shape[0]):
                raise ValueError('Inconsistency in input-mask dimensions' +
                                 '\n\tinput_file == ' + name + file_type +
                                 '\n\tmask_file == ' + mask_name + file_type)
            else:
                if file_type == ".nii" or file_type == ".nii.gz":
                    data = v1.get_data()
                else:
                    data = v1.agg_data()
        else:
            print('No atlas provided! Generating mask file...')
            if file_type == ".nii" or file_type == ".nii.gz":
                data = v1.get_data()
                brain = np.nanvar(data.reshape(-1, data.shape[3]), -1, ddof=0)
            else:
                data = v1.agg_data()
                brain = np.nanvar(data, -1, ddof=0)
            print('Done')
        voxel_ind = np.where(brain > 0)[0]
        mask_shape = data.shape[:-1]
        nobs = data.shape[-1]
        data1 = np.reshape(data, (-1, nobs), order='F').T
        bold_sig = stats.zscore(data1[:, voxel_ind], ddof=1)
    # for time-series input
    else:
        name = input_file.split('/')[-1].split('.')[0]
        data1 = (np.loadtxt(input_file, delimiter=","))
        if data1.ndim == 1:
            data1 = np.expand_dims(data1, axis=1)
        nobs = data1.shape[0]
        bold_sig = stats.zscore(data1, ddof=1)
    if len(temporal_mask) > 0 and len(temporal_mask) != nobs:
        raise ValueError('Inconsistency in temporal_mask dimensions.\n' +
                         'Size of mask: ' + str(len(temporal_mask)) + '\n' +
                         'Size of time-series: ' + str(nobs))
    bold_sig = np.nan_to_num(bold_sig)
    bold_sig_deconv = processing. \
                      rest_filter. \
                      rest_IdealFilter(bold_sig, para['TR'], para['passband_deconvolve'])
    bold_sig = processing. \
               rest_filter. \
               rest_IdealFilter(bold_sig, para['TR'], para['passband'])
    data_deconv = np.zeros(bold_sig.shape)
    event_number = np.zeros((1, bold_sig.shape[1]))
    print('Retrieving HRF ...')
    #Estimate HRF for the fourier / hanning / gamma / canonical basis functions
    if not (para['estimation'] == 'sFIR' or para['estimation'] == 'FIR'):
        bf = basis_functions.basis_functions.get_basis_function(
            bold_sig.shape, para)
        beta_hrf, event_bold = utils.hrf_estimation.compute_hrf(bold_sig,
                                                                para,
                                                                temporal_mask,
                                                                p_jobs,
                                                                bf=bf)
        hrfa = np.dot(bf, beta_hrf[np.arange(0, bf.shape[1]), :])
    #Estimate HRF for FIR and sFIR
    else:
        para['T'] = 1
        beta_hrf, event_bold = utils.hrf_estimation.compute_hrf(
            bold_sig, para, temporal_mask, p_jobs)
        hrfa = beta_hrf[:-1, :]
    nvar = hrfa.shape[1]
    PARA = np.zeros((3, nvar))
    for voxel_id in range(nvar):
        hrf1 = hrfa[:, voxel_id]
        PARA[:, voxel_id] = \
            parameters.wgr_get_parameters(hrf1, para['TR'] / para['T'])
    print('Done')
    print('Deconvolving HRF ...')
    if para['T'] > 1:
        hrfa_TR = signal.resample_poly(hrfa, 1, para['T'])
    else:
        hrfa_TR = hrfa
    for voxel_id in range(nvar):
        hrf = hrfa_TR[:, voxel_id]
        if not wiener:
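            # Regularized inverse filtering in the frequency domain:
            #   x_hat = IFFT( conj(H) * M / (|H|^2 + 0.1 * mean(|H|^2)) )
            # where the 0.1 * mean(|H|^2) term acts as a noise-floor regularizer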
            H = np.fft.fft(np.append(hrf, np.zeros(
                (nobs - max(hrf.shape), 1))),
                           axis=0)
            M = np.fft.fft(bold_sig_deconv[:, voxel_id])
            data_deconv[:, voxel_id] = \
                np.fft.ifft(H.conj() * M / (H * H.conj() + .1*np.mean((H * H.conj()))))
        else:
            data_deconv[:,
                        voxel_id] = iterative_wiener_deconv.rsHRF_iterative_wiener_deconv(
                            bold_sig_deconv[:, voxel_id], hrf)
        event_number[:, voxel_id] = np.amax(event_bold[voxel_id].shape)
    print('Done')
    print('Saving Output ...')
    # setting the output-path
    if mode == 'bids' or mode == 'bids w/ atlas':
        layout_output = BIDSLayout(output_dir)
        entities = parse_file_entities(input_file)
        sub_save_dir = layout_output.build_path(entities).rsplit('/', 1)[0]
    else:
        sub_save_dir = output_dir
    if not os.path.isdir(sub_save_dir):
        os.makedirs(sub_save_dir, exist_ok=True)
    dic = {'para': para, 'hrfa': hrfa, 'event_bold': event_bold, 'PARA': PARA}
    ext = '_hrf.mat'
    if mode == "time-series":
        dic["event_number"] = event_number
        dic["data_deconv"] = data_deconv
        ext = '_hrf_deconv.mat'
    name = name.rsplit('_bold', 1)[0]
    sio.savemat(os.path.join(sub_save_dir, name + ext), dic)
    HRF_para_str = ['height', 'T2P', 'FWHM']
    if mode != "time-series":
        mask_data = np.zeros(mask_shape).flatten(order='F')
        for i in range(3):
            fname = os.path.join(sub_save_dir, name + '_' + HRF_para_str[i])
            mask_data[voxel_ind] = PARA[i, :]
            mask_data = mask_data.reshape(mask_shape, order='F')
            spm_dep.spm.spm_write_vol(v1, mask_data, fname, file_type)
            mask_data = mask_data.flatten(order='F')
        fname = os.path.join(sub_save_dir, name + '_eventnumber')
        mask_data[voxel_ind] = event_number
        mask_data = mask_data.reshape(mask_shape, order='F')
        spm_dep.spm.spm_write_vol(v1, mask_data, fname, file_type)
        mask_data = np.zeros(data.shape)
        dat3 = np.zeros(data.shape[:-1]).flatten(order='F')
        for i in range(nobs):
            fname = os.path.join(sub_save_dir, name + '_deconv')
            dat3[voxel_ind] = data_deconv[i, :]
            dat3 = dat3.reshape(data.shape[:-1], order='F')
            if file_type == ".nii" or file_type == ".nii.gz":
                mask_data[:, :, :, i] = dat3
            else:
                mask_data[:, i] = dat3
            dat3 = dat3.flatten(order='F')
        spm_dep.spm.spm_write_vol(v1, mask_data, fname, file_type)
    pos = 0
    while pos < hrfa_TR.shape[1]:
        if np.any(hrfa_TR[:, pos]):
            break
        pos += 1
    event_plot = lil_matrix((1, nobs))
    if event_bold.size:
        event_plot[:, event_bold[pos]] = 1
    else:
        print("No Events Detected!")
        return 0
    event_plot = np.ravel(event_plot.toarray())
    plt.figure()
    plt.plot(para['TR'] * np.arange(1,
                                    np.amax(hrfa_TR[:, pos].shape) + 1),
             hrfa_TR[:, pos],
             linewidth=1)
    plt.xlabel('time (s)')
    plt.savefig(os.path.join(sub_save_dir, name + '_hrf_plot.png'))
    plt.figure()
    plt.plot(para['TR'] * np.arange(1, nobs + 1),
             np.nan_to_num(stats.zscore(bold_sig[:, pos], ddof=1)),
             linewidth=1)
    plt.plot(para['TR'] * np.arange(1, nobs + 1),
             np.nan_to_num(stats.zscore(data_deconv[:, pos], ddof=1)),
             color='r',
             linewidth=1)
    markerline, stemlines, baseline = \
        plt.stem(para['TR'] * np.arange(1, nobs + 1), event_plot)
    plt.setp(baseline, 'color', 'k', 'markersize', 1)
    plt.setp(stemlines, 'color', 'k')
    plt.setp(markerline, 'color', 'k', 'markersize', 3, 'marker', 'd')
    plt.legend(['BOLD', 'Deconvolved BOLD', 'Events'], loc='best')
    plt.xlabel('time (s)')
    plt.savefig(os.path.join(sub_save_dir, name + '_deconvolution_plot.png'))
    print('Done')
    return 0
Example 8
    parser.add_argument(
        '--not_requiem',
        action='store_false',
        help='use new waveform generator method from WORLD version 0.2.2')
    args = parser.parse_args()

    # load wav file
    wav_path = Path(args.inFILE)
    print('input wave path ', wav_path)
    fs, x_int16 = wavread(wav_path)
    x = x_int16 / (2**15 - 1)
    print('fs', fs)

    if 0:  # resample
        fs_new = 16000
        x = signal.resample_poly(x, fs_new, fs)
        fs = fs_new

    if 0:  # low-cut
        B = signal.firwin(127, [0.01], pass_zero=False)
        A = np.array([1.0])
        if 0:
            import matplotlib.pyplot as plt
            w, H = signal.freqz(B, A)

            fig, (ax1, ax2) = plt.subplots(2, figsize=(16, 6))
            ax1.plot(w / np.pi, abs(H))
            ax1.set_ylabel('magnitude')
            ax2.plot(w / np.pi, np.unwrap(np.angle(H)))
            ax2.set_ylabel('unwrapped phase')
            plt.show()
Example 9
f = np.linspace(-Fs / 2.0, Fs / 2.0, len(psd))
plt.figure(5)
plt.plot(f, psd)
plt.show()

# correct coarse frequency offset
max_freq = f[np.argmax(psd)]
Ts = 1 / Fs  # sample period
t = np.arange(0, Ts * len(samples), Ts)  # create time vector
# use -1j instead of 1j to subtract the frequency offset
# divide by 2 because the measured peak (of the squared signal) is at twice the actual offset
samples = samples * np.exp(-1j * 2 * np.pi * max_freq * t / 2.0)

# interpolation allows shifting by a fractional amount of samples
InterpFactor = 16
samples_interpolated = signal.resample_poly(samples, InterpFactor, 1)
plt.figure(6)
plt.subplot(211)
plt.plot(samples, '.-')
plt.subplot(212)
plt.plot(samples_interpolated, '.-')
plt.show()

# mueller-muller clock recovery pll with interpolation
mu = 0
out = np.zeros(len(samples) + 10, dtype=complex)
out_rail = np.zeros(
    len(samples) + 10,
    dtype=complex)  # used to save 2 previous values plus current value
i_in = 0  # input sample index
i_out = 2  # output index; first two outputs are 0
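
The loop body is not shown in this snippet; a hedged sketch of the Mueller and Muller timing-error update such a setup typically drives (the samples-per-symbol value and loop gain are assumptions):

sps = 8        # samples per symbol (assumption; not given in the snippet)
gain = 0.3     # loop gain (assumption; tune per signal)
while i_out < len(samples) and i_in + sps < len(samples):
    # take the interpolated sample nearest the current fractional offset mu
    out[i_out] = samples_interpolated[i_in * InterpFactor + int(mu * InterpFactor)]
    # hard-decision ("rail") version of the output for the error detector
    out_rail[i_out] = int(np.real(out[i_out]) > 0) + 1j * int(np.imag(out[i_out]) > 0)
    x = (out_rail[i_out] - out_rail[i_out - 2]) * np.conj(out[i_out - 1])
    y = (out[i_out] - out[i_out - 2]) * np.conj(out_rail[i_out - 1])
    mu += sps + gain * np.real(y - x)  # Mueller-Muller timing error update
    i_in += int(np.floor(mu))          # advance input by the integer part
    mu -= np.floor(mu)                 # keep only the fractional part
    i_out += 1
out = out[2:i_out]  # the first two outputs were placeholders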
Example 10
    def custom_resampling(self, audio):
        audio = signal.resample_poly(
            audio, 1, self.sampling_rate // self.resampling_rate)
        audio = tf.convert_to_tensor(audio, dtype=tf.float32)
        return audio
Example 11
def read_delsys(fname,
                fname2='',
                sensors=None,
                freq_trc=150,
                emg=True,
                imu=False,
                resample=[1200, 150],
                freqs=[20, 20, 450],
                show_msg=True,
                show=False,
                ax=None,
                suptitle=''):
    """Read Delsys csv file from Cortex MAC (Asynchronous device data file).

    Parameters
    ----------
    fname : string
        Full file name of the Delsys csv file from Cortex file to be opened.
    fname2 : string, optional (default = '')
        Full file name of the text file to be saved with data if desired.
        If both parameters `emg` and `imu` are True, you must input a list with
        the two full file names (EMG and IMU).
        If fname2 is '', no file is saved.
        If fname2 is '=', the original file name will be used but its extension
        will be .emg and .imu for the files with EMG data and with IMU data (if
        parameters `emg` and `imu` are True).
    sensors : list of strings, optional
        Names of the sensors to be used as column names for the EMG and IMU data.
    freq_trc : number, optional (default = 150)
        Sampling frequency of the markers data
    emg : bool, optional (default = True)
        Read and save EMG data
    imu : bool, optional (default = False)
        Read and save IMU data
    resample : list with two numbers, optional (default = [1200, 150])
        Whether to resample the data to the given frequencies.
        The list order is [freq_emg, freq_imu]. Enter 0 (zero) to not resample.
        The scipy function signal.resample_poly is used.
        For the EMG signal, if the requested frequency is lower than 1000 Hz,
        the linear envelope is computed first, with a low-pass cutoff given by
        parameter freqs[0] (the EMG data are first band-pass filtered with the
        frequencies given by parameters freqs[1] and freqs[2]).
    freqs : list of three numbers, optional (default = [20, 20, 450])
        Frequencies to be used at the linear envelope calculation if desired.
        See the parameter `resample`.
    show_msg : bool, optional (default = True)
        Whether to print messages about the execution of the intermediary steps
        (True) or not (False).
    show : bool, optional (default = False)
        if True (1), plot data in matplotlib figure.
    ax : a matplotlib.axes.Axes instance, optional (default = None).
    suptitle : string, optional (default = '')
        If string, shows string as suptitle. If empty, doesn't show suptitle.

    Returns
    -------
    data : one or two pandas dataframes
        df_emg and/or df_imu, depending on the parameters `emg` and `imu`.
        The units of df_emg will be mV (the raw signal is multiplied by 1000).
        The units of the IMU data are according to Delsys specification.

    """

    with open(file=fname, mode='rt', newline=None) as f:
        if show_msg:
            print('Opening file "{}" ... '.format(fname), end='')
        file = f.read().splitlines()
        if file[0] != 'Cortex generated Asynchronous device data file (.add)':
            print(
                '\n"{}" is not a valid Delsys from Cortex file.'.format(fname))
            if emg and imu:
                return None, None
            elif emg:
                return None
            elif imu:
                return None
        # find start and final lines of data in file
        idx = file.index('[Devices]') + 2
        count = int(file[idx].split('=')[1])
        devices = [
            name.split(', ')[-1] for name in file[idx + 1:idx + 1 + count]
        ]
        if sensors is None:
            sensors = devices
        idx = idx + 3 + count
        count2 = int(file[idx].split('=')[1])
        channels = [name for name in file[idx + 1:idx + 1 + count2]]
        n_im = int((count2 - count) / count)
        # indexes for ini_emg, end_emg, ini_im, end_im
        idxs = np.zeros((count, 4), dtype=int)
        for i, device in enumerate(devices):
            idxs[i, 0] = file.index(device) + 3
            idxs[i, 1] = file[idxs[i, 0]:].index('') + idxs[i, 0] - 1
        idxs[:, 2] = idxs[:, 1] + 3
        idxs[:, 3] = np.r_[idxs[1:, 0] - 6,
                           np.array(len(file) - 3, dtype=int, ndmin=1)]

        # read emg data
        if emg:
            nrows_emg = int(np.min(idxs[:, 1] - idxs[:, 0]) + 1)
            f.seek(0)
            t_emg = pd.read_csv(f,
                                sep=',',
                                header=None,
                                names=None,
                                index_col=None,
                                usecols=[2],
                                skiprows=idxs[0, 0],
                                nrows=nrows_emg,
                                squeeze=True,
                                dtype=np.float32,
                                encoding='utf-8',
                                engine='c').values
            # the above is faster than simply:
            # np.array([x.split(',')[2] for x in file[idxs[0, 0]:idxs[0, 1]+1]], dtype=np.float32)
            # and faster than:
            # np.loadtxt(f, dtype=np.float32, comments=None, delimiter=',', skiprows=idxs[0, 0], usecols=2, max_rows=nrows_emg)
            freq_emg = np.mean(freq_trc / np.diff(t_emg))
            if resample[0]:
                fr = Fraction(resample[0] / freq_emg).limit_denominator(1000)
                nrows_emg = int(
                    np.ceil(nrows_emg * fr.numerator / fr.denominator))
                freq_emg2 = resample[0]
            else:
                freq_emg2 = freq_emg
            ys = np.empty((nrows_emg, count), dtype=np.float32)
            for i, sensor in enumerate(sensors):
                f.seek(0)
                y = pd.read_csv(f,
                                sep=',',
                                header=None,
                                names=[sensor],
                                index_col=None,
                                usecols=[3],
                                skiprows=idxs[i, 0],
                                nrows=len(t_emg),
                                squeeze=True,
                                dtype=np.float32,
                                encoding='utf-8',
                                engine='c').values

                if resample[0]:
                    if resample[0] < 1000:
                        y = linear_envelope(y,
                                            freq_emg,
                                            fc_bp=[freqs[1], freqs[2]],
                                            fc_lp=freqs[0],
                                            method='rms')
                    y = signal.resample_poly(y, fr.numerator, fr.denominator)
                ys[:, i] = y * 1000
            df_emg = pd.DataFrame(data=ys, columns=sensors)
            df_emg.index = df_emg.index / freq_emg2
            df_emg.index.name = 'Time'

        # read IM data
        if imu:
            nrows_imu = int(np.min(idxs[:, 3] - idxs[:, 2]) + 1)
            cols = [
                sensor + channel.split(',')[3] for sensor in sensors
                for channel in channels[1:int(count2 / count)]
            ]
            f.seek(0)
            t_imu = pd.read_csv(f,
                                sep=',',
                                header=None,
                                names=None,
                                index_col=None,
                                usecols=[2],
                                skiprows=idxs[0, 2],
                                nrows=nrows_imu,
                                squeeze=True,
                                dtype=np.float32,
                                encoding='utf-8',
                                engine='c').values
            freq_imu = np.mean(freq_trc / np.diff(t_imu))
            if resample[1]:
                fr = Fraction(resample[1] / freq_imu).limit_denominator(1000)
                nrows_imu = int(
                    np.ceil(nrows_imu * fr.numerator / fr.denominator))
                freq_imu = resample[1]
            ys = np.empty((nrows_imu, count2 - count), dtype=np.float32)
            for i, sensor in enumerate(sensors):
                f.seek(0)
                cs = slice(int(n_im * i), int((n_im * (i + 1))))
                y = pd.read_csv(f,
                                sep=',',
                                header=None,
                                names=cols[cs],
                                index_col=None,
                                usecols=range(3, 12),
                                skiprows=idxs[i, 2],
                                nrows=len(t_imu),
                                squeeze=False,
                                dtype=np.float32,
                                encoding='utf-8',
                                engine='c').values
                if resample[1]:
                    y2 = np.empty((nrows_imu, y.shape[1]), dtype=np.float32)
                    for c in range(y.shape[1]):
                        y2[:,
                           c] = signal.resample_poly(y[:, c], fr.numerator,
                                                     fr.denominator)
                else:
                    y2 = y
                ys[:, cs] = y2
            df_imu = pd.DataFrame(data=ys, columns=cols)
            df_imu.index = df_imu.index / freq_imu
            df_imu.index.name = 'Time'

        if show_msg:
            print('done.')

    # save file
    if len(fname2):
        if isinstance(fname2, list):
            fname2_emg = fname2[0]
            fname2_imu = fname2[1]
        else:
            if emg:
                fname2_emg = fname2
            if imu:
                fname2_imu = fname2
        if emg and fname2_emg == '=':
            name, extension = os.path.splitext(fname)
            fname2_emg = name + '.emg'
        if imu and fname2_imu == '=':
            name, extension = os.path.splitext(fname)
            fname2_imu = name + '.imu'
        if emg:
            df_emg.to_csv(fname2_emg, sep='\t', float_format='%.6f')
            if show_msg:
                print('Saving file "{}" ... '.format(fname2_emg), end='')
        if imu:
            df_imu.to_csv(fname2_imu, sep='\t', float_format='%.6f')
            if show_msg:
                print('\nSaving file "{}" ... '.format(fname2_imu), end='')
        if show_msg:
            print('done.')

    if show and emg:
        _plot_df_emg(df_emg, ax=None, suptitle=suptitle)

    if emg and imu:
        return df_emg, df_imu
    elif emg:
        return df_emg
    elif imu:
        return df_imu
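
A hedged usage sketch (the file name is hypothetical; fname2='=' saves the output next to the input with an .emg extension):

df_emg = read_delsys('trial01_delsys.csv', fname2='=', emg=True, show=True)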
Example 12
def compute_unit_template_features(recording, sorting, unit_ids=None, channel_ids=None, feature_names=None,
                                   max_channels_per_features=1, recovery_slope_window=0.7, upsampling_factor=1,
                                   invert_waveforms=False, as_dataframe=False, **kwargs):
    """
    Use SpikeInterface/spikefeatures to compute features for the unit template.

    These consist of a set of 1D features:
        - peak to valley (peak_to_valley), time between peak and valley
        - halfwidth (halfwidth), width of peak at half its amplitude
        - peak trough ratio (peak_trough_ratio), amplitude of peak over amplitude of trough
        - repolarization slope (repolarization_slope), slope between trough and return to base
        - recovery slope (recovery_slope), slope after peak towards baseline

    And 2D features (to be implemented):
        - unit_spread
        - propagation velocity

    The metrics are computed on 'negative' waveforms; if templates are saved as
    positive, pass the keyword 'invert_waveforms'.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor
    sorting: SortingExtractor
        The sorting extractor
    unit_ids: list
        List of unit ids to compute features
    channel_ids: list
        List of channels ids to compute templates on which features are computed
    feature_names: list
        List of feature names to be computed. If None, all features are computed
    max_channels_per_features: int
        Maximum number of channels to compute features on (default 1). If channel_ids is used, this parameter
        is ignored
    upsampling_factor: int
        Factor with which to upsample the template resolution (default 1)
    invert_waveforms: bool
        Invert templates before computing features (default False)
    recovery_slope_window: float
        Window after peak in ms wherein to compute recovery slope (default 0.7)
    as_dataframe: bool
        If True, output is returned as a pandas dataframe, otherwise as a dictionary
    **kwargs: Keyword arguments
        A dictionary with default values can be retrieved with:
        st.postprocessing.get_waveforms_params():
            grouping_property: str
                Property to group channels. E.g. if the recording extractor has the 'group' property and
                'grouping_property' is 'group', then waveforms are computed group-wise.
            ms_before: float
                Time period in ms to cut waveforms before the spike events
            ms_after: float
                Time period in ms to cut waveforms after the spike events
            dtype: dtype
                The numpy dtype of the waveforms
            compute_property_from_recording: bool
                If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
                property of the recording extractor channel on which the average waveform is the largest
            max_channels_per_waveforms: int or None
                Maximum channels per waveforms to return. If None, all channels are returned
            n_jobs: int
                Number of parallel jobs (default 1)
            max_spikes_per_unit: int
                The maximum number of spikes to extract per unit
            memmap: bool
                If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
            seed: int
                Random seed for extracting random waveforms
            save_property_or_features: bool
                If True (default), waveforms are saved as features of the sorting extractor object
            recompute_info: bool
                If True, waveforms are recomputed (default False)
            verbose: bool
                If True output is verbose


    Returns
    -------
    features: dict or pandas.DataFrame
        The computed features as a dictionary or a pandas.DataFrame (if as_dataframe is True)
    """

    # ------------------- SETUP ------------------------------
    if isinstance(unit_ids, (int, np.integer)):
        unit_ids = [unit_ids]
    elif unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    elif not isinstance(unit_ids, (list, np.ndarray)):
        raise Exception("unit_ids is invalid")
    if isinstance(channel_ids, (int, np.integer)):
        channel_ids = [channel_ids]

    if channel_ids is None:
        channel_ids = recording.get_channel_ids()

    assert np.all([u in sorting.get_unit_ids() for u in unit_ids]), "Invalid unit_ids"
    assert np.all([ch in recording.get_channel_ids() for ch in channel_ids]), "Invalid channel_ids"

    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    save_property_or_features = params_dict['save_property_or_features']

    if feature_names is None:
        feature_names = sf.all_1D_features
    else:
        bad_features = []
        for m in feature_names:
            if m not in sf.all_1D_features:
                bad_features.append(m)
        if len(bad_features) > 0:
            raise ValueError(f"Improper feature names: {str(bad_features)}. The following features names can be "
                             f"calculated: {str(sf.all_1D_features)}")

    templates = np.array(get_unit_templates(recording, sorting, unit_ids=unit_ids, channel_ids=channel_ids,
                                            mode='median', **kwargs))

    # -------------------- PROCESS TEMPLATES -----------------------------
    if upsampling_factor > 1:
        upsampling_factor = int(upsampling_factor)
        processed_templates = resample_poly(templates, up=upsampling_factor, down=1, axis=2)
        resampled_fs = recording.get_sampling_frequency() * upsampling_factor
    else:
        processed_templates = templates
        resampled_fs = recording.get_sampling_frequency()

    if invert_waveforms:
        processed_templates = -processed_templates

    features_dict = dict()
    for feat in feature_names:
        features_dict[feat] = []
    # --------------------- COMPUTE FEATURES ------------------------------
    for unit_id, unit in enumerate(unit_ids):
        template = processed_templates[unit_id]
        max_channel_idxs = select_max_channels_from_templates(template, recording, max_channels_per_features)
        template_channels = template[max_channel_idxs]
        if len(template_channels.shape) == 1:
            template_channels = template_channels[np.newaxis, :]
        feat_list = sf.calculate_features(waveforms=template_channels,
                                          sampling_frequency=resampled_fs,
                                          feature_names=feature_names,
                                          recovery_slope_window=recovery_slope_window)

        for feat, feat_val in feat_list.items():
            features_dict[feat].append(feat_val)

    # ---------------------- DEAL WITH OUTPUT -------------------------
    if save_property_or_features:
        for feat_name, feat_val in features_dict.items():
            sorting.set_units_property(unit_ids=unit_ids,
                                       property_name=feat_name,
                                       values=feat_val)
    if as_dataframe:
        features = pandas.DataFrame.from_dict(features_dict)
        features = features.rename(index={original_idx: unit_ids[i] for
                                          i, original_idx in enumerate(range(len(features)))})
    else:
        features = features_dict
    return features
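
A hedged usage sketch (assuming recording and sorting are already-loaded SpikeInterface extractors):

features = compute_unit_template_features(recording, sorting,
                                          feature_names=['halfwidth', 'peak_to_valley'],
                                          upsampling_factor=10,
                                          as_dataframe=True)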
Example 13
def cool_distributed_cpgs(mode):
    """
    main function to manipulate neurons/synapses in distributed recurrent networks
    loads previously fitted output weights
    computes correlation between neural trajectories as a similarity measure
    :return: nothing
    """
    # load output weights
    out_dir = '/Users/robert/project_src/cooling/single_cpg_manipulation'
    weight_suffix1 = 'outunit1_parallel_weights_force.npy'
    weight_suffix2 = 'outunit2_parallel_weights_force.npy'
    weight_suffix3 = 'Wrec1_parallel_weights_force.npy'
    weight_suffix4 = 'Wrec2_parallel_weights_force.npy'
    Wout1 = np.load(os.path.join(out_dir, weight_suffix1))
    Wout2 = np.load(os.path.join(out_dir, weight_suffix2))
    Wrec1 = np.load(os.path.join(out_dir, weight_suffix3))
    Wrec2 = np.load(os.path.join(out_dir, weight_suffix4))

    # create network with same parameters
    t_max = 2000.0
    dt = 1.0
    nw1 = rn.Network(N=800, g=1.5, pc=1.0)
    nw1.Wrec = Wrec1
    nw2 = rn.Network(N=800, g=1.5, pc=1.0, seed=5432)
    nw2.Wrec = Wrec2
    def ext_inp(t):
        return np.zeros(nw1.N)

    # run dynamics at reference temperature and compute neural/behavioral trajectory
    ref_t1, ref_rates1 = nw1.simulate_network(T=t_max, dt=dt, external_input=ext_inp)
    ref_t2, ref_rates2 = nw2.simulate_network(T=t_max, dt=dt, external_input=ext_inp)
    neuron_out1 = np.dot(Wout1, ref_rates1)
    neuron_out2 = np.dot(Wout2, ref_rates2)
    ref_behavior = np.array([neuron_out1, neuron_out2])

    fig1 = plt.figure(1)
    ax1 = fig1.add_subplot(1, 1, 1)
    ax1.plot(neuron_out1, neuron_out2, 'k', linewidth=0.5, label='ref')

    # run dynamics at different temperatures using some Q10 for tau
    # and compute neural/behavioral trajectories
    if mode == 'sweep':
        dT_steps = [-0.2, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0, -3.5, -4.0, -4.5, -5.0]
    elif mode == 'vis':
        dT_steps = [-1.0, -3.0, -5.0]
    else:
        raise ValueError("mode must be 'sweep' or 'vis'")
    # dT_steps = [-0.5, -1.0, -2.0]
    cooled_behaviors = []
    for i, dT in enumerate(dT_steps):
        cooled_q = _q(dT)
        cooled_nw = rn.Network(N=800, g=1.5, pc=1.0, q=cooled_q)
        cooled_nw.Wrec = Wrec1
        cooled_t, cooled_rates = cooled_nw.simulate_network(T=t_max/cooled_q, dt=dt, external_input=ext_inp)

        cooled_behavior = np.dot(Wout1, cooled_rates)
        target_length = len(ref_behavior[1])
        original_length = len(cooled_behavior)
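        # stretch the cooled trajectory back to the reference length so the two
        # trajectories can be compared sample-by-sample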
        cooled_behavior_resampled = resample_poly(cooled_behavior, target_length, original_length)
        cooled_behaviors.append(np.array([cooled_behavior_resampled, ref_behavior[1]]))

        label_str = 'dT = %.1f' % dT
        ax1.plot(cooled_behavior_resampled, ref_behavior[1], linewidth=0.5, label=label_str)

    ax1.legend()
    ax1.set_xlabel('Output (a.u.)')

    # measure similarity of neural/behavioral trajectories as a function of temperature
    behavior_similarities = []
    for i in range(len(dT_steps)):
        similarity = rn.measure_trajectory_similarity(ref_behavior, cooled_behaviors[i])
        behavior_similarities.append(similarity)
    fig2 = plt.figure(2)
    ax2 = fig2.add_subplot(1, 1, 1)
    ax2.plot(dT_steps, behavior_similarities, 'ro-', label='behavior')
    ax2.set_xlim(ax2.get_xlim()[::-1])
    ax2.set_xlabel('Temperature change')
    ax2.set_ylabel('Corr. coeff.')
    ax2.legend()

    plt.show()
Example 14
def signal_envelope(signal,
                    srate,
                    cutoff=20.,
                    method='hilbert',
                    comp_factor=1. / 3,
                    resample=125,
                    rescale=True):
    """Compute the broadband envelope of the input signal.
    Several methods are available:

        - Hilbert -> abs -> low-pass (-> resample)
        - Rectify -> low-pass (-> resample)
        - subenvelopes -> sum

    The envelope can also be compressed by raising to a certain power factor.

    Parameters
    ----------
    signal : ndarray (nsamples,)
        1-dimensional input signal
    srate : float
        Original sampling rate of the signal
    cutoff : float (default 20Hz)
        Cutoff frequency (transition will be 10 Hz)
        In Hz
    method : str {'hilbert', 'rectify', 'subs'}
        Method to be used
    comp_factor : float (default 1/3)
        Compression factor (final envelope = env**comp_factor)
    resample : float (default 125Hz)
        New sampling rate of the envelope (must satisfy 2*cutoff < resample <= srate).
        Explicitly set to False or None to skip resampling.

    Returns
    -------
    env : ndarray (nsamples_env,)
        Envelope
    
    """
    print("Computing envelope...")
    if method.lower() == 'subs':
        raise NotImplementedError
    else:
        if method.lower() == 'hilbert':
            # Get modulus of hilbert transform
            out = abs(scisig.hilbert(signal))
        elif method.lower() == 'rectify':
            # Rectify signal
            out = abs(signal)
        else:
            raise ValueError(
                "Method can only be 'hilbert', 'rectify' or 'subs'.")

        # Non linear compression before filtering to avoid NaN
        out = np.power(out + np.finfo(float).eps, comp_factor)
        # Design low-pass filter
        ntaps = fir_order(
            10, srate,
            ripples=1e-3)  # + 1 -> using odd ntaps for Type I filter,
        # so I have an integer group delay (instead of half)
        b = scisig.firwin(ntaps, cutoff, fs=srate)
        # Filter with convolution
        out = scisig.convolve(np.pad(out, (len(b) // 2, len(b) // 2),
                                     mode='edge'),
                              b,
                              mode='valid')
        #out = scisig.filtfilt(b, [1.0], signal) # This attenuates twice as much
        #out = scisig.lfilter(b, [1.0], pad(signal, (0, len(b)//2), mode=edge))[len(b)//2:]  # slower than scipy.signal.convolve method

        # Resample
        if resample:
            if not 2 * cutoff < resample < srate:
                raise ValueError(
                    "Choose the resampling rate more carefully; it must be > %.1f Hz"
                    % (2 * cutoff))
            if srate // resample == srate / resample:
                env = scisig.resample_poly(out, 1, srate // resample)
            else:
                dur = (len(signal) - 1) / srate
                new_n = int(np.ceil(resample * dur))
                env = scisig.resample(out, new_n)

    # Scale output between 0 and 1:
    if rescale:
        return minmax_scale(env)
    else:
        return env
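
A hedged usage sketch (a synthetic amplitude-modulated tone stands in for real audio; the fir_order helper used above is assumed to be importable):

import numpy as np

srate = 16000
t = np.arange(0, 2, 1 / srate)
tone = (1 + 0.5 * np.sin(2 * np.pi * 3 * t)) * np.sin(2 * np.pi * 440 * t)
env = signal_envelope(tone, srate, cutoff=20., method='hilbert', resample=125)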
Example 15
def create_stream(EEG_data,
                  sampling_rate,
                  compressionbit=True,
                  hashbit=True,
                  check_quality=True):
    warnings.warn(
        'You are using version 1 of CFS, CFS version 1 is deprecated and will not be supported by Z3Score in the future.',
        RuntimeWarning)
    SRATE = 100  #Hz
    LOWPASS = 45.0  #Hz
    HIGHPASS = 0.3  #Hz
    LOWPASSEOG = 12.0  #Hz
    Fs = sampling_rate / 2.0
    one = np.array(1)
    bEEG = firwin(51, [HIGHPASS / Fs, LOWPASS / Fs],
                  pass_zero=False,
                  window='hamming',
                  scale=True)
    bEOG = firwin(51, [HIGHPASS / Fs, LOWPASSEOG / Fs],
                  pass_zero=False,
                  window='hamming',
                  scale=True)
    eogL = lfilter(bEOG, one, EEG_data[2, :])
    eogR = lfilter(bEOG, one, EEG_data[3, :])
    eeg = (lfilter(bEEG, one, EEG_data[0, :]) +
           lfilter(bEEG, one, EEG_data[1, :])) / 2.0

    if sampling_rate != 100:
        P = 100
        Q = sampling_rate
        eogL = resample_poly(eogL, P, Q)
        eogR = resample_poly(eogR, P, Q)
        eeg = resample_poly(eeg, P, Q)

    totalEpochs = int(len(eogL) / 30.0 / SRATE)
    data_length = 32 * 32 * 3 * totalEpochs
    mean_power = np.empty((3, totalEpochs))
    data = np.empty([data_length], dtype=np.float32)
    window = np.hamming(128)
    epochSize = 32 * 32 * 3

    #STFT based spectrogram computation
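    # each 30 s epoch is 3000 samples at 100 Hz; a 128-sample window hopped by
    # 90 samples gives 32 frames per epoch, and the first 32 FFT bins
    # (roughly 0-24 Hz) are kept per frame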
    for i in range(totalEpochs):
        for j in range(0, 3000 - 128 - 1, 90):
            tIDX = int(j / 90)
            frame1 = abs(
                np.fft.fft(eeg[i * 3000 + j:i * 3000 + j + 128] * window))
            frame2 = abs(
                np.fft.fft(eogL[i * 3000 + j:i * 3000 + j + 128] * window))
            frame3 = abs(
                np.fft.fft(eogR[i * 3000 + j:i * 3000 + j + 128] * window))
            mean_power[:, i] = [
                np.mean(frame1),
                np.mean(frame2),
                np.mean(frame3)
            ]
            data[i * epochSize + tIDX * 32:i * epochSize + tIDX * 32 +
                 32] = frame1[0:32]
            data[i * epochSize + 32 * 32 + tIDX * 32:i * epochSize + 32 * 32 +
                 tIDX * 32 + 32] = frame2[0:32]
            data[i * epochSize + 32 * 32 * 2 + tIDX * 32:i * epochSize +
                 32 * 32 * 2 + tIDX * 32 + 32] = frame3[0:32]

    quality = np.sum(mean_power > 800, 1) * 100 / totalEpochs

    if np.any(quality > 10) and check_quality:
        print(
            "Warning: Electrode Falloff detected, use qc_cfs function to check which channel is problematic"
        )

    signature = bytearray(
        struct.pack('<3sBBBBh??', b'CFS', 1, 32, 32, 3, totalEpochs,
                    compressionbit, hashbit))
    data = data.tobytes()

    raw_digest = []
    if hashbit:
        shaHash = hashlib.sha1()
        shaHash.update(data)
        raw_digest = shaHash.digest()

    if compressionbit:
        data = zlib.compress(data)

    if hashbit:
        stream = signature + raw_digest + data
    else:
        stream = signature + data

    return stream
Example 16
def create_stream_v2(C3,
                     C4,
                     EOGL,
                     EOGR,
                     EMG,
                     sampling_rates,
                     compressionbit=True,
                     hashbit=True,
                     check_quality=True):
    SRATE = 100  # Hz
    LOWPASS = 35.0  # Hz
    HIGHPASS = 0.3  # Hz
    LOWPASSEOG = 35.0  # Hz
    LOWPASSEMG = 80.0  # Hz
    channels = 5  # 2EEG 2EOG 1EMG

    if (sampling_rates[0] < 100 or sampling_rates[1] < 100
            or sampling_rates[2] < 200):
        raise RuntimeError("Sampling rate too low.")

    Fs_EEG = sampling_rates[0] / 2.0
    Fs_EOG = sampling_rates[1] / 2.0
    Fs_EMG = sampling_rates[2] / 2.0

    one = np.array(1)

    bEEG = firwin(51, [HIGHPASS / Fs_EEG, LOWPASS / Fs_EEG],
                  pass_zero=False,
                  window='hamming',
                  scale=True)
    bEOG = firwin(51, [HIGHPASS / Fs_EOG, LOWPASSEOG / Fs_EOG],
                  pass_zero=False,
                  window='hamming',
                  scale=True)
    bEMG = firwin(51, [HIGHPASS / Fs_EMG, LOWPASSEMG / Fs_EMG],
                  pass_zero=False,
                  window='hamming',
                  scale=True)

    eogL = lfilter(bEOG, one, EOGL)
    eogR = lfilter(bEOG, one, EOGR)
    eeg = (lfilter(bEEG, one, C3) + lfilter(bEEG, one, C4)) / 2.0
    emg = lfilter(bEMG, one, EMG)

    if sampling_rates[0] != 100:
        P = 100
        Q = sampling_rates[0]
        eeg = resample_poly(eeg, P, Q)

    if sampling_rates[1] != 100:
        P = 100
        Q = sampling_rates[1]
        eogL = resample_poly(eogL, P, Q)
        eogR = resample_poly(eogR, P, Q)

    if sampling_rates[2] != 200:
        P = 200
        Q = sampling_rates[2]
        emg = resample_poly(emg, P, Q)

    totalEpochs = int(len(eogL) / 30.0 / SRATE)
    data_length = 32 * 32 * (channels - 1) * totalEpochs
    data = np.empty([data_length], dtype=np.float32)
    window_eog = np.hamming(128)
    window_eeg = np.hamming(128)
    window_emg = np.hamming(256)
    epochSize = 32 * 32 * (channels - 1)
    data_frame = np.empty([32, 32, channels - 1])
    mean_power = np.empty([channels - 1, totalEpochs])

    # spectrogram computation
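    # with nperseg=128 and noverlap=36 the hop is 92 samples, so each 30 s
    # epoch (3000 samples at 100 Hz) yields 32 STFT frames; for the EMG,
    # nperseg=256 with noverlap=71 gives the same 32 frames from 6000 samples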
    for i in range(totalEpochs):
        frame1 = stft(eeg[i * 3000:(i + 1) * 3000],
                      window=window_eeg,
                      noverlap=36,
                      boundary=None,
                      nperseg=128,
                      return_onesided=True,
                      padded=False)
        frame2 = stft(eogL[i * 3000:(i + 1) * 3000],
                      window=window_eog,
                      noverlap=36,
                      boundary=None,
                      nperseg=128,
                      return_onesided=True,
                      padded=False)
        frame3 = stft(eogR[i * 3000:(i + 1) * 3000],
                      window=window_eog,
                      noverlap=36,
                      boundary=None,
                      nperseg=128,
                      return_onesided=True,
                      padded=False)
        frame4 = stft(emg[i * 6000:(i + 1) * 6000],
                      window=window_emg,
                      noverlap=71,
                      boundary=None,
                      nperseg=256,
                      return_onesided=True,
                      padded=False)

        data_frame[:, :,
                   0] = abs(frame1[2][1:33, 0:32]) * np.sum(window_eeg)  # EEG
        data_frame[:, :, 1] = abs(frame2[2][1:33, 0:32]) * np.sum(
            window_eog)  # EOG-L
        data_frame[:, :, 2] = abs(frame3[2][1:33, 0:32]) * np.sum(
            window_eog)  # EOG-R
        data_frame[:, :, 3] = block_reduce(
            abs(frame4[2][1:129, :]) * np.sum(window_emg), (4, 1),
            np.mean)  # EMG
        mean_power[:, i] = np.mean(data_frame, (0, 1))

        data[i * epochSize:(i + 1) * epochSize] = np.reshape(data_frame,
                                                             epochSize,
                                                             order='F')

    quality = np.sum(mean_power > 800, 1) * 100 / totalEpochs

    if np.any(quality > 10) and check_quality:
        print(
            "Warning: Electrode Falloff detected, use qc_cfs function to check which channel is problematic"
        )

    signature = bytearray(
        struct.pack('<3sBBBBh??', b'CFS', 2, 32, 32, (channels - 1),
                    totalEpochs, compressionbit, hashbit))

    data = data.tobytes()

    raw_digest = []
    if hashbit:
        shaHash = hashlib.sha1()
        shaHash.update(data)
        raw_digest = shaHash.digest()

    if compressionbit:
        data = zlib.compress(data)

    if hashbit:
        stream = signature + raw_digest + data
    else:
        stream = signature + data

    return stream
Example 17
maxi = np.max(i)
maxq = np.max(q)


print("Samples " + str(len(i)))


[i_filt, W, h] = LPF(i, fc, Fs)
[q_filt, W, h] = LPF(q, fc, Fs)

plt.plot(i, '.-')
plt.plot(q, '.-')
plt.grid()
plt.show()

samples_i = sig.resample_poly(i_filt, 4, 1)
samples_q = sig.resample_poly(q_filt, 4, 1)


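# the interpolated streams are 4x oversampled; taking every 32nd sample at
# offset 154 (apparently found by eye; see the commented-out offset sweep
# below) picks out roughly one sample per symbol period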
si = samples_i[154::32]
sq = samples_q[154::32]

"""
for o in range(32):
    plt.plot(samples_i[o+150::32], samples_q[o+150::32], '.')
"""


plt.plot(si, sq, '.')
plt.show()
Example 18
def pdm_to_pcm(input_file, out_file, pdm_sample_rate, pcm_sample_rate,
               desired_max_abs,
               preserve_all_channels=False, verbose=False, apply_recording_conditioning=False):

    if verbose:
      print("PDM -> PCM")

    binary_file = open(input_file, "rb")

    if verbose:
      print('File length: ' + str(os.stat(input_file).st_size) + ' bytes')

    # Read the whole file at once
    input_data = bytearray(binary_file.read())

    nsamples = len(input_data)

    input_data = np.unpackbits(input_data)

    multi_channel_pdm =  np.reshape(input_data, (len(input_data) // 8, -1)).T

    non_zero_channel = []
    if not preserve_all_channels:
      for ch in range(8):
        if sum(multi_channel_pdm[ch]) != 0:
          non_zero_channel.append(ch)
          if verbose:
            print('Keeping channel ' + str(ch))
    else:
      for ch in range(8):
        non_zero_channel.append(ch)
        if verbose:
          print('Keeping channel ' + str(ch))

    nchannels = len(non_zero_channel)

    upsample_ratio , downsample_ratio = up_down_ratio(pdm_sample_rate, pcm_sample_rate)

    seconds = np.round(float(nsamples) / float(pdm_sample_rate), 3)

    if verbose:
      print("Number of sample: " + str(nsamples) + ' ~ ' + str(seconds) + ' seconds.')
      print("Input rate: " + str(pdm_sample_rate) + 'Hz')
      print("Output rate: " + str(pcm_sample_rate) + 'Hz')
      print("Channel count: " + str(nchannels))
      print("Upsample ratio: " + str(upsample_ratio))
      print("Downsample ratio: " + str(downsample_ratio))

    pcm = np.zeros((nchannels, (nsamples*upsample_ratio // downsample_ratio)), dtype=np.float64)

    factors = get_prime_factors(downsample_ratio)

    factors = np.flipud(factors)

    for ch in range(nchannels):
      if verbose:
        print('processing channel ' + str(ch))
      pdm_ch = non_zero_channel[ch]

      pdm = np.zeros(nsamples, dtype=np.float64)
      pdm = multi_channel_pdm[pdm_ch]*2.0
      pdm -= np.ones(len(multi_channel_pdm[ch]))

      # TODO do this in chunks to save memory
      if upsample_ratio != 1:
        t = signal.resample_poly(pdm, upsample_ratio, 1)
      else:
        t = pdm

      for f in factors:
        t = decimate(t, f, zero_phase = True, n = 21)

      pcm[ch] = t[:len(pcm[ch] )]


    if apply_recording_conditioning:

      # high-pass filter to remove everything below 30 Hz
      b, a = butter_highpass(30., pcm_sample_rate, order=5)
      for ch in range(nchannels):
        pcm[ch] = signal.filtfilt(b, a, pcm[ch], padlen = 1000, padtype='even')
        pcm[ch][0] = 0.0
        
    # Scale the output to the requested level
    m = np.amax(pcm) 

    # make it full scale
    pcm /= m
    pcm *= desired_max_abs

    scipy.io.wavfile.write(out_file, pcm_sample_rate, pcm.T)
    return
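# A hypothetical invocation (file names and rates are examples; 3.072 MHz is
# a common PDM microphone clock, giving an exact ratio to 16 kHz PCM):
pdm_to_pcm('capture.pdm', 'capture.wav',
           pdm_sample_rate=3072000, pcm_sample_rate=16000,
           desired_max_abs=0.5, verbose=True)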
Esempio n. 19
0
 #phase corrections
 phase_cor = []
 for ph_ga, ph_cc in zip(harmonics_phase_ga, harmonics_phase_cc):
     phase_cor.append(ph_cc/ph_ga)
     
 #resampled spectrum
 upsample_factor = int(round(ff_cc))
 downsample_factor = int(round(ff_ga))
 new_fs = frequency_ratio*fs_cc
 up_fs = upsample_factor*fs_cc
 #gcd_fs = fs_cc*upsample_factor//gcd(upsample_factor, downsample_factor)
 ff_max = max(int(round(ff_cc)),int(round(ff_ga)))
 cutoff_frequency = up_fs/(2*ff_max)
 cutoff_frequency_n = cutoff_frequency*2/(up_fs)
 fir_filter = firwin(240,cutoff_frequency_n,window='hamming')    
 wav_ga_resampled = resample_poly(wav_ga,int(round(ff_cc)),int(round(ff_ga)),window=fir_filter)
 #hamming_3 = windows.hamming(wav_ga_resampled.size)
 #wav_ga_resampled = wav_ga_resampled*hamming_3
 y_res = fft(wav_ga_resampled)
 freq_res = fftfreq(y_res.size,1/new_fs)
 new_bins = [int((freq * y_res.size)/new_fs) for freq in frequencies_cc]
 #plt.figure(2)
 #w,h = freqz(fir_filter, fs=up_fs)
 #plt.plot(w[:10],abs(h[:10]))
 #print(fir_filter)
 #harmonics mapping and filtering
 #print(y_res.size,y_cc.size)
 synthesis_spectrum = np.zeros(y_cc.size,dtype=complex)
 #for i in range(len(bins)-1) :
     #if synthesis_spectrum[bins[i]:bins[i+1]].shape==y_res[int(frequencies_cc[1:][i]):int(frequencies_cc[1:][i])].shape:
         #synthesis_spectrum[bins[i]:bins[i+1]] = y_res[bins[i]+freq_shifts[i]:bins[i+1]+freq_shifts[i+1]]#*gains[i]*exp(1j*phase_cor[i])
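# Note: when `window` is an array, resample_poly uses it directly as the FIR
# filter coefficients (scaled internally by `up`) instead of designing its
# default Kaiser filter, which is what lets the hand-designed Hamming filter
# above take effect. A minimal illustration (names hypothetical):
import numpy as np
from scipy.signal import firwin, resample_poly

x = np.random.randn(1000)
taps = firwin(240, 0.1, window='hamming')       # user-designed prototype
y_custom = resample_poly(x, 3, 2, window=taps)  # taps used as the filter
y_default = resample_poly(x, 3, 2)              # default Kaiser design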
Esempio n. 20
0
plt.close('all')

#%% parameters
numBits = 1000
fs = 1000
f = 200
upsqrt = 2
up = upsqrt**2
down = 1


#%% initialization
bits = np.random.randint(0, 4, numBits)        # 2-bit symbols, values 0..3
qpskSyms = np.exp(1j*bits*2*np.pi/4)           # map to the QPSK constellation
rx = sps.resample_poly(qpskSyms, up, down)

#%% checks
plt.figure('Spectrum')
plt.plot(20*np.log10(sp.fft.fftshift(np.abs(sp.fft.fft(rx)))))

plt.figure('Symbols, Original')
plt.plot(np.real(qpskSyms), np.imag(qpskSyms), 'r.')

plt.figure('Symbols, Received')
plt.plot(np.real(rx), np.imag(rx), 'r.')

plt.figure('Symbols, Absolute Rx')
plt.plot(np.abs(rx))

Esempio n. 21
0
def preprocUS(data, t, xd):
    """Analog time-gain compensation is typically applied followed by an 
    anti-aliasing filter (low-pass) and then A/D conversion. The input data is 
    already digitized here, so no need for anti-alias filtering. Following A/D
    conversion, one would ideally begin beamforming, however the summing process
    in beamforming can produce very high values if low frequencies are included. 
    This can result in the generation of a dynamic range in the data that
    exceeds what's allowable by the number of bits, thereby yielding data loss.
    Therefore it's necessary to high-pass filter before beamforming. In addition,
    beamforming is more accurate with a higher sampling rate because the 
    calculated beamforming delays are more accurately achieved. Hence 
    interpolation is used to upsample the signal. Finally, apodization is 
    applied before the beamformer.
    
    This preprocessing function therefore consists of:
    1) time-gain compensation
    2) filtering
    3) interpolation
    4) apodization 
    
    In the filtering step I've appied a band-pass, as higher frequencies are 
    also problematic and are usually addressed after beamforming. 
    
    inputs: data - transmission number x receive channel x time index
            t - time vector [s]
            xd - dector position vector [m]
    
    outputs: dataApod - processed data
             t2 - new time vectors    
    """

    sampleRate = 1 / (t[1] - t[0])
    samplesPerAcq = data.shape[2]

    a0 = 0.4

    # get time-gain compensation vectors based on estimate for propagation
    # distance to each element
    zd = t * c0 / 2
    zd2 = zd**2
    dist1 = zd
    tgc = np.zeros((numProbeChan, samplesPerAcq))
    for r in range(numProbeChan):
        dist2 = np.sqrt(xd[r]**2 + zd2)
        propDist = dist1 + dist2
        tgc[r, :] = getTGC(a0, propDist)

    # apply tgc
    dataAmp = np.zeros(data.shape)
    for m in range(numTxBeams):
        dataAmp[m, :, :] = data[m, :, :] * tgc

    # retrieve filter coefficients

    filtOrd = 201
    lc, hc = 0.5e6, 2.5e6
    lc = lc / (sampleRate / 2)  # normalize to nyquist frequency
    hc = hc / (sampleRate / 2)
    B = signal.firwin(filtOrd, [lc, hc], pass_zero=False)  # band-pass filter

    # specify interpolation factor
    interpFact = 4
    sampleRate = sampleRate * interpFact
    samplesPerAcq2 = samplesPerAcq * interpFact

    # get apodization window
    apodWin = signal.windows.tukey(numProbeChan)  # np.ones(numProbeChan); tukey now lives in signal.windows

    # process
    dataApod = np.zeros((numTxBeams, numProbeChan, samplesPerAcq2))
    for m in range(numTxBeams):
        for n in range(numProbeChan):
            w = dataAmp[m, n, :]
            dataFilt = signal.lfilter(B, 1, w)
            dataInterp = signal.resample_poly(dataFilt, interpFact, 1)
            dataApod[m, n, :] = apodWin[n] * dataInterp

    # create new time vector based on interpolation and filter delay
    freqs, delay = signal.group_delay((B, 1))
    delay = int(delay[0]) * interpFact
    t2 = np.arange(samplesPerAcq2) / sampleRate + t[0] - delay / sampleRate

    # remove signal before t = 0
    f = np.where(t2 < 0)[0]
    t2 = np.delete(t2, f)
    dataApod = dataApod[:, :, f[-1] + 1:]

    return dataApod, t2
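# The time-vector correction above relies on a linear-phase FIR filter having
# a constant group delay of (filtOrd - 1) / 2 samples. A quick check, sampled
# mid-passband to avoid the singular stopband frequencies:
import numpy as np
from scipy import signal

B = signal.firwin(201, [0.1, 0.5], pass_zero=False)   # band-pass FIR, as above
_, gd = signal.group_delay((B, 1), w=[0.3 * np.pi])   # w in rad/sample
assert int(round(gd[0])) == (201 - 1) // 2            # 100 samples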
Esempio n. 22
0
def reconstruct(binfile, outfile):
    # Read in data and settings
    uid = int(binfile.split('.')[0])
    SOI = 0
    recon = []

    with open(binfile, 'rb') as fh:
        SOI = fh.read(2)

        ## For you to modify
        print(outfile)
        print(SOI)
        if SOI != bytes.fromhex("FFD1"):
            raise Exception("Start of File marker not found!")
        M = int.from_bytes(fh.read(2), "big")
        N = int.from_bytes(fh.read(2), "big")
        recon = []  #np.zeros(2,M,N,3)
        temp = []
        quality = int.from_bytes(fh.read(2), "big")
        skipper = int.from_bytes(fh.read(2), "big")
        rate = int.from_bytes(fh.read(2), "big")
        SOI = fh.read(2)
        count = 0
        up = 4
        while SOI != bytes.fromhex("FFD2"):
            if SOI != bytes.fromhex("FFD8"):
                raise Exception("Start of Image marker not found!")
            bits = ()
            L = 0
            if SOI != bytes.fromhex("FFDA"):
                SOI = fh.read(2)
            for _ in range(5):
                ba = bitarray.bitarray()
                for b in iter(lambda: fh.read(2), bytes.fromhex("FFDA")):
                    #print(b)
                    ba.frombytes(b)
                    #if L <= 0:
                    #    print(L,'\n\nreading bits\n:',ba,'\n')
                bits = (*bits, ba)
                #print('run:',L,'inside loop\n\n\n',bits)
                L += 1
            ba = bitarray.bitarray()
            for b in iter(lambda: fh.read(2), bytes.fromhex("FFD9")):
                ba.frombytes(b)
            bits = (*bits, ba)
            #print('lenth of bits is :',len(bits))
            #print('in run:',count,'the bits are:\n',bits,'\n\n\n')
            #for i in range(len(bits)):
            #    print(i,' run:  ',bits[i],'\n\n\n\n\n')
            x = decode_image(bits, M, N, quality)
            temp.append(x)
            #x = Image.fromarray(x.astype(np.uint8))
            count += 1
            if count % skipper == 0:
                for i in range(skipper - 1):
                    h = (x + temp[count - 2]) / 2
                    h = signal.resample_poly(h, up, 1, axis=0, padtype='mean')
                    h = signal.resample_poly(h, up, 1, axis=1, padtype='mean')
                    recon.append(Image.fromarray(h.astype(np.uint8)))
            x = signal.resample_poly(x, up, 1, axis=0, padtype='mean')
            x = signal.resample_poly(x, up, 1, axis=1, padtype='mean')
            recon.append(Image.fromarray(x.astype(
                np.uint8)))  #Image.fromarray(np_im)
            SOI = fh.read(2)
    #print('\n:second:',recon.shape)

    # signal.resample_poly(Y, up, 1, padtype='mean')
    for i in range(len(recon)):
        im_ = recon[i]
        rec_fname = "frame_{:02d}.tiff".format(i + 1)
        im_.save(rec_fname, save_all=True)
    #recon[0].save(outfile,save_all=True, append_images=recon[1:])
    GIF_save(path='', framerate=rate)

    ##########
    with open(binfile, 'rb') as f:
        B = f.read()  # bug fix: was fh.read() on the already-closed handle

    # Check length
    if len(B) == 0:
        print("Empty data!")
        msg = "empty data component"
        ffail = str(uid) + '.fail'
        with open(ffail, 'w') as f:
            f.write(msg)
        return ffail
    if (len(B) - 4) != np.frombuffer(B[:4], dtype='<u4')[0]:
        print("Length of received bytes ({}) != length header ({})".format(
            len(B) - 4,
            np.frombuffer(B[:4], dtype='<u4')[0]))
        msg = "length mismatch"
        ffail = str(uid) + '.fail'
        with open(ffail, 'w') as f:
            f.write(msg)
        return ffail
    data_out = B[4:]  # Drop uint containing bit length

    ## Save as the given filename
    # Write output
    with open(outfile + '1', 'wb') as f:
        # Drop initial uint containing bit length
        f.write(data_out)
    return outfile
Esempio n. 23
0
def resample(x, sr1=25, sr2=125, axis=0):
    # Rational rate change by sr1/sr2; use the Fraction's public
    # numerator/denominator rather than the private _numerator/_denominator.
    frac = Fraction(sr1, sr2)
    return resample_poly(x, frac.numerator, frac.denominator,
                         axis=axis).astype(np.float32)
Esempio n. 24
0
def get_network_input(channels,
                      S,
                      T,
                      fs=8000,
                      fs_out=100000,
                      fds=100,
                      Tsil=[0.5, 0.2]):
    '''
    Obtain the input to all excitatory neurons in the network, given the
    spectral (S) and temporal (T) parts of the kernel. Implementation based on (Hyafil, et al., 2015).

    Inputs:
        channels: auditory channels from the model of subcortical processing (128 x N)
        S: spectral component of the STRF filter (1 x 32)
        T: temporal component of the STRF filter (1 x 6)
        fs: sampling rate of the auditory channel input (default: 8 kHz)
        fs_out: output sampling rate in Hz (i.e. the simulation sampling frequency, default: 100 kHz)
        fds: sampling rate of the STRF kernel in Hz (default: 100 Hz)
        Tsil: added silence [before onset, after the end] in s
    Outputs:
        Iext: auditory input to the model (84 x N matrix)
        dt: simulation timestep, in ms
    '''
    # Silence from seconds -> samples
    Tsil = (np.array(Tsil) * fs).astype(int)  # np.int was removed from NumPy
    nchan = S.shape[1]

    # Auditory channels to be used. Here, every 4th to obtain 32 channels projected to Ge neurons.
    ch_idx = (np.arange(1, nchan + 1) * np.ceil(channels.shape[1] / nchan) -
              1).astype(int)

    # Obtain input to Ge neurons
    GE_input = channels[:, ch_idx].T

    # Obtain input to Te neurons
    TE_input = channels[:, ch_idx].T
    TE_input = np.dot(S, TE_input)  # Process input through spectral weights
    # Downsample the resulting signal to match the temporal resolution of the STRF (here 100 Hz)
    TE_input = signal.resample_poly(smooth(TE_input[0, :], int(fs / fds)),
                                    fds, fs)
    # Filter the resulting signal using the temporal portion of the STRF kernel
    TE_input = signal.lfilter(T[0, :], np.array([1, 0, 0, 0, 0, 0]), TE_input)
    # Upsample back to the original sampling rate (sample-and-hold padding)
    TE_input = np.concatenate(
        [np.ones(int(fs / fds)) * i for i in TE_input])
    TE_input = np.tile(
        TE_input, (10, 1))  # Tile the resulting signal x10, input to Te cells

    # Control step
    # If one input is longer than the other (due to resampling rounding)
    # crop both to the length of the shorter one.
    TE_input = TE_input[:, :min([TE_input.shape[1], GE_input.shape[1]])]
    GE_input = GE_input[:, :min([TE_input.shape[1], GE_input.shape[1]])]

    # Pre-allocate matrix of network inputs in the simulations
    Iext = np.zeros((84, sum(Tsil) + TE_input.shape[1]))
    # Populate with the obtained inputs to Te and Ge neurons (incl. silence before and after)
    Iext[:10, Tsil[0]:(-1 * Tsil[1])] = TE_input[:, :]
    Iext[20:52, Tsil[0]:(-1 * Tsil[1])] = GE_input[:, :]

    # Upsampling to the desired fs_out
    if fs_out > fs:
        Iext = signal.resample_poly(Iext, fs_out, fs, axis=1)
        dt = 1000. / fs_out  # in ms
    else:
        dt = 1000. / fs  # in ms

    return Iext, dt
Esempio n. 25
0
 def up_sample_example(iq_mat, label, up):
     iq_mat = signal.resample_poly(iq_mat, up, down=1, axis=0)
     return iq_mat, label
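# A hypothetical call, doubling the rate of an I/Q matrix along its time
# axis (shape and label are examples):
import numpy as np

iq2, lbl = up_sample_example(np.random.randn(1024, 2), label=0, up=2)
# iq2.shape == (2048, 2): axis 0 was upsampled by 2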
Esempio n. 26
0
def recognize_one_audio(input_path):
    # load audio
    logger.info('Loading wavfile...')
    wav, sr = sf.read(input_path)
    
    if wav.dtype != np.float32:
        wav = wav.astype(np.float32)

    if wav.ndim == 2 :
        if args.stereo:
            wav = np.transpose(wav,(1,0))   # stereo to batch
        else:
            wav = (wav[:,0][np.newaxis,:] + wav[:,1][np.newaxis,:])/2   # convert to mono
    else:
        wav = wav[np.newaxis,:]

    calc_time(wav.shape[1], sr)

    # convert sample rate
    logger.info('Converting sample rate...')
    if sr != DESIRED_SR:
        if args.ailia_audio:
            wav = ailia.audio.resample(wav,sr,DESIRED_SR)
        else:
            wav = signal.resample_poly(wav, DESIRED_SR, sr, axis=1)

    # apply preenphasis filter
    logger.info('Generating input feature...')
    wav = preemphasis(wav)

    input_feature = tfconvert(wav, WINDOW_LEN, HOP_LEN, MULT)

    # create instance
    if not args.onnx :
        logger.info('Use ailia')
        env_id = args.env_id
        logger.info(f'env_id: {env_id}')
        memory_mode = ailia.get_memory_mode(reuse_interstage=True)
        session = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id, memory_mode=memory_mode)
    else :
        logger.info('Use onnxruntime')
        import onnxruntime
        session = onnxruntime.InferenceSession(WEIGHT_PATH)

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for c in range(5) :
            start = int(round(time.time() * 1000))
            sep = src_sep(input_feature, session)
            end = int(round(time.time() * 1000))
            logger.info("\tprocessing time {} ms".format(end-start))
    else:
        sep = src_sep(input_feature, session)

    # postprocessing
    logger.info('Start postprocessing...')
    if LPF_CUTOFF > 0 :
        sep = lowpass(sep, LPF_CUTOFF, DESIRED_SR)

    out_wav = inv_preemphasis(sep).clip(-1.,1.)
    out_wav = out_wav.swapaxes(0,1)
    
    # save separated signal
    savepath = get_savepath(args.savepath, input_path)
    logger.info(f'saved at : {savepath}')

    sf.write(savepath, out_wav, DESIRED_SR)
    
    logger.info('Saved separated signal. ')
    logger.info('Script finished successfully.')
Esempio n. 27
0
def pcm_to_pdm(in_wav_file, out_pdm_file, pdm_sample_rate, verbose = False):

    pcm_sample_rate, pcm = scipy.io.wavfile.read(in_wav_file, 'r')

    nsamples = pcm.shape[0]

    multi_channel_pcm = pcm.T

    if len(multi_channel_pcm.shape ) == 1:
       multi_channel_pcm = np.reshape(multi_channel_pcm, (1, nsamples))

    # FIXME: this needs a handler for float types
    first_sample = multi_channel_pcm[0][0]
    if isinstance( first_sample, ( int, np.int16, np.int32 ) ):
      pcm_full_scale = np.iinfo(first_sample).max
      if verbose:
        print("Full scale PCM value: " + str(pcm_full_scale) + ' (Integer type PCM)')
    else:
      pcm_full_scale = 1.0
      if verbose:
        print("Full scale PCM value: " + str(pcm_full_scale) + ' (Float type PCM)')

    nchannels = multi_channel_pcm.shape[0]

    upsample_ratio , downsample_ratio = up_down_ratio(pcm_sample_rate, pdm_sample_rate)

    if nchannels > 8:
      print("Error: More than 8 channels is not supported, found " + str(nchannels) + ".")
      return

    seconds = np.round(float(nsamples) / float(pcm_sample_rate), 3)

    if verbose:
      print("PCM -> PDM")
      print("Number of sample: " + str(nsamples) + ' ~ ' + str(seconds) + ' seconds.')
      print("Channel count: " + str(nchannels))
      print("Upsample ratio: " + str(upsample_ratio))
      print("Downsample ratio: " + str(downsample_ratio))

    # Stability limit
    pdm_magnitude_stability_limit = 0.4 # This seems to be safe

    output_length = nsamples*upsample_ratio // downsample_ratio

    pdm_samples = np.zeros((8, output_length))

    max_abs_pcm_all_channels = 0.
    for ch in range(nchannels):
      if verbose:
        print('processing channel ' + str(ch))
      pcm = multi_channel_pcm[ch]

      # limit the max pcm input to 0.4 - for stability of the modulator
      pcm = np.asarray(pcm, dtype=np.float64) / pcm_full_scale
      max_abs_pcm = max(abs(pcm))
      max_abs_pcm_all_channels = max(max_abs_pcm, max_abs_pcm_all_channels)
      if max_abs_pcm >= pdm_magnitude_stability_limit:

        pcm /= max(abs(pcm))
        pcm *= pdm_magnitude_stability_limit
        if verbose:
          print('Abs max sample: '+ str(max_abs_pcm) +' limiting the abs max sample to 0.4')
      else :
        if verbose:
          print('No sample limiting applied')
      # TODO do this in chunks to save memory
      up_sampled_pcm = signal.resample_poly(pcm, upsample_ratio, downsample_ratio)

      pdm_samples[ch] = delta_sigma_5th_order(up_sampled_pcm, output_length )

      # Write output to file
      # Convert from [-1, 1] -> [0, 1] range
      pdm_samples[ch] = (pdm_samples[ch]*0.5) + np.ones(len(pdm_samples[ch]))*0.5

    # pdm_samples = np.flip(pdm_samples, 0)
    pdm_samples = np.flipud(pdm_samples)

    my_bytes = np.array(pdm_samples, dtype=np.uint8)
    b = np.packbits(my_bytes.T, axis = -1)

    print("Max abs value {}".format(max_abs_pcm_all_channels))

    fp = open(out_pdm_file,'wb')

    b.tofile(fp)
    return
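# A hypothetical invocation (file names and rate are examples; 3.072 MHz is
# a common PDM microphone clock):
pcm_to_pdm('speech.wav', 'speech.pdm', pdm_sample_rate=3072000, verbose=True)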
Esempio n. 28
0
def load_edf_file(filename, channels):
    # Load EDF file
    f = pyedflib.EdfReader(filename)
    # Channel labels
    channel_labels = f.getSignalLabels()
    # Sampling frequencies
    fss = f.getSampleFrequencies()
    # Pre-allocation of x
    x = []
    # Extract channels
    for channel in channels:
        # Is channel referenced?
        if any([x in channel_alias[channel] for x in channel_labels]):
            channel_idx = channel_labels.index(
                next(
                    filter(lambda i: i in channel_alias[channel],
                           channel_labels)))
            # Gain factor
            # TODO: check if this makes sense
            g = f.getPhysicalMaximum(channel_idx) / f.getDigitalMaximum(
                channel_idx)
            # Read signal
            sig = g * f.readSignal(channel_idx)

        # Else: reference channels
        elif any([x in unref_channel_alias[channel] for x in channel_labels]):
            channel_idx = channel_labels.index(
                next(
                    filter(lambda i: i in unref_channel_alias[channel],
                           channel_labels)))
            ref_idx = channel_labels.index(
                next(
                    filter(lambda i: i in ref_channel_alias[channel],
                           channel_labels)))
            # Gain factor
            # TODO: check if this makes sense
            g = f.getPhysicalMaximum(channel_idx) / f.getDigitalMaximum(
                channel_idx)
            g_ref = f.getPhysicalMaximum(ref_idx) / f.getDigitalMaximum(
                ref_idx)
            # Assuming fs for signal and reference is identical
            sig = g * f.readSignal(channel_idx) - g_ref * f.readSignal(ref_idx)

        # Else empty
        else:
            sig = []

        # If not empty
        if len(sig) != 0:
            # Resampling
            fs = fss[channel_idx]
            if fs != des_fs[channel]:
                resample_frac = Fraction(des_fs[channel] /
                                         fs).limit_denominator(100)
                sig = resample_poly(sig, resample_frac.numerator,
                                    resample_frac.denominator)

            # Filter signals
            if hp_fs[channel] != 0:
                sig_filtered = psg_highpass_filter(sig,
                                                   hp_fs[channel],
                                                   des_fs[channel],
                                                   order=16)
            else:
                sig_filtered = sig
            # Scale signal
            sig_scaled = rescale(sig_filtered, 'soft')
        else:
            # Empty channel: append a placeholder that is replaced with zeros
            # below. (Appending sig_scaled unconditionally would reuse the
            # previous channel's data, or raise NameError on the first one.)
            sig_scaled = sig

        x.append(sig_scaled)

    # Replace empty with zeros
    N = max([len(s) for s in x])
    for i, channel in enumerate(channels):
        if len(x[i]) == 0:
            x[i] = np.zeros(N)

    data = {'x': x, 'fs': [des_fs[x] for x in channels], 'channels': channels}
    f._close()
    return data
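# load_edf_file depends on module-level lookup tables (channel_alias,
# unref_channel_alias, ref_channel_alias, des_fs, hp_fs) that are not shown.
# A hypothetical minimal configuration and call (none of this is from the
# original source):
channel_alias = {'C3': ['C3-A2', 'C3-M2']}   # referenced label variants
unref_channel_alias = {'C3': ['C3']}         # unreferenced label variants
ref_channel_alias = {'C3': ['A2', 'M2']}     # reference electrode labels
des_fs = {'C3': 128}                         # target sampling rate (Hz)
hp_fs = {'C3': 0.3}                          # high-pass cutoff (Hz); 0 = off

data = load_edf_file('recording.edf', channels=['C3'])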
Esempio n. 29
0
    def resampling_poly(self, audio):
        audio = signal.resample_poly(audio, 1, 2)  # halve the rate (e.g. 16 kHz -> 8 kHz)
        audio = tf.convert_to_tensor(audio, dtype=tf.float32)

        return audio
Esempio n. 30
0
dist_in_samp = my_int(ds_config['distance'] * ds_config['target_fs'])
w_in_samp = my_int(ds_config['width'] * ds_config['target_fs'])
hw_in_samp = my_int(ds_config['width'] * ds_config['target_fs'] / 2)

# sig.gaussian was removed from scipy.signal; the window now lives in sig.windows
my_win = sig.windows.gaussian(hw_in_samp + 1, (hw_in_samp - 1) / 5)
my_win = np.diff(my_win) * sig.windows.gaussian(hw_in_samp, (hw_in_samp - 1) / 5)

for this_rec in records:

    data, field = wf.rdsamp(os.path.join(rec_path, this_rec))

    pq_ratio = ds_config['target_fs'] / field['fs']
    resample_frac = Fraction(pq_ratio).limit_denominator(20)
    # recompute the actual ratio after fraction reduction
    pq_ratio = resample_frac.numerator / resample_frac.denominator
    data = sig.resample_poly(data, resample_frac.numerator,
                             resample_frac.denominator)

    detection_sig = np.abs(
        sig.filtfilt(my_win,
                     1,
                     data[:my_int(
                         np.min([
                             data.shape[0], ds_config['explore_win'] *
                             ds_config['target_fs']
                         ])), :],
                     axis=0))

    this_scale = np.repeat(np.nan, field['n_sig']).reshape(field['n_sig'], 1)

    for ii in range(field['n_sig']):
        # The find_peaks call is truncated in the source; the distance
        # argument below is an assumed completion using dist_in_samp.
        posible_beats, _ = sig.find_peaks(detection_sig[:, ii],
                                          distance=dist_in_samp)
Esempio n. 31
0
fsResample = 16000  # resampling frequency (Hz)
FFT_LENGTH = 4096  # FFT length for the STFT (points)
HOP_LENGTH = 2048  # frame shift for the STFT (points)
N_SOURCES = 2  # number of sources
N_ITER = 100  # number of ILRMA iterations (internal parameter)
N_BASES = 10  # number of ILRMA bases (internal parameter)

# ### Create the mixture ###
# sig: a 3-D array, signal x channel x source
fs, sig_src1 = wavfile.read(SRC_WAV1)
fs, sig_src2 = wavfile.read(SRC_WAV2)
sig_src2 = sig_src2[:len(sig_src1)]
sig = np.stack([sig_src1, sig_src2], axis=1)

# Resample the original sources (polyphase filtering)
sig_src1 = signal.resample_poly(sig[:, :, 0], fsResample, fs)
sig_src2 = signal.resample_poly(sig[:, :, 1], fsResample, fs)
sig_resample = np.stack([sig_src1, sig_src2], axis=1)

# Create the mixed signals
# For each channel, sum the two sources
mix1 = sig_resample[:, 0, 0] + sig_resample[:, 0, 1]  # channel 0 (left)
mix2 = sig_resample[:, 1, 0] + sig_resample[:, 1, 1]  # channel 1 (right)
mixed = np.stack([mix1, mix2], axis=1)

# ### Run the source separation ###
# Analysis window
win_a = pra.hamming(FFT_LENGTH)

# Synthesis window: precomputed from the analysis window
win_s = pra.transform.compute_synthesis_window(win_a, HOP_LENGTH)
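# The snippet stops before the separation itself. With pyroomacoustics the
# remaining steps might look like this (a sketch, assuming pra.bss.ilrma and
# the STFT helpers; argument names follow the pyroomacoustics API):
X = pra.transform.stft.analysis(mixed, FFT_LENGTH, HOP_LENGTH, win=win_a)
Y = pra.bss.ilrma(X, n_iter=N_ITER, n_components=N_BASES)
est = pra.transform.stft.synthesis(Y, FFT_LENGTH, HOP_LENGTH, win=win_s)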
Esempio n. 32
0
 def __call__(self, data):
     signal, label, orig_fs = data
     return resample_poly(signal, self.fs, orig_fs), label, self.fs
Esempio n. 33
0
def demo_4d_rsHRF(input_file, mask_file, output_dir, para, mode='bids'):
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    if mode == 'bids':
        name = input_file.filename.split('/')[-1].split('.')[0]
        v = spm_dep.spm.spm_vol(mask_file.filename)
    elif mode == 'bids w/ atlas':
        name = input_file.filename.split('/')[-1].split('.')[0]
        v = spm_dep.spm.spm_vol(mask_file)
    else:
        name = input_file.split('/')[-1].split('.')[0]
        v = spm_dep.spm.spm_vol(mask_file)
    brain = spm_dep.spm.spm_read_vols(v)

    voxel_ind = np.where(brain > 0)[0]

    temporal_mask = []

    if mode == 'bids' or mode == 'bids w/ atlas':
        v1 = spm_dep.spm.spm_vol(input_file.filename)
    else:
        v1 = spm_dep.spm.spm_vol(input_file)

    if v1.header.get_data_shape()[:-1] != v.header.get_data_shape():
        print('The dimension of your mask is different than '
              'the one of your fMRI data!')
        return
    else:
        data = v1.get_data()
        nobs = data.shape[3]
        data1 = np.reshape(data, (-1, nobs), order='F').T
        bold_sig = stats.zscore(data1[:, voxel_ind], ddof=1)
        bold_sig = np.nan_to_num(bold_sig)
        bold_sig = processing. \
            rest_filter. \
            rest_IdealFilter(bold_sig, para['TR'], para['passband'])
        data_deconv = np.zeros(bold_sig.shape)
        event_number = np.zeros((1, bold_sig.shape[1]))

        print('Retrieving HRF ...')

        if 'canon' in para['estimation']:
            beta_hrf, bf, event_bold = \
                canon.canon_hrf2dd.wgr_rshrf_estimation_canonhrf2dd_par2(
                    bold_sig, para, temporal_mask
                )
            hrfa = np.dot(bf, beta_hrf[np.arange(0, bf.shape[1]), :])
        elif 'FIR' in para['estimation']:
            para['T'] = 1
            hrfa, event_bold = sFIR. \
                smooth_fir. \
                wgr_rsHRF_FIR(bold_sig, para, temporal_mask)

        nvar = hrfa.shape[1]
        PARA = np.zeros((3, nvar))

        for voxel_id in range(nvar):
            hrf1 = hrfa[:, voxel_id]
            PARA[:, voxel_id] = \
                parameters.wgr_get_parameters(hrf1, para['TR'] / para['T'])

        print('Done')

        print('Deconvolving HRF ...')

        T = np.around(para['len'] / para['TR'])

        if para['T'] > 1:
            hrfa_TR = signal.resample_poly(hrfa, 1, para['T'])
        else:
            hrfa_TR = hrfa

        for voxel_id in range(nvar):
            hrf = hrfa_TR[:, voxel_id]
            H = np.fft.fft(
                np.append(hrf, np.zeros(nobs - max(hrf.shape))), axis=0)
            M = np.fft.fft(bold_sig[:, voxel_id])
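            # Wiener-style deconvolution: regularized spectral division,
            # with 0.1 * mean power of H as the regularizer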
            data_deconv[:, voxel_id] = \
                np.fft.ifft(H.conj() * M / (H * H.conj() + .1*np.mean((H * H.conj()))))
            event_number[:, voxel_id] = np.amax(event_bold[voxel_id].shape)

        if mode == 'bids' or mode == 'bids w/ atlas':
            try:
                sub_save_dir = os.path.join(output_dir,
                                            'sub-' + input_file.subject,
                                            'session-' + input_file.session,
                                            input_file.modality)
            except AttributeError as e:
                sub_save_dir = os.path.join(output_dir,
                                            'sub-' + input_file.subject,
                                            input_file.modality)
        else:
            sub_save_dir = output_dir

        if not os.path.isdir(sub_save_dir):
            os.makedirs(sub_save_dir, exist_ok=True)

        sio.savemat(os.path.join(sub_save_dir, name + '_hrf.mat'), {
            'para': para,
            'hrfa': hrfa,
            'event_bold': event_bold,
            'PARA': PARA
        })
        HRF_para_str = ['Height.nii', 'Time2peak.nii', 'FWHM.nii']
        data = np.zeros(v.get_data().shape).flatten(order='F')

        for i in range(3):
            fname = os.path.join(sub_save_dir, name + '_' + HRF_para_str[i])
            data[voxel_ind] = PARA[i, :]
            data = data.reshape(v.get_data().shape, order='F')
            spm_dep.spm.spm_write_vol(v, data, fname)
            data = data.flatten(order='F')

        fname = os.path.join(sub_save_dir, name + '_event_number.nii')
        data[voxel_ind] = event_number
        data = data.reshape(v.get_data().shape, order='F')
        spm_dep.spm.spm_write_vol(v, data, fname)

        data = np.zeros(v1.get_data().shape)
        dat3 = np.zeros(v1.header.get_data_shape()[:-1]).flatten(order='F')
        for i in range(nobs):
            fname = os.path.join(sub_save_dir, name + '_deconv')
            dat3[voxel_ind] = data_deconv[i, :]
            dat3 = dat3.reshape(v1.header.get_data_shape()[:-1], order='F')
            data[:, :, :, i] = dat3
            dat3 = dat3.flatten(order='F')
        spm_dep.spm.spm_write_vol(v1, data, fname)

        event_plot = lil_matrix((1, nobs))
        event_plot[:, event_bold[0]] = 1
        event_plot = np.ravel(event_plot.toarray())

        plt.figure()
        plt.plot(para['TR'] * np.arange(1,
                                        np.amax(hrfa[:, 0].shape) + 1),
                 hrfa[:, 0],
                 linewidth=1)
        plt.xlabel('time (s)')
        plt.savefig(os.path.join(sub_save_dir, name + '_plot_1.png'))

        plt.figure()
        plt.plot(para['TR'] * np.arange(1, nobs + 1),
                 np.nan_to_num(stats.zscore(bold_sig[:, 0], ddof=1)),
                 linewidth=1)
        plt.plot(para['TR'] * np.arange(1, nobs + 1),
                 np.nan_to_num(stats.zscore(data_deconv[:, 0], ddof=1)),
                 color='r',
                 linewidth=1)
        markerline, stemlines, baseline = \
            plt.stem(para['TR'] * np.arange(1, nobs + 1), event_plot)
        plt.setp(baseline, 'color', 'k', 'markersize', 1)
        plt.setp(stemlines, 'color', 'k', 'markersize', 1)
        plt.setp(markerline, 'color', 'k', 'markersize', 3, 'marker', 'd')
        plt.legend(['BOLD', 'deconvolved', 'events'])
        plt.xlabel('time (s)')
        plt.savefig(os.path.join(sub_save_dir, name + '_plot_2.png'))
Esempio n. 34
0
def resample(x, up, dn):
    '''rational resampling by a factor of up/dn'''
    return signal.resample_poly(x, up, dn, padtype='line')
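# padtype='line' extends the signal with a line fitted through its end
# points, avoiding the edge transients of the default zero ('constant')
# padding. A small demonstration (a sketch):
import numpy as np
from scipy import signal

x = np.linspace(0, 1, 100)                              # a ramp exposes edge effects
y_zero = signal.resample_poly(x, 3, 1)                  # default zero padding
y_line = signal.resample_poly(x, 3, 1, padtype='line')  # edges follow the ramp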
Esempio n. 35
0
from scipy import signal
from scipy.io import wavfile
import numpy as np

##########################################
#miscellaneous useful functions, in python
##########################################

samplerate, data = wavfile.read(
    'C:/Apps/INSTINCT/Out/decimateTest/AU-ALIC01-170421-115000.wav')

print("done 1")

up = 5
down = 8

data_resamp = signal.resample_poly(data, up, down)

print("done 2")

wavfile.write(
    'C:/Apps/INSTINCT/Out/decimateTest/AU-ALIC01-170421-115000_python.wav',
    int(samplerate * up / down), np.asarray(data_resamp, dtype=np.int16))

print("done 3")