Example #1
def test_parzen_win():
    test_npoints = 3
    test_result = np.array([0.07407407, 1, 0.07407407])
    win = parzen(test_npoints)
    assert np.allclose(win, test_result)

    test_npoints = 1
    win = parzen(test_npoints)
    assert win == 1
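The expected values above follow directly from the piecewise cubic definition of the Parzen window; a minimal sketch of that calculation (assuming numpy and scipy.signal.windows.parzen are available):

import numpy as np
from scipy.signal.windows import parzen

# Reproduce parzen(3) from the piecewise definition: with x = |n| / (M / 2)
# and n running from -(M - 1)/2 to (M - 1)/2,
#   w = 1 - 6*x**2 + 6*x**3   for |n| <= (M - 1) / 4
#   w = 2 * (1 - x)**3        otherwise
M = 3
n = np.arange(M) - (M - 1) / 2
x = np.abs(n) / (M / 2)
w = np.where(np.abs(n) <= (M - 1) / 4,
             1 - 6 * x**2 + 6 * x**3,
             2 * (1 - x)**3)
print(w)                          # [0.07407407 1.         0.07407407]
assert np.allclose(w, parzen(M))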
Example #2
 def test_basic(self):
     assert_allclose(windows.parzen(6),
                     [0.009259259259259254, 0.25, 0.8611111111111112,
                      0.8611111111111112, 0.25, 0.009259259259259254])
     assert_allclose(windows.parzen(7, sym=True),
                     [0.00583090379008747, 0.1574344023323616,
                      0.6501457725947521, 1.0, 0.6501457725947521,
                      0.1574344023323616, 0.00583090379008747])
     assert_allclose(windows.parzen(6, False),
                     [0.00583090379008747, 0.1574344023323616,
                      0.6501457725947521, 1.0, 0.6501457725947521,
                      0.1574344023323616])
Example #3
 def _get_acf(self, smooth=False):
     self._is_valid_acf()
     acf = atleast_1d(self.data).ravel()
     n = self._get_lag_where_acf_is_almost_zero()
     if smooth:
         rwin = parzen(2 * n + 1)
         return acf[:n] * rwin[n:2 * n]
     else:
         return acf[:n]
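A standalone sketch of the tapering idea in _get_acf, using a toy autocorrelation in place of self.data (the decay rate and cut-off lag here are assumptions for illustration): the first n lags are kept and damped by the right half of a Parzen window of length 2*n + 1, which falls from 1 at lag 0 to near 0 at lag n.

import numpy as np
from scipy.signal.windows import parzen

acf = np.exp(-0.1 * np.arange(100))   # toy ACF, decaying towards zero
n = 50                                # lag where the ACF is treated as almost zero
rwin = parzen(2 * n + 1)              # symmetric window peaking at index n
smoothed = acf[:n] * rwin[n:2 * n]    # taper the kept lags with the right half
print(smoothed[:3], smoothed[-3:])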
Example #4
def circular_distribution(amples, angles, angle_step, nkernel=15):
    from scipy.ndimage import convolve1d  # scipy.ndimage.filters is deprecated
    from scipy.signal.windows import parzen

    kernel = parzen(nkernel)
    bins = np.arange(-np.pi, np.pi + angle_step, angle_step)
    distr, _ = np.histogram(angles, bins=bins, weights=amples)

    distr = convolve1d(distr, kernel, mode="wrap")
    bins = np.convolve(bins, [0.5, 0.5], mode="valid")

    return bins, distr
Example #5
def circular_distribution(amples,
                          angles,
                          angle_step,
                          nkernel=15,
                          density=True):
    """
    return circular distribution smoothed by the parsen kernel
    """
    kernel = parzen(nkernel)
    bins = np.arange(-np.pi, np.pi + angle_step, angle_step)
    distr, _ = np.histogram(angles, bins=bins, weights=amples, density=density)

    distr = convolve1d(distr, kernel, mode="wrap")
    bins = np.convolve(bins, [0.5, 0.5], mode="valid")

    return bins, distr
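A hypothetical call with synthetic data, assuming circular_distribution and its imports (numpy as np, scipy.ndimage.convolve1d, scipy.signal.windows.parzen) are in scope:

import numpy as np

rng = np.random.default_rng(0)
angles = rng.vonmises(mu=0.0, kappa=2.0, size=5000)   # phases in [-pi, pi]
amples = np.abs(rng.normal(1.0, 0.2, size=5000))      # per-event amplitudes

bins, distr = circular_distribution(amples, angles, angle_step=0.1)
print(bins.shape, distr.shape)   # matching bin centres and smoothed histogram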
Example #6
def get_phase_disrtibution(train, lfp, fs):
    """
    compute disrtibution of spikes by phases of LFP
    """
    if train.size == 0:
        return np.empty(0, dtype=float), np.empty(
            0, dtype=float), np.empty(0, dtype=float)

    nkernel = 15

    analitic_signal = sig.hilbert(lfp)
    lfp_phases = np.angle(analitic_signal, deg=False)
    lfp_ampls = np.abs(analitic_signal)

    train = np.floor(train * fs * 0.001).astype(
        int)  # multiply by 0.001 because train is in ms and fs is in Hz

    train = train[train < lfp.size - 1]

    train_phases = lfp_phases[train]
    train_ampls = lfp_ampls[train]

    R = np.abs(np.mean(analitic_signal[train]))

    count, bins = np.histogram(train_phases,
                               bins=50,
                               density=True,
                               range=[-np.pi, np.pi],
                               weights=train_ampls)

    kernel = parzen(nkernel)

    # distr, _ = np.histogram(angles, bins=bins, weights=amples, density=density)

    count = convolve1d(count, kernel, mode="wrap")

    bins = np.convolve(bins, [0.5, 0.5], mode="valid")
    return bins, count, R
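A hypothetical call, assuming the function above and its imports (numpy as np, scipy.signal as sig, parzen, convolve1d) are in scope; the LFP is a toy 8 Hz oscillation sampled at 1 kHz and the spike train is given in milliseconds:

import numpy as np

fs = 1000.0                                        # sampling rate, Hz
t = np.arange(0, 1.0, 1.0 / fs)
lfp = np.sin(2 * np.pi * 8 * t)                    # toy 8 Hz LFP
train = np.sort(np.random.uniform(0, 1000, 200))   # spike times in ms

bins, count, R = get_phase_disrtibution(train, lfp, fs)
print(R)   # mean resultant length of the spike-triggered analytic signal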
Example #7
def cossfrequency_phase_amp_coupling(phase_signal,
                                     coefAmp,
                                     phasebins=20,
                                     nkernel=15):
    """
    compute disribution amplitudes by phases of phase_signal
    """
    phase_signal = np.angle(sig.hilbert(phase_signal), deg=False)

    coefAmp = np.abs(coefAmp)
    coefAmp = stat.zscore(coefAmp, axis=1)

    coupling = np.empty(shape=(coefAmp.shape[0], phasebins), dtype=float)

    kernel = parzen(nkernel)
    for freq_idx in range(coefAmp.shape[0]):
        coup, _ = np.histogram(phase_signal,
                               bins=phasebins,
                               weights=coefAmp[freq_idx, :],
                               range=[-np.pi, np.pi])
        coup = convolve1d(coup, kernel, mode="wrap")
        coupling[freq_idx, :] = coup
    coupling = coupling / (coupling.max() - coupling.min())
    return coupling
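A hypothetical call, assuming the function and its imports (numpy as np, scipy.signal as sig, scipy.stats as stat, parzen, convolve1d) are in scope; coefAmp stands in for a complex time-frequency coefficient matrix of shape (n_freqs, n_samples):

import numpy as np

fs = 1000.0
t = np.arange(0, 5.0, 1.0 / fs)
phase_signal = np.sin(2 * np.pi * 6 * t)                     # 6 Hz carrier
coefAmp = np.random.randn(20, t.size) + 1j * np.random.randn(20, t.size)

coupling = cossfrequency_phase_amp_coupling(phase_signal, coefAmp, phasebins=20)
print(coupling.shape)   # (n_freqs, phasebins) = (20, 20)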
Example #8
    def pre_processing(self):
        """
        Complete various pre-processing steps for encoded protein sequences before
        doing any of the DSP-related functions or transformations. Zero-pad
        the sequences, remove any +/- infinity or NAN values, get the approximate
        protein spectra and window function parameter names.

        Parameters
        ----------
        :self (PyDSP object): 
            instance of PyDSP class.
            
        Returns
        -------
        None

        """
        #zero-pad encoded sequences so they are all the same length
        self.protein_seqs = zero_padding(self.protein_seqs)

        #get shape parameters of proteins seqs
        self.num_seqs = self.protein_seqs.shape[0]
        self.signal_len = self.protein_seqs.shape[1]

        #replace any positive or negative infinity or NAN values with 0
        self.protein_seqs[self.protein_seqs == -np.inf] = 0
        self.protein_seqs[self.protein_seqs == np.inf] = 0
        self.protein_seqs[np.isnan(self.protein_seqs)] = 0

        #replace any NAN's with 0's
        #self.protein_seqs.fillna(0, inplace=True)
        self.protein_seqs = np.nan_to_num(self.protein_seqs)

        #initialise zeros array to store all protein spectra
        self.fft_power = np.zeros((self.num_seqs, self.signal_len))
        self.fft_real = np.zeros((self.num_seqs, self.signal_len))
        self.fft_imag = np.zeros((self.num_seqs, self.signal_len))
        self.fft_abs = np.zeros((self.num_seqs, self.signal_len))

        #list of accepted spectra, window functions and filters
        all_spectra = ['power', 'absolute', 'real', 'imaginary']
        all_windows = [
            'hamming', 'blackman', 'blackmanharris', 'gaussian', 'bartlett',
            'kaiser', 'barthann', 'bohman', 'chebwin', 'cosine', 'exponential',
            'flattop', 'hann', 'boxcar', 'hanning', 'nuttall', 'parzen',
            'triang', 'tukey'
        ]
        all_filters = [
            'savgol', 'medfilt', 'symiirorder1', 'lfilter', 'hilbert'
        ]

        #set required input parameters, raise error if spectrum is none
        if self.spectrum is None:
            raise ValueError(
                'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                .format(self.spectrum, all_spectra))
        else:
            #get closest correct spectra from user input, if no close match then raise error
            spectra_matches = (get_close_matches(self.spectrum,
                                                 all_spectra,
                                                 cutoff=0.4))

            if spectra_matches == []:
                raise ValueError(
                    'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                    .format(self.spectrum, all_spectra))
            else:
                self.spectra = spectra_matches[0]  #closest match in array

        if self.window_type is None:
            self.window = 1  #window = 1 is the same as applying no window
        else:
            #get closest correct window function from user input
            window_matches = (get_close_matches(self.window_type,
                                                all_windows,
                                                cutoff=0.4))

            #check if sym=True or sym=False
            #get window function specified by window input parameter, if no match then window = 1
            if window_matches != []:
                if window_matches[0] == 'hamming':
                    self.window = hamming(self.signal_len, sym=True)
                    self.window_type = "hamming"
                elif window_matches[0] == "blackman":
                    self.window = blackman(self.signal_len, sym=True)
                    self.window = "blackman"
                elif window_matches[0] == "blackmanharris":
                    self.window = blackmanharris(self.signal_len,
                                                 sym=True)  #**
                    self.window_type = "blackmanharris"
                elif window_matches[0] == "bartlett":
                    self.window = bartlett(self.signal_len, sym=True)
                    self.window_type = "bartlett"
                elif window_matches[0] == "gaussian":
                    self.window = gaussian(self.signal_len, std=7, sym=True)
                    self.window_type = "gaussian"
                elif window_matches[0] == "kaiser":
                    self.window = kaiser(self.signal_len, beta=14, sym=True)
                    self.window_type = "kaiser"
                elif window_matches[0] == "hanning":
                    self.window = hanning(self.signal_len, sym=True)
                    self.window_type = "hanning"
                elif window_matches[0] == "barthann":
                    self.window = barthann(self.signal_len, sym=True)
                    self.window_type = "barthann"
                elif window_matches[0] == "bohman":
                    self.window = bohman(self.signal_len, sym=True)
                    self.window_type = "bohman"
                elif window_matches[0] == "chebwin":
                    self.window = chebwin(self.signal_len, sym=True)
                    self.window_type = "chebwin"
                elif window_matches[0] == "cosine":
                    self.window = cosine(self.signal_len, sym=True)
                    self.window_type = "cosine"
                elif window_matches[0] == "exponential":
                    self.window = exponential(self.signal_len, sym=True)
                    self.window_type = "exponential"
                elif window_matches[0] == "flattop":
                    self.window = flattop(self.signal_len, sym=True)
                    self.window_type = "flattop"
                elif window_matches[0] == "boxcar":
                    self.window = boxcar(self.signal_len, sym=True)
                    self.window_type = "boxcar"
                elif window_matches[0] == "nuttall":
                    self.window = nuttall(self.signal_len, sym=True)
                    self.window_type = "nuttall"
                elif window_matches[0] == "parzen":
                    self.window = parzen(self.signal_len, sym=True)
                    self.window_type = "parzen"
                elif window_matches[0] == "triang":
                    self.window = triang(self.signal_len, sym=True)
                    self.window_type = "triang"
                elif window_matches[0] == "tukey":
                    self.window = tukey(self.signal_len, sym=True)
                    self.window_type = "tukey"

            else:
                self.window = 1  #window = 1 is the same as applying no window

        #calculate convolution from protein sequences
        if self.convolution is not None:
            if self.window is not None:
                self.convoled_seqs = signal.convolve(
                    self.protein_seqs, self.window, mode='same') / sum(
                        self.window)

        if self.filter is not None:
            #get closest correct filter from user input
            filter_matches = (get_close_matches(self.filter,
                                                all_filters,
                                                cutoff=0.4))

            #set filter attribute according to approximate user input
            if filter_matches != []:
                if filter_matches[0] == 'savgol':
                    self.filter = savgol_filter(self.signal_len,
                                                self.signal_len)
                elif filter_matches[0] == 'medfilt':
                    self.filter = medfilt(self.signal_len)
                elif filter_matches[0] == 'symiirorder1':
                    self.filter = symiirorder1(self.signal_len, c0=1, z1=1)
                elif filter_matches[0] == 'lfilter':
                    self.filter = lfilter(self.signal_len)
                elif filter_matches[0] == 'hilbert':
                    self.filter = hilbert(self.signal_len)
            else:
                self.filter = ""  #no filter
Example #9
def ent_rate_sp(data, sm_window):
    """
    Calculate the entropy rate of a stationary Gaussian random process using
    spectrum estimation with smoothing window.

    Parameters
    ----------
    data : ndarray
        Data to calculate the entropy rate of and smooth
    sm_window : bool
        Whether to apply a Parzen window when smoothing the spectrum estimate

    Returns
    -------
    ent_rate : float
        The entropy rate

    Notes
    -----
    This function attempts to calculate the entropy rate following the
    reference below.

    References
    ----------
    * Li, Y.O., Adalı, T. and Calhoun, V.D., (2007).
      Estimating the number of independent components for
      functional magnetic resonance imaging data.
      Human brain mapping, 28(11), pp.1251-1266.
    """

    dims = data.shape

    if data.ndim == 3 and min(dims) != 1:
        pass
    else:
        raise ValueError("Incorrect matrix dimensions.")

    # Normalize x_sb to be unit variance
    data_std = np.std(np.reshape(data, (-1, 1)))

    # Make sure we do not divide by zero
    if data_std == 0:
        raise ValueError("Divide by zero encountered.")
    data = data / data_std

    # Apply windows to 3D
    data_corr = fftconvolve(data, np.flip(data))

    # Create bias-correcting vectors
    v1 = np.hstack((np.arange(1, dims[0] + 1), np.arange(dims[0] - 1, 0,
                                                         -1)))[np.newaxis, :]
    v2 = np.hstack((np.arange(1, dims[1] + 1), np.arange(dims[1] - 1, 0,
                                                         -1)))[np.newaxis, :]
    v3 = np.arange(dims[2], 0, -1)

    vd = np.dot(v1.T, v2)
    vcu = np.zeros((2 * dims[0] - 1, 2 * dims[1] - 1, 2 * dims[2] - 1))
    for m3 in range(dims[2]):
        vcu[:, :, (dims[2] - 1) - m3] = vd * v3[m3]
        vcu[:, :, (dims[2] - 1) + m3] = vd * v3[m3]

    data_corr /= vcu

    if sm_window:
        M = [int(i) for i in np.ceil(np.array(dims) / 10)]

        # Get Parzen window for each spatial direction
        parzen_w_3 = np.zeros((2 * dims[2] - 1, ))
        parzen_w_3[(dims[2] - M[2] - 1):(dims[2] + M[2])] = parzen(2 * M[2] +
                                                                   1)

        parzen_w_2 = np.zeros((2 * dims[1] - 1, ))
        parzen_w_2[(dims[1] - M[1] - 1):(dims[1] + M[1])] = parzen(2 * M[1] +
                                                                   1)

        parzen_w_1 = np.zeros((2 * dims[0] - 1, ))
        parzen_w_1[(dims[0] - M[0] - 1):(dims[0] + M[0])] = parzen(2 * M[0] +
                                                                   1)

        # Scale Parzen windows
        parzen_window_2D = np.dot(parzen_w_1[np.newaxis, :].T,
                                  parzen_w_2[np.newaxis, :])
        parzen_window_3D = np.zeros(
            (2 * dims[0] - 1, 2 * dims[1] - 1, 2 * dims[2] - 1))
        for m3 in range(dims[2] - 1):
            parzen_window_3D[:, :, (dims[2] - 1) - m3] = np.dot(
                parzen_window_2D, parzen_w_3[dims[2] - 1 - m3])
            parzen_window_3D[:, :, (dims[2] - 1) + m3] = np.dot(
                parzen_window_2D, parzen_w_3[dims[2] - 1 + m3])
        # Apply 3D Parzen Window
        data_corr *= parzen_window_3D

    data_fft = abs(fftshift(fftn(data_corr)))
    data_fft[data_fft < 1e-4] = 1e-4

    # Estimation of the entropy rate
    ent_rate = 0.5 * np.log(2 * np.pi * np.exp(1)) + np.sum(
        np.log(abs((data_fft)))[:]) / 2 / np.sum(abs(data_fft)[:])

    return ent_rate
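A hypothetical call on a small random volume, assuming ent_rate_sp and its imports (numpy as np, fftconvolve from scipy.signal, fftn and fftshift, and parzen) are in scope:

import numpy as np

data = np.random.randn(20, 22, 18)            # toy 3-D volume, roughly unit variance
ent_rate = ent_rate_sp(data, sm_window=True)  # smooth the autocorrelation with Parzen windows
print(ent_rate)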