def __init__(self, sf, npts, Id='113', pha_f=[2, 4], pha_meth='hilbert',
             pha_cycle=3, amp_f=[60, 200], amp_meth='hilbert', amp_cycle=6,
             nbins=18, window=None, width=None, step=None, time=None,
             **kwargs):
    # Check pha and amp methods:
    _checkref('pha_meth', pha_meth, ['hilbert', 'hilbert1', 'hilbert2'])
    _checkref('amp_meth', amp_meth, ['hilbert', 'hilbert1', 'hilbert2'])
    # Check the type of f:
    if (len(pha_f) == 4) and isinstance(pha_f[0], (int, float)):
        pha_f = binarize(pha_f[0], pha_f[1], pha_f[2], pha_f[3], kind='list')
    if (len(amp_f) == 4) and isinstance(amp_f[0], (int, float)):
        amp_f = binarize(amp_f[0], amp_f[1], amp_f[2], amp_f[3], kind='list')
    self.xvec = []
    # Initialize pac object:
    self.Id = Id
    me = Id[0]
    # Manage settings:
    # 1 - Choose if we extract phase or amplitude:
    #     - Methods using phase // amplitude:
    if me in ['1', '2', '3', '5', '6']:
        pha_kind, amp_kind = 'phase', 'amplitude'
    #     - Methods using phase // phase:
    elif me in ['4']:
        pha_kind, amp_kind = 'phase', 'amplitude'
    # 2 - Specific case of Ozkurt:
    if me == '5':
        Id = '500'
    # Initialize cfc:
    _coupling.__init__(self, pha_f, pha_kind, pha_meth, pha_cycle,
                       amp_f, amp_kind, amp_meth, amp_cycle,
                       sf, npts, window, width, step, time, **kwargs)
    # Get pac model:
    _, _, _, ModelStr, SurStr, NormStr = CfcSettings(Id, nbins)
    self.model = ['Method : ' + ModelStr, 'Surrogates : ' + SurStr,
                  'Normalization : ' + NormStr]
    self._nbins = nbins
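# --- Illustrative sketch (not part of the original source) -------------------
# The constructors in this module accept a 4-element frequency specification
# that is expanded into a list of bands through binarize(start, stop, width,
# step). The helper below is a hypothetical, standalone re-implementation of
# that expansion, under the assumption that binarize builds [f, f + width]
# bands whose lower edge advances by `step`; the real binarize is defined
# elsewhere in the package and may differ.
def _example_band_expansion(start, stop, width, step):
    """Return a list of [low, high] bands built from (start, stop, width, step)."""
    import numpy as np
    lows = np.arange(start, stop - width + step, step)
    return [[float(f), float(f + width)] for f in lows]

# Example: _example_band_expansion(60, 200, 10, 5) -> [[60, 70], [65, 75], ...]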
def __init__(self, sf, npts, f=[2, 4], method='hilbert', cycle=3, sample=None,
             time=None, **kwargs):
    # Check the phase method:
    _checkref('method', method, ['hilbert', 'hilbert1', 'hilbert2'])
    # Check the type of f:
    if (len(f) == 4) and isinstance(f[0], (int, float)):
        f = binarize(f[0], f[1], f[2], f[3], kind='list')
    # Initialize PLV:
    _coupling.__init__(self, f, 'phase', method, cycle, f, 'phase', method,
                       cycle, sf, npts, None, None, None, time, **kwargs)
    # Time vector and sample selection:
    if time is None:
        time = np.arange(npts)
    if sample is None:
        sample = slice(npts)
    self._sample = sample
    self.time = time[sample]
    del self.amp
def __init__(self, sf, npts, f=[60, 200], method='hilbert', window=None,
             width=None, step=None, time=None, **kwargs):
    _checkref('method', method, ['hilbert', 'hilbert1', 'hilbert2'])
    _spectral.__init__(self, sf, npts, 'phase', f, None, 0, method, window,
                       width, step, None, time, False, **kwargs)
def __init__(self, sf, npts, f=(2, 200, 10, 5), baseline=(1, 2), norm=0,
             method='hilbert1', window=None, width=None, step=None, time=None,
             **kwargs):
    _checkref('method', method, ['hilbert', 'hilbert1', 'hilbert2', 'wavelet'])
    _spectral.__init__(self, sf, npts, 'power', f, baseline, norm, method,
                       window, width, step, None, time, True, **kwargs)
def __init__(self, sf, npts, nbins=18, pha_f=[2, 4], pha_meth='hilbert',
             pha_cycle=3, amp_f=[60, 200], amp_meth='hilbert', amp_cycle=6,
             window=None, width=None, step=None, time=None, **kwargs):
    # Check pha and amp methods:
    _checkref('pha_meth', pha_meth, ['hilbert', 'hilbert1', 'hilbert2'])
    _checkref('amp_meth', amp_meth, ['hilbert', 'hilbert1', 'hilbert2'])
    # Check the type of f:
    if (len(pha_f) == 4) and isinstance(pha_f[0], (int, float)):
        pha_f = binarize(pha_f[0], pha_f[1], pha_f[2], pha_f[3], kind='list')
    if (len(amp_f) == 4) and isinstance(amp_f[0], (int, float)):
        amp_f = binarize(amp_f[0], amp_f[1], amp_f[2], amp_f[3], kind='list')
    self.xvec = []
    # Binarize the phase vector (nbins bins of 360/nbins degrees each):
    self._binsize = 360 / nbins
    self._phabin = np.arange(0, 360, self._binsize)
    self.phabin = np.concatenate((self._phabin[:, np.newaxis],
                                  self._phabin[:, np.newaxis] + self._binsize),
                                 axis=1)
    # Initialize coupling:
    _coupling.__init__(self, pha_f, 'phase', pha_meth, pha_cycle,
                       amp_f, 'amplitude', amp_meth, amp_cycle,
                       sf, npts, window, width, step, time, **kwargs)
    self._nbins = nbins
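# --- Illustrative sketch (not part of the original source) -------------------
# The constructor above splits the phase circle into nbins bins of 360/nbins
# degrees and stores their [lower, upper] edges in self.phabin. The standalone
# helper below reproduces that construction so the edges can be inspected in
# isolation; the helper name is hypothetical.
def _example_phase_bins(nbins=18):
    """Return an (nbins, 2) array of [lower, upper] phase-bin edges in degrees."""
    import numpy as np
    binsize = 360 / nbins
    lower = np.arange(0, 360, binsize)
    return np.c_[lower, lower + binsize]

# Example: _example_phase_bins(4) -> [[0, 90], [90, 180], [180, 270], [270, 360]]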
def __init__(self, sf, npts, f=[60, 200], baseline=None, norm=None,
             method='hilbert1', window=None, width=None, step=None, split=None,
             time=None, **kwargs):
    _checkref('method', method, ['hilbert', 'hilbert1', 'hilbert2', 'wavelet'])
    _spectral.__init__(self, sf, npts, 'power', f, baseline, norm, method,
                       window, width, step, split, time, False, **kwargs)
def __init__(self, sf, npts, pha_f=[2, 4], pha_meth='hilbert', pha_cycle=3,
             amp_f=[60, 200], amp_meth='hilbert', amp_cycle=6, window=None,
             step=None, width=None, time=None, **kwargs):
    # Check pha and amp methods:
    _checkref('pha_meth', pha_meth, ['hilbert', 'hilbert1', 'hilbert2'])
    _checkref('amp_meth', amp_meth, ['hilbert', 'hilbert1', 'hilbert2'])
    # Check the type of f:
    if (len(pha_f) == 4) and isinstance(pha_f[0], (int, float)):
        pha_f = binarize(pha_f[0], pha_f[1], pha_f[2], pha_f[3], kind='list')
    if (len(amp_f) == 4) and isinstance(amp_f[0], (int, float)):
        amp_f = binarize(amp_f[0], amp_f[1], amp_f[2], amp_f[3], kind='list')
    # Initialize cfc:
    _coupling.__init__(self, pha_f, 'phase', pha_meth, pha_cycle,
                       amp_f, 'amplitude', amp_meth, amp_cycle,
                       sf, npts, window, width, step, time, **kwargs)
def get(self, x, statmeth=None, tail=2, maxstat=-1, metric='m_center',
        n_perm=200, n_jobs=-1):
    """Get the spectral information of the signal x.

    Args:
        x: array
            Data. x should have a shape of (n_electrodes x n_pts x n_trials)

    Kargs:
        statmeth: string, optional, [def: None]
            Method to evaluate the statistical significance. To get p-values,
            the program compares real values with a defined baseline. As a
            consequence, the 'norm' parameter should not be equal to zero.

            - None: no statistical evaluation
            - 'permutation': randomly shuffle real data with the baseline.
              Control the number of permutations with the n_perm parameter.
              For example, if n_perm = 1000, the minimum p-value will be 0.001.
            - 'wilcoxon': Wilcoxon signed-rank test
            - 'kruskal': Kruskal-Wallis H-test

        tail: int, optional, [def: 2]
            For the permutation method, get p-values from one or two tails of
            the distribution.

        maxstat: integer, optional, [def: -1]
            Correct p-values with maximum statistics. maxstat corresponds to
            the dimension of perm used for the correction. Use -1 to correct
            through all dimensions. Otherwise, use d1, d2, ... or dn to
            correct through a specific dimension.

        metric: string/function type, optional, [def: 'm_center']
            Metric used to normalize data and permutations by the defined
            baseline.

        n_perm: integer, optional, [def: 200]
            Number of permutations for assessing statistical significance.

        n_jobs: integer, optional, [def: -1]
            Number of jobs used to extract features. If n_jobs = -1, all the
            jobs are used.

    Return:
        xF: array
            The un/normalized feature of x, with a shape of
            (n_frequency x n_electrodes x n_window x n_trials)
    """
    # Get variables:
    self._statmeth = statmeth
    self._n_perm = n_perm
    self._2t = tail
    self._mxst = maxstat
    self._metric = metric
    # Check input size:
    if len(x.shape) == 2:
        x = x[np.newaxis, ...]
    if x.shape[1] != self._npts:
        raise ValueError('The second dimension must be ' + str(self._npts))
    nfeat = x.shape[0]
    # Check statistical method:
    if statmeth is not None:
        _checkref('statmeth', statmeth, ['permutation', 'wilcoxon', 'kruskal'])
    # Run feature computation (one job per electrode):
    data = Parallel(n_jobs=n_jobs)(delayed(_get)(x[k, ...], self)
                                   for k in range(nfeat))
    # xF, pvalues = zip(*data)
    # Re-organize data:
    xF = np.swapaxes(np.array(data), 0, 1)
    # Remove last dimension (for TF):
    if self._meanT:
        xF = xF[..., 0]
    return xF  # , np.swapaxes(np.array(pvalues), 0, 1)
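# --- Illustrative sketch (not part of the original source) -------------------
# The `get` method above dispatches one job per electrode with joblib
# (Parallel / delayed) and then swaps the first two axes of the stacked
# result. The snippet below shows the same dispatch pattern on a dummy
# per-electrode function; _toy_feature and the array shapes are assumptions
# chosen only for illustration.
def _toy_feature(x_elec):
    """Dummy per-electrode computation: mean over time, one value per trial."""
    return x_elec.mean(axis=0)

def _example_parallel_dispatch(x, n_jobs=1):
    """Apply _toy_feature to each electrode of x (n_electrodes x n_pts x n_trials)."""
    import numpy as np
    from joblib import Parallel, delayed
    data = Parallel(n_jobs=n_jobs)(delayed(_toy_feature)(x[k, ...])
                                   for k in range(x.shape[0]))
    return np.array(data)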
def get(self, x, statmeth=None, tail=2, n_perm=200, metric='m_center',
        maxstat=False, n_jobs=-1):
    """Get the spectral feature of the signal x.

    Args:
        x: array
            Data with a shape of (n_electrodes x n_pts x n_trials)

    Kargs:
        statmeth: string, optional, [def: None]
            Method to evaluate the statistical significance. To get p-values,
            the program compares real values with a defined baseline. As a
            consequence, the 'norm' and 'baseline' parameters should not be
            None.

            - 'permutation': randomly shuffle real data with the baseline.
              Control the number of permutations with the n_perm parameter.
              For example, if n_perm = 1000, the minimum p-value will be 0.001.
            - 'wilcoxon': Wilcoxon signed-rank test
            - 'kruskal': Kruskal-Wallis H-test

        n_perm: integer, optional, [def: 200]
            Number of permutations for assessing statistical significance.

        tail: int, optional, [def: 2]
            For the permutation method, get p-values from one or two tails of
            the distribution. Use -1 for testing A<B, 1 for A>B and 2 for
            A~=B.

        metric: string/function type, optional, [def: 'm_center']
            Metric used to normalize data and permutations by the defined
            baseline. Use:

            - None: compare values directly, without transformation
            - 'm_center': (A-B)/mean(B) transformation
            - 'm_zscore': (A-B)/std(B) transformation
            - 'm_minus': (A-B) transformation
            - function: user-defined function [def myfcn(A, B): return array_like]

        maxstat: bool, optional, [def: False]
            Correct p-values with maximum statistics. If maxstat is True, the
            correction is applied only through frequencies.

        n_jobs: integer, optional, [def: -1]
            Number of jobs used to extract features. If n_jobs = -1, all the
            jobs are used.

    Return:
        xF: array
            The un/normalized feature of x, with a shape of
            (n_frequency x n_electrodes x n_window x n_trials)

        pvalues: array
            p-values with a shape of (n_frequency x n_electrodes x n_window)
    """
    # Get variables:
    self._statmeth = statmeth
    self._n_perm = n_perm
    self._2t = tail
    self._mxst = maxstat
    self._metric = metric
    # Check input size:
    if len(x.shape) == 2:
        x = x[np.newaxis, ...]
    if x.shape[1] != self._npts:
        raise ValueError('The second dimension must be ' + str(self._npts))
    nfeat = x.shape[0]
    warnmsg = ('You defined a normalization but no baseline has been '
               'specified. Normalization will be ignored.')
    if (self._norm is not None) and (self._baseline is None):
        warn(warnmsg)
        self._norm = None
    # Check statistical method:
    if statmeth is not None:
        _checkref('statmeth', statmeth, ['permutation', 'wilcoxon', 'kruskal'])
    # Run feature computation (one job per electrode):
    data = Parallel(n_jobs=n_jobs, prefer='threads')(
        delayed(_get)(x[k, ...], self) for k in range(nfeat))
    xF, pvalues = zip(*data)
    # Re-organize data:
    xF = np.swapaxes(np.array(xF), 0, 1)
    if pvalues[0] is not None:
        pvalues = np.swapaxes(np.array(pvalues), 0, 1)
    else:
        pvalues = None
    # Remove last dimension (for TF):
    if self._meanT:
        xF = xF[..., 0]
    return xF, pvalues
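# --- Illustrative sketch (not part of the original source) -------------------
# The `metric` keyword documented above lists three baseline normalizations.
# The hypothetical helpers below spell out those formulas, with A the feature
# values and B the baseline values (numpy arrays of broadcast-compatible
# shapes, an assumption); they are not the library's internal implementation.
def _m_center_example(A, B):
    """(A - B) / mean(B) normalization."""
    return (A - B) / B.mean()

def _m_zscore_example(A, B):
    """(A - B) / std(B) normalization."""
    return (A - B) / B.std()

def _m_minus_example(A, B):
    """(A - B) normalization."""
    return A - B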