def select_best_parcellation(parcellations, clf, avg_signals, y, n_jobs=1,
                             verbose=False):
    """Return the best parcellation of the parcellations given in arguments.

    Parameters
    ----------
    parcellations : (int list) list
        list of all the parcellations possible for the current iteration
    clf : classifier
    avg_signals : nparray
        average signals for each node
    y : ndarray of shape = (n_samples)
    n_jobs : int, optional
        number of CPUs to use
    verbose : int, optional
        verbosity level

    Returns
    -------
    parcellation : int list
        best parcellation of the parcellations given in argument
    """
    # Compute the cross-validation score of each candidate parcellation
    scores = Parallel(n_jobs)(
        delayed(cross_val.cross_val_score)(
            estimator=clf,
            X=avg_signals[:, j],
            y=y,
            # cv=cross_val.KFold(avg_signals.shape[0], 5),
            n_jobs=1)
        for j in parcellations)

    if verbose >= 2:
        print " Scores of each parcellation for current iteration :"
        print scores

    # Average the per-fold scores and keep the best parcellation
    scores = Parallel(n_jobs)(delayed(np.mean)(i) for i in scores)
    indice = np.argmax(scores)

    return parcellations[indice], scores[indice]
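# Illustrative usage sketch (hypothetical, not from the original module):
# any estimator with the scikit-learn fit/score API can serve as `clf`, and
# the candidate parcellations below are made-up lists of node indices. It
# assumes the module-level imports (np, Parallel, delayed, cross_val) are
# in scope.
def _example_select_best_parcellation():
    import numpy as np
    from sklearn.svm import LinearSVC
    rng = np.random.RandomState(0)
    avg_signals = rng.randn(40, 6)      # 40 samples, 6 nodes
    y = rng.randint(0, 2, 40)           # binary labels
    candidates = [[0, 1, 2], [3, 4, 5], [0, 2, 4]]
    best, best_score = select_best_parcellation(candidates, LinearSVC(),
                                                avg_signals, y, n_jobs=1)
    return best, best_score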
def para_bmu_find(self, x, y, njb=1):
    dlen = x.shape[0]
    # Squared norm of each codebook row, reused by chunk_based_bmu_find
    Y2 = np.einsum('ij, ij->i', y, y)

    # Find the BMUs for each chunk of data in parallel
    t_temp = time()
    b = Parallel(n_jobs=njb, pre_dispatch='3 * n_jobs')(
        delayed(chunk_based_bmu_find)(
            x[i * dlen // njb:min((i + 1) * dlen // njb, dlen)], y, Y2)
        for i in xrange(njb))
    # print 'bmu finding: {} seconds '.format(round(time() - t_temp, 3))

    t1 = time()
    bmu = np.asarray(list(itertools.chain(*b))).T
    # print 'bmu to array: {} seconds'.format(round(time() - t1, 3))
    del b
    return bmu
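# The einsum expression above computes the squared Euclidean norm of each
# codebook row in a single pass; a quick self-contained check of that
# identity against the naive formula:
def _check_rowwise_squared_norms():
    import numpy as np
    y = np.random.RandomState(0).randn(5, 3)
    Y2 = np.einsum('ij, ij->i', y, y)
    assert np.allclose(Y2, (y ** 2).sum(axis=1))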
def parallel_func(func, n_jobs, verbose=5):
    """Return parallel instance with delayed function

    Util function to use joblib only if available

    Parameters
    ----------
    func : callable
        A function
    n_jobs : int
        Number of jobs to run in parallel
    verbose : int
        Verbosity level

    Returns
    -------
    parallel : instance of joblib.Parallel or list
        The parallel object
    my_func : callable
        delayed(func) if joblib is available, otherwise func itself
    n_jobs : int
        Number of jobs >= 0
    """
    try:
        from scikits.learn.externals.joblib import Parallel, delayed
        parallel = Parallel(n_jobs, verbose=verbose)
        my_func = delayed(func)

        if n_jobs == -1:
            try:
                import multiprocessing
                n_jobs = multiprocessing.cpu_count()
            except ImportError:
                print "multiprocessing not installed. Cannot run in parallel."
                n_jobs = 1

    except ImportError:
        print "joblib not installed. Cannot run in parallel."
        n_jobs = 1
        my_func = func
        parallel = list

    return parallel, my_func, n_jobs
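# Usage sketch: the returned triple degrades gracefully to a plain serial
# list() when joblib is unavailable, so call sites stay identical either way.
def _example_parallel_func():
    import numpy as np
    parallel, my_sqrt, n_jobs = parallel_func(np.sqrt, n_jobs=2)
    # With joblib: Parallel consumes delayed(np.sqrt)(x) tuples.
    # Without joblib: list() collects np.sqrt(x) computed eagerly.
    return parallel(my_sqrt(x) for x in [1.0, 4.0, 9.0])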
def permutation_t_test(X, n_permutations=10000, tail=0, n_jobs=1):
    """One sample/paired sample permutation test based on a t-statistic.

    This function can perform the test on one variable or
    simultaneously on multiple variables. When applying the test to multiple
    variables, the "tmax" method is used for adjusting the p-values of each
    variable for multiple comparisons. Like Bonferroni correction, this
    method adjusts p-values in a way that controls the family-wise error
    rate. However, the permutation method will be more powerful than
    Bonferroni correction when different variables in the test are
    correlated.

    Parameters
    ----------
    X : array of shape [n_samples x n_tests]
        Data of size number of samples (aka number of observations) times
        number of tests (aka number of variables)
    n_permutations : int or 'all'
        Number of permutations. If n_permutations is 'all' all possible
        permutations are tested (2**n_samples). It's the exact test, which
        can be intractable when the number of samples is big (e.g. > 20).
        If n_permutations >= 2**n_samples then the exact test is performed
    tail : -1 or 0 or 1 (default = 0)
        If tail is 1, the alternative hypothesis is that the mean of the
        data is greater than 0 (upper tailed test). If tail is 0, the
        alternative hypothesis is that the mean of the data is different
        than 0 (two tailed test). If tail is -1, the alternative hypothesis
        is that the mean of the data is less than 0 (lower tailed test).
    n_jobs : int
        Number of CPUs to use for computation.

    Returns
    -------
    T_obs : array of shape [n_tests]
        T-statistic observed for all variables
    p_values : array of shape [n_tests]
        P-values for all the tests (aka variables)
    H0 : array of shape [n_permutations]
        T-statistic obtained by permutations and t-max trick for multiple
        comparison.

    Notes
    -----
    A reference (among many) in the field of neuroimaging:
    Nichols, T. E. & Holmes, A. P. (2002). Nonparametric permutation tests
    for functional neuroimaging: a primer with examples.
    Human Brain Mapping, 15, 1-25.
    Overview of standard nonparametric randomization and permutation testing
    applied to neuroimaging data (e.g. fMRI)
    DOI: http://dx.doi.org/10.1002/hbm.1058
    """
    n_samples, n_tests = X.shape

    do_exact = False
    if n_permutations == 'all' or (n_permutations >= 2 ** n_samples - 1):
        do_exact = True
        n_permutations = 2 ** n_samples - 1

    X2 = np.mean(X ** 2, axis=0)  # precompute moments
    mu0 = np.mean(X, axis=0)
    dof_scaling = sqrt(n_samples / (n_samples - 1.0))
    std0 = np.sqrt(X2 - mu0 ** 2) * dof_scaling  # std with variance splitting
    T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples))

    if do_exact:
        perms = bin_perm_rep(n_samples, a=1, b=-1)[1:, :]
    else:
        perms = np.sign(0.5 - np.random.rand(n_permutations, n_samples))

    try:
        from scikits.learn.externals.joblib import Parallel, delayed
        parallel = Parallel(n_jobs)
        my_max_stat = delayed(_max_stat)
    except ImportError:
        print "joblib not installed. Cannot run in parallel."
        n_jobs = 1
        my_max_stat = _max_stat
        parallel = list

    if n_jobs == -1:
        try:
            import multiprocessing
            n_jobs = multiprocessing.cpu_count()
        except ImportError:
            print "multiprocessing not installed. Cannot run in parallel."
            n_jobs = 1

    max_abs = np.concatenate(parallel(my_max_stat(X, X2, p, dof_scaling)
                                      for p in np.array_split(perms, n_jobs)))
    H0 = np.sort(max_abs)

    scaling = float(n_permutations + 1)

    if tail == 0:
        p_values = 1.0 - np.searchsorted(H0, np.abs(T_obs)) / scaling
    elif tail == 1:
        p_values = 1.0 - np.searchsorted(H0, T_obs) / scaling
    elif tail == -1:
        p_values = 1.0 - np.searchsorted(H0, -T_obs) / scaling

    return T_obs, p_values, H0
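# Usage sketch on synthetic data (assumes the module's _max_stat and
# bin_perm_rep helpers are in scope): a simultaneous test of 10 variables
# sharing a common positive shift; with tail=0 the p-values are two-sided
# and tmax-adjusted for multiple comparisons.
def _example_permutation_t_test():
    import numpy as np
    rng = np.random.RandomState(42)
    X = 0.3 + rng.randn(20, 10)     # 20 observations, 10 variables
    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999, tail=0)
    return T_obs, p_values, H0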
def source_induced_power(epochs, inverse_operator, bands, lambda2=1.0 / 9.0,
                         dSPM=True, n_cycles=5, df=1, use_fft=False,
                         baseline=None, baseline_mode='logratio', pca=True,
                         subtract_evoked=False, n_jobs=1):
    """Compute source space induced power

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs
    inverse_operator : instance of inverse operator
        The inverse operator
    bands : dict
        Example : bands = dict(alpha=[8, 9])
    lambda2 : float
        The regularization parameter of the minimum norm
    dSPM : bool
        Do dSPM or not?
    n_cycles : int
        Number of cycles
    df : float
        delta frequency within bands
    use_fft : bool
        Do convolutions in time or frequency domain with FFT
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
    baseline_mode : None | 'logratio' | 'zscore'
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or zscore (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline))
    pca : bool
        If True, the true dimension of data is estimated before running
        the time frequency transforms. It reduces the computation time
        e.g. with a dataset that was maxfiltered (true dim is 64)
    subtract_evoked : bool
        If True, the evoked component (average of all epochs) is subtracted
        from each epoch.
    n_jobs : int
        Number of jobs to run in parallel
    """
    if n_jobs == -1:
        try:
            import multiprocessing
            n_jobs = multiprocessing.cpu_count()
        except ImportError:
            print "multiprocessing not installed. Cannot run in parallel."
            n_jobs = 1

    try:
        from scikits.learn.externals.joblib import Parallel, delayed
        parallel = Parallel(n_jobs)
        my_compute_power = delayed(_compute_power)
    except ImportError:
        print "joblib not installed. Cannot run in parallel."
        n_jobs = 1
        my_compute_power = _compute_power
        parallel = list

    #
    #   Set up the inverse according to the parameters
    #
    epochs_data = epochs.get_data()

    if subtract_evoked:
        # subtract with a copy not to touch epochs
        epochs_data = epochs_data - np.mean(epochs_data, axis=0)

    nave = len(epochs_data)  # XXX : can do better when no preload

    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, dSPM)
    #
    #   Pick the correct channels from the data
    #
    sel = [epochs.ch_names.index(name) for name in inv['noise_cov']['names']]
    print 'Picked %d channels from the data' % len(sel)
    print 'Computing inverse...',
    #
    #   Simple matrix multiplication followed by combination of the
    #   three current components
    #
    #   This does all the data transformations to compute the weights for
    #   the eigenleads
    #
    K = inv['reginv'][:, None] * reduce(np.dot,
                                        [inv['eigen_fields']['data'],
                                         inv['whitener'],
                                         inv['proj']])

    if pca:
        U, s, Vh = linalg.svd(K)
        rank = np.sum(s > 1e-8 * s[0])
        K = s[:rank] * U[:, :rank]
        Vh = Vh[:rank]
        print 'Reducing data rank to %d' % rank
    else:
        Vh = None

    #
    #   Transformation into current distributions by weighting the
    #   eigenleads with the weights computed above
    #
    if inv['eigen_leads_weighted']:
        #
        #     R^0.5 has already been factored in
        #
        # print '(eigenleads already weighted)...',
        K = np.dot(inv['eigen_leads']['data'], K)
    else:
        #
        #     R^0.5 has to be factored in
        #
        # print '(eigenleads need to be weighted)...',
        K = np.sqrt(inv['source_cov']['data'])[:, None] * \
            np.dot(inv['eigen_leads']['data'], K)

    Fs = epochs.info['sfreq']  # sampling in Hz

    stcs = dict()
    src = inv['src']

    for name, band in bands.iteritems():
        print 'Computing power in band %s [%s, %s] Hz...' % (name, band[0],
                                                             band[1])

        freqs = np.arange(band[0], band[1] + df / 2.0, df)  # frequencies
        Ws = morlet(Fs, freqs, n_cycles=n_cycles)

        power = sum(parallel(my_compute_power(data, K, sel, Ws,
                                              inv['source_ori'], use_fft, Vh)
                             for data in np.array_split(epochs_data, n_jobs)))

        if dSPM:
            # print '(dSPM)...',
            power *= inv['noisenorm'][:, None] ** 2

        # average power in band + mean over epochs
        power /= len(epochs_data) * len(freqs)

        # Run baseline correction
        if baseline is not None:
            print "Applying baseline correction ..."
            times = epochs.times
            bmin, bmax = baseline
            if bmin is None:
                imin = 0
            else:
                imin = int(np.where(times >= bmin)[0][0])
            if bmax is None:
                imax = len(times)
            else:
                imax = int(np.where(times <= bmax)[0][-1]) + 1
            mean_baseline_power = np.mean(power[:, imin:imax], axis=1)
            if baseline_mode == 'logratio':
                power /= mean_baseline_power[:, None]
                power = np.log(power)
            elif baseline_mode == 'zscore':
                power -= mean_baseline_power[:, None]
                power /= np.std(power[:, imin:imax], axis=1)[:, None]
        else:
            print "No baseline correction applied..."

        stc = SourceEstimate(None)
        stc.data = power
        stc.tmin = epochs.times[0]
        stc.tstep = 1.0 / Fs
        stc.lh_vertno = src[0]['vertno']
        stc.rh_vertno = src[1]['vertno']
        stc._init_times()

        stcs[name] = stc

        print '[done]'

    return stcs
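# Self-contained illustration of the baseline-window logic used above: a
# (bmin, bmax) interval in seconds is mapped to the sample slice
# [imin:imax] on the epochs' time axis, with None meaning "from the start"
# or "to the end" respectively.
def _example_baseline_indices():
    import numpy as np
    times = np.linspace(-0.2, 0.5, 8)   # seconds
    bmin, bmax = -0.2, 0.0              # baseline = (-0.2, 0.0)
    if bmin is None:
        imin = 0
    else:
        imin = int(np.where(times >= bmin)[0][0])
    if bmax is None:
        imax = len(times)
    else:
        imax = int(np.where(times <= bmax)[0][-1]) + 1
    return imin, imax                   # slice covering the baseline window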
def induced_power(epochs, Fs, frequencies, use_fft=True, n_cycles=7,
                  n_jobs=1):
    """Compute time induced power and inter-trial phase-locking factor

    The time frequency decomposition is done with Morlet wavelets

    Parameters
    ----------
    epochs : array
        3D array of shape [n_epochs, n_channels, n_times]
    Fs : float
        sampling frequency
    frequencies : array
        Array of frequencies of interest
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions.
    n_cycles : int
        The number of cycles in the wavelet
    n_jobs : int
        The number of CPUs used in parallel. All CPUs are used in -1.
        Requires joblib package.

    Returns
    -------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
    """
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = epochs.shape

    # Precompute wavelets for given frequency range to save time
    Ws = morlet(Fs, frequencies, n_cycles=n_cycles)

    try:
        import joblib
    except ImportError:
        print "joblib not installed. Cannot run in parallel."
        n_jobs = 1

    if n_jobs == 1:
        psd = np.empty((n_channels, n_frequencies, n_times))
        plf = np.empty((n_channels, n_frequencies, n_times),
                       dtype=np.complex)

        for c in range(n_channels):
            X = np.squeeze(epochs[:, c, :])
            psd[c], plf[c] = _time_frequency(X, Ws, use_fft)
    else:
        from joblib import Parallel, delayed
        psd_plf = Parallel(n_jobs=n_jobs)(
            delayed(_time_frequency)(np.squeeze(epochs[:, c, :]), Ws,
                                     use_fft)
            for c in range(n_channels))

        psd = np.zeros((n_channels, n_frequencies, n_times))
        plf = np.zeros((n_channels, n_frequencies, n_times),
                       dtype=np.complex)
        for c, (psd_c, plf_c) in enumerate(psd_plf):
            psd[c, :, :], plf[c, :, :] = psd_c, plf_c

    psd /= n_epochs
    plf = np.abs(plf) / n_epochs
    return psd, plf
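# Usage sketch on synthetic data (assumes this module's `morlet` and
# `_time_frequency` helpers are importable; shapes and values are
# illustrative only): both outputs come back per channel, frequency and
# time point.
def _example_induced_power():
    import numpy as np
    # 3 epochs, 2 channels, 500 time samples at 250 Hz
    epochs = np.random.RandomState(0).randn(3, 2, 500)
    power, plf = induced_power(epochs, Fs=250.0,
                               frequencies=np.arange(18., 23.), n_jobs=1)
    return power.shape, plf.shape   # both (2, 5, 500)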
def single_trial_power(epochs, Fs, frequencies, use_fft=True, n_cycles=7,
                       baseline=None, baseline_mode='ratio', times=None,
                       n_jobs=1):
    """Compute time-frequency power on single epochs

    Parameters
    ----------
    epochs : instance Epochs | array of shape [n_epochs, n_channels, n_times]
        The epochs
    Fs : float
        Sampling rate
    frequencies : array-like
        The frequencies
    use_fft : bool
        Use the FFT for convolutions or not.
    n_cycles : float
        The number of cycles in the Morlet wavelet
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
    baseline_mode : None | 'ratio' | 'zscore'
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or zscore (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline))
    times : array
        Required to define baseline
    n_jobs : int
        The number of epochs to process at the same time

    Returns
    -------
    power : 4D array
        Power estimate for each epoch
        (Epochs x Channels x Frequencies x Timepoints).
    """
    mode = 'same'
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = epochs.shape

    # Precompute wavelets for given frequency range to save time
    Ws = morlet(Fs, frequencies, n_cycles=n_cycles)

    try:
        from scikits.learn.externals.joblib import Parallel, delayed
        parallel = Parallel(n_jobs)
        my_cwt = delayed(cwt)
    except ImportError:
        print "joblib not installed. Cannot run in parallel."
        n_jobs = 1
        my_cwt = cwt
        parallel = list

    print "Computing time-frequency power on single epochs..."
    power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
                     dtype=np.float)
    if n_jobs == 1:
        for k, e in enumerate(epochs):
            power[k] = np.abs(cwt(e, Ws, use_fft, mode)) ** 2
    else:
        # Precompute tf decompositions in parallel
        tfrs = parallel(my_cwt(e, Ws, use_fft, mode) for e in epochs)
        for k, tfr in enumerate(tfrs):
            power[k] = np.abs(tfr) ** 2

    # Run baseline correction
    if baseline is not None:
        if times is None:
            raise ValueError('times parameter is required to define baseline')
        print "Applying baseline correction ..."
        bmin, bmax = baseline
        if bmin is None:
            imin = 0
        else:
            imin = int(np.where(times >= bmin)[0][0])
        if bmax is None:
            imax = len(times)
        else:
            imax = int(np.where(times <= bmax)[0][-1]) + 1
        mean_baseline_power = np.mean(power[:, :, :, imin:imax], axis=3)
        if baseline_mode == 'ratio':
            power /= mean_baseline_power[:, :, :, None]
        elif baseline_mode == 'zscore':
            power -= mean_baseline_power[:, :, :, None]
            power /= np.std(power[:, :, :, imin:imax],
                            axis=3)[:, :, :, None]
    else:
        print "No baseline correction applied..."

    return power
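# Usage sketch (same module assumptions as above, i.e. `morlet` and `cwt`
# in scope; shapes are illustrative): per-epoch power with a z-score
# baseline over the pre-stimulus samples.
def _example_single_trial_power():
    import numpy as np
    epochs = np.random.RandomState(1).randn(3, 2, 500)
    times = np.arange(500) / 250.0 - 0.2    # 500 samples at 250 Hz
    power = single_trial_power(epochs, Fs=250.0, frequencies=[15., 25.],
                               baseline=(None, 0.0), baseline_mode='zscore',
                               times=times, n_jobs=1)
    return power.shape      # (3, 2, 2, 500)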