def get_pca_mean_and_pre_whitener_raw(raw, picks, start, stop, decim, reject,
                                      flat, tstep, pre_whitener,
                                      reject_by_annotation):
    """Compute the PCA mean and pre-whitener from a Raw recording.

    Aux method based on ica._fit_raw from mne v0.15.
    """
    # Default to the good data channels when no picks are given.
    if picks is None:
        picks = _pick_data_channels(raw.info, exclude='bads',
                                    with_ref_meg=False)

    info = pick_info(raw.info, picks)
    if info['comps']:
        info['comps'] = []

    start, stop = _check_start_stop(raw, start, stop)
    annot_mode = 'omit' if reject_by_annotation else None

    # get_data returns a copy of the requested span.
    data = raw.get_data(picks, start, stop, annot_mode)

    # Decimation slicing yields a view, not a copy.
    if decim is not None:
        data = data[:, ::decim]

    # Amplitude-based rejection makes a copy; drop indices are discarded here.
    if reject is not None or flat is not None:
        data, _ = _reject_data_segments(data, reject, flat, decim, info,
                                        tstep)

    # pre_whiten may operate in place or return a copy.
    data, pre_whitener = pre_whiten(data, raw.info, picks, pre_whitener)
    return np.mean(data, axis=1), pre_whitener
def fit(self, raw: mne.io.RawArray, start: float = None, stop: float = None,
        reject_by_annotation: bool = True, gfp: bool = False,
        n_jobs: int = 1, verbose=None) -> mod_Kmeans:
    """Fit the modified K-means clustering on a Raw recording.

    Args:
        raw (mne.io.RawArray): Recording to segment into microstates.
        start (float, optional): First sample/time to use. Defaults to None.
        stop (float, optional): Last sample/time to use. Defaults to None.
        reject_by_annotation (bool, optional): If True, omit spans marked
            bad by annotations. Defaults to True.
        gfp (bool, optional): If True, run _extract_gfps on the data before
            clustering (presumably restricting to GFP peaks — confirm
            against _extract_gfps). Defaults to False.
        n_jobs (int, optional): Number of parallel jobs for the n_init
            restarts. Defaults to 1.
        verbose ([type], optional): Verbosity level. Defaults to None.

    Returns:
        mod_Kmeans: The fitted instance (``self``).
    """
    _validate_type(raw, (BaseRaw), 'raw', 'Raw')
    reject_by_annotation = 'omit' if reject_by_annotation else None
    start, stop = _check_start_stop(raw, start, stop)
    n_jobs = check_n_jobs(n_jobs)

    # BUG FIX: was ``len(raw.info['bads']) is not 0`` — an identity
    # comparison against an int literal (SyntaxWarning on CPython >= 3.8,
    # implementation-dependent result). Truthiness is the idiomatic test.
    if raw.info['bads']:
        warn('Bad channels are present in the recording. '
             'They will still be used to compute microstate topographies. '
             'Consider using Raw.pick() or Raw.interpolate_bads()'
             ' before fitting.')

    # BUG FIX: pass start/stop by keyword. Previously ``start`` was consumed
    # by get_data's first positional parameter ``picks`` and ``stop`` by
    # ``start``, silently selecting the wrong data.
    data = raw.get_data(start=start, stop=stop,
                        reject_by_annotation=reject_by_annotation)
    if gfp:
        data = _extract_gfps(data)

    # Keep the best of n_init random restarts, ranked by GEV.
    best_gev = 0
    if n_jobs == 1:
        for _ in range(self.n_init):
            gev, maps, segmentation = self._run_mod_kmeans(data)
            if gev > best_gev:
                best_gev, best_maps, best_segmentation = (gev, maps,
                                                          segmentation)
    else:
        parallel, p_fun, _ = parallel_func(self._run_mod_kmeans,
                                           total=self.n_init, n_jobs=n_jobs)
        runs = parallel(p_fun(data) for i in range(self.n_init))
        runs = np.array(runs)
        best_run = np.argmax(runs[:, 0])
        best_gev, best_maps, best_segmentation = runs[best_run]

    self.cluster_centers = best_maps
    self.GEV = best_gev
    self.labels = best_segmentation
    self.current_fit = True
    return self
def _fit_evoked(self, raw, picks, start, stop, decim, reject, flat, tstep,
                verbose):
    """Aux method: fit ICA on the data array of an evoked-like object.

    Resets any previous fit, selects channels, optionally decimates and
    rejects segments, pre-whitens, then runs the ICA decomposition.
    Returns ``self``.
    """
    if self.current_fit != 'unfitted':
        self._reset()

    if picks is None:  # just use good data channels
        picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
                           ecg=False, misc=False, stim=False,
                           ref_meg=False, exclude='bads')

    logger.info('Fitting ICA to data using %i channels. \n'
                'Please be patient, this may take some time' % len(picks))

    if self.max_pca_components is None:
        self.max_pca_components = len(picks)
        logger.info('Inferring max_pca_components from picks.')

    self.info = pick_info(raw.info, picks)
    if self.info['comps']:
        self.info['comps'] = []
    self.ch_names = self.info['ch_names']

    start, stop = _check_start_stop(raw, start, stop)
    data = raw.data[picks, start:stop]
    # BUG FIX: ``print data.shape`` was a Python 2 print statement — a
    # SyntaxError under Python 3. Kept as stdout output to preserve intent.
    print(data.shape)

    if decim is not None:
        data = data[:, ::decim].copy()

    if (reject is not None) or (flat is not None):
        data, self.drop_inds_ = _reject_data_segments(
            data, reject, flat, decim, self.info, tstep)

    self.n_samples_ = data.shape[1]
    data, self._pre_whitener = self._pre_whiten(data, raw.info, picks)

    self._fit(data, self.max_pca_components, 'evoked')
    return self
def _fit_evoked(self, raw, picks, start, stop, decim, reject, flat, tstep,
                verbose):
    """Aux method: fit ICA on the data array of an evoked-like object.

    Resets any previous fit, selects channels, optionally decimates and
    rejects segments, pre-whitens, then runs the ICA decomposition.
    Returns ``self``.
    """
    if self.current_fit != 'unfitted':
        self._reset()

    if picks is None:  # just use good data channels
        picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
                           ecg=False, misc=False, stim=False,
                           ref_meg=False, exclude='bads')

    logger.info('Fitting ICA to data using %i channels. \n'
                'Please be patient, this may take some time' % len(picks))

    if self.max_pca_components is None:
        self.max_pca_components = len(picks)
        logger.info('Inferring max_pca_components from picks.')

    self.info = pick_info(raw.info, picks)
    if self.info['comps']:
        self.info['comps'] = []
    self.ch_names = self.info['ch_names']

    start, stop = _check_start_stop(raw, start, stop)
    data = raw.data[picks, start:stop]
    # BUG FIX: ``print data.shape`` was a Python 2 print statement — a
    # SyntaxError under Python 3. Kept as stdout output to preserve intent.
    print(data.shape)

    if decim is not None:
        data = data[:, ::decim].copy()

    if (reject is not None) or (flat is not None):
        data, self.drop_inds_ = _reject_data_segments(
            data, reject, flat, decim, self.info, tstep)

    self.n_samples_ = data.shape[1]
    data, self._pre_whitener = self._pre_whiten(data, raw.info, picks)

    self._fit(data, self.max_pca_components, 'evoked')
    return self
def ica_apply_unfiltered(raw_unfilt, ica_filt, picks, n_pca_components=None,
                         reject_by_annotation=None, start=None, stop=None):
    """Remove selected components from the unfiltered signal and preserve
    the original mean and standard deviation

    Note: this is needed when ICA was trained on filtered data but
    the cleaning will be applied on unfiltered data. After cleaning
    the original (unfiltered) mean and standard deviation is restored.

    Parameters
    ----------
    raw_unfilt : instance of Raw
        The data to be processed (works inplace).
    ica_filt : instance of ICA
        The ICA solution fitted on the filtered data.
    picks : array-like of int
        Channel indices the ICA was fitted on.
    n_pca_components : int | float | None
        The number of PCA components to be kept, either absolute (int)
        or percentage of the explained variance (float). If None (default),
        all PCA components will be used.
    reject_by_annotation : str | None
        Passed through to Raw.get_data (e.g. 'omit').
    start : int | float | None
        First sample to include. If float, data will be interpreted as
        time in seconds. If None, data will be used from the first sample.
    stop : int | float | None
        Last sample to not include. If float, data will be interpreted as
        time in seconds. If None, data will be used to the last sample.

    Returns
    -------
    raw_unfilt_clean : instance of Raw after cleaning
    """
    _check_preload(raw_unfilt, "ica.apply")
    start, stop = _check_start_stop(raw_unfilt, start, stop)

    # BUG FIX: picks was passed both positionally and by keyword
    # (``get_data(picks, picks=picks, ...)``), which raises
    # TypeError: got multiple values for argument 'picks'.
    data = raw_unfilt.get_data(picks=picks, start=start, stop=stop,
                               reject_by_annotation=reject_by_annotation)

    # Compute pre-whitener and PCA data mean from the unfiltered data.
    # Renamed from ``pre_whiten`` to avoid shadowing the module-level
    # pre_whiten() helper.
    pre_whitener = np.atleast_2d(np.ones(len(picks)) * data.std()).T
    data, _ = ica_filt._pre_whiten(data, raw_unfilt.info, picks)
    pca_mean_ = np.mean(data, axis=1)

    # Apply ICA on unfiltered data while preserving the original mean and
    # stddev. Note, in MNE-Python ICA:
    #   pca.mean_        is the overall mean across all data channels
    #   ica.pre_whitener_ is a vector with equal values being the overall
    #                     standard deviation
    ica_unfilt = ica_filt.copy()
    ica_unfilt.pca_mean_ = pca_mean_
    ica_unfilt._pre_whitener = pre_whitener

    raw_unfilt_clean = ica_unfilt.apply(raw_unfilt, start=start, stop=stop,
                                        n_pca_components=n_pca_components)
    return raw_unfilt_clean