def fit(self, raw: mne.io.RawArray, start: float = None, stop: float = None,
        reject_by_annotation: bool = True, gfp: bool = False, n_jobs: int = 1,
        verbose=None) -> mod_Kmeans:
    """Fit the modified K-means microstate clustering on continuous data.

    Runs ``self.n_init`` randomly-initialized restarts of
    ``self._run_mod_kmeans`` and keeps the run with the highest global
    explained variance (GEV).

    Args:
        raw (mne.io.RawArray): Recording to fit the microstate maps on.
        start (float, optional): Start time of the data span to use.
            Defaults to None (start of recording).
        stop (float, optional): Stop time of the data span to use.
            Defaults to None (end of recording).
        reject_by_annotation (bool, optional): If True, omit spans annotated
            as bad when extracting the data. Defaults to True.
        gfp (bool, optional): If True, fit only on global field power peaks
            extracted from the data. Defaults to False.
        n_jobs (int, optional): Number of parallel jobs used for the
            restarts. Defaults to 1.
        verbose ([type], optional): Verbosity level. Defaults to None.

    Returns:
        mod_Kmeans: The fitted instance (``self``), allowing call chaining.
    """
    _validate_type(raw, (BaseRaw), 'raw', 'Raw')
    reject_by_annotation = 'omit' if reject_by_annotation else None
    start, stop = _check_start_stop(raw, start, stop)
    n_jobs = check_n_jobs(n_jobs)

    # Bug fix: original used `len(...) is not 0`, an int identity
    # comparison that is implementation-dependent; use truthiness instead.
    if raw.info['bads']:
        warn('Bad channels are present in the recording. '
             'They will still be used to compute microstate topographies. '
             'Consider using Raw.pick() or Raw.interpolate_bads()'
             ' before fitting.')

    data = raw.get_data(start, stop,
                        reject_by_annotation=reject_by_annotation)
    if gfp:
        data = _extract_gfps(data)

    # Bug fix: initialize all three so they are always bound, even if no
    # run achieves gev > 0 (previously best_maps could be undefined).
    best_gev, best_maps, best_segmentation = 0, None, None
    if n_jobs == 1:
        for _ in range(self.n_init):
            gev, maps, segmentation = self._run_mod_kmeans(data)
            if gev > best_gev:
                best_gev, best_maps, best_segmentation = (gev, maps,
                                                          segmentation)
    else:
        parallel, p_fun, _ = parallel_func(self._run_mod_kmeans,
                                           total=self.n_init,
                                           n_jobs=n_jobs)
        runs = parallel(p_fun(data) for _ in range(self.n_init))
        # Bug fix: np.array() over (gev, maps, segmentation) tuples built a
        # ragged object array (deprecated/erroring in modern NumPy) just to
        # argmax the first element; pick the best run directly instead.
        best_gev, best_maps, best_segmentation = max(
            runs, key=lambda run: run[0])

    self.cluster_centers = best_maps
    self.GEV = best_gev
    self.labels = best_segmentation
    self.current_fit = True
    return self
def predict(self, raw: mne.io.RawArray, reject_by_annotation: bool = True,
            half_window_size: int = 3, factor: int = 10, crit: float = 10e-6,
            verbose: str = None) -> np.ndarray:
    """Segment a recording into the fitted microstate label sequence.

    Args:
        raw (mne.io.RawArray): Recording to segment.
        reject_by_annotation (bool, optional): If True, smooth only the
            spans between 'BAD' annotations; samples inside bad spans keep
            label 0. Defaults to True.
        half_window_size (int, optional): Half-size of the smoothing
            window passed to ``segment``. Defaults to 3.
        factor (int, optional): Smoothing factor passed to ``segment``.
            Defaults to 10.
        crit (float, optional): Convergence criterion passed to
            ``segment``. Defaults to 10e-6.
        verbose (str, optional): Verbosity level. Defaults to None.

    Raises:
        ValueError: If called before :meth:`fit`.

    Returns:
        np.ndarray: One microstate label per sample.
    """
    if not self.current_fit:
        raise ValueError('mod_Kmeans is not fitted.')

    data = raw.get_data()
    if not reject_by_annotation:
        return segment(data, self.cluster_centers, half_window_size,
                       factor, crit)

    onsets, _ends = _annotations_starts_stops(raw, ['BAD'])
    if len(onsets) == 0:
        # No bad spans: segment the whole recording at once.
        return segment(data, self.cluster_centers, half_window_size,
                       factor, crit)

    # Good data lie between the end of one bad span and the onset of the
    # next; add virtual boundaries at the first and last sample so the
    # leading and trailing good spans are covered too.
    onsets = onsets.tolist()
    onsets.append(data.shape[-1] - 1)
    ends = [0]
    ends.extend(_ends.tolist())

    segmentation = np.zeros(data.shape[-1])
    for onset, end in zip(onsets, ends):
        # Spans shorter than one full smoothing window cannot be smoothed;
        # they keep the 0 (unlabeled) default.
        if onset - end >= 2 * half_window_size + 1:
            sample = data[:, end:onset]
            # Bug fix: removed leftover debug print of onset/end types.
            segmentation[end:onset] = segment(sample, self.cluster_centers,
                                              half_window_size, factor,
                                              crit)
    return segmentation
def _pyedf_saveas_edf(
    self,
    mne_raw: mne.io.RawArray,
    fname: Union[os.PathLike, str],
    events_list: List[Union[float, float, str]],
    picks=None,
    tmin=0,
    tmax=None,
    overwrite=False,
):
    """Save the raw content of an MNE Raw object to an EDF+ file.

    pyEDFlib is used to write the raw contents of the RawArray to disk.

    Parameters
    ----------
    mne_raw : mne.io.RawArray
        An object with super class mne.io.Raw that contains the data to save.
    fname : os.PathLike | str
        File name of the new dataset. This has to be a new filename
        unless data have been preloaded. Filenames should end with .edf.
    events_list : list of (onset, duration, description)
        Annotations to write; onset and duration in seconds, plus a
        description string.
    picks : array-like of int | None
        Indices of channels to include. If None all channels are kept.
    tmin : float | None
        Time in seconds of first sample to save. If None first sample is
        used.
    tmax : float | None
        Time in seconds of last sample to save. If None last sample is
        used.
    overwrite : bool
        If True, the destination file (if it exists) will be overwritten.
        If False (default), an error will be raised if the file exists.

    Returns
    -------
    bool
        True on success, False if writing the EDF failed.
    """
    if not issubclass(type(mne_raw), mne.io.BaseRaw):
        raise TypeError("Must be mne.io.Raw type")
    if not overwrite and os.path.exists(fname):
        raise OSError("File already exists. No overwrite.")

    # static settings
    file_type = pyedflib.FILETYPE_EDFPLUS
    sfreq = mne_raw.info["sfreq"]
    date = datetime.datetime.now().strftime("%d %b %Y %H:%M:%S")
    first_sample = int(sfreq * tmin)
    last_sample = int(sfreq * tmax) if tmax is not None else None

    # convert data
    channels = mne_raw.get_data(picks, start=first_sample, stop=last_sample)

    # convert to microvolts to scale up precision
    channels *= 1e6

    # set conversion parameters (EDF 16-bit digital range)
    dmin, dmax = -32768, 32767
    pmin, pmax = channels.min(), channels.max()
    n_channels = len(channels)

    # Create the writer outside the try so `f` is always bound for the
    # finally clause. (Bug fix: removed leftover debug print of fname.)
    f = pyedflib.EdfWriter(fname, n_channels=n_channels,
                           file_type=file_type)
    try:
        channel_info = []
        data_list = []
        for i in range(n_channels):
            ch_dict = {
                "label": mne_raw.ch_names[i],
                "dimension": "uV",
                "sample_rate": sfreq,
                "physical_min": pmin,
                "physical_max": pmax,
                "digital_min": dmin,
                "digital_max": dmax,
                "transducer": "",
                "prefilter": "",
            }
            channel_info.append(ch_dict)
            data_list.append(channels[i])

        f.setTechnician("eegio")
        f.setSignalHeaders(channel_info)
        for event in events_list:
            onset_in_seconds, duration_in_seconds, description = event
            # Bug fix: int() silently truncated fractional durations;
            # pyEDFlib accepts float durations.
            f.writeAnnotation(
                float(onset_in_seconds), float(duration_in_seconds),
                description
            )
        # NOTE(review): `date` is a string; pyEDFlib parses the
        # "%d %b %Y %H:%M:%S" format, but a datetime object would be safer.
        f.setStartdatetime(date)
        f.writeSamples(data_list)
    except Exception as e:
        # Best-effort write: report the failure and signal it via the
        # return value instead of raising.
        print(e)
        return False
    finally:
        f.close()
    return True
def write_edf_from_mne_raw_array(mne_raw: mne.io.RawArray, fname: str,
                                 ref_type='', annotations=False,
                                 new_date=False, picks=None, tmin=0,
                                 tmax=None, overwrite=True):
    """Save the raw content of an MNE Raw object to an EDF/EDF+ file.

    pyEDFlib is used to write the raw contents of the RawArray to disk.

    Parameters
    ----------
    mne_raw : mne.io.Raw
        An object with super class mne.io.Raw that contains the data to
        save.
    fname : string
        File name of the new dataset. This has to be a new filename
        unless data have been preloaded. Filenames should end with .edf.
    ref_type : str
        Reference type, written into the EDF equipment field as
        ``'ref=<ref_type>'``.
    annotations : bool
        If True write EDF+ (supports annotations), otherwise plain EDF.
    new_date : bool
        If True stamp the file with the current time; otherwise use the
        recording's measurement date from ``mne_raw.info['meas_date']``.
    picks : array-like of int | None
        Indices of channels to include. If None all channels are kept.
    tmin : float | None
        Time in seconds of first sample to save. If None first sample is
        used.
    tmax : float | None
        Time in seconds of last sample to save. If None last sample is
        used.
    overwrite : bool
        If True, the destination file (if it exists) will be overwritten.
        If False (default), an error will be raised if the file exists.

    Returns
    -------
    bool
        True on success.

    Raises
    ------
    TypeError
        If ``mne_raw`` is not an mne.io.Raw subclass.
    OSError
        If ``fname`` exists and ``overwrite`` is False.
    IOError
        If writing the EDF fails for any reason.
    """
    if not issubclass(type(mne_raw), mne.io.BaseRaw):
        raise TypeError('Must be mne.io.Raw type')
    if not overwrite and os.path.exists(fname):
        raise OSError('File already exists. No overwrite.')

    # static settings
    if annotations:
        file_type = pyedflib.FILETYPE_EDFPLUS
    else:
        file_type = pyedflib.FILETYPE_EDF
    sfreq = mne_raw.info['sfreq']
    # NOTE(review): assumes info['meas_date'] is a (timestamp, ...) tuple
    # (older MNE); newer MNE stores a datetime here — verify against the
    # MNE version in use.
    date = datetime.now().strftime('%d %b %Y %H:%M:%S') if new_date \
        else (datetime.fromtimestamp(mne_raw.info['meas_date'][0])
              ).strftime('%d %b %Y %H:%M:%S')
    first_sample = int(sfreq * tmin)
    last_sample = int(sfreq * tmax) if tmax is not None else None

    # convert data
    channels = mne_raw.get_data(picks, start=first_sample,
                                stop=last_sample)

    # convert to microvolts to scale up precision
    channels *= 1e6

    # set conversion parameters (EDF 16-bit digital range)
    dmin, dmax = -32768, 32767
    pmin, pmax = channels.min(), channels.max()
    n_channels = len(channels)

    # Bug fix: the writer was constructed inside the try-block, so if the
    # constructor raised, `f.close()` in the finally clause hit an unbound
    # name and masked the real error. Construct it before the try instead.
    f = pyedflib.EdfWriter(fname, n_channels=n_channels,
                           file_type=file_type)
    try:
        channel_info = []
        data_list = []
        for i in range(n_channels):
            ch_dict = {
                'label': mne_raw.ch_names[i],
                'dimension': 'uV',
                'sample_rate': sfreq,
                'physical_min': pmin,
                'physical_max': pmax,
                'digital_min': dmin,
                'digital_max': dmax,
                'transducer': '',
                'prefilter': ''
            }
            channel_info.append(ch_dict)
            data_list.append(channels[i])

        f.setTechnician('mednickdb')
        f.setEquipment('ref=' + ref_type)
        f.setSignalHeaders(channel_info)
        # NOTE(review): `date` is a string; pyEDFlib parses the
        # "%d %b %Y %H:%M:%S" format, but a datetime object would be safer.
        f.setStartdatetime(date)
        f.writeSamples(data_list)
    except Exception as e:
        raise IOError('EDF could not be written') from e
    finally:
        f.close()
    return True