Example no. 1
0
def clean_by_interp(inst):
    """Return a copy of ``inst`` cleaned by leave-one-out interpolation.

    Each channel in turn is marked bad and re-estimated from the remaining
    channels; the interpolated signals are collected into a copy of ``inst``
    while the input object is restored to its original data after each pass.
    """
    cleaned = inst.copy()
    pbar = ProgressBar(len(inst.info['ch_names']) - 1,
                       mesg='Creating augmented epochs', spinner=True)
    # The instance type does not change inside the loop, so decide once.
    is_evoked = isinstance(inst, mne.Evoked)
    is_epochs = isinstance(inst, mne.Epochs)
    for idx, name in enumerate(inst.info['ch_names']):
        pbar.update(idx + 1)
        # Keep the untouched channel so it can be restored afterwards.
        if is_evoked:
            backup = inst.data[idx].copy()
        elif is_epochs:
            backup = inst._data[:, idx].copy()

        inst.info['bads'] = [name]
        interpolate_bads(inst, reset_bads=True, mode='fast')

        # Store the interpolated channel in the clean copy, then undo the
        # interpolation on the working instance.
        if is_evoked:
            cleaned.data[idx] = inst.data[idx]
            inst.data[idx] = backup
        elif is_epochs:
            cleaned._data[:, idx] = inst._data[:, idx]
            inst._data[:, idx] = backup

    return cleaned
Example no. 2
0
 def permutations():
     """Yield every behavioral permutation, reporting progress if verbose."""
     # Quiet path: just pass the permutations through.
     if not verbose:
         for perm in Beh_perms:
             yield perm
         return
     bar = ProgressBar(len(Beh_perms), mesg='Performing permutations')
     for count, perm in enumerate(Beh_perms):
         bar.update(count)
         yield perm
Example no. 3
0
 def permutations():
     """Yield every sign flip, reporting progress if verbose."""
     # Quiet path: just pass the sign flips through.
     if not verbose:
         for flip in sign_flips:
             yield flip
         return
     bar = ProgressBar(len(sign_flips), mesg='Performing permutations')
     for count, flip in enumerate(sign_flips):
         bar.update(count)
         yield flip
Example no. 4
0
 def _upload_chunk(self, client, f_client, f_server):
     """Upload a local file to the server in 10 MiB chunks with a progress bar.

     Parameters
     ----------
     client : object
         Client exposing ``get_chunked_uploader``, ``request`` and
         ``rest_client`` (Dropbox-style chunked-upload API).
     f_client : str
         Path of the local file to upload.
     f_server : str
         Destination path on the server.

     Raises
     ------
     RuntimeError
         If more than 3 consecutive chunk uploads fail.
     """
     from StringIO import StringIO  # NOTE(review): Python 2 only module
     file_obj = open(f_client, 'rb')
     try:
         target_length = os.path.getsize(f_client)
         chunk_size = 10 * 1024 * 1024
         offset = 0
         uploader = client.get_chunked_uploader(file_obj, target_length)
         last_block = None
         params = dict()
         pbar = ProgressBar(target_length, spinner=True)
         error_count = 0
         while offset < target_length:
             if error_count > 3:
                 raise RuntimeError
             pbar.update(offset)
             next_chunk_size = min(chunk_size, target_length - offset)
             # read data if last chunk passed
             if last_block is None:
                 last_block = file_obj.read(next_chunk_size)
             # resume parameters are only needed after the first chunk
             if offset > 0:
                 params = dict(upload_id=uploader.upload_id, offset=offset)
             try:
                 url, ignored_params, headers = client.request(
                     "/chunked_upload",
                     params,
                     method='PUT',
                     content_server=True)
                 reply = client.rest_client.PUT(url, StringIO(last_block),
                                                headers)
                 new_offset = reply['offset']
                 uploader.upload_id = reply['upload_id']
                 # avoid reading data if last chunk didn't pass
                 if new_offset > offset:
                     offset = new_offset
                     last_block = None
                     # BUGFIX: was `error_count == 0`, a no-op comparison.
                     # Reset the consecutive-error counter after a
                     # successful chunk so unrelated earlier failures do
                     # not abort the upload.
                     error_count = 0
                 else:
                     error_count += 1
             except Exception:
                 error_count += 1
         if target_length > 0:
             pbar.update(target_length)
         print('')
     finally:
         # Always release the file handle, even if the upload aborts.
         file_obj.close()
     uploader.finish(f_server, overwrite=True)
Example no. 5
0
 def _upload_chunk(self, client, f_client, f_server):
     """Upload a local file to the server in 10 MiB chunks with a progress bar.

     Parameters
     ----------
     client : object
         Client exposing ``get_chunked_uploader``, ``request`` and
         ``rest_client`` (Dropbox-style chunked-upload API).
     f_client : str
         Path of the local file to upload.
     f_server : str
         Destination path on the server.

     Raises
     ------
     RuntimeError
         If more than 3 consecutive chunk uploads fail.
     """
     from StringIO import StringIO  # NOTE(review): Python 2 only module
     file_obj = open(f_client, 'rb')
     try:
         target_length = os.path.getsize(f_client)
         chunk_size = 10 * 1024 * 1024
         offset = 0
         uploader = client.get_chunked_uploader(file_obj, target_length)
         last_block = None
         params = dict()
         pbar = ProgressBar(target_length, spinner=True)
         error_count = 0
         while offset < target_length:
             if error_count > 3:
                 raise RuntimeError
             pbar.update(offset)
             next_chunk_size = min(chunk_size, target_length - offset)
             # read data if last chunk passed
             if last_block is None:
                 last_block = file_obj.read(next_chunk_size)
             # resume parameters are only needed after the first chunk
             if offset > 0:
                 params = dict(upload_id=uploader.upload_id, offset=offset)
             try:
                 url, ignored_params, headers = client.request(
                     "/chunked_upload", params, method='PUT',
                     content_server=True)
                 reply = client.rest_client.PUT(url, StringIO(last_block),
                                                headers)
                 new_offset = reply['offset']
                 uploader.upload_id = reply['upload_id']
                 # avoid reading data if last chunk didn't pass
                 if new_offset > offset:
                     offset = new_offset
                     last_block = None
                     # BUGFIX: was `error_count == 0`, a no-op comparison.
                     # Reset the consecutive-error counter after a
                     # successful chunk so unrelated earlier failures do
                     # not abort the upload.
                     error_count = 0
                 else:
                     error_count += 1
             except Exception:
                 error_count += 1
         if target_length > 0:
             pbar.update(target_length)
         print('')
     finally:
         # Always release the file handle, even if the upload aborts.
         file_obj.close()
     uploader.finish(f_server, overwrite=True)
Example no. 6
0
    def _interpolate_bad_epochs(self, epochs, ch_type):
        """Interpolate bad channels within each epoch, in place.

        For every epoch, channels flagged bad in ``self.drop_log`` are
        repaired by spatial interpolation, but only when the epoch has
        fewer bad channels than the consensus threshold.

        Parameters
        ----------
        epochs : instance of mne.Epochs
            The epochs object which must be fixed. Must be preloaded
            (``epochs._data`` is written directly).
        ch_type : str
            Channel type; currently unused here (see the commented-out
            ``self.scores_`` lookup below) — NOTE(review): confirm intent.
        """
        drop_log = self.drop_log
        # Codes used in fix_log -- 1: bad segment, 2: interpolated, 3: dropped
        self.fix_log = self.drop_log.copy()
        ch_names = drop_log.columns.values
        # Epochs with more bad channels than this are considered unrepairable.
        n_consensus = self.consensus_perc * len(ch_names)
        pbar = ProgressBar(len(epochs) - 1,
                           mesg='Repairing epochs: ',
                           spinner=True)
        # TODO: raise error if preload is not True
        for epoch_idx in range(len(epochs)):
            pbar.update(epoch_idx + 1)
            # ch_score = self.scores_[ch_type][epoch_idx]
            # sorted_ch_idx = np.argsort(ch_score)
            n_bads = drop_log.ix[epoch_idx].sum()
            if n_bads == 0 or n_bads > n_consensus:
                # Nothing to repair, or too many bad channels to trust
                # interpolation for this epoch.
                continue
            else:
                if n_bads <= self.n_interpolate:
                    # Few enough bad channels: repair all of them
                    # (boolean mask over channels).
                    bad_chs = drop_log.ix[epoch_idx].values == 1
                else:
                    # get peak-to-peak for channels in that epoch
                    data = epochs[epoch_idx].get_data()[0, :, :]
                    peaks = np.ptp(data, axis=-1)
                    # find channels which are bad by rejection threshold
                    bad_chs = np.where(drop_log.ix[epoch_idx].values == 1)[0]
                    # find the ordering of channels amongst the bad channels
                    sorted_ch_idx = np.argsort(peaks[bad_chs])[::-1]
                    # then select only the worst n_interpolate channels
                    bad_chs = bad_chs[sorted_ch_idx[:self.n_interpolate]]

            # Record the repair, then interpolate on a single-epoch copy and
            # write the repaired data back into the preloaded array.
            # NOTE(review): DataFrame.ix is removed in modern pandas.
            self.fix_log.ix[epoch_idx][bad_chs] = 2
            bad_chs = ch_names[bad_chs].tolist()
            epoch = epochs[epoch_idx]
            epoch.info['bads'] = bad_chs
            interpolate_bads(epoch, reset_bads=True)
            epochs._data[epoch_idx] = epoch._data
Example no. 7
0
    def _interpolate_bad_epochs(self, epochs, ch_type):
        """Interpolate bad channels within each epoch, in place.

        Channels flagged bad in ``self.drop_log`` are repaired per-epoch by
        spatial interpolation when the number of bad channels stays below
        the consensus threshold; otherwise the epoch is left untouched.

        Parameters
        ----------
        epochs : instance of mne.Epochs
            The epochs object which must be fixed. Must be preloaded
            (``epochs._data`` is written directly).
        ch_type : str
            Channel type; unused in the current body (only referenced by the
            commented-out ``self.scores_`` code) — NOTE(review): confirm.
        """
        drop_log = self.drop_log
        # fix_log codes -- 1: bad segment, 2: interpolated, 3: dropped
        self.fix_log = self.drop_log.copy()
        ch_names = drop_log.columns.values
        # Maximum number of bad channels for an epoch to still be repairable.
        n_consensus = self.consensus_perc * len(ch_names)
        pbar = ProgressBar(len(epochs) - 1, mesg='Repairing epochs: ',
                           spinner=True)
        # TODO: raise error if preload is not True
        for epoch_idx in range(len(epochs)):
            pbar.update(epoch_idx + 1)
            # ch_score = self.scores_[ch_type][epoch_idx]
            # sorted_ch_idx = np.argsort(ch_score)
            n_bads = drop_log.ix[epoch_idx].sum()
            if n_bads == 0 or n_bads > n_consensus:
                # Either nothing to fix or the epoch exceeds the consensus
                # threshold and cannot be trusted after interpolation.
                continue
            else:
                if n_bads <= self.n_interpolate:
                    # Repair every flagged channel (boolean mask).
                    bad_chs = drop_log.ix[epoch_idx].values == 1
                else:
                    # get peak-to-peak for channels in that epoch
                    data = epochs[epoch_idx].get_data()[0, :, :]
                    peaks = np.ptp(data, axis=-1)
                    # find channels which are bad by rejection threshold
                    bad_chs = np.where(drop_log.ix[epoch_idx].values == 1)[0]
                    # find the ordering of channels amongst the bad channels
                    sorted_ch_idx = np.argsort(peaks[bad_chs])[::-1]
                    # then select only the worst n_interpolate channels
                    bad_chs = bad_chs[sorted_ch_idx[:self.n_interpolate]]

            # Log the repaired channels, interpolate them on a single-epoch
            # copy, then copy the repaired data back into the full array.
            # NOTE(review): DataFrame.ix is removed in modern pandas.
            self.fix_log.ix[epoch_idx][bad_chs] = 2
            bad_chs = ch_names[bad_chs].tolist()
            epoch = epochs[epoch_idx]
            epoch.info['bads'] = bad_chs
            interpolate_bads(epoch, reset_bads=True)
            epochs._data[epoch_idx] = epoch._data
Example no. 8
0
    def fit_ica(self, data, when='next', warm_start=False):
        """Conduct Independent Components Analysis (ICA) on a segment of data.

        The fitted ICA object is stored in the variable ica. Noisy components
        can be selected in the ICA, and then the ICA can be applied to incoming
        data to remove noise. Once fitted, ICA is applied by default to data
        when using the methods make_raw() or make_epochs().

        Components marked for removal can be accessed with self.ica.exclude.

        Parameters
        ----------
        data : int, float, mne.RawArray
            The duration of previous or incoming data to use to fit the ICA, or
            an mne.RawArray object of data.
        when : {'previous', 'next'} (defaults to 'next')
            Whether to compute ICA on the previous or next X seconds of data.
            Can be 'next' or 'previous' (case-insensitive). If data is type
            mne.RawArray, this parameter is ignored.
        warm_start : bool (defaults to False)
            If True, will include the EEG data from the previous fit. If False,
            will only use the data specified in the parameter data.

        Raises
        ------
        ValueError
            If ``when`` is neither 'previous' nor 'next'.
        TypeError
            If ``data`` is neither a number nor an mne.io.RawArray.
        """
        # Re-define ICA variable to start ICA from scratch if the ICA was
        # already fitted and user wants to fit again.
        if self.ica.current_fit != 'unfitted':
            self.ica = ICA(method='extended-infomax')

        if isinstance(data, io.RawArray):
            self.raw_for_ica = data

        elif isinstance(data, numbers.Number):
            user_index = int(data * self.info['sfreq'])
            # BUGFIX: normalize the case once. Previously the validation used
            # when.lower() but the branches compared the raw string, so e.g.
            # when='Next' passed validation yet matched no branch, leaving
            # start_index/end_index undefined (NameError below).
            direction = when.lower()
            if direction not in ('previous', 'next'):
                raise ValueError("when must be 'previous' or 'next'. {} was "
                                 "passed.".format(when))
            elif direction == 'previous':
                end_index = len(self.data)
                start_index = end_index - user_index
                # TODO: Check if out of bounds.

            else:  # direction == 'next'
                start_index = len(self.data)
                end_index = start_index + user_index
                # Wait until the data is available.
                pbar = ProgressBar(end_index - start_index,
                                   mesg="Collecting data")
                while len(self.data) <= end_index:
                    # Sometimes sys.stdout.flush() raises ValueError. Is it
                    # because the while loop iterates too quickly for I/O?
                    try:
                        pbar.update(len(self.data) - start_index)
                    except ValueError:
                        pass
                print("")  # Get onto new line after progress bar finishes.

            _data = np.array([r[:] for r in
                              self.data[start_index:end_index]]).T

            # Now we have the data array in _data. Use it to make instance of
            # mne.RawArray, and then we can compute the ICA on that instance.
            _data[-1, :] = 0

            # Use previous data in addition to the specified data when fitting
            # the ICA, if the user requested this.
            if warm_start and self.raw_for_ica is not None:
                self.raw_for_ica = concatenate_raws(
                    [self.raw_for_ica, io.RawArray(_data, self.info)])
            else:
                self.raw_for_ica = io.RawArray(_data, self.info)

        else:
            # Previously an unsupported type fell through silently and the
            # fit ran on whatever self.raw_for_ica happened to hold.
            raise TypeError("data must be a number or mne.io.RawArray, got "
                            "{}".format(type(data)))

        logger.info("Computing ICA solution ...")
        t_0 = local_clock()
        self.ica.fit(self.raw_for_ica.copy())  # Fits in-place.
        logger.info("Finished in {:.2f} s".format(local_clock() - t_0))