Example #1
def plot_average(filenames, save_plot=True, show_plot=False, dpi=100):
    ''' Plot Signal average from a list of averaged files. '''

    fname = get_files_from_list(filenames)

    # plot averages
    pl.ioff()  # switch off (interactive) plot visualisation
    factor = 1e15
    for fnavg in fname:
        name = fnavg[0:len(fnavg) - 4]
        basename = os.path.splitext(os.path.basename(name))[0]
        print(fnavg)
        # mne.read_evokeds provides a list or a single evoked based on condition.
        # here we assume only one evoked is returned (requires further handling)
        avg = mne.read_evokeds(fnavg)[0]
        ymin, ymax = avg.data.min(), avg.data.max()
        ymin *= factor * 1.1
        ymax *= factor * 1.1
        fig = pl.figure(basename, figsize=(10, 8), dpi=dpi)  # honour the dpi argument
        pl.clf()
        pl.ylim([ymin, ymax])
        pl.xlim([avg.times.min(), avg.times.max()])
        pl.plot(avg.times, avg.data.T * factor, color='black')
        pl.title(basename)

        # save figure
        if save_plot:
            fnfig = os.path.splitext(fnavg)[0] + '.png'
            pl.savefig(fnfig, dpi=dpi)

        # show figure on request
        if show_plot:
            pl.show()

    pl.ion()  # switch on (interactive) plot visualisation
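
A minimal usage sketch for the function above (file names hypothetical; plot_average relies on module-level imports such as os, mne, pl and the helper get_files_from_list):

# plot two averaged files; one PNG is written next to each input file
plot_average(['subj01,trigger-ave.fif', 'subj02,trigger-ave.fif'],
             save_plot=True, show_plot=False, dpi=150)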
Example #2
def perform_detrending(fname_raw, save=True):

    ''' Removes a linear trend from all magnetometer channels. '''
    from mne.io import Raw
    from numpy import poly1d, polyfit

    fnraw = get_files_from_list(fname_raw)

    # loop across all filenames
    for fname in fnraw:

        # read data in
        raw = Raw(fname, preload=True)

        # get channels
        picks = mne.pick_types(raw.info, meg='mag', ref_meg=True, eeg=False, stim=False,
                               eog=False, exclude='bads')
        xval = np.arange(raw._data.shape[1])

        # loop over all channels
        for ipick in picks:
            coeff = polyfit(xval, raw._data[ipick, :], deg=1)
            trend = poly1d(coeff)
            raw._data[ipick, :] -= trend(xval)

        # save detrended data (per file; 'fname_raw' may be a list)
        if save:
            fnout = fname[:fname.rfind('-raw.fif')] + ',dt-raw.fif'
            raw.save(fnout, overwrite=True)

    return raw
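
The per-channel operation above is an ordinary least-squares line fit that is subtracted from the data. A self-contained numpy sketch of that single step on synthetic data:

import numpy as np

x = np.arange(1000)
chan = 2.5e-13 * x + 1e-12 * np.random.randn(1000)  # channel with slow drift
coeff = np.polyfit(x, chan, deg=1)                  # fit slope and offset
detrended = chan - np.poly1d(coeff)(x)              # subtract the fitted line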
Example #3
def apply_create_noise_covariance(fname_empty_room, require_filter=True,
                                  verbose=None):

    '''
    Creates the noise covariance matrix from an empty room file.

    Parameters
    ----------
    fname_empty_room : str or list of str
        Name of the empty room file (must be a fif-file).
    require_filter : bool
        If True, the empty room file is filtered before calculating
        the covariance matrix. (Beware, filter settings are fixed.)
    verbose : bool, str, int, or None
        If not None, override default verbose level
        (see mne.verbose). Default: verbose=None.
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from mne import compute_raw_data_covariance as cp_covariance
    from mne import write_cov, pick_types
    from mne.io import Raw

    fner = get_files_from_list(fname_empty_room)

    nfiles = len(fner)

    # loop across all filenames
    for ifile in range(nfiles):
        fn_in = fner[ifile]
        print ">>> create noise covariance using file: "
        path_in, name = os.path.split(fn_in)
        print name

        if require_filter:
            print "Filtering with preset settings..."
            # filter empty room raw data
            apply_filter(fn_in, flow=1, fhigh=45, order=4, njobs=4)
            # reconstruct empty room file name accordingly
            fn_in = fn_in.split('-')[0] + ',fibp1-45-empty.fif'

        # file name for saving noise_cov
        fn_out = fn_in[:fn_in.rfind(ext_empty_raw)] + ext_empty_cov

        # read in data
        raw_empty = Raw(fn_in, verbose=verbose)

        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, ref_meg=False, eeg=False,
                           stim=False, eog=False, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty, picks=picks, verbose=verbose)

        # write noise-covariance matrix to disk
        write_cov(fn_out, noise_cov_mat)
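
Note: compute_raw_data_covariance is the historic name; current MNE-Python releases expose the same functionality as mne.compute_raw_covariance. A sketch of the equivalent modern call (file names hypothetical):

import mne

raw_empty = mne.io.read_raw_fif('empty_room-raw.fif', preload=True)
picks = mne.pick_types(raw_empty.info, meg=True, exclude='bads')
noise_cov = mne.compute_raw_covariance(raw_empty, picks=picks)
mne.write_cov('empty_room-cov.fif', noise_cov)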
Example #4
def apply_ica(fname_filtered, n_components=0.99, decim=None,
              reject={'mag': 5e-12}, ica_method='fastica',
              flow=None, fhigh=None, verbose=True):

    ''' Applies ICA to a list of (filtered) raw files. '''

    from mne.preprocessing import ICA

    fnfilt = get_files_from_list(fname_filtered)

    # loop across all filenames
    for fname in fnfilt:
        name = os.path.split(fname)[1]
        print ">>>> perform ICA signal decomposition on :  " + name
        # load filtered data
        raw = mne.io.Raw(fname, preload=True)
        picks = mne.pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')

        # check if data to estimate the optimal
        # de-mixing matrix should be filtered
        if flow or fhigh:
            from jumeg.filter import jumeg_filter

            # define filter type
            if not flow:
                filter_type = 'lp'
                filter_info = "     --> filter parameter    : filter type=low pass %dHz" % fhigh
            elif not fhigh:
                filter_type = 'hp'
                filter_info = "     --> filter parameter    : filter type=high pass %dHz" % flow
            else:
                filter_type = 'bp'
                filter_info = "     --> filter parameter: filter type=band pass %d-%dHz" % (flow, fhigh)

            if verbose:
                print(">>>> NOTE: Optimal cleaning parameters are estimated from filtered data!")
                print(filter_info)

            fi_mne_notch = jumeg_filter(fcut1=flow, fcut2=fhigh, filter_type=filter_type,
                                        remove_dcoffset=False,
                                        sampling_frequency=raw.info['sfreq'])
            fi_mne_notch.apply_filter(raw._data, picks=picks)

        # ICA decomposition
        ica = ICA(method=ica_method, n_components=n_components,
                  max_pca_components=None)

        ica.fit(raw, picks=picks, decim=decim, reject=reject)

        # save ICA object
        fnica_out = fname[:fname.rfind(ext_raw)] + ext_ica
        # fnica_out = fname[0:len(fname)-4]+'-ica.fif'
        ica.save(fnica_out)
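
Usage sketch (file name hypothetical; ext_raw and ext_ica are module-level globals of the surrounding package, e.g. '-raw.fif' and '-ica.fif'):

# estimate the demixing matrix on a 1-20 Hz band-passed copy of the data
apply_ica(['subj01,fibp1-45-raw.fif'], n_components=0.99,
          decim=3, flow=1.0, fhigh=20.0)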
Example #5
def apply_ica_select_brain_response(fname_clean_raw, n_pca_components=None,
                                    conditions=['trigger'], include=None):

    ''' Performs ICA recomposition using only the selected brain-response
        components for a list of (ICA) files.
        fname_clean_raw: raw data after ECG and EOG rejection.
        n_pca_components: ICA's recomposition parameter.
        conditions: the event kind used to recompose the raw data; it can be
                    'trigger', 'response', or include both conditions.
    '''

    fnlist = get_files_from_list(fname_clean_raw)

    # loop across all filenames
    for fn_clean in fnlist:
        #basename = fn_ctps_ics.rsplit('ctps')[0].rstrip(',')
        basename = fn_clean.split(ext_raw)[0]
        fnfilt = fn_clean
        fnarica = basename + ext_ica

        # load filtered and artefact removed data
        meg_raw = mne.io.Raw(fnfilt, preload=True)
        picks = mne.pick_types(meg_raw.info, meg=True, exclude='bads')
        # ICA decomposition
        ica = mne.preprocessing.read_ica(fnarica)

        # loop across different event IDs
        ctps_ics = []
        descrip_id = ''
        for event in conditions:
            fn_ics_eve = basename + prefix_ctps + event + '-ic_selection.txt'
            ctps_ics_eve = np.loadtxt(fn_ics_eve, dtype=int, delimiter=',')
            ctps_ics += (list(ctps_ics_eve - 1))
            descrip_id += ',' + event
        #To keep the index unique
        ctps_ics = list(set(ctps_ics))
        fnclean_eve = fn_ics_eve.split(',ctpsbr')[0] +\
            '%s,ctpsbr-raw.fif' % descrip_id

        # clean and save MEG data
        if n_pca_components:
            npca = n_pca_components
        else:
            npca = picks.size

        meg_clean = ica.apply(meg_raw, include=ctps_ics, n_pca_components=npca,
                              copy=True)
        if not meg_clean.info['description']:
            meg_clean.info['description'] = ('Raw recomposed from CTPS-selected '
                                             'ICA components for brain '
                                             'responses only.')
        meg_clean.save(fnclean_eve, overwrite=True)
        plot_compare_brain_responses(fname_clean_raw, fnclean_eve)
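
Usage sketch (file name hypothetical; the per-condition '-ic_selection.txt' files written by apply_ctps_select_ic below must already exist):

# recompose the raw data from ICs selected for trigger and response events
apply_ica_select_brain_response('subj01,rej-raw.fif',
                                conditions=['trigger', 'response'])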
Example #6
def compute_forward_solution(fname_raw, subjects_dir=None, spacing='ico4',
                             mindist=5, eeg=False, overwrite=False):
    '''Performs forward solution computation using mne_do_forward_solution
       (uses MNE-C binaries).

       Requires BEM solution files and the raw measurement file.

    Parameters
    ----------
    fname_raw : str or list
        List of raw files for which to compute the forward operator.

    Returns
    -------
    None. The forward operator -fwd.fif will be saved.
    '''

    from mne import do_forward_solution

    fnames = get_files_from_list(fname_raw)

    if subjects_dir is None:
        if 'SUBJECTS_DIR' in os.environ:
            subjects_dir = os.environ['SUBJECTS_DIR']
        else:
            print('Please set SUBJECTS_DIR.')

    for fname in fnames:
        print('Computing fwd solution for %s' % (fname))

        basename = os.path.basename(fname).split('-raw.fif')[0]
        subject = basename.split('_')[0]
        meas_fname = os.path.basename(fname)
        fwd_fname = meas_fname.rsplit('-raw.fif')[0] + '-fwd.fif'
        src_fname = subject + '-ico-4-src.fif'
        bem_fname = subject + '-5120-5120-5120-bem-sol.fif'
        trans_fname = subject + '-trans.fif'

        fwd = do_forward_solution(subject, meas_fname, fname=fwd_fname, src=src_fname,
                                  spacing=spacing, mindist=mindist, bem=bem_fname, mri=trans_fname,
                                  eeg=eeg, overwrite=overwrite, subjects_dir=subjects_dir)

        # fwd['surf_ori'] = True
        # to read forward solutions
        # fwd = mne.read_forward_solution(fwd_fname)

        print('Forward operator saved in file %s' % (fwd_fname))
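
The call relies on the MNE-C file-name conventions; for a hypothetical subject 'mysubj' the following files are expected to exist next to the measurement:

# mysubj_audi-raw.fif                 raw measurement
# mysubj-ico-4-src.fif                source space
# mysubj-5120-5120-5120-bem-sol.fif   BEM solution
# mysubj-trans.fif                    MRI<->head transform
compute_forward_solution('mysubj_audi-raw.fif', spacing='ico4',
                         mindist=5, overwrite=True)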
Example #7
def apply_empty(fname_empty_room, require_filter=True):
    from jumeg.jumeg_noise_reducer import noise_reducer
    fner = get_files_from_list(fname_empty_room)
    nfiles = len(fner)
    # loop across all filenames
    for ifile in range(nfiles):
        fn_in = fner[ifile]
        path_in, name = os.path.split(fn_in)
        fn_empty_nr = fn_in[:fn_in.rfind(ext_empty_raw)] + ',nr-empty.fif'
        noise_reducer(fn_in, refnotch=50, detrending=False, fnout=fn_empty_nr)
        noise_reducer(fn_empty_nr,
                      refnotch=60,
                      detrending=False,
                      fnout=fn_empty_nr)
        noise_reducer(fn_empty_nr, reflp=5, fnout=fn_empty_nr)
        fn_in = fn_empty_nr
        if require_filter:
            print "Filtering with preset settings..."
            # filter empty room raw data
            apply_filter(fn_in, flow=1, fhigh=45, order=4, njobs=4)
Example #8
def apply_filter(fname_raw, flow=1, fhigh=45, order=4, njobs=4):

    ''' Applies the MNE butterworth filter to a list of raw files. '''

    filter_type = 'butter'
    filt_method = 'fft'

    fnraw = get_files_from_list(fname_raw)

    # loop across all filenames
    for fname in fnraw:
        print ">>> filter raw data: %0.1f - %0.1f..." % (flow, fhigh)
        # load raw data
        raw = mne.io.Raw(fname, preload=True)
        # filter raw data
        raw.filter(flow, fhigh, n_jobs=njobs, method=filt_method)
        # raw.filter(l_freq=flow_raw, h_freq=fhigh_raw, n_jobs=njobs, method='iir',
        #     iir_params={'ftype': filter_type, 'order': order})
        print ">>>> writing filtered data to disk..."
        name_raw = fname[:fname.rfind('-')]  # fname.split('-')[0]
        fnfilt = name_raw + prefix_filt + "%d-%d" % (flow, fhigh)
        fnfilt = fnfilt + fname[fname.rfind('-'):]  # fname.split('-')[1]
        print('saving: ' + fnfilt)
        raw.save(fnfilt, overwrite=True)
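
Usage sketch: the band is spliced into the file name before its last '-' segment (assuming prefix_filt = ',fibp', as the empty-room example above suggests):

apply_filter('subj01-raw.fif', flow=1, fhigh=45)
# -> writes 'subj01,fibp1-45-raw.fif'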
Example #9
def plot_denoising_4raw_data(fname_raw,
                             fmin=0,
                             fmax=300,
                             tmin=0.0,
                             tmax=60.0,
                             proj=False,
                             n_fft=4096,
                             color='blue',
                             stim_name=None,
                             event_id=1,
                             tmin_stim=-0.2,
                             tmax_stim=0.5,
                             area_mode='range',
                             area_alpha=0.33,
                             n_jobs=1,
                             title1='before denoising',
                             title2='after denoising',
                             info=None,
                             show=True,
                             fnout=None):
    """Plot the power spectral density across channels to show denoising.

    Parameters
    ----------
    fname_raw : list or str
        List of raw files to compare, without and with denoising.
    tmin : float
        Start time for calculations.
    tmax : float
        End time for calculations.
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    proj : bool
        Apply projection.
    n_fft : int
        Number of points to use in Welch FFT calculations.
    color : str | tuple
        A matplotlib-compatible color to use.
    area_mode : str | None
        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
        will be plotted. If 'range', the min and max (across channels) will be
        plotted. Bad channels will be excluded from these calculations.
        If None, no area will be plotted.
    area_alpha : float
        Alpha for the area.
    info : str | None
        If set, additional information (e.g. the selected ICs) is
        displayed in the figure.
    show : bool
        Show figure.
    fnout : str
        Name of the saved output figure. If None, no figure will be saved.
    title1, title2 : str
        Title for two psd plots.
    n_jobs : int
        Number of jobs to use for parallel computation.
    stim_name : str
        Name of the stim channel. If stim_name is set, the plot of epochs average
        is also shown alongside the PSD plots.
    event_id : int
        ID of the stim event. (only when stim_name is set)

    Example Usage
    -------------
    plot_denoising_4raw_data(['orig-raw.fif', 'orig,nr-raw.fif'], fnout='example')
    """

    from matplotlib import gridspec as grd
    import matplotlib.pyplot as plt
    from mne.time_frequency import compute_raw_psd

    fnraw = get_files_from_list(fname_raw)

    # ---------------------------------
    # estimate power spectrum
    # ---------------------------------
    psds_all = []
    freqs_all = []

    # loop across all filenames
    for fname in fnraw:

        # read in data
        raw = mne.io.Raw(fname, preload=True)
        picks = mne.pick_types(raw.info,
                               meg='mag',
                               eeg=False,
                               stim=False,
                               eog=False,
                               exclude='bads')

        if area_mode not in [None, 'std', 'range']:
            raise ValueError('"area_mode" must be "std", "range", or None')

        psds, freqs = compute_raw_psd(raw,
                                      picks=picks,
                                      fmin=fmin,
                                      fmax=fmax,
                                      tmin=tmin,
                                      tmax=tmax,
                                      n_fft=n_fft,
                                      n_jobs=n_jobs,
                                      proj=proj)
        psds_all.append(psds)
        freqs_all.append(freqs)

    if stim_name:
        n_xplots = 2

        # get some infos
        events = mne.find_events(raw, stim_channel=stim_name, consecutive=True)

    else:
        n_xplots = 1

    fig = plt.figure('denoising', figsize=(16, 6 * n_xplots))
    gs = grd.GridSpec(n_xplots, len(psds_all))

    # loop across all filenames
    for idx in range(len(psds_all)):

        # ---------------------------------
        # plot power spectrum
        # ---------------------------------
        p1 = plt.subplot(gs[0, idx])

        # Convert PSDs to dB
        psds = 10 * np.log10(psds_all[idx])
        psd_mean = np.mean(psds, axis=0)
        if area_mode == 'std':
            psd_std = np.std(psds, axis=0)
            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
        elif area_mode == 'range':
            hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
        else:  # area_mode is None
            hyp_limits = None

        p1.plot(freqs_all[idx], psd_mean, color=color)
        if hyp_limits is not None:
            p1.fill_between(freqs_all[idx],
                            hyp_limits[0],
                            y2=hyp_limits[1],
                            color=color,
                            alpha=area_alpha)

        if idx == 0:
            p1.set_title(title1)
            ylim = [np.min(psd_mean) - 10, np.max(psd_mean) + 10]
        else:
            p1.set_title(title2)

        p1.set_xlabel('Freq (Hz)')
        p1.set_ylabel('Power Spectral Density (dB/Hz)')
        p1.set_xlim(freqs_all[idx][0], freqs_all[idx][-1])
        p1.set_ylim(ylim[0], ylim[1])

        # ---------------------------------
        # plot signal around stimulus
        # onset
        # ---------------------------------
        if stim_name:
            raw = mne.io.Raw(fnraw[idx], preload=True)
            epochs = mne.Epochs(raw,
                                events,
                                event_id,
                                proj=False,
                                tmin=tmin_stim,
                                tmax=tmax_stim,
                                picks=picks,
                                preload=True,
                                baseline=(None, None))
            evoked = epochs.average()
            if idx == 0:
                ymin = np.min(evoked.data)
                ymax = np.max(evoked.data)

            times = evoked.times * 1e3
            p2 = plt.subplot(gs[1, idx])
            p2.plot(times, evoked.data.T, 'blue', linewidth=0.5)
            p2.set_xlim(times[0], times[-1])
            p2.set_ylim(1.1 * ymin, 1.1 * ymax)

            if (idx == 1) and info:
                plt.text(times[0], 0.9 * ymax, '  ICs: ' + str(info))

    # save image
    if fnout:
        fig.savefig(fnout + '.png', format='png')

    # show image if requested
    if show:
        plt.show()

    plt.close('denoising')
    plt.ion()
Example #10
def noise_reducer(fname_raw, raw=None, signals=[], noiseref=[], detrending=None,
                  tmin=None, tmax=None, reflp=None, refhp=None, refnotch=None,
                  exclude_artifacts=True, checkresults=True, return_raw=False,
                  complementary_signal=False, fnout=None, verbose=False):
    """
    Apply noise reduction to signal channels using reference channels.

    Parameters
    ----------
    fname_raw : (list of) rawfile name(s)
    raw : mne Raw objects
        Allows passing of (preloaded) raw object in addition to fname_raw
        or solely (use fname_raw=None in this case).
    signals : list of string
              List of channels to compensate using noiseref.
              If empty use the meg signal channels.
    noiseref : list of string | str
              List of channels to use as noise reference.
              If empty use the magnetic reference channels (default).
    signals and noiseref may contain regexp, which are resolved
    using mne.pick_channels_regexp(). All other channels are copied.
    tmin : lower latency bound for weight-calc [start of trace]
    tmax : upper latency bound for weight-calc [ end  of trace]
           Weights are calc'd for (tmin,tmax), but applied to entire data set
    refhp : high-pass frequency for reference signal filter [None]
    reflp :  low-pass frequency for reference signal filter [None]
            reflp < refhp: band-stop filter
            reflp > refhp: band-pass filter
            reflp is not None, refhp is None: low-pass filter
            reflp is None, refhp is not None: high-pass filter
    refnotch : (list of) notch frequencies for reference signal filter [None]
               use raw(ref)-notched(ref) as reference signal
    exclude_artifacts: filter signal-channels through _is_good() [True]
                       (parameters are at present hard-coded!)
    return_raw : bool
        If return_raw is true, the raw object is returned and raw file
        is not written to disk unless fnout is explicitly specified.
        It is suggested that this option be used in cases where the
        noise_reducer is applied multiple times. [False]
    fnout : explicit specification for an output file name [None]
        Automatic filenames replace '-raw.fif' by ',nr-raw.fif'.
    complementary_signal : replace the signal by the traces that would be
                           subtracted [False]
                           (can be useful for debugging)
    detrending : bool to control subtraction of a linear trend from all
                 magnetic channels [False]
    checkresults : bool to control internal checks and overall success
                   report [True]

    Output file
    -----------
    <wawa>,nr-raw.fif for input <wawa>-raw.fif

    Returns
    -------
    If return_raw is True, then mne.io.Raw instance is returned.

    Bugs
    ----
    - artifact checking is incomplete (and with arb. window of tstep=0.2s)
    - no accounting of channels used as signal/reference
    - non existing input file handled ungracefully
    """

    if not isinstance(complementary_signal, bool):
        raise ValueError("Argument complementary_signal must be of type bool")

    # handle error if Raw object passed with file list
    if raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with '
                         'one Raw object')

    # handle error if return_raw is requested with file list
    if return_raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with '
                         'return_raw. Please pass one file at a time.')

    # handle error if Raw object is passed with detrending option
    # TODO include perform_detrending for Raw objects
    if raw and detrending:
        raise ValueError('Please perform detrending on the raw file directly. '
                         'Cannot perform detrending on the raw object')

    # Handle combinations of fname_raw and raw object:
    if fname_raw is not None:
        fnraw = get_files_from_list(fname_raw)
        have_input_file = True
    elif raw is not None:
        if 'filename' in raw.info:
            fnraw = [os.path.basename(raw.filenames[0])]
        else:
            fnraw = [raw._filenames[0]]  # keep a list so the loop below iterates file names
        warnings.warn('Setting file name from Raw object')
        have_input_file = False
        if fnout is None and not return_raw:
            raise ValueError('Refusing to waste resources without result')
    else:
        raise ValueError('Refusing Creatio ex nihilo')

    # loop across all filenames
    for fname in fnraw:

        if verbose:
            print("########## Read raw data:")

        tc0 = time.process_time()  # time.clock() was removed in Python 3.8
        tw0 = time.time()

        if raw is None:
            if detrending:
                raw = perform_detrending(fname, save=False)
            else:
                raw = mne.io.Raw(fname, preload=True)
        else:
            # perform sanity check to make sure Raw object and file are same
            if 'filename' in raw.info:
                fnintern = os.path.basename(raw.filenames[0])
            else:
                fnintern = raw._filenames[0]
            if os.path.basename(fname) != os.path.basename(fnintern):
                warnings.warn('The file name within the Raw object and provided\n   '
                              'fname are not the same. Please check again.')

        tc1 = time.process_time()
        tw1 = time.time()

        if verbose:
            print(">>> loading raw data took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tc0)), (tw1 - tw0)))

        # Time window selection
        # weights are calc'd based on [tmin,tmax], but applied to the entire data set.
        # tstep is used in artifact detection
        # tmin,tmax variables must not be changed here!
        if tmin is None:
            itmin = 0
        else:
            itmin = int(floor(tmin * raw.info['sfreq']))
        if tmax is None:
            itmax = raw.last_samp - raw.first_samp
        else:
            itmax = int(ceil(tmax * raw.info['sfreq']))

        if itmax - itmin < 2:
            raise ValueError("Time-window for noise compensation empty or too short")

        if verbose:
            print(">>> Set time-range to [%7.3f,%7.3f]" % \
                  (raw.times[itmin], raw.times[itmax]))

        if signals is None or len(signals) == 0:
            sigpick = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False,
                                     eog=False, exclude='bads')
        else:
            sigpick = channel_indices_from_list(raw.info['ch_names'][:], signals,
                                                raw.info.get('bads'))
        nsig = len(sigpick)
        if nsig == 0:
            raise ValueError("No channel selected for noise compensation")

        if noiseref is None or len(noiseref) == 0:
            # References are not limited to 4D ref-chans, but can be anything,
            # incl. ECG or powerline monitor.
            if verbose:
                print(">>> Using all refchans.")
            refexclude = "bads"
            refpick = mne.pick_types(raw.info, ref_meg=True, meg=False,
                                     eeg=False, stim=False,
                                     eog=False, exclude='bads')
        else:
            refpick = channel_indices_from_list(raw.info['ch_names'][:],
                                                noiseref, raw.info.get('bads'))
        nref = len(refpick)
        if nref == 0:
            raise ValueError("No channel selected as noise reference")

        if verbose:
            print(">>> sigpick: %3d chans, refpick: %3d chans" % (nsig, nref))
        badpick = np.intersect1d(sigpick, refpick, assume_unique=False)
        if len(badpick) > 0:
            raise Warning("Intersection of signal and reference channels not empty")

        if reflp is None and refhp is None and refnotch is None:
            use_reffilter = False
            use_refantinotch = False
        else:
            use_reffilter = True
            if verbose:
                print("########## Filter reference channels:")

            use_refantinotch = False
            if refnotch is not None:
                if reflp is not None or refhp is not None:
                    raise ValueError("Cannot specify notch- and high-/low-pass"
                                     "reference filter together")
                nyquist = (0.5 * raw.info['sfreq'])
                if isinstance(refnotch, list):
                    notchfrqs = refnotch
                else:
                    notchfrqs = [refnotch]
                notchfrqscln = []
                for nfrq in notchfrqs:
                    if not isinstance(nfrq, float) and not isinstance(nfrq, int):
                        raise ValueError("Illegal entry for notch-frequency (", nfrq, ")")
                    if nfrq >= nyquist:
                        warnings.warn('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)
                    else:
                        notchfrqscln.append(nfrq)
                if len(notchfrqscln) == 0:
                    raise ValueError("Notch frequency list is (now) empty")
                use_refantinotch = True
                if verbose:
                    print(">>> notches at freq ", end=' ')
                    print(notchfrqscln)
            else:
                if verbose:
                    if reflp is not None:
                        print(">>>  low-pass with cutoff-freq %.1f" % reflp)
                    if refhp is not None:
                        print(">>> high-pass with cutoff-freq %.1f" % refhp)

            # Adapt the following drop-channels command to use 'all-but-refpick'
            droplist = [raw.info['ch_names'][k] for k in range(raw.info['nchan']) if not k in refpick]
            tct = time.process_time()
            twt = time.time()
            fltref = raw.copy().drop_channels(droplist)
            if use_refantinotch:
                rawref = raw.copy().drop_channels(droplist)
                fltref.notch_filter(notchfrqscln, fir_design='firwin',
                                    fir_window='hann', phase='zero',
                                    picks=np.array(list(range(nref))),
                                    method='fir')
                fltref._data = (rawref._data - fltref._data)
            else:
                fltref.filter(refhp, reflp, fir_design='firwin',
                              fir_window='hann', phase='zero',
                              picks=np.array(list(range(nref))),
                              method='fir')
            tc1 = time.process_time()
            tw1 = time.time()
            if verbose:
                print(">>> filtering ref-chans  took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)),
                                                                                           (tw1 - twt)))

        if verbose:
            print("########## Calculating sig-ref/ref-ref-channel covariances:")
        # Calculate sig-ref/ref-ref-channel covariance:
        # (there is no need to calc inter-signal-chan cov,
        #  but there seems to be no appropriate function available)
        # Here we copy the idea from compute_raw_data_covariance()
        # and truncate it as appropriate.
        tct = time.process_time()
        twt = time.time()
        # The following reject and infosig entries are only
        # used in _is_good-calls.
        # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
        # ignore ref-channels (not covered by dict) and checks individual
        # data segments - artifacts across a buffer boundary are not found.
        reject = dict(grad=4000e-13,  # T / m (gradiometers)
                      mag=4e-12,  # T (magnetometers)
                      eeg=40e-6,  # uV (EEG channels)
                      eog=250e-6)  # uV (EOG channels)

        infosig = copy.copy(raw.info)
        infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
        # the below fields are *NOT* (190103) updated automatically when 'chs' is updated
        infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
        infosig['nchan'] = len(sigpick)
        idx_by_typesig = channel_indices_by_type(infosig)

        # Read data in chunks:
        tstep = 0.2
        itstep = int(ceil(tstep * raw.info['sfreq']))
        sigmean = 0
        refmean = 0
        sscovdata = 0
        srcovdata = 0
        rrcovdata = 0
        n_samples = 0

        for first in range(itmin, itmax, itstep):
            last = first + itstep
            if last >= itmax:
                last = itmax
            raw_segmentsig, times = raw[sigpick, first:last]
            if use_reffilter:
                raw_segmentref, times = fltref[:, first:last]
            else:
                raw_segmentref, times = raw[refpick, first:last]

            if not exclude_artifacts or \
                    _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
                             ignore_chs=raw.info['bads']):
                sigmean += raw_segmentsig.sum(axis=1)
                refmean += raw_segmentref.sum(axis=1)
                sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
                rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
                n_samples += raw_segmentsig.shape[1]
            else:
                logger.info("Artefact detected in [%d, %d]" % (first, last))
        if n_samples <= 1:
            raise ValueError('Too few samples to calculate weights')
        sigmean /= n_samples
        refmean /= n_samples
        sscovdata -= n_samples * sigmean[:] * sigmean[:]
        sscovdata /= (n_samples - 1)
        srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
        srcovdata /= (n_samples - 1)
        rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
        rrcovdata /= (n_samples - 1)
        sscovinit = np.copy(sscovdata)
        if verbose:
            print(">>> Normalize srcov...")

        rrslope = copy.copy(rrcovdata)
        for iref in range(nref):
            dtmp = rrcovdata[iref, iref]
            if dtmp > TINY:
                srcovdata[:, iref] /= dtmp
                rrslope[:, iref] /= dtmp
            else:
                srcovdata[:, iref] = 0.
                rrslope[:, iref] = 0.

        if verbose:
            print(">>> Number of samples used : %d" % n_samples)
            tc1 = time.process_time()
            tw1 = time.time()
            print(">>> sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))

        if checkresults:
            if verbose:
                print("########## Calculated initial signal channel covariance:")
                # Calculate initial signal channel covariance:
                # (only used as quality measure)
                print(">>> initl rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
                for i in range(min(5, nsig)):
                    print(">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                for i in range(max(0, nsig - 5), nsig):
                    print(">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                print(">>>")

        U, s, V = np.linalg.svd(rrslope, full_matrices=True)
        if verbose:
            print(">>> singular values:")
            print(s)
            print(">>> Applying cutoff for smallest SVs:")

        dtmp = s.max() * SVD_RELCUTOFF
        s *= (abs(s) >= dtmp)
        sinv = [1. / s[k] if s[k] != 0. else 0. for k in range(nref)]
        if verbose:
            print(">>> singular values (after cutoff):")
            print(s)

        stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
        if verbose:
            print(">>> Testing svd-result: %s" % stat)
            if not stat:
                print("    (Maybe due to SV-cutoff?)")

        # Solve for inverse coefficients:
        # Set RRinv.tr=U diag(sinv) V
        RRinv = np.transpose(np.dot(U, np.dot(np.diag(sinv), V)))
        if checkresults:
            stat = np.allclose(np.identity(nref), np.dot(RRinv, rrslope))
            if stat:
                if verbose:
                    print(">>> Testing RRinv-result (should be unit-matrix): ok")
            else:
                print(">>> Testing RRinv-result (should be unit-matrix): failed")
                print(np.transpose(np.dot(RRinv, rrslope)))
                print(">>>")

        if verbose:
            print("########## Calc weight matrix...")

        # weights-matrix will be somewhat larger than necessary,
        # (to simplify indexing in compensation loop):
        weights = np.zeros((raw._data.shape[0], nref))
        for isig in range(nsig):
            for iref in range(nref):
                weights[sigpick[isig], iref] = np.dot(srcovdata[isig, :], RRinv[:, iref])

        if verbose:
            print("########## Compensating signal channels:")
            if complementary_signal:
                print(">>> Caveat: REPLACING signal by compensation signal")

        tct = time.process_time()
        twt = time.time()

        # Work on entire data stream:
        for isl in range(raw._data.shape[1]):
            sl = np.take(raw._data, [isl], axis=1)  # 'sl' avoids shadowing builtin slice
            if use_reffilter:
                refslice = np.take(fltref._data, [isl], axis=1)
                refarr = refslice[:].flatten() - refmean
                # refarr = fltref[:, isl] - refmean
            else:
                refarr = sl[refpick].flatten() - refmean
            subrefarr = np.dot(weights[:], refarr)

            if not complementary_signal:
                raw._data[:, isl] -= subrefarr
            else:
                raw._data[:, isl] = subrefarr

            if (isl % 10000 == 0 or isl + 1 == raw._data.shape[1]) and verbose:
                print("\rProcessed slice %6d" % isl, end=" ")
                sys.stdout.flush()

        if verbose:
            print("\nDone.")
            tc1 = time.process_time()
            tw1 = time.time()
            print(">>> compensation loop took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)), (tw1 - twt)))

        if checkresults:
            if verbose:
                print("########## Calculating final signal channel covariance:")
            # Calculate final signal channel covariance:
            # (only used as quality measure)
            tct = time.process_time()
            twt = time.time()
            sigmean = 0
            sscovdata = 0
            n_samples = 0
            for first in range(itmin, itmax, itstep):
                last = first + itstep
                if last >= itmax:
                    last = itmax
                raw_segmentsig, times = raw[sigpick, first:last]
                # Artifacts found here will probably differ from pre-noisered artifacts!
                if not exclude_artifacts or \
                        _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                                 flat=None, ignore_chs=raw.info['bads']):
                    sigmean += raw_segmentsig.sum(axis=1)
                    sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                    n_samples += raw_segmentsig.shape[1]
            if n_samples <= 1:
                raise ValueError('Too few samples to calculate final signal channel covariance')
            sigmean /= n_samples
            sscovdata -= n_samples * sigmean[:] * sigmean[:]
            sscovdata /= (n_samples - 1)
            if verbose:
                print(">>> no channel got worse: %s" % str(np.all(np.less_equal(sscovdata, sscovinit))))
                print(">>> final rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
                for i in range(min(5, nsig)):
                    print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                # for i in range(min(5,nsig),max(0,nsig-5)):
                #    print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                for i in range(max(0, nsig - 5), nsig):
                    print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                tc1 = time.process_time()
                tw1 = time.time()
                print(">>> signal covar-calc took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)),
                                                                                        (tw1 - twt)))
                print(">>>")

        if fnout is not None:
            fnoutloc = fnout
        elif return_raw:
            fnoutloc = None
        elif have_input_file:
            fnoutloc = fname[:fname.rfind('-raw.fif')] + ',nr-raw.fif'
        else:
            fnoutloc = None

        if fnoutloc is not None:
            if verbose:
                print(">>> Saving '%s'..." % fnoutloc)
            raw.save(fnoutloc, overwrite=True)

        tc1 = time.process_time()
        tw1 = time.time()
        if verbose:
            print(">>> Total run took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tc0)), (tw1 - tw0)))

        if return_raw:
            if verbose:
                print(">>> Returning raw object...")
            return raw
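
At its core the compensation above is a linear regression of the signal channels onto the (filtered) reference channels: the weights are the signal-reference covariance times a (pseudo-)inverse of the reference-reference covariance. A minimal numpy sketch of that idea on synthetic data (no chunking, reference filtering, or artifact rejection):

import numpy as np

rng = np.random.default_rng(0)
nsig, nref, nsamp = 10, 3, 5000
ref = rng.standard_normal((nref, nsamp))       # noise reference channels
mix = rng.standard_normal((nsig, nref))        # leakage of noise into signals
sig = rng.standard_normal((nsig, nsamp)) + mix @ ref

sigc = sig - sig.mean(axis=1, keepdims=True)   # demean signals
refc = ref - ref.mean(axis=1, keepdims=True)   # demean references
srcov = sigc @ refc.T / (nsamp - 1)            # signal-reference covariance
rrcov = refc @ refc.T / (nsamp - 1)            # reference-reference covariance
weights = srcov @ np.linalg.pinv(rrcov)        # cf. the SVD-based RRinv above
clean = sig - weights @ refc                   # compensated signal channels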
Example #11
def apply_ctps_select_ic(fname_ctps, threshold=0.1):

    ''' Select ICs based on CTPS analysis. '''

    fnlist = get_files_from_list(fname_ctps)

    # loop across all filenames
    pl.ioff()  # switch off (interactive) plot visualisation
    ifile = 0
    for fnctps in fnlist:
        name = os.path.splitext(fnctps)[0]
        basename = os.path.splitext(os.path.basename(fnctps))[0]
        print('>>> working on: ' + basename)
        # load CTPS data
        dctps = np.load(fnctps).item()
        freqs = dctps['freqs']
        nfreq = len(freqs)
        ncomp = dctps['ncomp']
        trig_name = dctps['trig_name']
        times = dctps['times']
        ic_sel = []
        # loop across all freq. bands
        fig = pl.figure(ifile + 1, figsize=(16, 9), dpi=100)
        pl.clf()
        fig.subplots_adjust(left=0.08, right=0.95, bottom=0.05,
                            top=0.93, wspace=0.2, hspace=0.2)
        fig.suptitle(basename, fontweight='bold')
        nrow = int(np.ceil(float(nfreq) / 2))  # subplot grid rows must be int
        for ifreq in range(nfreq):
            pk = dctps['pk'][ifreq]
            pt = dctps['pt'][ifreq]
            pkmax = pk.max(1)
            ixmax = np.where(pkmax == pkmax.max())[0]
            ix = (np.where(pkmax >= threshold))[0]
            if ix.size:  # any components above threshold?
                if (ifreq > 0):
                    ic_sel = np.append(ic_sel, ix + 1)
                else:
                    ic_sel = ix + 1

            # construct names for title, fnout_fig, fnout_ctps
            frange = ' @' + str(freqs[ifreq][0]) + '-' + str(freqs[ifreq][1])
            x = np.arange(ncomp) + 1
            # make bar plots for the CTPS threshold levels
            ax = fig.add_subplot(nrow, 2, ifreq + 1)
            pl.bar(x, pkmax, color='steelblue')
            pl.bar(x[ix], pkmax[ix], color='red')
            pl.title(trig_name + frange, fontsize='small')
            pl.xlim([1, ncomp + 1])
            pl.ylim([0, 0.5])
            pl.text(2, 0.45, 'ICs: ' + str(ix + 1))
        ic_sel = np.unique(ic_sel)
        nic = np.size(ic_sel)
        info = 'ICs (all): ' + str(ic_sel).strip('[]')
        fig.text(0.02, 0.01, info, transform=ax.transAxes)

        # save CTPS components found
        fntxt = name + '-ic_selection.txt'
        ic_sel = np.reshape(ic_sel, [1, nic])
        np.savetxt(fntxt, ic_sel, fmt='%i', delimiter=', ')
        ifile += 1

        # save figure
        fnfig = name + '-ic_selection.png'
        pl.savefig(fnfig, dpi=100)
    pl.ion()  # switch on (interactive) plot visualisation
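
The selection rule above keeps every component whose maximum pK value exceeds the threshold in any frequency band; component indices are stored 1-based. The thresholding step in isolation:

import numpy as np

pkmax = np.array([0.05, 0.22, 0.08, 0.31])  # per-component pK maxima
ix = np.where(pkmax >= 0.1)[0]              # 0-based indices -> [1, 3]
ic_sel = ix + 1                             # 1-based IC numbers -> [2, 4]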
Example #12
def apply_average(filenames, name_stim='STI 014', event_id=None, postfix=None,
                  tmin=-0.2, tmax=0.4, baseline=(None, 0), proj=False,
                  save_plot=True, show_plot=False):

    ''' Performs averaging to a list of raw files. '''

    # Trigger or Response ?
    if name_stim == 'STI 014':     # trigger
        trig_name = 'trigger'
    elif name_stim == 'STI 013':   # response
        trig_name = 'response'
    else:
        trig_name = 'trigger'

    fnlist = get_files_from_list(filenames)

    # loop across raw files
    fnavg = []    # collect output filenames
    for fname in fnlist:
        name = os.path.split(fname)[1]
        print('>>> average raw data')
        print(name)
        # load raw data
        raw = mne.io.Raw(fname, preload=True)
        picks = mne.pick_types(raw.info, meg=True, ref_meg=False,
                               exclude='bads')

        # stim events
        stim_events = mne.find_events(raw, stim_channel=name_stim,
                                      consecutive=True)
        nevents = len(stim_events)

        if nevents > 0:
            # for a specific event ID
            if event_id:
                ix = np.where(stim_events[:, 2] == event_id)[0]
                stim_events = stim_events[ix, :]
            else:
                event_id = stim_events[0, 2]

            epochs = mne.Epochs(raw, events=stim_events,
                                event_id=event_id, tmin=tmin, tmax=tmax,
                                picks=picks, preload=True, baseline=baseline,
                                proj=proj)
            avg = epochs.average()

            # save averaged data
            if (fname.rfind(ext_raw) > -1):
                nchar = 8
            else:
                nchar = 4
            if (postfix):
                fnout = fname[0:len(fname) - nchar] + postfix + '.fif'
            else:
                fnout = fname[0:len(fname) - nchar] + ',' + trig_name + ext_ave

            avg.save(fnout)
            print('saved: ' + fnout)
            fnavg.append(fnout)

            if (save_plot):
                plot_average(fnavg, show_plot=show_plot)

        else:
            event_id = None
            print('>>> Warning: Event not found in file: ' + fname)
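
Usage sketch (file name hypothetical; ext_raw and ext_ave are module-level globals of the surrounding package):

# average all trigger events of one filtered raw file and plot the result
apply_average(['subj01,fibp1-45-raw.fif'], name_stim='STI 014',
              tmin=-0.2, tmax=0.4, save_plot=True)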
Example #13
def apply_ctps_surrogates(fname_ctps, fnout, nrepeat=1000,
                          mode='shuffle', save=True, n_jobs=4):

    '''
    Perform CTPS surrogate tests to estimate the significance level
    for CTPS analysis (a proper pK threshold is estimated).

    It is most likely that the statistical reliability of this test
    is best improved by increasing the number of repetitions, while the
    number of different experiments/subjects has only minor effects.

    Parameters
    ----------
    fname_ctps:  CTPS filename (or list of filenames)

    fnout: Output (text) filename to store surrogate stats across all files

    Options:
    nrepeat: number of repetitions used to estimate the pk threshold
             default is 1000

    mode: two different modes are allowed.
        'shuffle' will randomly shuffle the phase values (default)
        'shift' will randomly shift the phase values

    Returns
    -------
    info string array containing the statistical values of the surrogate
    analysis

    '''
    import os, time
    from jumeg_utils import make_surrogates_ctps, get_stats_surrogates_ctps

    fnlist = get_files_from_list(fname_ctps)

    # loop across all filenames
    ifile = 1
    sep = '=========================================================================='
    info = [sep, '#', '# Statistical analysis on CTPS surrogates', '#', sep]
    for fnctps in fnlist:
        path = os.path.dirname(fnctps)
        basename = os.path.basename(fnctps)
        name = os.path.splitext(basename)[0]
        print('>>> calc. surrogates based on: ' + basename)
        # load CTPS data
        dctps = np.load(fnctps).item()
        phase_trials = dctps['pt']  # [nfreq, ntrials, nsources, nsamples]
        # create surrogate tests
        t_start = time.time()
        pks = make_surrogates_ctps(phase_trials, nrepeat=nrepeat,
                                   mode=mode, verbose=None, n_jobs=n_jobs)

        # perform stats on surrogates
        stats = get_stats_surrogates_ctps(pks, verbose=False)
        info.append(sep)
        info.append(path)
        info.append(basename)
        info.append('nfreq: '+ str(stats['nfreq']))
        info.append('nrepeat: '+ str(stats['nrepeat']))
        info.append('nsamples: '+ str(stats['nsamples']))
        info.append('nsources: '+ str(stats['nsources']))
        info.append('permutation mode: '+ mode)
        # info for each freq. band of the current data set
        info.append('# stats for each frequency band:')
        line_f    = 'freqs (Hz):'
        line_max  = 'pk max:    '
        line_mean = 'pk mean:   '
        line_min  = 'pk min:    '
        for i in range(stats['nfreq']):
            flow, fhigh = dctps['freqs'][i]
            line_f    += str('%5d-%d ' % (flow,fhigh))
            line_max  += str('%8.3f' % stats['pks_max'][i])
            line_mean += str('%8.3f' % stats['pks_mean'][i])
            line_min  += str('%8.3f' % stats['pks_min'][i])
        info.append(line_f)
        info.append(line_min)
        info.append(line_mean)
        info.append(line_max)
        # across freq. bands
        pks_min = stats['pks_min_global']
        pks_mean = stats['pks_mean_global']
        pks_std = stats['pks_std_global']
        pks_pct = stats['pks_pct99_global']
        pks_max = stats['pks_max_global']
        info.append('# stats across all frequency bands:')
        info.append('pk min:  ' + str('%8.3f' % pks_min))
        info.append('pk mean: ' + str('%8.3f' % pks_mean))
        info.append('pk std:  ' + str('%8.3f' % pks_std))
        info.append('pk pct99:' + str('%8.3f' % pks_pct))
        info.append('pk max:  ' + str('%8.3f' % pks_max))


        # combine global stats values of different ctps files into one global analysis
        if (ifile > 1):
            pks_all = np.concatenate((pks_all, pks.flatten()))
        else:
            pks_all = pks.flatten()
        ifile += 1

    if (ifile > 1):
        info.append(sep)
        info.append('#')
        info.append('# stats across all files:')
        info.append('#')
        info.append('pk min:  '+ str('%8.3f' % pks_all.min()))
        info.append('pk mean: '+ str('%8.3f' % pks_all.mean()))
        info.append('pk std:  '+ str('%8.3f' % pks_all.std()))
        info.append('pk pct99:'+ str('%8.3f' % np.percentile(pks_all,99)))
        info.append('pk max:  '+ str('%8.3f' % pks_all.max()))

    info.append('#')
    duration = (time.time() - t_start) / 60.0  # in minutes
    info.append('duration [min]: %0.2f' % duration)
    info.append('#')
    info.append(sep)

    # save surrogate stats
    if (save):
        np.savetxt(fnout, info, fmt='%s')

    return info
Example #14
def apply_ctps(fname_ica, freqs=[(1, 4), (4, 8), (8, 12), (12, 16), (16, 20)],
               tmin=-0.2, tmax=0.4, name_stim='STI 014', event_id=None,
               baseline=(None, 0), proj=False):

    ''' Applies CTPS to a list of ICA files. '''

    from jumeg.filter import jumeg_filter

    fiws = jumeg_filter(filter_method="bw")
    fiws.filter_type = 'bp'   # bp, lp, hp
    fiws.dcoffset = True
    fiws.filter_attenuation_factor = 1

    nfreq = len(freqs)
    print('>>> CTPS calculation on: ', freqs)

    # Trigger or Response ?
    if name_stim == 'STI 014':     # trigger
        trig_name = 'trigger'
    elif name_stim == 'STI 013':   # response
        trig_name = 'response'
    else:
        trig_name = 'auxiliary'

    fnlist = get_files_from_list(fname_ica)

    # loop across all filenames
    for fnica in fnlist:
        name = os.path.split(fnica)[1]
        #fname = fnica[0:len(fnica)-4]
        basename = fnica[:fnica.rfind(ext_ica)]
        fnraw = basename + ext_raw
        #basename = os.path.splitext(os.path.basename(fnica))[0]
        # load cleaned data
        raw = mne.io.Raw(fnraw, preload=True)
        picks = mne.pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')

        # read (second) ICA
        print ">>>> working on: " + basename
        ica = mne.preprocessing.read_ica(fnica)
        ica_picks = np.arange(ica.n_components_)
        ncomp = len(ica_picks)

        # stim events
        stim_events = mne.find_events(raw, stim_channel=name_stim, consecutive=True)
        nevents = len(stim_events)

        if (nevents > 0):
            # for a specific event ID
            if event_id:
                ix = np.where(stim_events[:, 2] == event_id)[0]
                stim_events = stim_events[ix, :]
            else:
                event_id = stim_events[0, 2]
            # create ctps dictionary
            dctps = {'fnica': fnica,
                     'basename': basename,
                     'stim_channel': name_stim,
                     'trig_name': trig_name,
                     'ncomp': ncomp,
                     'nevent': nevents,
                     'event_id': event_id,
                     'nfreq': nfreq,
                     'freqs': freqs,
                     }
            # loop across all frequency bands
            pkarr = []
            ptarr = []
            pkmax_arr = []
            for ifreq in range(nfreq):
                ica_raw = ica.get_sources(raw)
                flow, fhigh = freqs[ifreq][0], freqs[ifreq][1]
                bp = str(flow) + '_' + str(fhigh)
                # filter ICA data and create epochs
                #tw=0.1
                # ica_raw.filter(l_freq=flow, h_freq=fhigh, picks=ica_picks,
                #     method='fft',l_trans_bandwidth=tw, h_trans_bandwidth=tw)
                # ica_raw.filter(l_freq=flow, h_freq=fhigh, picks=ica_picks,
                #                                                 method='fft')

                # filter ws settings
                # later we will make this as a one line call
                data_length = raw._data[0, :].size
                fiws.sampling_frequency = raw.info['sfreq']
                fiws.fcut1 = flow
                fiws.fcut2 = fhigh
                #fiws.init_filter_kernel(data_length)
                #fiws.init_filter(data_length)
                for ichan in ica_picks:
                    fiws.apply_filter(ica_raw._data[ichan, :])

                ica_epochs = mne.Epochs(ica_raw, events=stim_events,
                                        event_id=event_id, tmin=tmin,
                                        tmax=tmax, verbose=False,
                                        picks=ica_picks, baseline=baseline,
                                        proj=proj)
                # compute CTPS
                _, pk, pt = ctps.ctps(ica_epochs.get_data())
                pkmax = pk.max(1)
                times = ica_epochs.times * 1e3
                pkarr.append(pk)
                ptarr.append(pt)
                pkmax_arr.append(pkmax)
            pkarr = np.array(pkarr)
            ptarr = np.array(ptarr)
            pkmax_arr = np.array(pkmax_arr)
            dctps['pk'] = np.float32(pkarr)
            dctps['pt'] = np.float32(ptarr)
            dctps['pkmax'] = np.float32(pkmax_arr)
            dctps['nsamp'] = len(times)
            dctps['times'] = np.float32(times)
            dctps['tmin'] = np.float32(ica_epochs.tmin)
            dctps['tmax'] = np.float32(ica_epochs.tmax)
            fnctps = basename + prefix_ctps + trig_name
            np.save(fnctps, dctps)
            # Note; loading example: dctps = np.load(fnctps).item()
        else:
            event_id = None
Example #15
def apply_ica_cleaning(fname_ica, n_pca_components=None,
                       name_ecg='ECG 001', flow_ecg=10, fhigh_ecg=20,
                       name_eog_hor='EOG 001', name_eog_ver='EOG 002',
                       flow_eog=1, fhigh_eog=10, threshold=0.3,
                       unfiltered=False, notch_filter=True, notch_freq=50,
                       notch_width=None):

    ''' Perform ICA-based artifact rejection on a list of (ICA) files. '''

    fnlist = get_files_from_list(fname_ica)

    # loop across all filenames
    for fnica in fnlist:
        name = os.path.split(fnica)[1]
        #basename = fnica[0:len(fnica)-4]
        basename = fnica[:fnica.rfind(ext_ica)]
        fnfilt = basename + ext_raw
        fnclean = basename + ext_clean
        fnica_ar = basename + ext_icap
        print ">>>> perform artifact rejection on :"
        print '   ' + name

        # load filtered data
        meg_raw = mne.io.Raw(fnfilt, preload=True)
        picks = mne.pick_types(meg_raw.info, meg=True, ref_meg=False, exclude='bads')
        # ICA decomposition
        ica = mne.preprocessing.read_ica(fnica)

        # get ECG and EOG related components
        ic_ecg = get_ics_cardiac(meg_raw, ica,
                                 flow=flow_ecg, fhigh=fhigh_ecg, thresh=threshold)
        ic_eog = get_ics_ocular(meg_raw, ica,
                                flow=flow_eog, fhigh=fhigh_eog, thresh=threshold)
        ica.exclude += list(ic_ecg) + list(ic_eog)
        # ica.plot_topomap(ic_artefacts)
        ica.save(fnica)  # save again to store excluded

        # clean and save MEG data
        if n_pca_components:
            npca = n_pca_components
        else:
            npca = picks.size

        # check if cleaning should be applied
        # to unfiltered data
        if unfiltered:
            # adjust filenames to unfiltered data
            basename = basename[:basename.rfind(',')]
            fnfilt = basename + ext_raw
            fnclean = basename + ext_clean
            fnica_ar = basename + ext_icap

            # load raw unfiltered data
            meg_raw = mne.io.Raw(fnfilt, preload=True)

            # apply notch filter
            if notch_filter:

                from jumeg.filter import jumeg_filter

                # generate and apply filter
                # check if array of frequencies is given
                if isinstance(notch_freq, (tuple, list)):
                    notch = np.array(notch_freq)
                elif isinstance(notch_freq, np.ndarray):
                    notch = notch_freq
                # or a single frequency (notches incl. harmonics are
                # generated below via calc_notches())
                else:
                    notch = np.array([])

                fi_mne_notch = jumeg_filter(filter_method="mne", filter_type='notch',
                                            remove_dcoffset=False,
                                            notch=notch, notch_width=notch_width)

                # if only a single frequency is given generate optimal
                # filter parameter to also remove the harmonics
                if not isinstance(notch_freq, (tuple, list, np.ndarray)):
                    fi_mne_notch.calc_notches(notch_freq)

                fi_mne_notch.apply_filter(meg_raw._data, picks=picks)

        # apply cleaning
        meg_clean = ica.apply(meg_raw, exclude=ica.exclude,
                              n_pca_components=npca, copy=True)
        meg_clean.save(fnclean, overwrite=True)

        # plot ECG, EOG averages before and after ICA
        print ">>>> create performance image..."
        plot_performance_artifact_rejection(meg_raw, ica, fnica_ar,
                                            show=False, verbose=False)
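
A minimal usage sketch, assuming the module-level extension constants (ext_ica, ext_raw, ext_clean, ext_icap) are defined as elsewhere in these examples; the file name is hypothetical:

# hypothetical ICA file produced by a preceding decomposition step
fname_ica = '109925_Chrono01,fibp1-45-ica.fif'

# reject ECG/EOG components; additionally clean the unfiltered raw
# data, removing 50 Hz power-line interference beforehand
apply_ica_cleaning(fname_ica, threshold=0.3, unfiltered=True,
                   notch_filter=True, notch_freq=50)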
Example #18
def noise_reducer(fname_raw, raw=None, signals=[], noiseref=[], detrending=None,
                  tmin=None, tmax=None, reflp=None, refhp=None, refnotch=None,
                  exclude_artifacts=True, checkresults=True, return_raw=False,
                  complementary_signal=False, fnout=None, verbose=False):

    """Apply noise reduction to signal channels using reference channels.

    Parameters
    ----------
    fname_raw : (list of) rawfile name(s)
    raw : mne Raw object
        Allows passing a raw object directly instead of reading from file.
    signals : list of str
        List of channels to compensate using noiseref.
        If empty, use the MEG signal channels.
    noiseref : list of str | str
        List of channels to use as noise reference.
        If empty, use the magnetic reference channels (default).
        signals and noiseref may contain regexps, which are resolved
        using mne.pick_channels_regexp(). All other channels are copied.
    tmin : lower latency bound for weight-calc [start of trace]
    tmax : upper latency bound for weight-calc [ end  of trace]
        Weights are calculated on (tmin, tmax), but applied to the entire data set.
    refhp : high-pass frequency for reference signal filter [None]
    reflp :  low-pass frequency for reference signal filter [None]
        reflp < refhp: band-stop filter
        reflp > refhp: band-pass filter
        reflp is not None, refhp is None: low-pass filter
        reflp is None, refhp is not None: high-pass filter
    refnotch : (base) notch frequency for reference signal filter [None]
        use raw(ref)-notched(ref) as reference signal
    exclude_artifacts : filter signal-channels through _is_good() [True]
        (parameters are at present hard-coded!)
    return_raw : bool
        If True, the raw object is returned and the raw file is not
        written to disk. Suggested when noise_reducer is applied
        multiple times. [False]
    complementary_signal : replace the signal by the traces that would
        be subtracted [False] (can be useful for debugging)
    detrending : boolean controlling subtraction of a linear trend from
        all magnetometer channels [None]
    checkresults : boolean to control internal checks and overall success [True]

    Output file
    -----------
    <wawa>,nr-raw.fif for input <wawa>-raw.fif

    Returns
    -------
    If return_raw is True, then mne.io.Raw instance is returned.

    Bugs
    ----
    - artifact checking is incomplete (and uses an arbitrary window of tstep=0.2s)
    - no accounting of channels used as signal/reference
    - a non-existing input file is handled ungracefully
    """

    if type(complementary_signal) != bool:
        raise ValueError("Argument complementary_signal must be of type bool")

    # handle error if Raw object passed with file list
    if raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with one Raw object')

    # handle error if return_raw is requested with file list
    if return_raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with return_raw. '
                         'Please pass one file at a time.')

    # handle error if Raw object is passed with detrending option
    #TODO include perform_detrending for Raw objects
    if raw and detrending:
        raise ValueError('Please perform detrending on the raw file directly. '
                         'Cannot perform detrending on the raw object.')

    fnraw = get_files_from_list(fname_raw)

    # loop across all filenames
    for fname in fnraw:

        if verbose:
            print "########## Read raw data:"

        tc0 = time.clock()
        tw0 = time.time()

        if raw is None:
            if detrending:
                raw = perform_detrending(fname, save=False)
            else:
                raw = mne.io.Raw(fname, preload=True)
        else:
            # perform sanity check to make sure Raw object and file are same
            if os.path.basename(fname) != os.path.basename(raw.info['filename']):
                warnings.warn('The file name within the Raw object and the provided '
                              'fname are not the same. Please check again.')

        tc1 = time.clock()
        tw1 = time.time()

        if verbose:
            print ">>> loading raw data took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0))

        # Time window selection
        # weights are calc'd based on [tmin,tmax], but applied to the entire data set.
        # tstep is used in artifact detection
        # tmin,tmax variables must not be changed here!
        if tmin is None:
            itmin = 0
        else:
            itmin = int(floor(tmin * raw.info['sfreq']))
        if tmax is None:
            itmax = raw.last_samp
        else:
            itmax = int(ceil(tmax * raw.info['sfreq']))

        if itmax - itmin < 2:
            raise ValueError("Time-window for noise compensation empty or too short")

        if verbose:
            print ">>> Set time-range to [%7.3f,%7.3f]" % \
                  (raw.times[itmin], raw.times[itmax])

        if signals is None or len(signals) == 0:
            sigpick = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False,
                                     eog=False, exclude='bads')
        else:
            sigpick = channel_indices_from_list(raw.info['ch_names'][:], signals,
                                                raw.info.get('bads'))
        nsig = len(sigpick)
        if nsig == 0:
            raise ValueError("No channel selected for noise compensation")

        if noiseref is None or len(noiseref) == 0:
            # References are not limited to 4D ref-chans, but can be anything,
            # incl. ECG or powerline monitor.
            if verbose:
                print ">>> Using all refchans."
            refpick = mne.pick_types(raw.info, ref_meg=True, meg=False, eeg=False,
                                     stim=False, eog=False, exclude='bads')
        else:
            refpick = channel_indices_from_list(raw.info['ch_names'][:], noiseref,
                                                raw.info.get('bads'))
        nref = len(refpick)
        if nref == 0:
            raise ValueError("No channel selected as noise reference")

        if verbose:
            print ">>> sigpick: %3d chans, refpick: %3d chans" % (nsig, nref)

        if reflp is None and refhp is None and refnotch is None:
            use_reffilter = False
            use_refantinotch = False
        else:
            use_reffilter = True
            if verbose:
                print "########## Filter reference channels:"

            use_refantinotch = False
            if refnotch is not None:
                if reflp is None and refhp is None:
                    use_refantinotch = True
                    freqlast = np.min([5.01 * refnotch, 0.5 * raw.info['sfreq']])
                    if verbose:
                        print ">>> notches at freq %.1f and harmonics below %.1f" % (refnotch, freqlast)
                else:
                    raise ValueError("Cannot specify notch- and high-/low-pass"
                                     "reference filter together")
            else:
                if verbose:
                    if reflp is not None:
                        print ">>>  low-pass with cutoff-freq %.1f" % reflp
                    if refhp is not None:
                        print ">>> high-pass with cutoff-freq %.1f" % refhp

            # Adapt the following drop-channels command to use 'all-but-refpick'
            droplist = [raw.info['ch_names'][k] for k in xrange(raw.info['nchan']) if not k in refpick]
            tct = time.clock()
            twt = time.time()
            fltref = raw.copy().drop_channels(droplist)
            if use_refantinotch:
                rawref = raw.copy().drop_channels(droplist)
                freqlast = np.min([5.01 * refnotch, 0.5 * raw.info['sfreq']])
                fltref.notch_filter(np.arange(refnotch, freqlast, refnotch),
                                    picks=np.array(xrange(nref)), method='iir')
                fltref._data = (rawref._data - fltref._data)
            else:
                fltref.filter(refhp, reflp, picks=np.array(xrange(nref)), method='iir')
            tc1 = time.clock()
            tw1 = time.time()
            if verbose:
                print ">>> filtering ref-chans  took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))

        if verbose:
            print "########## Calculating sig-ref/ref-ref-channel covariances:"
        # Calculate sig-ref/ref-ref-channel covariance:
        # (there is no need to calc inter-signal-chan cov,
        #  but there seems to be no appropriate function available)
        # Here we copy the idea from compute_raw_data_covariance()
        # and truncate it as appropriate.
        tct = time.clock()
        twt = time.time()
        # The following reject and infosig entries are only
        # used in _is_good-calls.
        # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
        # ignore ref-channels (not covered by dict) and checks individual
        # data segments - artifacts across a buffer boundary are not found.
        reject = dict(grad=4000e-13, # T / m (gradiometers)
                      mag=4e-12,     # T (magnetometers)
                      eeg=40e-6,     # V (EEG channels)
                      eog=250e-6)    # V (EOG channels)

        infosig = copy.copy(raw.info)
        infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
        infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
        infosig['nchan'] = len(sigpick)
        idx_by_typesig = channel_indices_by_type(infosig)

        # Read data in chunks:
        tstep = 0.2
        itstep = int(ceil(tstep * raw.info['sfreq']))
        sigmean = 0
        refmean = 0
        sscovdata = 0
        srcovdata = 0
        rrcovdata = 0
        n_samples = 0

        for first in range(itmin, itmax, itstep):
            last = first + itstep
            if last >= itmax:
                last = itmax
            raw_segmentsig, times = raw[sigpick, first:last]
            if use_reffilter:
                raw_segmentref, times = fltref[:, first:last]
            else:
                raw_segmentref, times = raw[refpick, first:last]

            if not exclude_artifacts or \
               _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
                        ignore_chs=raw.info['bads']):
                sigmean += raw_segmentsig.sum(axis=1)
                refmean += raw_segmentref.sum(axis=1)
                sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
                rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
                n_samples += raw_segmentsig.shape[1]
            else:
                logger.info("Artefact detected in [%d, %d]" % (first, last))
        if n_samples <= 1:
            raise ValueError('Too few samples to calculate weights')
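        # convert the accumulated chunk sums into means and (unbiased)
        # sample covariances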
        sigmean /= n_samples
        refmean /= n_samples
        sscovdata -= n_samples * sigmean[:] * sigmean[:]
        sscovdata /= (n_samples - 1)
        srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
        srcovdata /= (n_samples - 1)
        rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
        rrcovdata /= (n_samples - 1)
        sscovinit = np.copy(sscovdata)
        if verbose:
            print ">>> Normalize srcov..."

        rrslope = copy.copy(rrcovdata)
        for iref in xrange(nref):
            dtmp = rrcovdata[iref, iref]
            if dtmp > TINY:
                srcovdata[:, iref] /= dtmp
                rrslope[:, iref] /= dtmp
            else:
                srcovdata[:, iref] = 0.
                rrslope[:, iref] = 0.

        if verbose:
            print ">>> Number of samples used : %d" % n_samples
            tc1 = time.clock()
            tw1 = time.time()
            print ">>> sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))

        if checkresults:
            if verbose:
                print "########## Calculated initial signal channel covariance:"
                # Calculate initial signal channel covariance:
                # (only used as quality measure)
                print ">>> initl rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata))
                for i in xrange(5):
                    print ">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i]))
                print ">>>"

        U, s, V = np.linalg.svd(rrslope, full_matrices=True)
        if verbose:
            print ">>> singular values:"
            print s
            print ">>> Applying cutoff for smallest SVs:"

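        # zero out singular values below a relative cutoff; this regularizes
        # the pseudo-inverse of the (normalized) ref-ref covariance matrix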
        dtmp = s.max() * SVD_RELCUTOFF
        s *= (abs(s) >= dtmp)
        sinv = [1. / s[k] if s[k] != 0. else 0. for k in xrange(nref)]
        if verbose:
            print ">>> singular values (after cutoff):"
            print s

        stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
        if verbose:
            print ">>> Testing svd-result: %s" % stat
            if not stat:
                print "    (Maybe due to SV-cutoff?)"

        # Solve for inverse coefficients:
        # Set RRinv.tr=U diag(sinv) V
        RRinv = np.transpose(np.dot(U, np.dot(np.diag(sinv), V)))
        if checkresults:
            stat = np.allclose(np.identity(nref), np.dot(RRinv, rrslope))
            if stat:
                if verbose:
                    print ">>> Testing RRinv-result (should be unit-matrix): ok"
            else:
                print ">>> Testing RRinv-result (should be unit-matrix): failed"
                print np.transpose(np.dot(RRinv, rrslope))
                print ">>>"

        if verbose:
            print "########## Calc weight matrix..."

        # weights-matrix will be somewhat larger than necessary,
        # (to simplify indexing in compensation loop):
        weights = np.zeros((raw._data.shape[0], nref))
        for isig in xrange(nsig):
            for iref in xrange(nref):
                weights[sigpick[isig], iref] = np.dot(srcovdata[isig, :], RRinv[:, iref])

        if verbose:
            print "########## Compensating signal channels:"
            if complementary_signal:
                print ">>> Caveat: REPLACING signal by compensation signal"

        tct = time.clock()
        twt = time.time()

        # Work on entire data stream:
        for isl in xrange(raw._data.shape[1]):
            data_slice = np.take(raw._data, [isl], axis=1)
            if use_reffilter:
                refslice = np.take(fltref._data, [isl], axis=1)
                refarr = refslice[:].flatten() - refmean
                # refarr = fltref[:, isl] - refmean
            else:
                refarr = data_slice[refpick].flatten() - refmean
            subrefarr = np.dot(weights[:], refarr)

            if not complementary_signal:
                raw._data[:, isl] -= subrefarr
            else:
                raw._data[:, isl] = subrefarr

            if (isl % 10000 == 0) and verbose:
                print "\rProcessed slice %6d" % isl

        if verbose:
            print "\nDone."
            tc1 = time.clock()
            tw1 = time.time()
            print ">>> compensation loop took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))

        if checkresults:
            if verbose:
                print "########## Calculating final signal channel covariance:"
            # Calculate final signal channel covariance:
            # (only used as quality measure)
            tct = time.clock()
            twt = time.time()
            sigmean = 0
            sscovdata = 0
            n_samples = 0
            for first in range(itmin, itmax, itstep):
                last = first + itstep
                if last >= itmax:
                    last = itmax
                raw_segmentsig, times = raw[sigpick, first:last]
                # Artifacts found here will probably differ from pre-noisered artifacts!
                if not exclude_artifacts or \
                   _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                            flat=None, ignore_chs=raw.info['bads']):
                    sigmean += raw_segmentsig.sum(axis=1)
                    sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                    n_samples += raw_segmentsig.shape[1]
            sigmean /= n_samples
            sscovdata -= n_samples * sigmean[:] * sigmean[:]
            sscovdata /= (n_samples - 1)
            if verbose:
                print ">>> no channel got worse: ", np.all(np.less_equal(sscovdata, sscovinit))
                print ">>> final rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata))
                for i in xrange(5):
                    print ">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i]))
                tc1 = time.clock()
                tw1 = time.time()
                print ">>> signal covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))
                print ">>>"

        if fnout is not None:
            fnoutloc = fnout
        else:
            fnoutloc = fname[:fname.rfind('-raw.fif')] + ',nr-raw.fif'

        if return_raw:
            return raw
        else:
            if verbose:
                print ">>> Saving '%s'..." % fnoutloc
            raw.save(fnoutloc, overwrite=True)

        tc1 = time.clock()
        tw1 = time.time()
        if verbose:
            print ">>> Total run took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0))
Example #19
def apply_cov(fname_empty_room, filtered=True):
    '''
    Creates the noise covariance matrix from an empty room file.

    Parameters
    ----------
    fname_empty_room : str | list of str
        Name(s) of the empty room file(s) (must be fif-files).
        File names should end with -raw.fif in order to have proper
        output filenames.
    filtered : bool
        If True, the output file name is tagged as band-pass filtered
        empty room data ('fibp1-45'); the covariance computation itself
        is not affected.
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from mne import compute_raw_covariance as cp_covariance
    from mne import write_cov, pick_types
    from mne.io import Raw
    fner = get_files_from_list(fname_empty_room)
    nfiles = len(fner)

    # loop across all filenames
    for ifile in range(nfiles):
        fn_in = fner[ifile]

        fn_fig1 = fn_in[:fn_in.rfind(ext_empty_raw)] + ',Magnetometers.tiff'
        fn_fig2 = fn_in[:fn_in.rfind(ext_empty_raw)] + ',Eigenvalue_index.tiff'
        #fn_out = fn_in[:fn_in.rfind(ext_empty_raw)] + ext_empty_cov
        path_in, name = os.path.split(fn_in)
        subject = name.split('_')[0]
        # read in data
        raw_empty = Raw(fn_in)

        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty,
                                      tmin=None,
                                      tmax=None,
                                      tstep=0.2,
                                      picks=picks)
        #noise_cov_mat = cp_covariance(raw_empty, picks=picks)
        fig1, fig2 = mne.viz.plot_cov(noise_cov_mat, raw_empty.info)
        # write noise-covariance matrix to disk
        if filtered:
            fn_out = path_in + '/%s_empty,fibp1-45' % subject + ext_empty_cov
        else:
            fn_out = path_in + '/%s_empty' % subject + ext_empty_cov
        write_cov(fn_out, noise_cov_mat)
        fig1.savefig(fn_fig1)
        fig2.savefig(fn_fig2)
        pl.close('all')
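
A short usage sketch (the empty-room file name is hypothetical and assumes the ext_empty_raw/ext_empty_cov naming conventions defined elsewhere in these examples):

# hypothetical band-pass filtered empty room recording
fname_empty_room = '109925_Chrono01,empty,fibp1-45-raw.fif'
apply_cov(fname_empty_room, filtered=True)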
Example #20
def plot_denoising(fname_raw, fmin=0, fmax=300, tmin=0.0, tmax=60.0,
                   proj=False, n_fft=4096, color='blue',
                   stim_name=None, event_id=1,
                   tmin_stim=-0.2, tmax_stim=0.5,
                   area_mode='range', area_alpha=0.33, n_jobs=1,
                   title1='before denoising', title2='after denoising',
                   info=None, show=True, fnout=None):
    """Plot the power spectral density across channels to show denoising.

    Parameters
    ----------
    fname_raw : list or str
        List of raw files (e.g. before and after denoising) to compare.
    tmin : float
        Start time for calculations.
    tmax : float
        End time for calculations.
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    proj : bool
        Apply projection.
    n_fft : int
        Number of points to use in Welch FFT calculations.
    color : str | tuple
        A matplotlib-compatible color to use.
    area_mode : str | None
        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
        will be plotted. If 'range', the min and max (across channels) will be
        plotted. Bad channels will be excluded from these calculations.
        If None, no area will be plotted.
    area_alpha : float
        Alpha for the area.
    info : str | None
        Additional information (e.g. the rejected ICs) printed into the
        second epochs plot; only shown when stim_name is set.
    show : bool
        Show figure.
    fnout : str
        Name of the saved output figure. If none, no figure will be saved.
    title1, title2 : str
        Title for two psd plots.
    n_jobs : int
        Number of jobs to use for parallel computation.
    stim_name : str
        Name of the stim channel. If stim_name is set, the plot of epochs average
        is also shown alongside the PSD plots.
    event_id : int
        ID of the stim event. (only when stim_name is set)

    Example Usage
    -------------
    plot_denoising(['orig-raw.fif', 'orig,nr-raw.fif'], fnout='example')
    """

    from matplotlib import gridspec as grd
    import matplotlib.pyplot as plt
    from mne.time_frequency import psd_welch

    fnraw = get_files_from_list(fname_raw)

    # ---------------------------------
    # estimate power spectrum
    # ---------------------------------
    psds_all = []
    freqs_all = []

    # loop across all filenames
    for fname in fnraw:

        # read in data
        raw = mne.io.Raw(fname, preload=True)
        picks = mne.pick_types(raw.info, meg='mag', eeg=False,
                               stim=False, eog=False, exclude='bads')

        if area_mode not in [None, 'std', 'range']:
            raise ValueError('"area_mode" must be "std", "range", or None')

        psds, freqs = psd_welch(raw, picks=picks, fmin=fmin, fmax=fmax,
                                tmin=tmin, tmax=tmax, n_fft=n_fft,
                                n_jobs=n_jobs, proj=proj)
        psds_all.append(psds)
        freqs_all.append(freqs)

    if stim_name:
        n_xplots = 2

        # get some infos
        events = mne.find_events(raw, stim_channel=stim_name, consecutive=True)

    else:
        n_xplots = 1

    fig = plt.figure('denoising', figsize=(16, 6 * n_xplots))
    gs = grd.GridSpec(n_xplots, len(psds_all))

    # loop across all filenames
    for idx in range(len(psds_all)):

        # ---------------------------------
        # plot power spectrum
        # ---------------------------------
        p1 = plt.subplot(gs[0, idx])

        # Convert PSDs to dB
        psds = 10 * np.log10(psds_all[idx])
        psd_mean = np.mean(psds, axis=0)
        if area_mode == 'std':
            psd_std = np.std(psds, axis=0)
            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
        elif area_mode == 'range':
            hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
        else:  # area_mode is None
            hyp_limits = None

        p1.plot(freqs_all[idx], psd_mean, color=color)
        if hyp_limits is not None:
            p1.fill_between(freqs_all[idx], hyp_limits[0], y2=hyp_limits[1],
                            color=color, alpha=area_alpha)

        if idx == 0:
            p1.set_title(title1)
            ylim = [np.min(psd_mean) - 10, np.max(psd_mean) + 10]
        else:
            p1.set_title(title2)

        p1.set_xlabel('Freq (Hz)')
        p1.set_ylabel('Power Spectral Density (dB/Hz)')
        p1.set_xlim(freqs_all[idx][0], freqs_all[idx][-1])
        p1.set_ylim(ylim[0], ylim[1])

        # ---------------------------------
        # plot signal around stimulus
        # onset
        # ---------------------------------
        if stim_name:
            raw = mne.io.Raw(fnraw[idx], preload=True)
            epochs = mne.Epochs(raw, events, event_id, proj=False,
                                tmin=tmin_stim, tmax=tmax_stim, picks=picks,
                                preload=True, baseline=(None, None))
            evoked = epochs.average()
            if idx == 0:
                ymin = np.min(evoked.data)
                ymax = np.max(evoked.data)

            times = evoked.times * 1e3
            p2 = plt.subplot(gs[1, idx])
            p2.plot(times, evoked.data.T, 'blue', linewidth=0.5)
            p2.set_xlim(times[0], times[-1])
            p2.set_ylim(1.1 * ymin, 1.1 * ymax)

            if (idx == 1) and info:
                plt.text(times[0], 0.9 * ymax, '  ICs: ' + str(info))

    # save image
    if fnout:
        fig.savefig(fnout + '.png', format='png')

    # show image if requested
    if show:
        plt.show()

    plt.close('denoising')
    plt.ion()