def compute_noise_cov(cov_fname, raw):
    # Compute a noise covariance matrix from a continuous raw recording and
    # save it to disk, unless ``cov_fname`` already exists on disk.
    #
    # Parameters:
    #   cov_fname : str
    #       path of the covariance file whose existence is checked
    #   raw : mne Raw-like object
    #       provides the data and the measurement info used for channel picks
    # Returns:
    #   cov_fname : str -- the input path, echoed back unchanged
    import os.path as op

    from mne import compute_raw_covariance, pick_types, write_cov
    from nipype.utils.filemanip import split_filename as split_f
    from neuropype_ephy.preproc import create_reject_dict

    print '***** COMPUTE RAW COV *****' + cov_fname

    if not op.isfile(cov_fname):

        # output name is derived from the raw file's own basename
        data_path, basename, ext = split_f(raw.info['filename'])
        fname = op.join(data_path, '%s-cov.fif' % basename)

        # amplitude rejection thresholds built from the channel info
        reject = create_reject_dict(raw.info)
#        reject = dict(mag=4e-12, grad=4000e-13, eog=250e-6)

        # MEG channels only, excluding channels marked bad
        picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')

        noise_cov = compute_raw_covariance(raw, picks=picks, reject=reject)

        # NOTE(review): the matrix is written to ``fname`` while ``cov_fname``
        # is what gets returned -- if the two paths differ, the returned path
        # may not exist. Verify against callers.
        write_cov(fname, noise_cov)

    else:
        print '*** NOISE cov file %s exists!!!' % cov_fname

    return cov_fname
Example #2
0
def test_io_cov():
    """Test IO for noise covariance matrices
    """
    def _saved_copy(c, name):
        # write under tempdir/name, then read the file straight back
        path = op.join(tempdir, name)
        c.save(path)
        return read_cov(path)

    cov = read_cov(cov_fname)
    cov2 = _saved_copy(cov, 'test-cov.fif')
    assert_array_almost_equal(cov.data, cov2.data)

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    n_good = len(cov['data']) - len(cov['bads'])
    assert_true(cov_sel['dim'] == n_good)
    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
    cov_sel.save(op.join(tempdir, 'test-cov.fif'))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_true(len(w) == 2)
Example #3
0
def test_io_cov():
    """Test IO for noise covariance matrices
    """
    tempdir = _TempDir()

    def _saved_copy(c, name):
        # write under tempdir/name, then read the file straight back
        path = op.join(tempdir, name)
        c.save(path)
        return read_cov(path)

    cov = read_cov(cov_fname)
    cov2 = _saved_copy(cov, 'test-cov.fif')
    assert_array_almost_equal(cov.data, cov2.data)

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    n_good = len(cov['data']) - len(cov['bads'])
    assert_true(cov_sel['dim'] == n_good)
    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
    cov_sel.save(op.join(tempdir, 'test-cov.fif'))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_true(len(w) == 2)
Example #4
0
def test_io_cov():
    """Test IO for noise covariance matrices
    """
    def _saved_copy(c, name):
        # write under tempdir/name, then read the file straight back
        path = op.join(tempdir, name)
        c.save(path)
        return read_cov(path)

    cov = read_cov(cov_fname)
    cov2 = _saved_copy(cov, "test-cov.fif")
    assert_array_almost_equal(cov.data, cov2.data)

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, "test-cov.fif.gz")
    assert_array_almost_equal(cov.data, cov2.data)

    cov["bads"] = ["EEG 039"]
    cov_sel = pick_channels_cov(cov, exclude=cov["bads"])
    n_good = len(cov["data"]) - len(cov["bads"])
    assert_true(cov_sel["dim"] == n_good)
    assert_true(cov_sel["data"].shape == (cov_sel["dim"], cov_sel["dim"]))
    cov_sel.save(op.join(tempdir, "test-cov.fif"))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, "test-cov.fif.gz")
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        cov_badname = op.join(tempdir, "test-bad-name.fif.gz")
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_true(len(w) == 2)
Example #5
0
def apply_create_noise_covariance(fname_empty_room, require_filter=True, verbose=None):

    '''
    Creates the noise covariance matrix from an empty room file.

    Parameters
    ----------
    fname_empty_room : String containing the filename
        of the empty room file (must be a fif-file)
    require_filter: bool
        If true, the empy room file is filtered before calculating
        the covariance matrix. (Beware, filter settings are fixed.)
    verbose : bool, str, int, or None
        If not None, override default verbose level
        (see mne.verbose).
        default: verbose=None
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from mne import compute_raw_data_covariance as cp_covariance
    from mne import write_cov
    from mne.io import Raw
    from mne import pick_types
    import os

    # expand the input into a list of file names (helper defined elsewhere)
    fner = get_files_from_list(fname_empty_room)

    nfiles = len(fner)

    # loop across all filenames
    for ifile in range(nfiles):
        fn_in = fner[ifile]
        print ">>> create noise covariance using file: "
        path_in , name = os.path.split(fn_in)
        print name

        if require_filter == True:
            print "Filtering with preset settings..."
            # filter empty room raw data
            apply_filter(fn_in, flow=1, fhigh=45, order=4, njobs=4)
            # reconstruct empty room file name accordingly
            # NOTE(review): assumes the name contains a '-'; the part before
            # the first '-' is reused to build the filtered file name
            fn_in = fn_in.split('-')[0] + ',bp1-45Hz-empty.fif'

        # file name for saving noise_cov
        fn_out = fn_in.split('-')[0] + ',empty-cov.fif'

        # read in data
        raw_empty = Raw(fn_in, verbose=verbose)

        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, ref_meg=False, eeg=False, stim=False,
                           eog=False, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty, picks=picks, verbose=verbose)

        # write noise-covariance matrix to disk
        write_cov(fn_out, noise_cov_mat)
Example #6
0
    def _run_interface(self, runtime):
        # Nipype interface hook: compute (or reuse) a noise covariance matrix
        # and record its path on self.cov_fname_out for downstream nodes.

        raw_filename = self.inputs.raw_filename
        cov_fname_in = self.inputs.cov_fname_in
        is_epoched = self.inputs.is_epoched
        is_evoked = self.inputs.is_evoked
        events_id = self.inputs.events_id
        t_min = self.inputs.t_min
        t_max = self.inputs.t_max

        data_path, basename, ext = split_f(raw_filename)

        # default output path: '<basename>-cov.fif' next to the raw file
        self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)

        # Check if a noise cov matrix was already computed
        if not op.isfile(cov_fname_in):
            if is_epoched and is_evoked:
                raw = read_raw_fif(raw_filename)
                events = find_events(raw)

                if not op.isfile(self.cov_fname_out):
                    print(('\n*** COMPUTE COV FROM EPOCHS ***\n' +
                           self.cov_fname_out))

                    # rejection thresholds derived from the channel info
                    reject = _create_reject_dict(raw.info)
                    # MEG channels only, excluding channels marked bad
                    picks = pick_types(raw.info,
                                       meg=True,
                                       ref_meg=False,
                                       exclude='bads')

                    epochs = Epochs(raw,
                                    events,
                                    events_id,
                                    t_min,
                                    t_max,
                                    picks=picks,
                                    baseline=(None, 0),
                                    reject=reject)

                    # TODO method='auto'? too long!!!
                    # covariance from the pre-stimulus interval (tmax=0)
                    noise_cov = compute_covariance(epochs,
                                                   tmax=0,
                                                   method='diagonal_fixed')
                    write_cov(self.cov_fname_out, noise_cov)
                else:
                    print(('\n *** NOISE cov file %s exists!!! \n' %
                           self.cov_fname_out))
            else:
                # Compute noise cov matrix from empty room data
                self.cov_fname_out = compute_noise_cov(
                    op.join(data_path, cov_fname_in), raw_filename)

        else:
            # a precomputed covariance file was supplied: just forward it
            print(
                ('\n *** NOISE cov file {} exists!!! \n'.format(cov_fname_in)))
            self.cov_fname_out = cov_fname_in

        return runtime
Example #7
0
def meg_calc_noise_cov(subj):
    '''
    Calculate noise covariance using rest epochs.

    Parameters
    ----------
    subj : str
        Path of the subject's epochs file, as accepted by mne.read_epochs.

    Returns
    -------
    (epo, noise_cov) : tuple
        The loaded epochs and the computed covariance.

    Notes
    -----
    Relies on module-level globals ``n_cores``, ``sourceP_dir`` and
    ``subj_id``, and plots the covariance as a side effect.
    '''
    # (removed a useless ``subj = subj`` self-assignment)
    epo = mne.read_epochs(subj)
    # empirical covariance over the 'rest' epochs, starting at tmin=0.0
    noise_cov = mne.compute_covariance(epo['rest'], tmin=0.0,
                                       method='empirical', n_jobs=n_cores)
    noise_cov.plot(epo.info)
    mne.write_cov(os.path.join(sourceP_dir, subj_id + '_noise-cov.fif'),
                  noise_cov)
    return epo, noise_cov
def compute_epochs_cov_evokeds(subject):
    """Epoch, compute noise covariance and average.

    params:
    subject : str
        the subject id to be loaded

    Side effects: saves the epochs, a drop-log figure, the noise covariance
    and the evoked responses to disk. Relies on module-level globals
    (save_folder, epochs_folder, include, tmin, tmax, reject).
    """
    raw = Raw(save_folder + "%s_filtered_ica_mc_raw_tsss.fif" % subject,
              preload=True)
    # Select events to extract epochs from.
    event_id = {'ent_left': 1,
                'ent_right': 2,
                'ctl_left': 4,
                'ctl_right': 8}

    #   Setup for reading the raw data
    events = mne.find_events(raw, min_duration=0.01)

    picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=False,
                           include=include, exclude='bads')
    # Read epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), reject=reject,
                        preload=True)

    epochs.save(epochs_folder + "%s_filtered_ica_mc_tsss-epo.fif" % subject)

    # Look at channels that caused dropped events, showing that the subject's
    # blinks were likely to blame for most epochs being dropped
    epochs.drop_bad_epochs()
    fig = epochs.plot_drop_log(subject=subject, show=False)
    fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)

    # Make noise cov (pre-stimulus interval only: tmax=0)
    cov = compute_covariance(epochs, tmin=None, tmax=0, method="auto")
    mne.write_cov(epochs_folder + "%s-cov.fif" % subject, cov)

    # average epochs and get Evoked datasets
    # BUG FIX: the averages were previously computed twice (once from a
    # hard-coded condition list, once from epochs.event_id) with the first
    # result discarded; only the second, equivalent computation is kept.
    evokeds = [epochs[cond].average() for cond in epochs.event_id.keys()]

    # save evoked data to disk
    mne.write_evokeds(epochs_folder +
                      '%s_filtered_ica_mc_raw_tsss-ave.fif' % subject, evokeds)

    plt.close("all")
def apply_create_noise_covariance(fname_empty_room, verbose=None):

    '''
    Creates the noise covariance matrix from an empty room file.

    The empty room recording is first cleaned with jumeg's noise_reducer
    before the covariance is estimated.

    Parameters
    ----------
    fname_empty_room : String containing the filename
        of the de-noise, empty room file (must be a fif-file)
    verbose : bool, str, int, or None
        If not None, override default verbose level
        (see mne.verbose).
        default: verbose=None
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from mne import compute_raw_data_covariance as cp_covariance
    from mne import write_cov, pick_types
    from mne.io import Raw
    from jumeg.jumeg_noise_reducer import noise_reducer
    # expand the input into a list of file names (helper defined elsewhere)
    fner = get_files_from_list(fname_empty_room)
    nfiles = len(fner)
    ext_empty_raw = '-raw.fif'
    ext_empty_cov = '-cov.fif'
    # loop across all filenames
    for ifile in range(nfiles):
        fn_in = fner[ifile]
        print ">>> create noise covariance using file: "
        path_in, name = os.path.split(fn_in)
        print name
        # three noise-reduction passes writing to the same ',nr' file:
        # refnotch=50 / refnotch=60 (presumably power-line components --
        # confirm) and a 5 Hz low-pass on the reference channels
        fn_empty_nr = fn_in[:fn_in.rfind('-raw.fif')] + ',nr-raw.fif'
        noise_reducer(fn_in, refnotch=50, detrending=False, fnout=fn_empty_nr)
        noise_reducer(fn_empty_nr, refnotch=60, detrending=False, fnout=fn_empty_nr)
        noise_reducer(fn_empty_nr, reflp=5, fnout=fn_empty_nr)
        # file name for saving noise_cov
        fn_out = fn_empty_nr[:fn_empty_nr.rfind(ext_empty_raw)] + ext_empty_cov
        # read in data
        raw_empty = Raw(fn_empty_nr, preload=True, verbose=verbose)
        raw_empty.interpolate_bads()
        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, ref_meg=False, eeg=False,
                           stim=False, eog=False, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty, picks=picks, verbose=verbose)

        # write noise-covariance matrix to disk
        write_cov(fn_out, noise_cov_mat)
Example #10
0
def compute_noise_cov(fname_template, raw_filename):
    """
    Compute noise covariance data from a continuous segment of raw data.
    Employ empty room data (collected without the subject) to calculate
    the full noise covariance matrix.
    This is recommended for analyzing ongoing spontaneous activity.

    Inputs
        cov_fname : str
            noise covariance file name template
        raw_filename : str
            raw filename

    Output
        cov_fname : str
            noise covariance file name in which is saved the noise covariance
            matrix
    """
    # Fast path: the covariance file already exists.
    cov_fname = _get_cov_fname(fname_template)
    if op.isfile(cov_fname):
        print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))
        return cov_fname

    # Otherwise look for empty-room data (may also update the target name).
    er_raw, cov_fname = _get_er_data(fname_template)

    if op.isfile(cov_fname):
        print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))
    elif er_raw:
        # Estimate the covariance from the empty-room recording.
        reject = _create_reject_dict(er_raw.info)
        meg_picks = pick_types(er_raw.info,
                               meg=True,
                               ref_meg=False,
                               exclude='bads')
        noise_cov = compute_raw_covariance(er_raw,
                                           picks=meg_picks,
                                           reject=reject)
        write_cov(cov_fname, noise_cov)
    else:
        # No empty-room data available: fall back to an identity covariance.
        cov_fname = compute_cov_identity(raw_filename)

    return cov_fname
Example #11
0
def compute_noise_cov(cov_fname, raw):
    """Compute noise covariance data from a continuous segment of raw data.

    Employ empty room data (collected without the subject) to calculate
    the full noise covariance matrix. This is recommended for analyzing ongoing
    spontaneous activity.

    Parameters
    ----------
    cov_fname : str
        noise covariance file name
    raw : Raw
        the raw data

    Returns
    -------
    cov_fname : str
        noise covariance file name in which is saved the noise covariance
        matrix
    """
    import os.path as op

    from mne import compute_raw_covariance, pick_types, write_cov
    from ephypype.preproc import create_reject_dict

    print(('***** COMPUTE RAW COV *****' + cov_fname))

    if not op.isfile(cov_fname):
        # amplitude rejection thresholds built from the channel info
        reject = create_reject_dict(raw.info)

        # MEG channels only, excluding channels marked bad
        picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')

        noise_cov = compute_raw_covariance(raw, picks=picks, reject=reject)

        # BUG FIX: the matrix used to be written to a name derived from
        # raw.info['filename'] while cov_fname was returned, so the returned
        # path could point to a non-existent file. Save to cov_fname so the
        # return value matches the file on disk.
        write_cov(cov_fname, noise_cov)

    else:
        print(('*** NOISE cov file %s exists!!!' % cov_fname))

    return cov_fname
    def _run_interface(self, runtime):
        # Nipype interface hook: compute a noise covariance from evoked
        # epochs unless cov_fname_in already exists; records the result
        # path on self.cov_fname_out.

        raw_filename = self.inputs.raw_filename
        cov_fname_in = self.inputs.cov_fname_in
        is_epoched = self.inputs.is_epoched
        is_evoked = self.inputs.is_evoked
        events_id = self.inputs.events_id
        t_min = self.inputs.t_min
        t_max = self.inputs.t_max

        if cov_fname_in == '' or not op.exists(cov_fname_in):

            if is_epoched and is_evoked:
                raw = Raw(raw_filename)
                events = find_events(raw)

                # output path: '<basename>-cov.fif' next to the raw file
                data_path, basename, ext = split_f(raw.info['filename'])
                self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)

                if not op.exists(self.cov_fname_out):
                    print '\n*** COMPUTE COV FROM EPOCHS ***\n' + self.cov_fname_out

                    # rejection thresholds derived from the channel info
                    reject = create_reject_dict(raw.info)

                    # MEG channels only, excluding channels marked bad
                    picks = pick_types(raw.info, meg=True, ref_meg=False,
                                       exclude='bads')

                    epochs = Epochs(raw, events, events_id, t_min, t_max,
                                    picks=picks, baseline=(None, 0),
                                    reject=reject)

                    # TODO method='auto'? too long!!!
                    noise_cov = compute_covariance(epochs, tmax=0,
                                                   method='diagonal_fixed')
                    write_cov(self.cov_fname_out, noise_cov)
                else:
                    print '\n *** NOISE cov file %s exists!!! \n' % self.cov_fname_out
            else:
                # NOTE(review): the bare string below is a no-op (it is not
                # printed), and self.cov_fname_out is never set on this path,
                # which will break downstream consumers -- confirm intent.
                '\n *** NO EPOCH DATA \n'

        else:
            # a precomputed covariance file was supplied: just forward it
            print '\n *** NOISE cov file %s exists!!! \n' % cov_fname_in
            self.cov_fname_out = cov_fname_in

        return runtime
Example #13
0
def compute_cov_identity(raw_filename):
    "Compute Identity Noise Covariance matrix."
    raw = read_raw_fif(raw_filename)

    data_path, _, _ = split_f(raw_filename)
    cov_fname = op.join(data_path, 'identity_noise-cov.fif')

    # Reuse the file when it was already written.
    if op.isfile(cov_fname):
        return cov_fname

    meg_picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')

    info = raw.info
    ch_names = [info['ch_names'][idx] for idx in meg_picks]
    bads = [name for name in info['bads'] if name in ch_names]
    # identity matrix over the picked channels, zero degrees of freedom
    noise_cov = mne.Covariance(np.identity(len(meg_picks)), ch_names, bads,
                               info['projs'], nfree=0)

    write_cov(cov_fname, noise_cov)
    return cov_fname
    def compute_covariance_matrix(self,tmax=0,method='auto'):
        """Compute (or load a cached) noise covariance from the clean epochs.

        Parameters
        ----------
        tmax : float
            End time for covariance estimation, forwarded to
            mne.cov.compute_covariance.
        method : str
            Estimator name forwarded to mne.cov.compute_covariance.

        Returns
        -------
        The covariance, also stored on self.cov_reg.
        """

        fname = op.join(self.processed_files, '%s_cov.fif' %self.subject)

        if not op.exists(fname):

            # BUG FIX: forward the tmax argument instead of hard-coding 0,
            # which silently ignored the caller's value.
            cov_reg = mne.cov.compute_covariance(self.epochs_clean, tmax=tmax,
                                                 method=method)

            mne.write_cov(fname, cov_reg)

            self.add_preprocessing_notes("Noise covariance matrix computed, regularized, and saved to %s" %fname)

        else:
            # cached on disk from a previous run
            cov_reg = mne.read_cov(fname)

        self.cov_reg = cov_reg

        return self.cov_reg
Example #15
0
    def compute_covariance_matrix(self,tmax=0,method='auto'):
        """Compute (or load a cached) noise covariance from the clean epochs.

        Parameters
        ----------
        tmax : float
            End time for covariance estimation, forwarded to
            mne.cov.compute_covariance.
        method : str
            Estimator name forwarded to mne.cov.compute_covariance.

        Returns
        -------
        The covariance, also stored on self.cov_reg.
        """

        fname = op.join(self.processed_files, '%s_cov.fif' %self.subject)

        if not op.exists(fname):

            # BUG FIX: forward the tmax argument instead of hard-coding 0,
            # which silently ignored the caller's value.
            cov_reg = mne.cov.compute_covariance(self.epochs_clean, tmax=tmax,
                                                 method=method)

            mne.write_cov(fname, cov_reg)

            self.add_preprocessing_notes("Noise covariance matrix computed, regularized, and saved to %s" %fname)

        else:
            # cached on disk from a previous run
            cov_reg = mne.read_cov(fname)

        self.cov_reg = cov_reg

        return self.cov_reg
Example #16
0
    def _run_interface(self, runtime):
        # Nipype interface hook: compute a noise covariance from an epochs
        # file (epoched + evoked case) or from empty-room data, storing the
        # result path on self.cov_fname_out.

        raw_filename = self.inputs.raw_filename
        cov_fname_in = self.inputs.cov_fname_in
        is_epoched = self.inputs.is_epoched
        is_evoked = self.inputs.is_evoked

        data_path, basename, ext = split_f(raw_filename)

        # output goes to the node's working directory, not next to the input
        # self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)
        self.cov_fname_out = op.abspath('%s-cov.fif' % basename)

        # Check if a noise cov matrix was already computed
        if not op.isfile(cov_fname_in):
            if is_epoched and is_evoked:
                # here raw_filename is expected to be an epochs file
                epochs = read_epochs(raw_filename, preload=True)

                if not op.isfile(self.cov_fname_out):
                    print(('\n*** COMPUTE COV FROM EPOCHS ***\n' +
                           self.cov_fname_out))
                    # make sure cv is deterministic
                    cv = KFold(3, random_state=42)
                    # 'shrunk' estimator on the pre-stimulus interval (tmax=0)
                    noise_cov = compute_covariance(epochs,
                                                   tmax=0,
                                                   method='shrunk',
                                                   cv=cv)
                    write_cov(self.cov_fname_out, noise_cov)
                else:
                    print(('\n *** NOISE cov file %s exists!!! \n' %
                           self.cov_fname_out))
            else:
                # Compute noise cov matrix from empty room data
                self.cov_fname_out = compute_noise_cov(
                    op.join(data_path, cov_fname_in), raw_filename)

        else:
            # a precomputed covariance file was supplied: just forward it
            print(
                ('\n *** NOISE cov file {} exists!!! \n'.format(cov_fname_in)))
            self.cov_fname_out = cov_fname_in

        return runtime
def read_noise_cov(cov_fname, raw_info):
    import os.path as op
    import numpy as np
    import mne

    print '***** READ RAW COV *****' + cov_fname

    if not op.isfile(cov_fname):
        # create an Identity matrix
        picks = mne.pick_types(raw_info, meg=True, ref_meg=False,
                               exclude='bads')
        ch_names = [raw_info['ch_names'][i] for i in picks]

        C = mne.Covariance(data=np.identity(len(picks)), names=ch_names,
                           bads=[], projs=[], nfree=0)
        mne.write_cov(cov_fname, C)
    else:
        print '*** noise covariance file %s exists!!!' % cov_fname
        noise_cov = mne.read_cov(cov_fname)

    return noise_cov
Example #18
0
def read_noise_cov(cov_fname, raw_info):
    """Read a noise covariance matrix from cov_fname.

    If the file does not exist, an identity covariance over the MEG picks
    is created, written to cov_fname and returned.

    Parameters
    ----------
    cov_fname : str
        noise covariance file name
    raw_info : dict
        dictionary containing the information about the raw data

    Returns
    -------
    noise_cov : Covariance
        the noise covariance matrix
    """
    import os.path as op
    import numpy as np
    import mne

    print(('***** READ RAW COV *****' + cov_fname))

    if not op.isfile(cov_fname):
        # create an Identity matrix
        picks = mne.pick_types(raw_info,
                               meg=True,
                               ref_meg=False,
                               exclude='bads')
        ch_names = [raw_info['ch_names'][i] for i in picks]

        # BUG FIX: the created covariance was previously bound only to a
        # local 'c' and never assigned to noise_cov, so the return below
        # raised NameError whenever the file had to be created.
        noise_cov = mne.Covariance(data=np.identity(len(picks)),
                                   names=ch_names,
                                   bads=[],
                                   projs=[],
                                   nfree=0)
        mne.write_cov(cov_fname, noise_cov)
    else:
        print(('*** noise covariance file %s exists!!!' % cov_fname))
        noise_cov = mne.read_cov(cov_fname)

    return noise_cov
Example #19
0
def test_io_cov():
    """Test IO for noise covariance matrices
    """
    tempdir = _TempDir()

    def _saved_copy(c, name):
        # write under tempdir/name, then read the file straight back
        path = op.join(tempdir, name)
        c.save(path)
        return read_cov(path)

    cov = read_cov(cov_fname)
    cov['method'] = 'empirical'
    cov['loglik'] = -np.inf
    cov2 = _saved_copy(cov, 'test-cov.fif')
    assert_array_almost_equal(cov.data, cov2.data)
    assert_equal(cov['method'], cov2['method'])
    assert_equal(cov['loglik'], cov2['loglik'])
    assert_true('Covariance' in repr(cov))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    n_good = len(cov['data']) - len(cov['bads'])
    assert_true(cov_sel['dim'] == n_good)
    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
    cov_sel.save(op.join(tempdir, 'test-cov.fif'))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_naming(w, 'test_cov.py', 2)
Example #20
0
def test_io_cov():
    """Test IO for noise covariance matrices."""
    tempdir = _TempDir()

    def _saved_copy(c, name):
        # write under tempdir/name, then read the file straight back
        path = op.join(tempdir, name)
        c.save(path)
        return read_cov(path)

    cov = read_cov(cov_fname)
    cov["method"] = "empirical"
    cov["loglik"] = -np.inf
    cov2 = _saved_copy(cov, "test-cov.fif")
    assert_array_almost_equal(cov.data, cov2.data)
    assert_equal(cov["method"], cov2["method"])
    assert_equal(cov["loglik"], cov2["loglik"])
    assert_true("Covariance" in repr(cov))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, "test-cov.fif.gz")
    assert_array_almost_equal(cov.data, cov2.data)

    cov["bads"] = ["EEG 039"]
    cov_sel = pick_channels_cov(cov, exclude=cov["bads"])
    n_good = len(cov["data"]) - len(cov["bads"])
    assert_true(cov_sel["dim"] == n_good)
    assert_true(cov_sel["data"].shape == (cov_sel["dim"], cov_sel["dim"]))
    cov_sel.save(op.join(tempdir, "test-cov.fif"))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, "test-cov.fif.gz")
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        cov_badname = op.join(tempdir, "test-bad-name.fif.gz")
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_naming(w, "test_cov.py", 2)
Example #21
0
def create_noise_covariance_matrix(fname_empty_room,
                                   fname_out=None,
                                   verbose=None):
    """Creates the noise covariance matrix from an empty room file"""
    # Parameters:
    #   fname_empty_room : empty-room fif file to estimate the covariance from
    #   fname_out : optional path; when given, the matrix is also written there
    #   verbose : forwarded to the mne readers/estimators
    # Returns the computed covariance matrix.

    print ">>>> estimate noise covariance matrix from empty room file..."

    # read in data
    raw_empty = mne.fiff.Raw(fname_empty_room, verbose=verbose)
    # filter data

    # pick only MEG channels
    picks = mne.fiff.pick_types(raw_empty.info, meg=True, exclude='bads')

    # calculate noise-covariance matrix
    noise_cov_mat = mne.compute_raw_data_covariance(raw_empty,
                                                    picks=picks,
                                                    verbose=verbose)
    # write noise-covariance matrix to disk (only when an output name is given)
    if fname_out is not None:
        mne.write_cov(fname_out, noise_cov_mat)

    return noise_cov_mat
Example #22
0
def test_io_cov():
    """Test IO for noise covariance matrices."""
    tempdir = _TempDir()

    def _saved_copy(c, name):
        # write under tempdir/name, then read the file straight back
        path = op.join(tempdir, name)
        c.save(path)
        return read_cov(path)

    cov = read_cov(cov_fname)
    cov['method'] = 'empirical'
    cov['loglik'] = -np.inf
    cov2 = _saved_copy(cov, 'test-cov.fif')
    assert_array_almost_equal(cov.data, cov2.data)
    assert_equal(cov['method'], cov2['method'])
    assert_equal(cov['loglik'], cov2['loglik'])
    assert 'Covariance' in repr(cov)

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
    assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
    cov_sel.save(op.join(tempdir, 'test-cov.fif'))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        write_cov(cov_badname, cov)
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        read_cov(cov_badname)
Example #23
0
def test_io_cov():
    """Test IO for noise covariance matrices."""
    tempdir = _TempDir()

    def _saved_copy(c, name):
        # write under tempdir/name, then read the file straight back
        path = op.join(tempdir, name)
        c.save(path)
        return read_cov(path)

    cov = read_cov(cov_fname)
    cov['method'] = 'empirical'
    cov['loglik'] = -np.inf
    cov2 = _saved_copy(cov, 'test-cov.fif')
    assert_array_almost_equal(cov.data, cov2.data)
    assert_equal(cov['method'], cov2['method'])
    assert_equal(cov['loglik'], cov2['loglik'])
    assert 'Covariance' in repr(cov)

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
    assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
    cov_sel.save(op.join(tempdir, 'test-cov.fif'))

    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2 = _saved_copy(cov2, 'test-cov.fif.gz')
    assert_array_almost_equal(cov.data, cov2.data)

    # test warnings on bad filenames
    cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        write_cov(cov_badname, cov)
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        read_cov(cov_badname)
def create_noise_covariance_matrix(fname_empty_room, fname_out=None, verbose=None):
    """Estimate a noise covariance matrix from an empty-room recording.

    Parameters
    ----------
    fname_empty_room : str
        Path to the empty-room raw file (FIF format).
    fname_out : str | None
        If given, the covariance matrix is additionally written to this path.
    verbose : bool | str | int | None
        Verbosity level forwarded to the MNE reader and estimator.

    Returns
    -------
    noise_cov_mat
        The estimated noise covariance (as returned by
        ``mne.compute_raw_data_covariance``).
    """

    print ">>>> estimate noise covariance matrix from empty room file..."

    # read in data
    # (legacy ``mne.fiff`` API; superseded by ``mne.io`` in newer MNE)
    raw_empty = mne.fiff.Raw(fname_empty_room,
                             verbose=verbose)
    # filter data
    # NOTE(review): no filtering is actually performed here despite the
    # comment above -- confirm whether a band-pass step was intended.

    # pick only MEG channels
    picks = mne.fiff.pick_types(raw_empty.info,
                                meg=True,
                                exclude='bads')

    # calculate noise-covariance matrix
    noise_cov_mat = mne.compute_raw_data_covariance(raw_empty,
                                                    picks=picks,
                                                    verbose=verbose)
    # write noise-covariance matrix to disk
    if fname_out is not None:
        mne.write_cov(fname_out, noise_cov_mat)

    return noise_cov_mat
# Empty-room ("participant noise") recording used to estimate the noise
# covariance matrix.
PartNoise = 'ParticipantNoise_Preprocessed_trans_raw_tsss.fif'
PartNoiseData = mne.io.read_raw_fif(
    rootdir + '/Datafiles/ICAforConcatenatedRaws/' + PartNoise, preload=True)

# Sampling frequency of the noise recording.
# NOTE(review): `sfreq` is not used in this section -- presumably consumed
# later in the script; verify.
sfreq = PartNoiseData.info['sfreq']
# MEG-only channel selection, bad channels excluded.
picks = mne.pick_types(PartNoiseData.info,
                       meg=True,
                       eeg=False,
                       eog=False,
                       stim=False,
                       exclude='bads')

# Covariance calculation:
print('\n<< Calculating Covariance from Participant Noise Data >>')

# rank=None lets MNE estimate the (possibly reduced) rank;
# rank='full' forces a full-rank estimate. The output filename records
# which variant was used.
if CovRankEst:
    rank = None
    newfile = 'NoiseCov_fromParticipantNoise-cov.fif'
else:
    rank = 'full'
    newfile = 'NoiseCov_fromParticipantNoise_FullRank-cov.fif'

NoiseCov = mne.compute_raw_covariance(PartNoiseData,
                                      picks=picks,
                                      method='shrunk',
                                      rank=rank)

# save data files
os.chdir(rootdir)
mne.write_cov(newfile, NoiseCov)
Example #26
0
    # NOTE(review): truncated fragment of a larger per-run loop -- `raw`,
    # `events`, `event_id`, `tmin`, `tmax`, `include`, and `cname` are defined
    # in the enclosing (not visible) scope; the indentation levels reflect
    # that outer structure.
    #        # pick EEG channels
            # MEG + EOG picks (EEG and stim excluded), despite the stale
            # comment above.
            picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, include=include, exclude='bads')
            # Amplitude-based artifact rejection thresholds: mag in T,
            # grad in T/m.
            reject = dict(mag = 4e-12, grad = 4000e-13)
            epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline = (None,0), picks = picks, proj = True, preload = True, reject=reject)
            print epochs
#            ##Example: reject = dict(grad=4000e-13, # T / m (gradiometers)
#                          mag=4e-12, # T (magnetometers)
#                          eeg=40e-6, # uV (EEG channels)
#                          eog=250e-6 # uV (EOG channels)
#                          )
            
#             Compute the covariance from the raw data or from epochs 
            # Noise covariance from the pre-stimulus window (tmin=None..0).
            cov = mne.compute_covariance(epochs, tmin = None, tmax = 0) #Use for runs... 
            print cov
#        covRuns.append(cov)
        # Save the covariance for this run.
        mne.write_cov(cname, cov)
#        
#print len(covRuns)
#
###Make the Final Grand average of all the runs
#runData = []
#runNave = []
#newCov = []
#newCov = copy.deepcopy(cov)
#count = 0 
#
#for covRun in covRuns:
#    runData.append(covRun)
###        runNave.append(evRun[c].nave)
#    print 'Jane Here', 
#print runData
Example #27
0
import mne
import sys

from mne import compute_covariance

import matplotlib
# Non-interactive backend; must be selected before importing pyplot.
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# Star import supplies epochs_folder, mne_folder, reject_params, ...
from my_settings import *

# Amplitude rejection thresholds (grad: T/m, mag: T, eeg: V).
# NOTE(review): this dict is never used below -- drop_bad_epochs() is called
# with `reject_params` (from my_settings) instead; confirm which was intended.
reject = dict(
    grad=4000e-13,  # T / m (gradiometers)
    mag=4e-12,  # T (magnetometers)
    eeg=180e-6  #
)

subject = sys.argv[1]

# Load pre-cut epochs and drop trials exceeding the rejection thresholds.
epochs = mne.read_epochs(epochs_folder + "%s_trial_start-epo.fif" % subject)
epochs.drop_bad_epochs(reject_params)

# fig = epochs.plot_drop_log(subject=subject, show=False)
# fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)

# Make noise cov from the pre-stimulus baseline (tmin=None .. tmax=0).
cov = compute_covariance(epochs, tmin=None, tmax=0, method="factor_analysis")
mne.write_cov(mne_folder + "%s-cov.fif" % subject, cov)
Example #28
0
                        baseline=baseline)
    return epochs


# Epoch each subject's raw file and average to evokeds.
epochs_s = [process_meg(raw_name) for raw_name in raw_name_s]
evoked_s = [ep.average() for ep in epochs_s]

# compute noise covariance (takes a few minutes)
# Disk-cached: recomputed from the pre-stimulus window only when the
# per-subject -cov.fif file is missing.
noise_cov_s = []
for subj, ep in zip(["a", "b"], epochs_s):
    cov_fname = meg_path + f"subject_{subj}/sef-cov.fif"
    if os.path.exists(cov_fname):
        cov = mne.read_cov(cov_fname)
    else:
        cov = mne.compute_covariance(ep, tmin=None, tmax=0.)
        mne.write_cov(cov_fname, cov)
    noise_cov_s.append(cov)

# Plot each subject's gradiometer evoked with its noise covariance supplied.
f, axes = plt.subplots(1, 2, sharey=True)
for ax, ev, nc, ll in zip(axes.ravel(), evoked_s, noise_cov_s, ["a", "b"]):
    picks = mne.pick_types(ev.info, meg="grad")
    ev.plot(picks=picks, axes=ax, noise_cov=nc, show=False)
    ax.set_title("Subject %s" % ll, fontsize=15)
plt.show()

#########################################################
# Source and forward modeling
# ---------------------------
# To guarantee an alignment across subjects, we start by
# computing (or reading if available) the source space of the average
# subject of freesurfer `fsaverage`
Example #29
0
    # NOTE(review): truncated fragment of a per-subject function -- `subject`,
    # `meg_dir`, `mri_dir`, `epochs_fname`, and `cov_fname` come from the
    # enclosing (not visible) scope.
    # Derived per-subject output/input file paths.
    fwd_fname = meg_dir + subject + '_shepard-fwd.fif'
    inv_fname = meg_dir + subject + '_shepard-inv.fif'
    src_fname = meg_dir + subject + '-ico-4-src.fif'
    trans_fname = meg_dir + subject + '-trans.fif'
    bem_fname = mri_dir + '%s/bem/%s-inner_skull-bem-sol.fif' % (subject,
                                                                 subject)
    stc_fname = meg_dir + subject + '_shepard.stc.npy'
    os.environ[
        "SUBJECTS_DIR"] = '/Volumes/Server/MORPHLAB/Users/Ellie/Shepard/mri/'

    # load
    epochs = read_epochs(epochs_fname)

    # Noise covariance from the baseline (up to t=0), written then re-read
    # from disk.
    print("Making covariance matrix...")
    noise_cov = compute_covariance(epochs, tmax=0., method=['shrunk'])
    write_cov(cov_fname, noise_cov)

    noise_cov = read_cov(cov_fname)

    # Single-shell (inner skull) BEM model and solution.
    print("Making BEM model...")
    surfaces = make_bem_model(subject,
                              ico=4,
                              conductivity=(0.3, ),
                              subjects_dir=mri_dir,
                              verbose=None)
    bem = make_bem_solution(surfaces)
    write_bem_solution(bem_fname, bem)

    # if not op.isfile(fwd_fname):
    print("Making forward solution...")
    src = setup_source_space(subject, spacing='ico4', subjects_dir=mri_dir)
Example #30
0
def apply_cov(fname_empty_room, filtered=True):
    '''
    Creates the noise covariance matrix from an empty room file.

    For each input file the covariance is estimated from MEG channels only,
    a covariance plot is saved as two .tiff figures, and the covariance is
    written next to the input with a subject-derived name.

    Parameters
    ----------
    fname_empty_room : str or list of str
        Filename(s) of the empty room file(s) (must be fif-files).
        File names should end with -raw.fif in order to have proper
        output filenames.
    filtered : bool
        If True, the output covariance filename is tagged with the fixed
        filter band (',fibp1-45'). Note the data itself is not filtered
        inside this function; the tag only documents upstream filtering.
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from mne import compute_raw_covariance as cp_covariance
    from mne import write_cov, pick_types
    from mne.io import Raw
    # get_files_from_list, ext_empty_raw, ext_empty_cov, mne, and pl are
    # provided at module level (not visible in this block).
    fner = get_files_from_list(fname_empty_room)

    # loop across all filenames
    for fn_in in fner:
        # Figure filenames derived from the raw filename stem.
        fn_fig1 = fn_in[:fn_in.rfind(ext_empty_raw)] + ',Magnetometers.tiff'
        fn_fig2 = fn_in[:fn_in.rfind(ext_empty_raw)] + ',Eigenvalue_index.tiff'
        path_in, name = os.path.split(fn_in)
        # Subject ID is the leading underscore-delimited token of the name.
        subject = name.split('_')[0]
        # read in data
        raw_empty = Raw(fn_in)

        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty,
                                      tmin=None,
                                      tmax=None,
                                      tstep=0.2,
                                      picks=picks)
        fig1, fig2 = mne.viz.plot_cov(noise_cov_mat, raw_empty.info)
        # write noise-covariance matrix to disk
        if filtered:
            fn_out = path_in + '/%s_empty,fibp1-45' % subject + ext_empty_cov
        else:
            fn_out = path_in + '/%s_empty' % subject + ext_empty_cov
        write_cov(fn_out, noise_cov_mat)
        fig1.savefig(fn_fig1)
        fig2.savefig(fn_fig2)
        pl.close('all')
Example #31
0
def apply_create_noise_covariance(fname_empty_room, require_filter=False,
                                  require_noise_reducer=False, verbose=None):

    '''
    Creates the noise covariance matrix from an empty room file.

    Parameters
    ----------
    fname_empty_room : str or list of str
        Filename(s) of the empty room file(s) (must be fif-files).
        File name should end with -raw.fif in order to have proper output filenames.
    require_filter : bool
        If true, the empty room file is filtered before calculating
        the covariance matrix. (Beware, filter settings are fixed.)
    require_noise_reducer : bool
        If true, a noise reducer is applied on the empty room file.
        The noise reducer frequencies are fixed to 50Hz, 60Hz and
        to frequencies less than 5Hz i.e. the reference channels are filtered to
        these frequency ranges and then signal obtained is removed from
        the empty room raw data. For more information please check the jumeg noise reducer.
    verbose : bool, str, int, or None
        If not None, override default verbose level
        (see mne.verbose).
        default: verbose=None
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from mne import compute_raw_data_covariance as cp_covariance
    from mne import write_cov, pick_types
    from mne.io import Raw
    from jumeg.jumeg_noise_reducer import noise_reducer

    fner = get_files_from_list(fname_empty_room)

    # loop across all filenames
    for fn_in in fner:
        print(">>> create noise covariance using file: ")
        path_in, name = os.path.split(fn_in)
        print(name)

        if require_filter:
            print("Filtering with preset settings...")
            # filter empty room raw data (fixed 1-45 Hz band-pass)
            apply_filter(fn_in, flow=1, fhigh=45, order=4, njobs=4)
            # reconstruct empty room file name accordingly
            fn_in = fn_in[:fn_in.rfind(ext_empty_raw)] + ',fibp1-45-raw.fif'

        if require_noise_reducer:
            # Remove power-line components (50/60 Hz) and slow drifts
            # (< 5 Hz) via the reference channels, chaining three passes
            # over the same output file.
            fn_empty_nr = fn_in[:fn_in.rfind(ext_empty_raw)] + ',nr-raw.fif'
            noise_reducer(fn_in, refnotch=50, detrending=False, fnout=fn_empty_nr)
            noise_reducer(fn_empty_nr, refnotch=60, detrending=False, fnout=fn_empty_nr)
            noise_reducer(fn_empty_nr, reflp=5, fnout=fn_empty_nr)
            fn_in = fn_empty_nr

        # file name for saving noise_cov
        fn_out = fn_in[:fn_in.rfind(ext_empty_raw)] + ext_empty_cov

        # read in data
        raw_empty = Raw(fn_in, verbose=verbose)

        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, ref_meg=False, eeg=False,
                           stim=False, eog=False, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty, picks=picks, verbose=verbose)

        # write noise-covariance matrix to disk
        write_cov(fn_out, noise_cov_mat)
Example #32
0
def _compute_evoked(subject, kind):
    """Epoch, average, and compute noise covariance for one subject/session.

    Reads the BIDS raw recording for ``(subject, kind)``, maxfilters and
    band-pass filters it (0.1-45 Hz), adds ECG/EOG SSP projectors, then for
    each event set configured in ``task_info[kind]`` builds epochs with
    globally estimated rejection thresholds, writes per-condition evokeds,
    the noise covariance, and the epochs under ``derivative_path``, and
    returns a dict mapping each lock to ``(kind, nave)``.
    """
    fname = BIDSPath(root=BIDS_ROOT,
                     subject=subject,
                     session=kind,
                     task=kind,
                     datatype='meg',
                     extension='.fif')

    raw = read_raw_bids(fname)
    mne.channels.fix_mag_coil_types(raw.info)
    raw = _run_maxfilter(raw)
    raw.filter(0.1, 45)
    _compute_add_ssp_exg(raw)

    out = {}
    for ii, event_id in enumerate(task_info[kind]['event_id']):
        epochs_params = task_info[kind]['epochs_params'][ii]
        lock = task_info[kind]['lock'][ii]
        events = mne.find_events(raw,
                                 uint_cast=True,
                                 min_duration=2. / raw.info['sfreq'])

        if kind == 'task' and lock == 'resp':
            # The button-press trigger code is unknown a priori: take the
            # most frequent event code and remap it to 8192, but only when
            # it occurs at least 50 times; otherwise refuse to guess.
            event_map = np.array([(k, v)
                                  for k, v in Counter(events[:, 2]).items()])
            button_press = event_map[:, 0][np.argmax(event_map[:, 1])]
            if event_map[:, 1][np.argmax(event_map[:, 1])] >= 50:
                events[events[:, 2] == button_press, 2] = 8192
            else:
                raise RuntimeError('Could not guess button press')

        # Data-driven (global) amplitude rejection thresholds for this set.
        reject = _get_global_reject_epochs(raw,
                                           events=events,
                                           event_id=event_id,
                                           epochs_params=epochs_params)

        epochs = mne.Epochs(raw,
                            events=events,
                            event_id=event_id,
                            reject=reject,
                            preload=True,
                            **epochs_params)

        # One evoked per condition, tagged with the condition name.
        evokeds = list()
        for kk in event_id:
            evoked = epochs[kk].average()
            evoked.comment = kk
            evokeds.append(evoked)

        # tmax is 0.05 to account for the shift error of 50ms in camcan
        noise_covs = mne.compute_covariance(epochs,
                                            tmin=None,
                                            tmax=0.05,
                                            verbose=False,
                                            n_jobs=1,
                                            projs=None)

        # Per-subject output directory and filenames for epochs/cov/evokeds.
        out_path = op.join(derivative_path, subject)
        if not op.exists(out_path):
            os.makedirs(out_path)
        epo_fname = op.join(out_path, '%s_%s_sensors-epo.fif' % (kind, lock))
        cov_fname = op.join(out_path, '%s_%s_sensors-cov.fif' % (kind, lock))
        ave_fname = op.join(out_path, '%s_%s_sensors-ave.fif' % (kind, lock))

        mne.write_evokeds(ave_fname, evokeds)
        mne.write_cov(cov_fname, noise_covs)

        epochs.save(epo_fname, overwrite=True)

        out.update({lock: (kind, epochs.average().nave)})

    return out
Example #33
0
    subject = sys.argv[1]  #Get the subject
except:
    print "Please run with input file provided. Exiting"
    sys.exit()
subjects_dir = '/home/qdong/freesurfer/subjects/'
subject_path = subjects_dir + subject  #Set the data path of the subject

raw_empty_fname = subject_path + '/MEG/%s_emptyroom.fif' % subject
# Legacy mne.fiff API (superseded by mne.io in newer MNE).
raw_empty = mne.fiff.Raw(raw_empty_fname, preload=True)
#Filter the empty room data
# (flow, fhigh, njobs, filter_type, filter_order are defined earlier in the
# script -- not visible here)
picks_empty = mne.fiff.pick_types(raw_empty.info,
                                  meg=True,
                                  eeg=False,
                                  eog=True,
                                  ecg=True,
                                  stim=False,
                                  exclude='bads')
raw_empty.filter(flow,
                 fhigh,
                 picks=picks_empty,
                 n_jobs=njobs,
                 method='iir',
                 iir_params={
                     'ftype': filter_type,
                     'order': filter_order
                 })
#Get the basename
raw_empty_basename = os.path.splitext(os.path.basename(raw_empty_fname))[0]
# Estimate the noise covariance from the filtered empty-room data and save.
cov = mne.compute_raw_data_covariance(raw_empty, picks=picks_empty)
mne.write_cov(subject_path + '/MEG/%s_cov.fif' % (raw_empty_basename), cov)
Example #34
0
def run_correlation(subjects_dir, subject, volume_spacing, freq, ortho_bool):
    """Compute and save seed-based envelope correlations for one subject.

    Builds (or reloads from disk) the volume source space with seed ROIs,
    forward model, SSP projectors, and covariances; applies an LCMV
    beamformer to the data band-pass filtered around ``freq``; computes
    power-envelope correlations; and saves one .npy file per seed. Skips
    all work if every expected output file already exists.
    """
    num_threads(8)
    ortho_flag = str(ortho_bool)
    frequency = str(freq)
    # Per-subject input/output file paths.
    DATA_DIR = Path(f'{subjects_dir}', f'{subject}', 'mne_files')
    eye_proj1 = f'{DATA_DIR}/{subject}_eyes1-proj.fif.gz'
    eye_proj2 = f'{DATA_DIR}/{subject}_eyes2-proj.fif.gz'
    fname_meg = f'{DATA_DIR}/{subject}_ses-rest_task-rest.fif'
    t1_fname = os.path.join(subjects_dir, subject, 'mri', 'T1.mgz')
    heartbeat_proj = f'{DATA_DIR}/{subject}_heartbeat-proj.fif.gz'
    fwd_fname = f'{DATA_DIR}/{subject}_{volume_spacing}-fwd.fif.gz'
    src_fname = f'{DATA_DIR}/{subject}_{volume_spacing}-src.fif.gz'
    cov_fname = f'{DATA_DIR}/{subject}-cov_{volume_spacing}.fif.gz'
    raw_cov_fname = f'{DATA_DIR}/{subject}-rawcov_{volume_spacing}.fif.gz'
    raw_proj = f'{DATA_DIR}/{subject}_ses-rest_task-rest_proj.fif.gz'
    source_voxel_coords = f'{DATA_DIR}/{subject}_coords_{volume_spacing}.pkl'
    # Output correlation files, one per seed ROI (left hemisphere).
    corr_file_acLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_acLeft.npy'
    corr_file_scLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_scLeft.npy'
    corr_file_vcLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_vcLeft.npy'
    corr_file_mtLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_mtLeft.npy'
    corr_file_mtlLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_mtlLeft.npy'
    corr_file_smcLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_smcLeft.npy'
    corr_file_lpcLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_lpcLeft.npy'
    corr_file_dpfcLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_dpfcLeft.npy'
    corr_file_tmpcLeft = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_tmpcLeft.npy'

    # Right-hemisphere seed ROI outputs.
    corr_file_acRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_acRight.npy'
    corr_file_scRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_scRight.npy'
    corr_file_vcRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_vcRight.npy'
    corr_file_mtRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_mtRight.npy'
    corr_file_mtlRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_mtlRight.npy'
    corr_file_smcRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_smcRight.npy'
    corr_file_lpcRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_lpcRight.npy'
    corr_file_dpfcRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_dpfcRight.npy'
    corr_file_tmpcRight = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_tmpcRight.npy'

    # Midline seed ROI outputs.
    corr_file_mpfc = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_mpfc.npy'
    corr_file_sma = f'{DATA_DIR}/{subject}_{ortho_flag}_{volume_spacing}_{frequency}_sma.npy'

    check_for_files = []
    check_for_files.append(corr_file_acLeft)
    check_for_files.append(corr_file_scLeft)
    check_for_files.append(corr_file_vcLeft)
    check_for_files.append(corr_file_mtLeft)
    check_for_files.append(corr_file_mtlLeft)
    check_for_files.append(corr_file_smcLeft)
    check_for_files.append(corr_file_lpcLeft)
    check_for_files.append(corr_file_dpfcLeft)
    check_for_files.append(corr_file_tmpcLeft)

    check_for_files.append(corr_file_acRight)
    check_for_files.append(corr_file_scRight)
    check_for_files.append(corr_file_vcRight)
    check_for_files.append(corr_file_mtRight)
    check_for_files.append(corr_file_mtlRight)
    check_for_files.append(corr_file_smcRight)
    check_for_files.append(corr_file_lpcRight)
    check_for_files.append(corr_file_dpfcRight)
    check_for_files.append(corr_file_tmpcRight)

    check_for_files.append(corr_file_mpfc)
    check_for_files.append(corr_file_sma)


    # Only (re)compute if at least one expected output is missing.
    file_exist = [f for f in check_for_files if os.path.isfile(f)]
    file_not_exist = list(set(file_exist) ^ set(check_for_files))

    if not file_not_exist:
        print('SC, AC, VC correlation files exists...')

    else:
        trans = f'/home/senthilp/caesar/camcan/cc700/camcan_coreg-master/trans/{subject}-trans.fif' # The transformation file obtained by coregistration
        file_trans = pathlib.Path(trans)
        file_ss = pathlib.Path(src_fname)
        file_fm = pathlib.Path(fwd_fname)
        file_proj = pathlib.Path(raw_proj)
        file_cov = pathlib.Path(cov_fname)
        file_rawcov = pathlib.Path(raw_cov_fname)
        t1 = nib.load(t1_fname)

        if not file_trans.exists():
            print (f'{trans} File doesnt exist...')
            sys.exit(0)

        #info = mne.io.read_info(fname_meg)
        # plot_registration(info, trans, subject, subjects_dir)
        if not file_ss.exists():

            # Build the volume source space and replace the first 20 in-use
            # source positions with the MRI coordinates of the seed ROIs.
            src = compute_SourceSpace(subject, subjects_dir, src_fname, source_voxel_coords, plot=True, ss='volume', 
                                volume_spacing=volume_spacing)
            seed_l_sc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['SSC_Left'])
            seed_r_sc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['SSC_Right'])
            seed_l_ac = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['AC_Left'])
            seed_r_ac = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['AC_Right'])
            seed_l_vc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['VC_Left'])
            seed_r_vc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['VC_Right'])
            seed_l_mt = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['MT+_Left'])
            seed_r_mt = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['MT+_Right'])
            seed_l_mtl = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['MTL_Left'])
            seed_r_mtl = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['MTL_Right'])
            seed_l_smc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['SMC_Left'])
            seed_r_smc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['SMC_Right'])
            seed_l_lpc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['LPC_Left'])
            seed_r_lpc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['LPC_Right'])
            seed_l_dpfc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['DPFC_Left'])
            seed_r_dpfc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['DPFC_Right'])
            seed_l_tmpc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['TMPC_Left'])
            seed_r_tmpc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['TMPC_Right'])

            seed_mpfc = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['MPFC_MidBrain'])
            seed_sma = MNI_to_MRI(subject, subjects_dir, t1, ROI_mni['SMA_MidBrain'])

            src_inuse = np.where(src[0]['inuse'] == 1)
            loc_l_sc = src_inuse[0][0]
            loc_r_sc = src_inuse[0][1]
            loc_l_ac = src_inuse[0][2]
            loc_r_ac = src_inuse[0][3]
            loc_l_vc = src_inuse[0][4]
            loc_r_vc = src_inuse[0][5]
            loc_l_mt = src_inuse[0][6]
            loc_r_mt = src_inuse[0][7]
            loc_l_mtl = src_inuse[0][8]
            loc_r_mtl = src_inuse[0][9]
            loc_l_smc = src_inuse[0][10]
            loc_r_smc = src_inuse[0][11]
            loc_l_lpc = src_inuse[0][12]
            loc_r_lpc = src_inuse[0][13]
            loc_l_dpfc = src_inuse[0][14]
            loc_r_dpfc = src_inuse[0][15]
            loc_l_tmpc = src_inuse[0][16]
            loc_r_tmpc = src_inuse[0][17]
            loc_mpfc = src_inuse[0][18]
            loc_sma = src_inuse[0][19]
            src[0]['rr'][loc_l_sc] = seed_l_sc
            src[0]['rr'][loc_r_sc] = seed_r_sc
            src[0]['rr'][loc_l_ac] = seed_l_ac
            src[0]['rr'][loc_r_ac] = seed_r_ac
            src[0]['rr'][loc_l_vc] = seed_l_vc
            src[0]['rr'][loc_r_vc] = seed_r_vc
            src[0]['rr'][loc_l_mt] = seed_l_mt
            src[0]['rr'][loc_r_mt] = seed_r_mt
            src[0]['rr'][loc_l_mtl] = seed_l_mtl
            src[0]['rr'][loc_r_mtl] = seed_r_mtl
            src[0]['rr'][loc_l_smc] = seed_l_smc
            src[0]['rr'][loc_r_smc] = seed_r_smc
            src[0]['rr'][loc_l_lpc] = seed_l_lpc
            src[0]['rr'][loc_r_lpc] = seed_r_lpc
            src[0]['rr'][loc_l_dpfc] = seed_l_dpfc
            src[0]['rr'][loc_r_dpfc] = seed_r_dpfc
            src[0]['rr'][loc_l_tmpc] = seed_l_tmpc
            src[0]['rr'][loc_r_tmpc] = seed_r_tmpc
            src[0]['rr'][loc_mpfc] = seed_mpfc
            src[0]['rr'][loc_sma] = seed_sma
            src.save(src_fname, overwrite=True)
        src = mne.read_source_spaces(src_fname)
        #view_SS_brain(subject, subjects_dir, src)

        if not file_fm.exists():
            forward_model(subject, subjects_dir, fname_meg, trans, src, fwd_fname)
        fwd = mne.read_forward_solution(fwd_fname)

        # sensitivty_plot(subject, subjects_dir, fwd)
        raw = mne.io.read_raw_fif(fname_meg, verbose='error', preload=True)

        srate = raw.info['sfreq']
        n_time_samps = raw.n_times
        time_secs = raw.times
        ch_names = raw.ch_names
        n_chan = len(ch_names)
        freq_res =  srate/n_time_samps
        print('\n')
        print('-------------------------- Data summary-------------------------------')
        print(f'Subject {subject}')
        print(f"Frequency resolution {freq_res} Hz")
        print(f"The first few channel names are {ch_names[:3]}")
        print(f"The last time sample at {time_secs[-1]} seconds.")
        print(f"Sampling Frequency (No of time points/sec) {srate} Hz")
        print(f"Miscellaneous acquisition info {raw.info['description']}")
        print(f"Bad channels marked during data acquisition {raw.info['bads']}")
        print(f"Convert time in sec ( 60s ) to ingeter index {raw.time_as_index(60)}") # Convert time to indices
        print(f"The raw data object has {n_time_samps} time samples and {n_chan} channels.")
        print('------------------------------------------------------------------------')
        print('\n')
        # raw.plot(n_channels=10, scalings='auto', title='Data from arrays', show=True, block=True)
        # Compute (or reuse) ECG/EOG SSP projectors and apply them to the raw.
        if not file_proj.exists():
            projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2, ch_name='ECG063')
            projs_eog1, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='EOG061')
            projs_eog2, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='EOG062')
            if projs_ecg is not None:
                mne.write_proj(heartbeat_proj, projs_ecg) # Saving projectors
                raw.info['projs'] += projs_ecg
            if projs_eog1 is not None:
                mne.write_proj(eye_proj1, projs_eog1)
                raw.info['projs'] += projs_eog1
            if projs_eog2 is not None:
                mne.write_proj(eye_proj2, projs_eog2)
                raw.info['projs'] += projs_eog2
            raw.apply_proj()
            raw.save(raw_proj, proj=True, overwrite=True)
        print(raw_proj)
        raw_proj_applied = mne.io.read_raw_fif(raw_proj, verbose='error', preload=True)


        print(f'High-pass filtering data at 0.5 Hz')
        raw_proj_applied.filter(l_freq=0.5, h_freq=None, method='iir')

        if not file_cov.exists():
            cov = mne.compute_raw_covariance(raw_proj_applied) # compute before band-pass of interest
            mne.write_cov(cov_fname, cov)
        cov = mne.read_cov(cov_fname) 

        # cov.plot(raw.info, proj=True, exclude='bads', show_svd=False
        # raw_proj_applied.crop(tmax=10)
        
        do_epochs = False

        # Band-pass +/- 2 Hz around the frequency of interest.
        l_freq = freq-2.0
        h_freq = freq+2.0
        print(f'Band pass filter data [{l_freq}, {h_freq}]')
        raw_proj_filtered = raw_proj_applied.filter(l_freq=l_freq, h_freq=h_freq)

        if do_epochs:
            print('Segmenting raw data...')
            events = mne.make_fixed_length_events(raw_proj_filtered, duration=5.)
            raw_proj_filtered = mne.Epochs(raw_proj_filtered, events=events, tmin=0, tmax=5.,
                                            baseline=None, preload=True)
            data_cov = mne.compute_covariance(raw_proj_filtered)         
        else:
            if not file_rawcov.exists():
                data_cov = mne.compute_raw_covariance(raw_proj_filtered)
                mne.write_cov(raw_cov_fname, data_cov)
            else:
                data_cov = mne.read_cov(file_rawcov)

        # LCMV beamformer on the analytic (Hilbert) signal.
        filters = make_lcmv(raw_proj_filtered.info, fwd, data_cov, 0.05, cov,
                            pick_ori='max-power', weight_norm='nai')
        raw_proj_filtered_comp = raw_proj_filtered.apply_hilbert()

        if do_epochs:
            stcs = apply_lcmv_epochs(raw_proj_filtered_comp, filters, return_generator=False)
        else:
            stcs = apply_lcmv_raw(raw_proj_filtered_comp, filters, verbose=True)
            stcs = [stcs]
        # Power Envelope Correlation
        print(f'Computing Power Envelope Correlation for {subject}....Orthogonalize {ortho_flag}')

        # NOTE(review): `orthogonalize=False` is hard-coded even though
        # `ortho_bool` is a parameter (it is only used to tag output file
        # names) -- confirm whether orthogonalization should follow
        # `ortho_bool`.
        all_corr = envelope_correlation(stcs, combine=None, orthogonalize=False,
                    log=False, absolute=True, verbose=None)

        # NOTE(review): the `seed_left_*`/`seed_right_*`/`seed_*_index` names
        # below are never defined in this function (the branch above defines
        # `seed_l_*`/`seed_r_*` coordinates and `loc_*` vertex indices), so
        # reaching this code raises NameError -- likely the `loc_*` indices
        # were intended.
        np.save(corr_file_scLeft, all_corr[seed_left_sc])
        np.save(corr_file_acLeft, all_corr[seed_left_ac])
        np.save(corr_file_vcLeft, all_corr[seed_left_vc])
        np.save(corr_file_mtLeft, all_corr[seed_left_mt])
        np.save(corr_file_mtlLeft, all_corr[seed_left_mtl])
        np.save(corr_file_smcLeft, all_corr[seed_left_smc])
        np.save(corr_file_lpcLeft, all_corr[seed_left_lpc])
        np.save(corr_file_dpfcLeft, all_corr[seed_left_dpfc])
        np.save(corr_file_tmpcLeft, all_corr[seed_left_tmpc])

        np.save(corr_file_scRight, all_corr[seed_right_sc])
        np.save(corr_file_acRight, all_corr[seed_right_ac])
        np.save(corr_file_vcRight, all_corr[seed_right_vc])
        np.save(corr_file_mtRight, all_corr[seed_right_mt])
        np.save(corr_file_mtlRight, all_corr[seed_right_mtl])
        np.save(corr_file_smcRight, all_corr[seed_right_smc])
        np.save(corr_file_lpcRight, all_corr[seed_right_lpc])
        np.save(corr_file_dpfcRight, all_corr[seed_right_dpfc])
        np.save(corr_file_tmpcRight, all_corr[seed_right_tmpc])

        np.save(corr_file_mpfc, all_corr[seed_mpfc_index])
        np.save(corr_file_sma, all_corr[seed_sma_index])

        del stcs
Example #35
0
def gen_covariances(p, subjects, run_indices, decim):
    """Generate noise covariance matrices for each subject.

    Computes (and writes to disk) an empty-room covariance when
    ``p.runs_empty`` is set, plus one covariance per inverse name built
    from baseline epochs. Can only complete successfully once
    preprocessing is performed.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    run_indices : array-like | None
        Run indices to include.
    decim : list of int
        The subject decimations.
    """
    for si, subj in enumerate(subjects):
        print('  Subject %2d/%2d...' % (si + 1, len(subjects)), end='')
        cov_dir = op.join(p.work_dir, subj, p.cov_dir)
        if not op.isdir(cov_dir):
            os.mkdir(cov_dir)
        # Older MNE versions have no "rank" argument on
        # compute_covariance; build kwargs conditionally so both old and
        # new versions work.
        has_rank_arg = 'rank' in get_args(compute_covariance)
        kwargs = dict()
        kwargs_erm = dict()  # kwargs for the empty-room covariance
        if p.cov_rank == 'full':  # backward compat
            if has_rank_arg:
                kwargs['rank'] = 'full'
        else:
            if not has_rank_arg:
                raise RuntimeError(
                    'There is no "rank" argument of compute_covariance, '
                    'you need to update MNE-Python')
            if p.cov_rank is None:
                assert p.compute_rank  # otherwise this is weird
                kwargs['rank'] = _compute_rank(p, subj, run_indices[si])
            else:
                kwargs['rank'] = p.cov_rank
        # NOTE(review): if p.cov_rank == 'full' and has_rank_arg is
        # False, kwargs has no 'rank' key and the next line raises
        # KeyError -- confirm the minimum supported MNE version.
        kwargs_erm['rank'] = kwargs['rank']
        if p.force_erm_cov_rank_full and has_rank_arg:
            kwargs_erm['rank'] = 'full'
        # Use the same thresholds we used for primary Epochs
        if p.autoreject_thresholds:
            reject = get_epochs_evokeds_fnames(p, subj, [])[0][1]
            reject = reject.replace('-epo.fif', '-reject.h5')
            reject = read_hdf5(reject)
        else:
            reject = _handle_dict(p.reject, subj)
        flat = _handle_dict(p.flat, subj)

        # Make empty room cov
        if p.runs_empty:
            if len(p.runs_empty) > 1:
                raise ValueError('Too many empty rooms; undefined output!')
            new_run = safe_inserter(p.runs_empty[0], subj)
            empty_cov_name = op.join(
                cov_dir, new_run + p.pca_extra + p.inv_tag + '-cov.fif')
            empty_fif = get_raw_fnames(p, subj, 'pca', 'only', False)[0]
            raw = read_raw_fif(empty_fif, preload=True)
            raw.pick_types(meg=True, eog=True, exclude='bads')
            use_reject, use_flat = _restrict_reject_flat(reject, flat, raw)
            # the empty room has no EEG channels, so drop EEG thresholds
            if 'eeg' in use_reject:
                del use_reject['eeg']
            if 'eeg' in use_flat:
                del use_flat['eeg']
            cov = compute_raw_covariance(raw,
                                         reject=use_reject,
                                         flat=use_flat,
                                         method=p.cov_method,
                                         **kwargs_erm)
            write_cov(empty_cov_name, cov)

        # Make evoked covariances
        for ii, (inv_name, inv_run) in enumerate(zip(p.inv_names, p.inv_runs)):
            cov_name = op.join(
                cov_dir,
                safe_inserter(inv_name, subj) + ('-%d' % p.lp_cut) +
                p.inv_tag + '-cov.fif')
            # restrict to the runs relevant for this inverse
            if run_indices[si] is None:
                ridx = inv_run
            else:
                ridx = np.intersect1d(run_indices[si], inv_run)
            # read in raw files
            raw_fnames = get_raw_fnames(p, subj, 'pca', False, False, ridx)

            raws = []
            first_samps = []
            last_samps = []
            for raw_fname in raw_fnames:
                raws.append(read_raw_fif(raw_fname, preload=False))
                # NOTE(review): reaches into private Raw attributes
                # (_first_samps/_last_samps) -- may break across MNE
                # versions.
                first_samps.append(raws[-1]._first_samps[0])
                last_samps.append(raws[-1]._last_samps[-1])
            _fix_raw_eog_cals(raws)  # safe b/c cov only needs MEEG
            raw = concatenate_raws(raws)
            # read in events
            events = _read_events(p, subj, ridx, raw)
            # optionally subsample the events used for the covariance
            if p.pick_events_cov is not None:
                old_count = sum(len(e) for e in events)
                if callable(p.pick_events_cov):
                    picker = p.pick_events_cov
                else:
                    picker = p.pick_events_cov[ii]
                events = picker(events)
                new_count = len(events)
                print('  Using %s/%s events for %s' %
                      (new_count, old_count, op.basename(cov_name)))
            # create epochs restricted to the baseline period
            use_reject, use_flat = _restrict_reject_flat(reject, flat, raw)
            baseline = _get_baseline(p)
            epochs = Epochs(
                raw,
                events,
                event_id=None,
                tmin=baseline[0],
                tmax=baseline[1],
                baseline=(None, None),
                proj=False,
                reject=use_reject,
                flat=use_flat,
                preload=True,
                decim=decim[si],
                verbose='error',  # ignore decim-related warnings
                on_missing=p.on_missing,
                reject_by_annotation=p.reject_epochs_by_annot)
            epochs.pick_types(meg=True, eeg=True, exclude=[])
            cov = compute_covariance(epochs, method=p.cov_method, **kwargs)
            # sanity-check that the requested rank was actually achieved
            if kwargs.get('rank', None) not in (None, 'full'):
                want_rank = sum(kwargs['rank'].values())
                out_rank = compute_whitener(cov,
                                            epochs.info,
                                            return_rank=True,
                                            verbose='error')[2]
                if want_rank != out_rank:
                    # Hopefully we never hit this code path, but let's keep
                    # some debugging stuff around just in case
                    plot_cov(cov, epochs.info)
                    epochs_fnames, _ = get_epochs_evokeds_fnames(
                        p, subj, p.analyses)
                    epochs2 = read_epochs(epochs_fnames[1], preload=True)
                    idx = np.searchsorted(epochs.events[:, 0],
                                          epochs2.events[:, 0])
                    assert len(np.unique(idx)) == len(idx)
                    epochs = epochs[idx]
                    assert np.array_equal(epochs.events[:, 0],
                                          epochs2.events[:, 0])
                    epochs2.pick_types(meg=True, eeg=True, exclude=[])
                    import matplotlib.pyplot as plt
                    plt.figure()
                    for eps in (epochs, epochs2):
                        eps = eps.get_data().transpose([1, 0, 2])
                        eps = eps.reshape(len(eps), -1)
                        plt.plot(
                            np.log10(np.maximum(linalg.svdvals(eps), 1e-50)))
                    epochs.plot()
                    baseline = _get_baseline(p)
                    epochs2.copy().crop(*baseline).plot()
                    raise RuntimeError('Error computing rank')

            write_cov(cov_name, cov)
        print()
Example #36
0
def apply_create_noise_covariance(fname_empty_room, fname_out, verbose=None):

    '''
    Creates the noise covariance matrix from an
    empty room file.

        Parameters
        ----------
        fname_empty_room : str or list of str
            Filename(s) of the empty room recording(s)
            (must be fif-files).
        fname_out : str or list of str
            Output filename(s) of the noise-covariance
            estimation (must also be fif-files). Must have
            the same length as ``fname_empty_room``.
        verbose : bool, str, int, or None
            If not None, override default verbose level
            (see mne.verbose).
            default: verbose=None

        Raises
        ------
        ValueError
            If the number of input and output filenames differ.
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    # compute_raw_data_covariance was renamed to compute_raw_covariance
    # in MNE; use the current name, consistent with the rest of this
    # module.
    from mne import compute_raw_covariance as cp_covariance
    from mne import write_cov
    from mne.io import Raw
    from mne import pick_types
    import os

    def _as_list(fnames):
        # A plain string is iterable but represents ONE filename, so it
        # must be checked before the generic iterable case.
        if isinstance(fnames, str):
            return [fnames]
        return list(fnames)

    # normalize both arguments to lists so single filenames, lists and
    # other iterables are handled uniformly
    fner = _as_list(fname_empty_room)
    fnout = _as_list(fname_out)

    if len(fnout) != len(fner):
        # raise instead of print+exit() so callers can catch the error
        raise ValueError(
            'create_noise_covariance_matrix: number of input (%d) and '
            'output (%d) files do not match' % (len(fner), len(fnout)))

    # loop across all input/output filename pairs
    for fn_in, fn_out in zip(fner, fnout):
        print(">>> create noise covariance using file: ")
        path_in, name = os.path.split(fn_in)
        print(name)

        # read in data
        raw_empty = Raw(fn_in, verbose=verbose)

        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, eeg=False, stim=False,
                           eog=False, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty, picks=picks, verbose=verbose)

        # write noise-covariance matrix to disk
        write_cov(fn_out, noise_cov_mat)
Example #37
0
def intra(subj_list, fmin, fmax):
    '''
    Performs main process, including generation of inverse solution and PSD computation.

    Parameters
    ----------
    subj_list : list of str
        Raw-file basenames; each subject's files are located under the
        module-level ``data_path`` using the first five characters of
        the name.
    fmin : float
        Lower frequency bound for the source PSD computation.
    fmax : float
        Upper frequency bound for the source PSD computation.

    NOTE(review): relies on module-level names (data_path, mne, fiff,
    event_id, tmin, tmax, lambda2, write_cov, make_inverse_operator,
    apply_inverse, compute_source_psd) -- confirm they are defined in
    this module before calling.
    '''

    for subj in subj_list:

        # BUGFIX: the original code was ``print(...) * 5`` which, on
        # Python 3, multiplies print's return value (None) by 5 and
        # raises TypeError.  Repeat the *string* instead.
        print(('Now beginning intra processing on ' + subj + '...\n') * 5)

        # Set function parameters
        fname_raw = data_path + subj[:5] + '/' + subj
        fname_fwd = data_path + subj[:5] + '/' + subj[:-4] + '-ico-4-fwd.fif'

        # Load data
        raw = fiff.Raw(fname_raw)
        forward_meg = mne.read_forward_solution(fname_fwd)

        # Estimate noise covariance from the raw data
        precov = mne.compute_raw_data_covariance(raw, reject=dict(eog=150e-6))
        write_cov(data_path + subj[:5] + '/' + subj[:-4] + '-cov.fif', precov)

        # Find events from raw file
        events = mne.find_events(raw, stim_channel='STI 014')

        # Write events to file
        mne.write_events(data_path + subj[:5] + '/' + subj[:-4] + '-eve.fif', events)

        # Set up pick list:
        include = []
        exclude = raw.info['bads']
        picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, include=include, exclude=exclude)

        # Read epochs and remove bad epochs
        epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))

        # Average epochs to produce an evoked dataset, then write to disk
        evoked = epochs.average()
        evoked.save(data_path + subj[:5] + '/' + subj[:-4] + '-ave.fif')

        # Regularize noise cov
        cov = mne.cov.regularize(precov, evoked.info, grad=0.05, mag=0.05, eeg=0.1, proj=True)

        # Restrict forward solution as necessary for MEG
        restricted_fwd = mne.fiff.pick_types_forward(forward_meg, meg=True, eeg=False)

        # Make inverse operator
        info = evoked.info
        inverse_operator = make_inverse_operator(info, restricted_fwd, cov, loose=None, depth=0.8)

        # Pull data for averaging later
        epc_array = epochs.get_data()

        # Compute the inverse solution
        inv = apply_inverse(evoked, inverse_operator, lambda2, "dSPM", pick_normal=False)
        inv.save(data_path + subj[:5] + '/' + subj[:-4] + '-inv.fif')

        # picks MEG gradiometers
        picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True, stim=False, exclude=exclude)

        # Compute source power spectral density and save to file
        psd = compute_source_psd(raw, inverse_operator, method='dSPM', lambda2=lambda2, fmin=fmin, fmax=fmax, NFFT=2048)
        psd.save(data_path + subj[:5] + '/' + subj[:-4] + '-psd.fif')
def intra(subj):
    '''
    Performs initial computations within subject and returns average PSD and variance of all epochs.

    Parameters
    ----------
    subj : str
        Subject identifier; files are located under the module-level
        ``data_path`` and ``subjects_dir``.

    Returns
    -------
    (psd_avg, tot_var, n_freqs) when two or more PSD estimates exist;
    otherwise (subj, subj, subj) to signal failure (e.g. all epochs
    rejected).

    NOTE(review): depends on module-level globals (data_path,
    subjects_dir, label_name, event_id, tmin, tmax, lambda2, method,
    fmin, fmax) and mixes tab/space indentation in several places --
    verify before running under Python 3.
    '''
    print('Now beginning intra processing on ' + subj + '...\n') * 5

    # Set function parameters
    fname_label = subjects_dir + '/' + subj + '/' + 'label/%s.label' % label_name
    fname_raw = data_path + subj + '/' + subj + '_rest_raw_sss.fif'
    if os.path.isfile(data_path + subj + '/' + subj + '_rest_raw_sss-ico-4-fwd.fif'): 
        fname_fwd = data_path + subj + '/' + subj + '_rest_raw_sss-ico-4-fwd.fif'
    else: 
        print('Subject ' + subj + ' does not have a ico-4-fwd.fif on file.')	

    if label_name.startswith('lh.'):
    	hemi = 'left'
    elif label_name.startswith('rh.'):
    	hemi = 'right'	
    
    # Load data
    label = mne.read_label(fname_label)
    raw = fiff.Raw(fname_raw)
    forward_meg = mne.read_forward_solution(fname_fwd)
    
    # Estimate noise covariance from the raw data
    cov = mne.compute_raw_data_covariance(raw, reject=dict(eog=150e-6))
    write_cov(data_path + subj + '/' + subj + '-cov.fif', cov)
    
    # Make inverse operator
    info = raw.info
    inverse_operator = make_inverse_operator(info, forward_meg, cov, loose=None, depth=0.8)
    
    # Epoch data into 4s intervals
    events = mne.make_fixed_length_events(raw, 1, start=0, stop=None, 
    		duration=4.)
    
    # Set up pick list: (MEG minus bad channels)
    include = []
    exclude = raw.info['bads']
    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, include=include, exclude=exclude)
    
    # Read epochs and remove bad epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    
    # Pull data for averaging later
    epc_array = epochs.get_data()
    
    # Compute the inverse solution
    inv = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, label=label)
    
    #Need to add a line here to automatically create stc directory within subj
    
    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in inv:
#    	i.save(data_path + subj + '/tmp/' + label_name[3:] + '_rest_raw_sss-oct-6-inv' + epoch_num_str)
	i.save(data_path + subj + '/tmp/' + label_name[3:] + '_rest_raw_sss-ico-4-inv' + epoch_num_str)
    	epoch_num = epoch_num + 1
    	epoch_num_str = str(epoch_num)
    
    # The following is used to remove the empty opposing hemisphere files
    # and then move the files to save into the appropriate directory
    
    if hemi == 'left':
    	filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]	
    	for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
    	keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
    	for f in keepers:
    	    src = f 
            os.rename(data_path + subj + '/tmp/' + src, data_path + subj + '/inv/' + src)
    
    elif hemi == 'right':
    	filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
        for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
    	keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]
        for f in keepers:
            src = f 
            os.rename(data_path + subj + '/tmp/' + src, data_path + subj + '/inv/' + src)
    
    
    # define frequencies of interest
    bandwidth = 4.  # bandwidth of the windows in Hz
    
    # compute source space psd in label
    
    # Note: By using "return_generator=True" stcs will be a generator object
    # instead of a list. This allows us so to iterate without having to
    # keep everything in memory.
    
    psd = compute_source_psd_epochs(epochs, inverse_operator, lambda2=lambda2,
                                     method=method, fmin=fmin, fmax=fmax,
                                     bandwidth=bandwidth, label=label, return_generator=False)
    
    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in psd:
    	i.save(data_path + subj + '/' + 'tmp' + '/' + label_name[3:] + '_dspm_snr-1_PSD'+ epoch_num_str)
    	epoch_num = epoch_num + 1
        epoch_num_str = str(epoch_num)
    
    if hemi == 'left':
        filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]
        for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
    	keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
        for f in keepers:
            src = f
            os.rename(data_path + subj + '/tmp/' + src,data_path + subj + '/psd/' + src)
    
    elif hemi == 'right':
        filelist = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-lh.stc") ]
        for f in filelist:
            os.remove(data_path + subj + '/tmp/' + f)
    	keepers = [ f for f in os.listdir(data_path + subj + '/tmp') if f.endswith("-rh.stc") ]
        for f in keepers:
            src = f
            os.rename(data_path + subj + '/tmp/' + src,data_path + subj + '/psd/' + src)
   
 
    # This code computes the average PSDs of each epoch. Each PSD file is an array of shape N_vertices*N_frequencies. This code averages the PSD value of each vertex together and outputs the average PSD value of each frequency. Then, it averages the PSD values of each epoch, outputting one average PSD value per frequency value, i.e., this is the average across epochs.
    
    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            break
    
        if i == 0:
            psd_avg = np.mean(stc.data, axis=0)
        else:
            psd_avg += np.mean(stc.data, axis=0)
    
    print('Length of psd for subject ' + subj + ' is ' + str(len(psd)) + '.')
    print('Number of epochs for subject ' + subj + ' is ' + str(n_epochs) + '.')
   
    if len(psd) != 0:
        psd_avg /= n_epochs
    
    # Compute variance for each epoch and then variance across epochs 
    
    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            psd_var = np.array()
	    break
        
        if i == 0:
            psd_var = np.var(stc.data, axis=0)
        else:
            psd_var = np.vstack((psd_var,np.var(stc.data, axis=0)))
    
    if len(psd) >= 2:
        tot_var = np.var(psd_var, axis=0)

    if len(psd) <= 1:
	failed_subj = subj
	print(failed_subj + ' failed. No PSD values calculated, likely because all epochs were rejected.')
	return failed_subj, failed_subj, failed_subj

    if len(psd) >= 2:
        return (psd_avg, tot_var, len(psd_avg))
def apply_inverse(fnepo, method='dSPM', event='LLst', min_subject='fsaverage', STC_US='ROI', 
                  condition='LL', save_cov=False):
    '''  
        1. The noise covariance matrix is read from disk or, if
           ``save_cov`` is True, estimated from the epochs' baseline
           and written to disk.
        2. Each epochs file is inverted and morphed into the common
           source space.
        
        Parameter
        ---------
        fnepo: string or list
            The epochs file with ECG, EOG and environmental noise free.
        method: inverse method, 'MNE' or 'dSPM'
        event: string
            The event name related with epochs.
        min_subject: string
            The subject name as the common brain.
        STC_US: string
            The use of the inversion for further analysis.
            'ROI' stands for ROIs definition, 'CAU' stands for
            causality analysis.
        condition: string
            Condition tag used to locate the '<condition>-cov.fif'
            file next to the epochs file.
        save_cov: bool
            If True, compute the covariance from the epochs and write
            it to disk; otherwise read the previously saved one.
    '''
    # Get the default subjects_dir
    from mne.minimum_norm import (apply_inverse, apply_inverse_epochs)
    subjects_dir = os.environ['SUBJECTS_DIR']
    fnlist = get_files_from_list(fnepo)
    # loop across all filenames
    for fname in fnlist:
        meg_path = os.path.split(fname)[0]
        name = os.path.basename(fname)
        stc_name = name[:name.rfind('-epo.fif')]
        subject = name.split('_')[0]
        subject_path = subjects_dir + '/%s' % subject
        min_dir = subjects_dir + '/%s' % min_subject
        fn_trans = meg_path + '/%s-trans.fif' % subject
        fn_src = subject_path + '/bem/%s-ico-4-src.fif' % subject
        fn_bem = subject_path + '/bem/%s-5120-5120-5120-bem-sol.fif' % subject
        snr = 3.0
        lambda2 = 1.0 / snr ** 2
        epochs = mne.read_epochs(fname)
        fn_cov = meg_path + '/%s-cov.fif' % condition
        if save_cov:
            # estimate the noise covariance from the pre-stimulus
            # baseline and cache it for later runs
            noise_cov = mne.compute_covariance(epochs, tmax=0.)
            mne.write_cov(fn_cov, noise_cov)
        else:
            noise_cov = mne.read_cov(fn_cov)
        if STC_US == 'ROI':
            # this path used for ROI definition
            stc_path = min_dir + '/MNE_ROIs/%s' % subject
            evoked = epochs.average()
            set_directory(stc_path)
            noise_cov = mne.cov.regularize(noise_cov, evoked.info,
                                           mag=0.05, grad=0.05, proj=True)
            fwd_ev = mne.make_forward_solution(evoked.info, trans=fn_trans,
                                               src=fn_src, bem=fn_bem,
                                               fname=None, meg=True, eeg=False,
                                               mindist=5.0, n_jobs=2,
                                               overwrite=True)
            fwd_ev = mne.convert_forward_solution(fwd_ev, surf_ori=True)
            forward_meg_ev = mne.pick_types_forward(fwd_ev, meg=True, eeg=False)
            inverse_operator_ev = mne.minimum_norm.make_inverse_operator(
                evoked.info, forward_meg_ev, noise_cov,
                loose=0.2, depth=0.8)
            # Compute inverse solution
            stc = apply_inverse(evoked, inverse_operator_ev, lambda2, method,
                                pick_ori=None)
            # Morph STC into the common (min_subject) space
            stc_morph = mne.morph_data(subject, min_subject, stc, grade=4,
                                       smooth=4)
            stc_morph.save(stc_path + '/%s' % stc_name, ftype='stc')

        elif STC_US == 'CAU':
            stcs_path = min_dir + '/stcs/%s/%s/' % (subject, event)
            reset_directory(stcs_path)
            noise_cov = mne.cov.regularize(noise_cov, epochs.info,
                                           mag=0.05, grad=0.05, proj=True)
            fwd = mne.make_forward_solution(epochs.info, trans=fn_trans,
                                            src=fn_src, bem=fn_bem,
                                            meg=True, eeg=False, mindist=5.0,
                                            n_jobs=2, overwrite=True)
            fwd = mne.convert_forward_solution(fwd, surf_ori=True)
            forward_meg = mne.pick_types_forward(fwd, meg=True, eeg=False)
            inverse_operator = mne.minimum_norm.make_inverse_operator(
                epochs.info, forward_meg, noise_cov, loose=0.2,
                depth=0.8)
            # Compute inverse solution per epoch
            stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2,
                                        method=method, pick_ori='normal')
            for s, stc in enumerate(stcs):
                stc_morph = mne.morph_data(
                    subject, min_subject, stc, grade=4, smooth=4)
                # BUGFIX: the original formatted a 2-tuple
                # (subject, str(s)) into a single '%s' placeholder,
                # which raises TypeError at runtime.  The subject is
                # already encoded in stcs_path, so only the trial index
                # is formatted here.
                stc_morph.save(stcs_path + '/trial%s_fsaverage' % str(s),
                               ftype='stc')
Example #40
0
                               verbose=None)

    #writing and reading it in as surf_ori is set to true in the process
    fwd20 = mne.read_forward_solution(fwd_fname20, surf_ori=True)
    fwd30 = mne.read_forward_solution(fwd_fname30, surf_ori=True)
    fwd40 = mne.read_forward_solution(fwd_fname40, surf_ori=True)

    epochs20 = mne.read_epochs(fname20, proj=True, preload=True, verbose=None)
    epochs30 = mne.read_epochs(fname30, proj=True, preload=True, verbose=None)
    epochs40 = mne.read_epochs(fname40, proj=True, preload=True, verbose=None)

    noise_cov20 = mne.compute_covariance(epochs20, tmax=0., method=['shrunk'])
    noise_cov30 = mne.compute_covariance(epochs30, tmax=0., method=['shrunk'])
    noise_cov40 = mne.compute_covariance(epochs40, tmax=0., method=['shrunk'])

    mne.write_cov(cov20_fname, noise_cov20)
    mne.write_cov(cov30_fname, noise_cov30)
    mne.write_cov(cov40_fname, noise_cov40)

    evoked20 = epochs20.average()
    evoked30 = epochs30.average()
    evoked40 = epochs40.average()

    # make an MEG inverse operator
    info20 = evoked20.info
    info30 = evoked30.info
    info40 = evoked40.info

    inverse_operator20 = make_inverse_operator(info20,
                                               fwd20,
                                               noise_cov30,
Example #41
0
                            reject={'mag': 3e-12},
                            verbose=False)

        # # back to coding
        # proj = mne.read_proj(fname_proj)
        # epochs.add_proj(proj)
        # epochs.apply_proj()

        # plot evoked
        evoked = epochs.average()
        p = evoked.plot(titles={'mag': 'Evoked Response'}, show=False)
        rep_group.add_figs_to_section(
            p, (f"{subject}: Evoked Response " + "to Prime Word"), 'Evoked')

        # plot covariance and whitened evoked
        epochs.load_data().crop(-.2, -.1)
        cov = mne.compute_covariance(epochs, method='auto', verbose=False)
        p = cov.plot(epochs.info, show_svd=0, show=False)[0]
        # comments = ('The covariance matrix is computed on the -200:-100 ms '
        #             'baseline. -100:0 ms is confounded with the eye-mvt.')
        rep_group.add_figs_to_section(p, f"{subject}: Covariance Matrix",
                                      'Covariance Matrix')
        p = evoked.plot_white(cov, show=False)
        rep_group.add_figs_to_section(
            p, f"{subject}: Whitened Evoked to Prime Word", 'Whitened Evoked')

        # save covariance
        mne.write_cov(fname_cov, cov)

rep_group.save(fname_rep_group, overwrite=True, open_browser=False)
    raw_noise.load_data()

    # apply ref channel correction and drop ref channels
    hcp.preprocessing.apply_ref_correction(raw_noise)

    # Note: MNE complains on Python 2.7
    raw_noise.filter(0.50, None, method='iir',
                     iir_params=dict(order=4, ftype='butter'), n_jobs=n_jobs)
    raw_noise.filter(None, 60, method='iir',
                     iir_params=dict(order=4, ftype='butter'), n_jobs=n_jobs)

    ##############################################################################
    # Note that using the empty room noise covariance will inflate the SNR of the
    # evkoked and renders comparisons  to `baseline` rather uninformative.
    noise_cov = mne.compute_raw_covariance(raw_noise, method='empirical')
    mne.write_cov(noise_cov_fname, noise_cov)
else:
    noise_cov = mne.read_cov(noise_cov_fname)

##############################################################################
# Now we assemble the inverse operator, project the data and show the results
# on the `fsaverage` surface, the freesurfer average brain.

if not op.isfile(inv_fname):
    inv_op = mne.minimum_norm.make_inverse_operator(
        evoked.info, fwd, noise_cov=noise_cov)
    write_inverse_operator(inv_fname, inv_op)
else:
    inv_op = read_inverse_operator(inv_fname)

Example #43
0
elif label_name.startswith('rh.'):
	hemi = 'right'	

event_id, tmin, tmax = 1, 0.0, 4.0
snr = 1.0 
lambda2 = 1.0 / snr ** 2
method = "dSPM" 

# Load data
label = mne.read_label(fname_label)
raw = fiff.Raw(fname_raw)
forward_meg = mne.read_forward_solution(fname_fwd)

# Estimate noise covariance from teh raw data
cov = mne.compute_raw_data_covariance(raw, reject=dict(eog=150e-6))
write_cov(data_path + subj + '/' + subj + '-cov.fif', cov)

# Make inverse operator
info = raw.info
inverse_operator = make_inverse_operator(info, forward_meg, cov, loose=None, depth=0.8)

# Epoch data into 4s intervals
events = mne.make_fixed_length_events(raw, 1, start=0, stop=None, 
		duration=4.)

# Set up pick list: (MEG minus bad channels)
include = []
exclude = raw.info['bads']
picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, 
		include=include, exclude=exclude)
Example #44
0
def apply_create_noise_covariance(fname_empty_room, require_filter=False,
                                  require_noise_reducer=False, verbose=None):

    '''
    Creates the noise covariance matrix from an empty room file.

    Parameters
    ----------
    fname_empty_room : String containing the filename
        of the empty room file (must be a fif-file)
        File name should end with -raw.fif in order to have proper output filenames.
    require_filter: bool
        If true, the empty room file is filtered before calculating
        the covariance matrix. (Beware, filter settings are fixed.)
    require_noise_reducer: bool
        If true, a noise reducer is applied on the empty room file.
        The noise reducer frequencies are fixed to 50Hz, 60Hz and
        to frequencies less than 5Hz i.e. the reference channels are filtered to
        these frequency ranges and then signal obtained is removed from
        the empty room raw data. For more information please check the jumeg noise reducer.
    verbose : bool, str, int, or None
        If not None, override default verbose level
        (see mne.verbose).
        default: verbose=None
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    # NOTE(review): compute_raw_data_covariance is the old (deprecated)
    # name of mne.compute_raw_covariance -- confirm the pinned MNE
    # version still exports it.
    from mne import compute_raw_data_covariance as cp_covariance
    from mne import write_cov, pick_types
    from mne.io import Raw
    from jumeg.jumeg_noise_reducer import noise_reducer

    # NOTE(review): get_files_from_list, apply_filter, ext_empty_raw,
    # ext_empty_cov and os are expected as module-level names -- verify
    # they are defined in this module.
    fner = get_files_from_list(fname_empty_room)
    nfiles = len(fner)

    # loop across all filenames
    for ifile in range(nfiles):
        fn_in = fner[ifile]
        print ">>> create noise covariance using file: "
        path_in, name = os.path.split(fn_in)
        print name

        if require_filter:
            print "Filtering with preset settings..."
            # filter empty room raw data
            apply_filter(fn_in, flow=1, fhigh=45, order=4, njobs=4)
            # reconstruct empty room file name accordingly
            fn_in = fn_in[:fn_in.rfind(ext_empty_raw)] + ',fibp1-45-raw.fif'

        if require_noise_reducer:
            # remove power-line (50/60 Hz) and slow (<5 Hz) reference
            # channel contributions from the empty-room recording
            fn_empty_nr = fn_in[:fn_in.rfind(ext_empty_raw)] + ',nr-raw.fif'
            noise_reducer(fn_in, refnotch=50, detrending=False, fnout=fn_empty_nr)
            noise_reducer(fn_empty_nr, refnotch=60, detrending=False, fnout=fn_empty_nr)
            noise_reducer(fn_empty_nr, reflp=5, fnout=fn_empty_nr)
            fn_in = fn_empty_nr

        # file name for saving noise_cov
        fn_out = fn_in[:fn_in.rfind(ext_empty_raw)] + ext_empty_cov

        # read in data
        raw_empty = Raw(fn_in, verbose=verbose)

        # pick MEG channels only
        picks = pick_types(raw_empty.info, meg=True, ref_meg=False, eeg=False,
                           stim=False, eog=False, exclude='bads')

        # calculate noise-covariance matrix
        noise_cov_mat = cp_covariance(raw_empty, picks=picks, verbose=verbose)

        # write noise-covariance matrix to disk
        write_cov(fn_out, noise_cov_mat)
Example #45
0
def test_channel_name_limit(tmp_path, monkeypatch, fname):
    """Test that our remapping works properly.

    Channel names longer than 15 characters cannot be stored in classic
    FIF tags; MNE writes truncated names plus "extended channel info" so
    the full names round-trip.  This test renames all channels to long
    names and verifies the names survive (or are predictably shortened)
    through raw/epochs/cov/evoked/forward/inverse save-load cycles.
    """
    #
    # raw
    #
    # Load either a FIF or a CTF recording; CTF additionally carries
    # reference channels and compensation matrices we must preserve.
    if fname.endswith('fif'):
        raw = read_raw_fif(fname)
        raw.pick_channels(raw.ch_names[:3])
        ref_names = []
        data_names = raw.ch_names
    else:
        assert fname.endswith('.ds')
        raw = read_raw_ctf(fname)
        ref_names = [
            raw.ch_names[pick]
            for pick in pick_types(raw.info, meg=False, ref_meg=True)
        ]
        data_names = raw.ch_names[32:35]
    # Attach a dummy projector referencing two data channels so we can
    # check that projector channel names are remapped too.
    proj = dict(data=np.ones((1, len(data_names))),
                col_names=data_names[:2].copy(),
                row_names=None,
                nrow=1)
    proj = Projection(data=proj,
                      active=False,
                      desc='test',
                      kind=0,
                      explained_var=0.)
    raw.add_proj(proj, remove_existing=True)
    raw.info.normalize_proj()
    raw.pick_channels(data_names + ref_names).crop(0, 2)
    # 16-char prefix guarantees every name exceeds the 15-char FIF limit.
    long_names = ['123456789abcdefg' + name for name in raw.ch_names]
    fname = tmp_path / 'test-raw.fif'
    with catch_logging() as log:
        raw.save(fname)
    log = log.getvalue()
    # Original (short) names must save without truncation warnings.
    assert 'truncated' not in log
    rename = dict(zip(raw.ch_names, long_names))
    long_data_names = [rename[name] for name in data_names]
    long_proj_names = long_data_names[:2]
    raw.rename_channels(rename)
    # Renaming must propagate into compensation matrices (CTF only) ...
    for comp in raw.info['comps']:
        for key in ('row_names', 'col_names'):
            for name in comp['data'][key]:
                assert name in raw.ch_names
    if raw.info['comps']:
        assert raw.compensation_grade == 0
        raw.apply_gradient_compensation(3)
        assert raw.compensation_grade == 3
    # ... and into the projector we added above.
    assert len(raw.info['projs']) == 1
    assert raw.info['projs'][0]['data']['col_names'] == long_proj_names
    raw.info['bads'] = bads = long_data_names[2:3]
    good_long_data_names = [
        name for name in long_data_names if name not in bads
    ]
    with catch_logging() as log:
        raw.save(fname, overwrite=True, verbose=True)
    log = log.getvalue()
    # Saving long names must log the truncation to 15 characters.
    assert 'truncated to 15' in log
    for name in raw.ch_names:
        assert len(name) > 15
    # first read the full way
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'Reading extended channel information' in log
    for ra in (raw, raw_read):
        assert ra.ch_names == long_names
    assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names
    del raw_read
    # next read as if no longer names could be read
    monkeypatch.setattr(meas_info, '_read_extended_ch_info',
                        lambda x, y, z: None)
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'extended' not in log
    if raw.info['comps']:
        assert raw_read.compensation_grade == 3
        raw_read.apply_gradient_compensation(0)
        assert raw_read.compensation_grade == 0
    monkeypatch.setattr(  # restore
        meas_info, '_read_extended_ch_info', _read_extended_ch_info)
    # Without extended info the reader falls back to deterministic
    # truncated-and-numbered names; reconstruct them here to compare.
    short_proj_names = [
        f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}'
        for ni, name in enumerate(long_data_names[:2])
    ]
    assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names
    #
    # epochs
    #
    epochs = Epochs(raw, make_fixed_length_events(raw))
    fname = tmp_path / 'test-epo.fif'
    epochs.save(fname)
    epochs_read = read_epochs(fname)
    for ep in (epochs, epochs_read):
        assert ep.info['ch_names'] == long_names
        assert ep.ch_names == long_names
    del raw, epochs_read
    # cov
    epochs.info['bads'] = []
    cov = compute_covariance(epochs, verbose='error')
    fname = tmp_path / 'test-cov.fif'
    write_cov(fname, cov)
    cov_read = read_cov(fname)
    for co in (cov, cov_read):
        assert co['names'] == long_data_names
        assert co['bads'] == []
    del cov_read

    #
    # evoked
    #
    evoked = epochs.average()
    evoked.info['bads'] = bads
    assert evoked.nave == 1
    fname = tmp_path / 'test-ave.fif'
    evoked.save(fname)
    evoked_read = read_evokeds(fname)[0]
    for ev in (evoked, evoked_read):
        assert ev.ch_names == long_names
        assert ev.info['bads'] == bads
    del evoked_read, epochs

    #
    # forward
    #
    with _record_warnings():  # not enough points for CTF
        sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(
        pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]]))
    fwd = make_forward_solution(evoked.info, None, src, sphere)
    fname = tmp_path / 'temp-fwd.fif'
    write_forward_solution(fname, fwd)
    fwd_read = read_forward_solution(fname)
    for fw in (fwd, fwd_read):
        assert fw['sol']['row_names'] == long_data_names
        assert fw['info']['ch_names'] == long_data_names
        assert fw['info']['bads'] == bads
    del fwd_read

    #
    # inv
    #
    inv = make_inverse_operator(evoked.info, fwd, cov)
    fname = tmp_path / 'test-inv.fif'
    write_inverse_operator(fname, inv)
    inv_read = read_inverse_operator(fname)
    for iv in (inv, inv_read):
        assert iv['info']['ch_names'] == good_long_data_names
    apply_inverse(evoked, inv)  # smoke test
Example #46
0
    def _run_interface(self, runtime):
        """Compute (or reuse) a noise-covariance matrix for the input raw file.

        Behavior:
        * If ``cov_fname_in`` already exists, it is reused as-is.
        * Otherwise, when ``is_epoched`` and ``is_evoked`` are set, the
          covariance is estimated from baseline epochs of the raw file.
        * Otherwise ``cov_fname_in`` is treated as a glob pattern for an
          empty-room recording (.fif or .ds) from which a raw covariance
          is computed.

        Sets ``self.cov_fname_out`` to the resulting covariance file path.
        """
        raw_filename = self.inputs.raw_filename
        cov_fname_in = self.inputs.cov_fname_in
        is_epoched = self.inputs.is_epoched
        is_evoked = self.inputs.is_evoked
        events_id = self.inputs.events_id
        t_min = self.inputs.t_min
        t_max = self.inputs.t_max

        data_path, basename, ext = split_f(raw_filename)

        # Default output name, derived from the raw file's basename.
        self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)

        if not op.isfile(cov_fname_in):
            if is_epoched and is_evoked:
                raw = read_raw_fif(raw_filename)
                events = find_events(raw)

                if not op.isfile(self.cov_fname_out):
                    print(('\n*** COMPUTE COV FROM EPOCHS ***\n' +
                           self.cov_fname_out))

                    reject = create_reject_dict(raw.info)
                    picks = pick_types(raw.info,
                                       meg=True,
                                       ref_meg=False,
                                       exclude='bads')

                    # Baseline period (None, 0) is used for the noise
                    # estimate; tmax=0 below restricts cov to pre-stimulus.
                    epochs = Epochs(raw,
                                    events,
                                    events_id,
                                    t_min,
                                    t_max,
                                    picks=picks,
                                    baseline=(None, 0),
                                    reject=reject)

                    # TODO method='auto'? too long!!!
                    noise_cov = compute_covariance(epochs,
                                                   tmax=0,
                                                   method='diagonal_fixed')
                    write_cov(self.cov_fname_out, noise_cov)
                else:
                    print(('\n *** NOISE cov file %s exists!!! \n' %
                           self.cov_fname_out))
            else:
                # BUG FIX: this message was a bare string expression (a
                # no-op); it was clearly intended to be printed.
                print('\n *** RAW DATA \n')
                for er_fname in glob.glob(op.join(data_path, cov_fname_in)):
                    print(('\n found file name %s  \n' % er_fname))

                try:
                    # NOTE: if the glob above matched nothing, er_fname is
                    # unbound here -> NameError, handled below.
                    if er_fname.rfind('cov.fif') > -1:
                        print("\n *** NOISE cov file %s exists!! "
                              "\n" % er_fname)
                        self.cov_fname_out = er_fname
                    else:
                        if er_fname.rfind('.fif') > -1:
                            er_raw = read_raw_fif(er_fname)
                            er_fname = er_fname.replace('.fif', '-raw-cov.fif')
                        elif er_fname.rfind('.ds') > -1:
                            er_raw = read_raw_ctf(er_fname)
                            er_fname = er_fname.replace('.ds', '-raw-cov.fif')

                        self.cov_fname_out = op.join(data_path, er_fname)

                        if not op.isfile(self.cov_fname_out):
                            reject = create_reject_dict(er_raw.info)
                            picks = pick_types(er_raw.info,
                                               meg=True,
                                               ref_meg=False,
                                               exclude='bads')

                            noise_cov = compute_raw_covariance(er_raw,
                                                               picks=picks,
                                                               reject=reject)
                            write_cov(self.cov_fname_out, noise_cov)
                        else:
                            print(('\n *** NOISE cov file %s exists!!! \n' %
                                   self.cov_fname_out))
                except NameError:
                    sys.exit("No covariance matrix as input!")
                    # TODO creare una matrice diagonale?

        else:
            print(('\n *** NOISE cov file %s exists!!! \n' % cov_fname_in))
            self.cov_fname_out = cov_fname_in

        return runtime
def run_correlation(subjects_dir, subject, volume_spacing, freq):
    """Compute orthogonalized power-envelope correlations for one subject.

    Pipeline (each stage cached on disk and skipped if its file exists):
    source space -> forward model -> ECG/EOG SSP projection -> high-pass
    filter -> noise covariance -> band-pass around `freq` -> LCMV
    beamformer -> Hilbert envelope -> label time courses -> envelope
    correlation, saved as a .npy file.

    Parameters
    ----------
    subjects_dir : str — FreeSurfer subjects directory.
    subject : str — subject identifier.
    volume_spacing : grid spacing used in cached file names and source setup.
    freq : float — center frequency; band of interest is [freq-2, freq+2] Hz.
    """
    num_threads(8)
    frequency = str(freq)
    DATA_DIR = Path(f'{subjects_dir}', f'{subject}', 'mne_files')
    eye_proj1 = f'{DATA_DIR}/{subject}_eyes1-proj.fif.gz'
    eye_proj2 = f'{DATA_DIR}/{subject}_eyes2-proj.fif.gz'
    fname_meg = f'{DATA_DIR}/{subject}_ses-rest_task-rest.fif'
    t1_fname = os.path.join(subjects_dir, subject, 'mri', 'T1.mgz')
    heartbeat_proj = f'{DATA_DIR}/{subject}_heartbeat-proj.fif.gz'
    fwd_fname = f'{DATA_DIR}/{subject}_{volume_spacing}-fwd-label.fif.gz'
    src_fname = f'{DATA_DIR}/{subject}_{volume_spacing}-src-label.fif.gz'
    cov_fname = f'{DATA_DIR}/{subject}-cov_{volume_spacing}-label.fif.gz'
    raw_cov_fname = f'{DATA_DIR}/{subject}-rawcov_{volume_spacing}-label.fif.gz'
    raw_proj = f'{DATA_DIR}/{subject}_ses-rest_task-rest_proj-label.fif.gz'
    source_voxel_coords = f'{DATA_DIR}/{subject}_coords_{volume_spacing}.pkl'
    freesurfer_label = f'{DATA_DIR}/{subject}_freesurferlabel_{volume_spacing}-label.pkl'
    corr_true_file_label = f'{DATA_DIR}/{subject}_corr_ortho_true_{volume_spacing}_{frequency}_label.npy'

    # Only the final correlation file is checked; if it exists, skip all work.
    check_for_files = []
    check_for_files.append(corr_true_file_label)


    file_exist = [f for f in check_for_files if os.path.isfile(f)]
    file_not_exist = list(set(file_exist) ^ set(check_for_files))

    if not file_not_exist:
        print('correlation files exists...')

    else:
        trans = f'/home/senthilp/caesar/camcan/cc700/camcan_coreg-master/trans/{subject}-trans.fif' # The transformation file obtained by coregistration
        file_trans = pathlib.Path(trans)
        file_ss = pathlib.Path(src_fname)
        file_fm = pathlib.Path(fwd_fname)
        file_proj = pathlib.Path(raw_proj)
        file_cov = pathlib.Path(cov_fname)
        file_rawcov = pathlib.Path(raw_cov_fname)
        t1 = nib.load(t1_fname)

        # Coregistration is a hard requirement; bail out if missing.
        if not file_trans.exists():
            print (f'{trans} File doesnt exist...')
            sys.exit(0)

        info = mne.io.read_info(fname_meg)
        # plot_registration(info, trans, subject, subjects_dir)

        # Source space: compute once, then always reload from disk.
        print(file_ss)
        if not file_ss.exists():

            src = compute_SourceSpace(subject, subjects_dir, src_fname, source_voxel_coords, plot=True, ss='volume', 
                                volume_spacing=volume_spacing)

            src.save(src_fname, overwrite=True)
        src = mne.read_source_spaces(src_fname)
        #view_SS_brain(subject, subjects_dir, src)

        # Forward model: compute once, then always reload from disk.
        if not file_fm.exists():
            forward_model(subject, subjects_dir, fname_meg, trans, src, fwd_fname)
        fwd = mne.read_forward_solution(fwd_fname)


       
        # sensitivty_plot(subject, subjects_dir, fwd)
        raw = mne.io.read_raw_fif(fname_meg, verbose='error', preload=True)

        srate = raw.info['sfreq']
        n_time_samps = raw.n_times
        time_secs = raw.times
        ch_names = raw.ch_names
        n_chan = len(ch_names)
        freq_res =  srate/n_time_samps
        print('\n')
        print('-------------------------- Data summary-------------------------------')
        print(f'Subject {subject}')
        print(f"Frequency resolution {freq_res} Hz")
        print(f"The first few channel names are {ch_names[:3]}")
        print(f"The last time sample at {time_secs[-1]} seconds.")
        print(f"Sampling Frequency (No of time points/sec) {srate} Hz")
        print(f"Miscellaneous acquisition info {raw.info['description']}")
        print(f"Bad channels marked during data acquisition {raw.info['bads']}")
        print(f"Convert time in sec ( 60s ) to ingeter index {raw.time_as_index(60)}") # Convert time to indices
        print(f"The raw data object has {n_time_samps} time samples and {n_chan} channels.")
        print('------------------------------------------------------------------------')
        print('\n')
        # raw.plot(n_channels=10, scalings='auto', title='Data from arrays', show=True, block=True)
        # SSP cleanup: ECG + two EOG channels, projectors saved then applied.
        if not file_proj.exists():
            projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2, ch_name='ECG063')
            projs_eog1, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='EOG061')
            projs_eog2, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='EOG062')
            if projs_ecg is not None:
                mne.write_proj(heartbeat_proj, projs_ecg) # Saving projectors
                raw.info['projs'] += projs_ecg
            if projs_eog1 is not None:
                mne.write_proj(eye_proj1, projs_eog1)
                raw.info['projs'] += projs_eog1
            if projs_eog2 is not None:
                mne.write_proj(eye_proj2, projs_eog2)
                raw.info['projs'] += projs_eog2
            raw.apply_proj()
            raw.save(raw_proj, proj=True, overwrite=True)
        print(raw_proj)
        raw_proj_applied = mne.io.read_raw_fif(raw_proj, verbose='error', preload=True)


        print(f'High-pass filtering data at 0.5 Hz')
        raw_proj_applied.filter(l_freq=0.5, h_freq=None, method='iir')

        # Noise covariance is computed on broadband (pre band-pass) data.
        if not file_cov.exists():
            cov = mne.compute_raw_covariance(raw_proj_applied) # compute before band-pass of interest
            mne.write_cov(cov_fname, cov)
        cov = mne.read_cov(cov_fname) 

        # cov.plot(raw.info, proj=True, exclude='bads', show_svd=False
        # raw_proj_applied.crop(tmax=10)
        
        do_epochs = False

        # 4-Hz-wide band centered on the frequency of interest.
        l_freq = freq-2.0
        h_freq = freq+2.0
        print(f'Band pass filter data [{l_freq}, {h_freq}]')
        raw_proj_filtered = raw_proj_applied.filter(l_freq=l_freq, h_freq=h_freq)

        if do_epochs:
            print('Segmenting raw data...')
            events = mne.make_fixed_length_events(raw_proj_filtered, duration=5.)
            raw_proj_filtered = mne.Epochs(raw_proj_filtered, events=events, tmin=0, tmax=5.,
                                            baseline=None, preload=True)
            data_cov = mne.compute_covariance(raw_proj_filtered)         
        else:
            if not file_rawcov.exists():
                data_cov = mne.compute_raw_covariance(raw_proj_filtered)
                mne.write_cov(raw_cov_fname, data_cov)
            else:
                data_cov = mne.read_cov(file_rawcov)

        # LCMV beamformer with NAI weight normalization; analytic signal
        # via Hilbert transform for the envelope computation below.
        filters = make_lcmv(raw_proj_filtered.info, fwd, data_cov, 0.05, cov,
                            pick_ori='max-power', weight_norm='nai')
        raw_proj_filtered_comp = raw_proj_filtered.apply_hilbert()

        if do_epochs:
            stcs = apply_lcmv_epochs(raw_proj_filtered_comp, filters, return_generator=False)
        else:
            stcs = apply_lcmv_raw(raw_proj_filtered_comp, filters, verbose=True)
        
        print('Extracting label time course...')
        atlas = f'{subjects_dir}/{subject}/mri/aparc.a2009s+aseg.mgz'
        label_ts = mne.extract_label_time_course(stcs, atlas, fwd['src'], return_generator=False)
        label_ts = [label_ts]

        # Power Envelope Correlation
        print(f'Computing Power Envelope Correlation for {subject}....Orthogonalize True')

        all_corr = envelope_correlation(label_ts, combine=None, orthogonalize="pairwise",
                    log=True, absolute=True, verbose=None)

        print(f'Correlation saved to {corr_true_file_label}')
        np.save(corr_true_file_label, all_corr)

        del stcs
Example #48
0
import mne
import sys

from mne import compute_covariance

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from my_settings import *

# Script: compute a noise covariance from the baseline of epoched data
# for one subject (subject id passed on the command line) and save it.
# NOTE(review): `reject` below is never used — bad-epoch dropping uses
# `reject_params` from my_settings; confirm whether this dict is stale.
reject = dict(grad=4000e-13,  # T / m (gradiometers)
              mag=4e-12,  # T (magnetometers)
              eeg=180e-6  #
              )


subject = sys.argv[1]

# Load pre-cut epochs and drop those exceeding the amplitude criteria
# defined in my_settings (star-imported above).
epochs = mne.read_epochs(epochs_folder + "%s_trial_start-epo.fif" % subject)
epochs.drop_bad_epochs(reject_params)

# fig = epochs.plot_drop_log(subject=subject, show=False)
# fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)

# Make noise cov
# Baseline-only (tmax=0) covariance via factor analysis, written to disk.
cov = compute_covariance(epochs, tmin=None, tmax=0,
                         method="factor_analysis")
mne.write_cov(mne_folder + "%s-cov.fif" % subject, cov)

Compute the noise matrix
Author: Praveen, Qunxi
"""
import mne, sys, os
import pylab as pl
import numpy as np

# Script (Python 2): band-pass filter a subject's empty-room recording
# and compute its noise-covariance matrix.
# NOTE(review): `pl` and `np` imported above appear unused here.
flow, fhigh = 1.0, 45.0  # band-pass edges in Hz
filter_type = 'butter'
filter_order = 4
njobs = 4

try:
    subject = sys.argv[1]#Get the subject
except:
    print "Please run with input file provided. Exiting"
    sys.exit()
subjects_dir = '/home/qdong/freesurfer/subjects/'
subject_path = subjects_dir + subject#Set the data path of the subject

raw_empty_fname = subject_path + '/MEG/%s_emptyroom.fif' %subject
raw_empty = mne.fiff.Raw(raw_empty_fname, preload=True)
#Filter the empty room data
# IIR Butterworth filter applied in place on the picked channels.
picks_empty = mne.fiff.pick_types(raw_empty.info, meg=True, eeg=False, eog=True, ecg=True, stim=False, exclude='bads') 
raw_empty.filter(flow, fhigh, picks=picks_empty, n_jobs=njobs, method='iir', 
           iir_params={'ftype': filter_type, 'order': filter_order})          
#Get the basename
raw_empty_basename = os.path.splitext(os.path.basename(raw_empty_fname))[0]
# Covariance over the whole (filtered) empty-room recording.
cov = mne.compute_raw_data_covariance(raw_empty, picks=picks_empty)
mne.write_cov(subject_path+'/MEG/%s_cov.fif' %(raw_empty_basename), cov)
Example #50
0
# Making the inverse solution
# Script fragment: compute a noise covariance, build an inverse operator
# from an existing forward solution, and save it.
# NOTE(review): `epochs`, `raw`, `fwd`, and `evoked` are defined earlier
# in the original script (not visible here) — confirm before reuse.
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
                              write_inverse_operator)


## Computing the covariance
# Empty room data goes here
emptyroom = '/Users/aditya/desktop/MEG_TrainingData/SelfPaced_ButtonPress/empty_room-2.fif'
raw_empty_room = mne.io.read_raw_fif(emptyroom)
## Noise covariance compuation - can be done through either the empty room
## calculaion or using the epochs before the stimilus - I got the 
## results I showed you using the epochs method
#noise_cov = mne.compute_raw_covariance(raw_empty_room, tmin=0, tmax=None)
# Pre-stimulus (tmax=0) epochs covariance; best of shrunk/empirical chosen
# by cross-validated log-likelihood.
noise_cov = mne.compute_covariance(
    epochs, tmax=0., method=['shrunk', 'empirical'])
mne.write_cov('fingerpress-cov.fif', noise_cov)
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)


## Pick specific types of fwd operator - not really needed
fwd = mne.pick_types_forward(fwd, meg=True , eeg=True)
info = evoked.info
### Computing inverse operator
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
                                         loose=0.2, depth=0.8)
## Writing inverse operator
write_inverse_operator('fingerpress-inv.fif',
                       inverse_operator)
### Computing inverse solution
method = "dSPM"
snr = 3.
Example #51
0
def apply_create_noise_covariance_data(fname,raw=None,do_filter=True,filter_parameter=None,verbose=False,do_run=True,save=True ):
    '''
    Creates the noise covariance matrix from an empty room file.

    Parameters
    ----------
    fname : string 
        containing the filename of the empty room file (must be a fif-file)
    raw : mne Raw object or None
        already-loaded raw data matching fname (optional)
    do_filter: bool
        If true, the empty room file is filtered before calculating
        the covariance matrix. (Beware, filter settings are fixed.)
    filter_parameter: dict
        dict with filter parameter and flags; its .picks and .do_run
        attributes are overwritten by this function
    do_run: bool
        execute this
    save: bool
        save output file
    verbose : bool, str, int, or None
        If not None, override default verbose level
        (see mne.verbose).
        default: verbose=None
        
    RETURN
    ---------
    full empty-room covariance filename as string, or None if the
    empty-room file could not be located/loaded
    '''

    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from mne import compute_raw_data_covariance as cp_covariance
    from mne import write_cov
    from mne.io import Raw
    from mne import pick_types
    import os

    mne.verbose = verbose

    # Locate (and optionally preload) the empty-room recording that
    # belongs to fname; bail out silently on any failure.
    # NOTE(review): bare except swallows all errors and returns None.
    try:
        (fname_empty_room,raw_empty) = jumeg_base.get_empty_room_fif(fname=fname,raw=raw,preload=do_run)
    except:
        return

    if raw_empty :
   #--- picks meg channels
       filter_parameter.picks = jumeg_base.pick_meg_nobads(raw_empty)
  
   #--- filter or get filter name
       filter_parameter.do_run = do_filter

       if do_filter :
          print "Filtering empty room fif with noise variance settings...\n"
          (fname_empty_room,raw_empty) = apply_filter_data(fname_empty_room,raw=raw_empty,**filter_parameter)
    

   #--- update file name for saving noise_cov
    # Derived by cutting at the first '-' of the (possibly filtered) name.
    fname_empty_room_cov = fname_empty_room.split('-')[0] + ',empty-cov.fif'
  
   #--- calc nois-covariance matrix
    if do_run :
       noise_cov_mat = cp_covariance(raw_empty,picks=filter_parameter.picks,verbose=verbose)
   #--- write noise-covariance matrix to disk
       if save :
          write_cov( fname_empty_room_cov, noise_cov_mat)
    
    return fname_empty_room_cov
def intra(subj):
    """
    Performs initial computations within subject and returns average PSD and variance of all epochs.

    Pipeline: noise covariance from raw -> epochs -> evoked -> inverse
    operator -> per-epoch inverse solutions and source-space PSDs (saved
    under the subject's directory) -> average and variance of the PSDs.

    Returns (psd_avg, tot_var, len(psd_avg)) on success, or the subject
    id three times if no epochs survived rejection.  Relies on module
    globals: subjects_dir, data_path, label_name, list_num, event_id,
    tmin, tmax, lambda2, method, fmin, fmax.
    """
    print ("Now beginning intra processing on " + subj + "...\n") * 5

    # Set function parameters
    fname_label = subjects_dir + "/" + subj + "/" + "label/%s.label" % label_name
    # NOTE(review): fname_raw points at the *forward* file (same path as
    # fname_fwd below) — confirm this is intentional and not a typo.
    fname_raw = data_path + subj + "/" + subj + "_list" + list_num + "_raw_sss-ico-4-fwd.fif"

    if os.path.isfile(data_path + subj + "/" + subj + "_list" + list_num + "_raw_sss-ico-4-fwd.fif"):
        fname_fwd = data_path + subj + "/" + subj + "_list" + list_num + "_raw_sss-ico-4-fwd.fif"
    else:
        print ("Subject " + subj + " does not have a ico-4-fwd.fif on file.")

    # Hemisphere is encoded in the label-file prefix.
    if label_name.startswith("lh."):
        hemi = "left"
    elif label_name.startswith("rh."):
        hemi = "right"

    # Load data
    label = mne.read_label(fname_label)
    raw = fiff.Raw(fname_raw)
    forward_meg = mne.read_forward_solution(fname_fwd)

    # Estimate noise covariance from the raw data.
    precov = mne.compute_raw_data_covariance(raw, reject=dict(eog=150e-6))
    write_cov(data_path + subj + "/" + subj + "-cov.fif", precov)

    # Find events from raw file
    events = mne.find_events(raw, stim_channel="STI 014")

    # Set up pick list: (MEG minus bad channels)
    include = []
    exclude = raw.info["bads"]
    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, include=include, exclude=exclude)

    # Read epochs and remove bad epochs
    epochs = mne.Epochs(
        raw,
        events,
        event_id,
        tmin,
        tmax,
        proj=True,
        picks=picks,
        baseline=(None, 0),
        preload=True,
        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6),
    )

    # Average epochs and get an evoked dataset. Save to disk.
    evoked = epochs.average()
    evoked.save(data_path + subj + "/" + subj + "_list" + list_num + "_rest_raw_sss-ave.fif")

    # Regularize noise cov
    cov = mne.cov.regularize(precov, evoked.info, grad=4000e-13, mag=4e-12, eog=150e-6, proj=True)

    # Make inverse operator
    info = evoked.info
    inverse_operator = make_inverse_operator(info, forward_meg, cov, loose=None, depth=0.8)

    # Pull data for averaging later
    epc_array = epochs.get_data()

    # Compute the inverse solution
    inv = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, label=label)

    # Need to add a line here to automatically create stc directory within subj

    # Save one source estimate per epoch into the subject's tmp directory.
    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in inv:
        i.save(data_path + subj + "/tmp/" + label_name[3:] + "_rest_raw_sss-ico-4-inv" + epoch_num_str)
        epoch_num = epoch_num + 1
        epoch_num_str = str(epoch_num)

    # The following is used to remove the empty opposing hemisphere files
    # and then move the files to save into the appropriate directory

    if hemi == "left":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-rh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-lh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src, data_path + subj + "/inv/" + src)

    elif hemi == "right":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-lh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-rh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src, data_path + subj + "/inv/" + src)

    # define frequencies of interest
    bandwidth = 4.0  # bandwidth of the windows in Hz

    # compute source space psd in label

    # Note: By using "return_generator=True" stcs will be a generator object
    # instead of a list. This allows us so to iterate without having to
    # keep everything in memory.

    psd = compute_source_psd_epochs(
        epochs,
        inverse_operator,
        lambda2=lambda2,
        method=method,
        fmin=fmin,
        fmax=fmax,
        bandwidth=bandwidth,
        label=label,
        return_generator=False,
    )

    # Save one PSD source estimate per epoch.
    epoch_num = 1
    epoch_num_str = str(epoch_num)
    for i in psd:
        i.save(data_path + subj + "/" + "tmp" + "/" + label_name[3:] + "_dspm_snr-1_PSD" + epoch_num_str)
        epoch_num = epoch_num + 1
        epoch_num_str = str(epoch_num)

    if hemi == "left":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-rh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-lh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src, data_path + subj + "/psd/" + src)

    elif hemi == "right":
        filelist = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-lh.stc")]
        for f in filelist:
            os.remove(data_path + subj + "/tmp/" + f)
        keepers = [f for f in os.listdir(data_path + subj + "/tmp") if f.endswith("-rh.stc")]
        for f in keepers:
            src = f
            os.rename(data_path + subj + "/tmp/" + src, data_path + subj + "/psd/" + src)

    # This code computes the average PSDs of each epoch. Each PSD file is an array of shape N_vertices*N_frequencies. This code averages the PSD value of each vertex together and outputs the average PSD value of each frequency. Then, it averages the PSD values of each epoch, outputting one average PSD value per frequency value, i.e., this is the average across epochs.

    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            break

        if i == 0:
            psd_avg = np.mean(stc.data, axis=0)
        else:
            psd_avg += np.mean(stc.data, axis=0)

    print ("Length of psd for subject " + subj + " is " + str(len(psd)) + ".")
    print ("Number of epochs for subject " + subj + " is " + str(n_epochs) + ".")

    if len(psd) != 0:
        psd_avg /= n_epochs

    # Compute variance for each epoch and then variance across epochs

    n_epochs = len(epc_array)
    for i, stc in enumerate(psd):
        if i >= n_epochs:
            # BUG FIX: np.array() without arguments raises TypeError;
            # an empty array was clearly intended here.
            psd_var = np.array([])
            break

        if i == 0:
            psd_var = np.var(stc.data, axis=0)
        else:
            psd_var = np.vstack((psd_var, np.var(stc.data, axis=0)))

    if len(psd) >= 2:
        tot_var = np.var(psd_var, axis=0)

    if len(psd) <= 1:
        failed_subj = subj
        print (failed_subj + " failed. No PSD values calculated, likely because all epochs were rejected.")
        return failed_subj, failed_subj, failed_subj

    if len(psd) >= 2:
        return (psd_avg, tot_var, len(psd_avg))