Example No. 1
def test_bti(_bids_validate):
    """Test functionality of the write_raw_bids conversion for BTi data."""
    bids_root = _TempDir()
    data_path = op.join(base_path, 'bti', 'tests', 'data')
    raw_fname = op.join(data_path, 'test_pdf_linux')
    config_fname = op.join(data_path, 'test_config_linux')
    headshape_fname = op.join(data_path, 'test_hs_linux')

    raw = mne.io.read_raw_bti(raw_fname, config_fname=config_fname,
                              head_shape_fname=headshape_fname)

    # write the BIDS dataset description, then write BIDS files
    make_dataset_description(bids_root, name="BTi data")
    write_raw_bids(raw, bids_basename, bids_root, verbose=True)

    assert op.exists(op.join(bids_root, 'participants.tsv'))
    _bids_validate(bids_root)

    raw = read_raw_bids(bids_basename + '_meg', bids_root)

    with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
        read_raw_bids(bids_basename + '_meg', bids_root,
                      extra_params=dict(foo='bar'))

    if check_version('mne', '0.20'):
        # test anonymize
        raw = mne.io.read_raw_bti(raw_fname, config_fname=config_fname,
                                  head_shape_fname=headshape_fname)
        with pytest.warns(UserWarning,
                          match='Converting to FIF for anonymization'):
            output_path = _test_anonymize(raw, bids_basename)
        _bids_validate(output_path)
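For orientation, a minimal sketch of what make_dataset_description writes to disk (field names follow the BIDS specification; exact defaults depend on the MNE-BIDS version, and the path below is a placeholder):

import json
import os
import os.path as op

from mne_bids import make_dataset_description

bids_demo_root = '/tmp/bids_demo'  # hypothetical dataset root
os.makedirs(bids_demo_root, exist_ok=True)
make_dataset_description(bids_demo_root, name="BTi data")

# dataset_description.json now holds at least the required BIDS fields.
with open(op.join(bids_demo_root, 'dataset_description.json')) as f:
    desc = json.load(f)
print(desc['Name'])         # -> "BTi data"
print(desc['BIDSVersion'])  # version string written by MNE-BIDS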
Example No. 2
    def create_dataset_description(self, fpath, **dataset_description_kwargs):
        """
        Create a new Bids format dataset.

        Parameters
        ----------
        fpath : Union[str, os.PathLike]
            The path of this new directory
        dataset_description_kwargs : dict
            Other arguments used by make_dataset_description

        """
        # create dataset description
        mne_bids.make_dataset_description(fpath, **dataset_description_kwargs)
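A hypothetical usage sketch of this wrapper, assuming it belongs to some writer class (DatasetWriter below is a stand-in defined only for this illustration):

import os

import mne_bids


class DatasetWriter:
    """Stand-in for the class that owns create_dataset_description."""

    def create_dataset_description(self, fpath, **dataset_description_kwargs):
        # forward everything to MNE-BIDS
        mne_bids.make_dataset_description(fpath, **dataset_description_kwargs)


os.makedirs('/tmp/bids_demo', exist_ok=True)  # hypothetical path
DatasetWriter().create_dataset_description(
    '/tmp/bids_demo',
    name="My dataset",
    authors=["A. Author", "B. Author"],
)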
Example No. 3
def bids_init_dataset(data_root_path='',
                      dataset_name=None,
                      dataset_description=dict(),
                      readme='',
                      changes=''):
    """Create directories and files missing to follow bids.

    Files and folders already created will be left untouched.
    This is an utility to initialize all files that should be present
    according to the standard. Particularly those that should be filled
    manually like README files.

    dataset_description.json : interactif mode to fill in. Or laer on if the
    user wants. By default :
    Name: dataset_name
    BidsVersion: 1.0.0

    README is quite free as a file

    CHANGES follow CPAN standards

    """

    # CHECK DATASET REPOSITORY
    dataset_name, dataset_name_path = get_bids_default_path(
        data_root_path, dataset_name)
    if not os.path.exists(dataset_name_path):
        os.makedirs(dataset_name_path)

    # CHECK dataset_description.json FILE
    description_file_exists = os.path.exists(
        os.path.join(dataset_name_path, 'dataset_description.json'))
    overwrite_datadesc_file = True
    if description_file_exists:
        overwrite_datadesc_file = yes_no(
            '\nA dataset_description.json already exists, do you want to overwrite it? (y/n) '
        )
    if overwrite_datadesc_file or not description_file_exists:
        data_descrip = yes_no(
            '\nDo you want to create or overwrite the dataset_description.json? (y/n) '
        )
        if data_descrip:
            print(
                '\nIf you do not know all the information, skip it and edit the file later.'
            )
            name = input("\nType the name of this BIDS dataset: ").lower()
            authors = input(
                "\nA list of authors like ['a', 'b', 'c']: ").lower()
            acknowledgements = input(
                "\nA list of acknowledgements like ['a', 'b', 'c']: ").lower()
            how_to_acknowledge = input(
                "\nEither a str describing how to acknowledge this dataset OR a list of publications that should be cited: "
            )
            funding = input(
                "\nList of sources of funding (e.g., grant numbers). Must be a list of strings or a single comma-separated string like ['a', 'b', 'c']: "
            )
            references_and_links = input(
                "\nList of references to publications that contain information on the dataset, or links. Must be a list of strings or a single comma-separated string like ['a', 'b', 'c']: "
            )
            doi = input('\nThe DOI for the dataset: ')
            make_dataset_description(dataset_name_path,
                                     name=name,
                                     data_license=None,
                                     authors=authors,
                                     acknowledgements=str(acknowledgements),
                                     how_to_acknowledge=how_to_acknowledge,
                                     funding=str(funding),
                                     references_and_links=references_and_links,
                                     doi=doi,
                                     verbose=False)
        else:
            print(
                "\nA default dataset_description.json has been created; you may update it later on."
            )
            make_dataset_description(dataset_name_path, name=dataset_name)

    # CHECK CHANGES FILE / TEXT FILE CPAN CONVENTION
    changes_file = os.path.join(dataset_name_path, 'CHANGES')
    changes_file_exist = os.path.exists(changes_file)
    overwrite_changes_file = True
    if changes_file_exist:
        overwrite_changes_file = yes_no(
            '\nA CHANGES file already exists, do you want to overwrite it? (y/n) '
        )

    if overwrite_changes_file or not changes_file_exist:
        changes = yes_no(
            '\nDo you want to create/overwrite the CHANGES file? (y/n) ')
        if changes:
            changes_input = input("Type your text: ")
            with open(changes_file, 'w', encoding="utf-8") as fid:
                fid.write(str(changes_input))

    # CHECK README FILE / TEXT FILE
    readme_file = os.path.join(dataset_name_path, 'README')
    readme_file_exist = os.path.exists(readme_file)
    overwrite_readme_file = True
    if readme_file_exist:
        overwrite_readme_file = yes_no(
            '\nA README file already exists, do you want to overwrite it? (y/n) ')

    if overwrite_readme_file or not readme_file_exist:
        readme = yes_no(
            '\nDo you want to create/complete the README file? (y/n) ')
        if not readme:
            readme_input = "TO BE COMPLETED BY THE USER"
        else:
            readme_input = input("Type your text: ")
        with open(readme_file, 'w', encoding="utf-8") as fid:
            fid.write(readme_input)
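For scripted, non-interactive initialization, a minimal sketch that creates the same three pieces directly (paths are placeholders, and existing files are left untouched, as in the function above):

import os

from mne_bids import make_dataset_description

root = '/tmp/bids_demo'  # hypothetical dataset root
os.makedirs(root, exist_ok=True)

# dataset_description.json with the required fields
make_dataset_description(root, name='My dataset')

# Free-form README and CPAN-style CHANGES, left for the user to complete.
for fname, text in [('README', 'TO BE COMPLETED BY THE USER\n'),
                    ('CHANGES', '1.0.0 1970-01-01\n - Initial release.\n')]:
    path = os.path.join(root, fname)
    if not os.path.exists(path):
        with open(path, 'w', encoding='utf-8') as fid:
            fid.write(text)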
Example No. 4
def test_edf(_bids_validate):
    """Test write_raw_bids conversion for European Data Format data."""
    bids_root = _TempDir()
    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')

    raw = mne.io.read_raw_edf(raw_fname)

    raw.rename_channels({raw.info['ch_names'][0]: 'EOG'})
    raw.info['chs'][0]['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
    raw.rename_channels({raw.info['ch_names'][1]: 'EMG'})
    raw.set_channel_types({'EMG': 'emg'})

    # test that an existing dataset description (with authors set) is
    # preserved by a non-overwriting write
    make_dataset_description(bids_root, name="test",
                             authors=["test1", "test2"])
    write_raw_bids(raw, bids_basename, bids_root, overwrite=False)
    dataset_description_fpath = op.join(bids_root, "dataset_description.json")
    with open(dataset_description_fpath, 'r') as f:
        dataset_description_json = json.load(f)
        assert dataset_description_json["Authors"] == ["test1", "test2"]

    # write from fresh start w/ overwrite
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    # after overwrite, the dataset description is reset to the MNE-BIDS default
    with open(dataset_description_fpath, 'r') as f:
        dataset_description_json = json.load(f)
        assert dataset_description_json["Authors"] == ["MNE-BIDS"]

    # Reading the file back should raise an error, because we renamed channels
    # in `raw` and used that information to write a channels.tsv. Yet, we
    # saved the unchanged `raw` in the BIDS folder, so channels in the TSV and
    # in raw clash
    with pytest.raises(RuntimeError, match='Channels do not correspond'):
        read_raw_bids(bids_basename + '_eeg.edf', bids_root)

    with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
        read_raw_bids(bids_basename + '_eeg.edf', bids_root,
                      extra_params=dict(foo='bar'))

    bids_fname = bids_basename.replace('run-01', 'run-%s' % run2)
    write_raw_bids(raw, bids_fname, bids_root, overwrite=True)
    _bids_validate(bids_root)

    # ensure there is an EMG channel in the channels.tsv:
    channels_tsv = make_bids_basename(
        subject=subject_id, session=session_id, task=task, run=run,
        suffix='channels.tsv', acquisition=acq,
        prefix=op.join(bids_root, 'sub-01', 'ses-01', 'eeg'))
    data = _from_tsv(channels_tsv)
    assert 'ElectroMyoGram' in data['description']

    # check that the scans list contains two scans
    scans_tsv = make_bids_basename(
        subject=subject_id, session=session_id, suffix='scans.tsv',
        prefix=op.join(bids_root, 'sub-01', 'ses-01'))
    data = _from_tsv(scans_tsv)
    assert len(list(data.values())[0]) == 2

    # Also cover iEEG
    # We use the same data and pretend that eeg channels are ecog
    raw.set_channel_types({raw.ch_names[i]: 'ecog'
                           for i in mne.pick_types(raw.info, eeg=True)})
    bids_root = _TempDir()
    write_raw_bids(raw, bids_basename, bids_root)
    _bids_validate(bids_root)

    # test anonymize and convert
    if check_version('mne', '0.20') and check_version('pybv', '0.2.0'):
        raw = mne.io.read_raw_edf(raw_fname)
        output_path = _test_anonymize(raw, bids_basename)
        _bids_validate(output_path)
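The authors assertion above checks that write_raw_bids with overwrite=False keeps the description already on disk. A minimal sketch of writing the description and reading it back, outside the test scaffolding (the demo root is a placeholder):

import json
import os
import os.path as op

from mne_bids import make_dataset_description

bids_demo_root = '/tmp/bids_demo'  # hypothetical dataset root
os.makedirs(bids_demo_root, exist_ok=True)
make_dataset_description(bids_demo_root, name="test",
                         authors=["test1", "test2"])

# mirror the assertion from the test above
with open(op.join(bids_demo_root, 'dataset_description.json')) as f:
    assert json.load(f)["Authors"] == ["test1", "test2"]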
Example No. 5
def _setup_main_eeg():
    """
    We copy an explicit example from MNE-BIDS and modify small details.

    Specifically, we will follow these steps:

    1. Download repository, and use the data in example directory:
        data/
            bids_layout/
                sourcedata/
                derivatives/
                sub-XXX/
                sub-XXY/
                ...

    2. Load the source raw data, extract information, preprocess certain things
    and save in a new BIDS directory

    3. Check the result and compare it with the standard
    """
    ###############################################################################
    # Step 1: Prepare the data
    # -------------------------
    #
    # First, we need some data to work with. We will use some sample simulated scalp and
    # iEEG data. For each subject, there are "seizure" events. For the present example, we will
    # show how to format the data for two modalities to comply with the Brain Imaging Data Structure
    # (`BIDS <http://bids.neuroimaging.io/>`_).
    #
    # The data are in the `European Data Format <https://www.edfplus.info/>`_
    # ('.edf'), which is good for us because, next to the BrainVision format,
    # EDF is one of the recommended file formats for EEG in BIDS. However,
    # apart from the data format, we need to build a directory structure and
    # supply metadata files to properly *bidsify* this data.
    #
    # Conveniently, there is already a data loading function available with
    # MNE-Python:

    DATADIR = os.getcwd()
    bids_root = os.path.join(DATADIR, "./data/bids_layout/")
    RUN_IEEG = False  # either run scalp, or iEEG
    line_freq = (
        60  # user should set the line frequency, since MNE-BIDS defaults to 50 Hz
    )
    test_subjectid = "0001"
    test_sessionid = "seizure"
    test_task = "monitor"
    authors = ["Adam Li", "Patrick Myers"]

    if RUN_IEEG:
        edf_fpaths = [
            os.path.join(bids_root, "sourcedata", "ieeg_ecog_test.edf")
        ]
        modality = "ecog"
    else:
        edf_fpath1 = os.path.join(bids_root, "sourcedata", "scalp_test.edf")
        edf_fpath2 = os.path.join(bids_root, "sourcedata", "scalp_test_2.edf")
        edf_fpaths = [edf_fpath1, edf_fpath2]
        modality = "eeg"
    ###############################################################################
    # Let's see whether the data has been downloaded using a quick visualization
    # of the directory tree.

    data_dir = os.path.join(bids_root, "sourcedata")
    print_dir_tree(data_dir)

    ###############################################################################
    # Step 2: Formatting as BIDS
    # --------------------------
    #
    # Let's start by formatting a single subject. We are reading the data using
    # MNE-Python's io module and the :func:`read_raw_edf` function. Note that we
    # must use `preload=False`, the default in MNE-Python. It prevents the data
    # from being loaded and modified when converting to BIDS.
    #
    # Note that kind and acquisition currently almost stand for the same thing.
    # Please read the BIDS docs to get acquainted.

    # create the BIDS directory structure
    if not os.path.exists(bids_root):
        print("Making bids root directory.")
        make_bids_folders(
            output_path=bids_root,
            session=test_sessionid,
            subject=test_subjectid,
            kind=modality,
        )

    for i, edf_fpath in enumerate(edf_fpaths):
        """ Write data file into BIDS format """
        test_runid = i

        # add a bids run
        bids_basename = make_bids_basename(
            subject=test_subjectid,
            session=test_sessionid,
            task=test_task,
            run=test_runid,
            acquisition=modality,
        )
        print("Loading filepath: ", edf_fpath)
        print("Writing to bidsroot: ", bids_root)
        print("Bids basenmae; ", bids_basename)
        # call bidsbuilder pipeline
        bids_deriv_root = BidsConverter.convert_to_bids(
            edf_fpath=edf_fpath,
            bids_root=bids_root,
            bids_basename=bids_basename,
            line_freq=line_freq,
            overwrite=True,
        )

    # write_raw_bids currently overwrites the description written by
    # make_dataset_description
    # TODO: move this to the top once the upstream PR is merged.
    make_dataset_description(os.path.join(bids_root),
                             name="test_bids_dataset",
                             authors=authors)

    ###############################################################################
    # What does our fresh BIDS directory look like?
    print_dir_tree(bids_root)

    ###############################################################################
    # Step 3: Check and compare and read in the data
    # ------------------------------------------------------------
    # Now we have written our BIDS directory.

    if modality in ["ecog", "seeg"]:
        kind = "ieeg"
    elif modality == "eeg":
        kind = "eeg"
    bids_fname = bids_basename + f"_{kind}.edf"

    print("Trying to read from: ", bids_fname)

    # use MNE-BIDS function to read in the data
    raw = read_raw_bids(bids_fname, bids_root)

    print("Read successfully using MNE-BIDS")

    # use BidsRun object, which just simply adds additional functionality
    # bidsrun = BidsRun(tmp_bids_root, bids_fname)
    # raw = bidsrun.load_data()

    print(raw)

    ###############################################################################
    # Step 4: Run BIDS-Validate
    # ------------------------------------------------------------
    # Now we have written our BIDS directory.
    # save a fif copy and reload it
    # TODO: re-check when pybids is updated.
    # currently, use the https://bids-standard.github.io/bids-validator/ and see that it is verified
    params = _parse_bids_filename(bids_basename, True)
    print(raw.info)
    fif_data_path = make_bids_folders(
        subject=params["sub"],
        session=params["ses"],
        kind=kind,
        output_path=bids_root,
        overwrite=False,
        verbose=True,
    )
    rel_bids_root = f"/sub-0001/ses-seizure/{kind}/"
    path = os.path.join(rel_bids_root, bids_fname)
    is_valid = BIDSValidator().is_bids(path)

    print(BIDSValidator().is_top_level(path))
    print(BIDSValidator().is_associated_data(path))
    print(BIDSValidator().is_session_level(path))
    print(BIDSValidator().is_subject_level(path))
    print(BIDSValidator().is_phenotypic(path))
    print(BIDSValidator().is_file(path))

    print("checked filepath: ", os.path.join(rel_bids_root, bids_fname))
    print(is_valid)
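Note that BIDSValidator checks names against paths relative to the dataset root, with a leading slash; a minimal stand-alone sketch (the filename below is a placeholder built from the entities used above):

from bids_validator import BIDSValidator

validator = BIDSValidator()
# Paths are interpreted relative to the BIDS root and must start with '/'.
path = ('/sub-0001/ses-seizure/eeg/'
        'sub-0001_ses-seizure_task-monitor_acq-eeg_run-01_eeg.edf')
print(validator.is_bids(path))  # True when the name matches the specification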
Example No. 6
    print("Loading filepath: ", edf_fpath)
    print("Writing to bidsroot: ", bids_root)
    print("Bids basenmae; ", bids_basename)
    # call bidsbuilder pipeline
    bids_deriv_root = BidsConverter.convert_to_bids(
        edf_fpath=edf_fpath,
        bids_root=bids_root,
        bids_basename=bids_basename,
        line_freq=line_freq,
        overwrite=True,
    )

# write_raw_bids currently overwrites the description written by
# make_dataset_description
# TODO: move this to the top once the upstream PR is merged.
make_dataset_description(os.path.join(bids_root),
                         name="test_bids_dataset",
                         authors=authors)

###############################################################################
# What does our fresh BIDS directory look like?
print_dir_tree(bids_root)

###############################################################################
# Step 3: Check and compare and read in the data
# ------------------------------------------------------------
# Now we have written our BIDS directory.

if modality in ["ecog", "seeg"]:
    kind = "ieeg"
elif modality == "eeg":
    kind = "eeg"
Example No. 7
#%%
import os
import re

import mne
import pandas as pd
from mne_bids import BIDSPath, write_raw_bids, make_dataset_description

# `subject_file` (the per-subject raw-data directory), `settings` and
# `event_test` are defined elsewhere in the surrounding project.
raw_files = os.listdir(subject_file)
raw_file_list = []
marker_lists = pd.read_csv('Behavioral/all_raw_marker.csv')['trigger'].to_list()
event_ids = event_test.raw_marker_create(marker_lists)

for raw_file in raw_files:
    if os.path.splitext(raw_file)[1] == '.vhdr':
        raw_file_list.append(raw_file)
#%%
for q in range(len(raw_file_list)):

    raw = mne.io.read_raw_brainvision(os.path.join(subject_file, raw_file_list[q]),
                                      preload=False)
    # if len(raw.info['ch_names'])>60:
    #     raw.info['bads']=['BIP1','BIP2','BIP3','BIP4','BIP5','BIP6','BIP7','BIP8','BIP9','BIP10','BIP11','BIP12','BIP13','BIP14',
    #                  'BIP15','BIP16','BIP17','BIP18','BIP19','BIP20','BIP21','BIP22','BIP23','BIP24']
    # raw.drop_channels(ch_names=raw.info['bads'])
    raw.info['line_freq'] = 50
    raw.set_channel_types(mapping={'EOG': 'eog'})
    # montage=mne.channels.make_standard_montage('standard_1020')
    # raw.set_montage(montage)
    subject_id = re.findall(r'(\d+)', raw_file_list[q])[0]
    bids_path = BIDSPath(subject=subject_id, task=settings.task,
                         root=settings.bids_root_path_init(),
                         session=settings.session, datatype='eeg', suffix='eeg')
    # events, events_id = mne.events_from_annotations(raw, event_id=event_ids)
    write_raw_bids(raw, bids_path, overwrite=True)

make_dataset_description(path=settings.bids_root_path_init(),
                         name=settings.project_name,
                         authors=settings.authors,
                         acknowledgements=settings.acknowledgements,
                         funding=settings.funding_support,
                         overwrite=True)
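
To spot-check the conversion, one subject can be read back through the same BIDSPath API; a sketch (the subject/task/session values and root are placeholders for whatever `settings` provides):

from mne_bids import BIDSPath, read_raw_bids

demo_path = BIDSPath(subject='01', session='01', task='mytask',
                     datatype='eeg', suffix='eeg', root='/tmp/bids_demo')
raw_back = read_raw_bids(demo_path)  # recent MNE-BIDS accepts a BIDSPath here
print(raw_back.info['line_freq'])    # should be the 50 Hz set before writing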

Example No. 8
def main():
    #splitter = "\\" if platform.system().lower().startswith("win") else "/"

    parser = argparse.ArgumentParser()
    parser.add_argument("--subject",
                        action="store",
                        type=str,
                        required=False,
                        help="Name of the Patient/ Subject to process")
    parser.add_argument("--bidsroot",
                        action="store",
                        type=str,
                        required=False,
                        help="Specify a different BIDS root directory to use")
    parser.add_argument("--inputfolder",
                        action="store",
                        type=str,
                        required=False,
                        help="Specify a different data input folder")
    parser.add_argument(
        "--fsonly",
        action="store",
        type=str,
        required=False,
        help="Use --fsonly true if you only want to do a freesurfer segmentation"
    )  # do only freesurfer segmentation
    parser.add_argument("--openmp",
                        action="store",
                        type=str,
                        required=False,
                        help="Specify how many jobs/ processor cores to use")
    parser.add_argument("--srcspacing",
                        action="store",
                        type=str,
                        required=False,
                        help="Source spacing: \
                            -defaults to ico4 --> 2562 Source points \
                            || other options: \
                            oct5 --> 1026 Source points \
                            || oct6 --> 4098 Source points \
                            || ico5 --> 10242 Source points")
    parser.add_argument("--extras",
                        action="store",
                        type=str,
                        required=False,
                        help="Specify directory containing extras for report")

    args = parser.parse_args()

    # additional arguments
    if args.bidsroot:
        bids_root = args.bidsroot
    else:
        bids_root = os.environ.get("BIDS_ROOT")

    if args.openmp:
        n_jobs = openmp = int(args.openmp)
    else:
        # int(None) would raise, so check the environment variable first
        openmp_env = os.environ.get("OPENMP")
        n_jobs = openmp = int(openmp_env) if openmp_env is not None else 1

    if args.inputfolder:
        input_folder = args.inputfolder
    else:
        input_folder = os.environ.get("INPUT_FOLDER")

    if args.extras:
        extras_directory = args.extras
    else:
        extras_directory = os.environ.get("EXTRAS_DIRECTORY")

    # define subject
    subject = args.subject
    if not subject:
        poss = os.listdir(input_folder)
        print(
            f"No subject specified, maybe you want to choose from those:\n {poss}"
        )
        subject = input()

    if not subject.startswith("sub-"):
        ject = str(subject)
        subject = "sub-" + subject
    else:
        ject = subject.split("sub-")[-1]

    # create folder structure and copy
    dfc = Folderer.DerivativesFoldersCreator(BIDS_root=bids_root,
                                             extras_directory=extras_directory,
                                             subject=subject)
    dfc.make_derivatives_folders()

    # logging
    logfile = opj(dfc.freport, "SourceLocPipeline.log")
    logging.basicConfig(filename=logfile,
                        filemode="w",
                        format="\n%(levelname)s --> %(message)s")
    rootlog = logging.getLogger()
    rootlog.setLevel(logging.INFO)
    rootlog.info("Now running SourceLoc pipeline...")

    # log parameters
    rootlog.info(f"*" * 20)
    rootlog.info("Parameters")
    rootlog.info(f"*" * 20)
    rootlog.info(f"Subject name = {ject}")
    rootlog.info(f"Input folder is set to: {input_folder}.")
    rootlog.info(f"BIDS root is set to: {bids_root}.")
    rootlog.info(f"Extras directory is set to: {extras_directory}.")
    rootlog.info(f"Using {openmp} processor cores/ jobs.")
    rootlog.info("Folder structure has been created.")

    # check if freesurfer subjects_dir exists
    # str() would turn a missing value into the string "None", defeating the
    # check below, so keep the raw environment value
    FS_SUBJECTS_DIR = os.environ.get("SUBJECTS_DIR")
    if FS_SUBJECTS_DIR is None:
        print("It seems freesurfer is not properly set up on your computer")
        rootlog.warning(
            "No working freesurfer environment found - SUBJECTS_DIR is not set"
        )

    # check if source spacing was set + is valid
    if not args.srcspacing:
        spacing = os.environ.get("SRCSPACING") or "oct6"
    else:
        spacing = args.srcspacing
    if spacing not in ["ico4", "oct5", "oct6", "ico5"]:
        spacing = "oct6"
        print('The desired spacing isn\'t allowed, typo?\n \
                        Options are: "ico4", "oct5", "oct6", "ico5"\n \
                        --> spacing was automatically set to "oct6".')
        rootlog.warning(
            "Spacing was set to \"oct6\", as input given was invalid.")
    rootlog.info(f"Final source spacing is {spacing}.")

    # MRI to nii.gz, then freesurfer, then hippocampal subfields
    # Naturally, this only works with a freesurfer environment
    # and this will take some time...
    anafolder = opj(input_folder, ject)
    if os.path.isdir(anafolder):
        rootlog.setLevel(logging.ERROR)
        rap = Anatomist.RawAnatomyProcessor(anafolder,
                                            FS_SUBJECTS_DIR,
                                            n_jobs=n_jobs)
        try:
            rap.run_anatomy_pipeline()
        except Exception as e:
            rootlog.error(
                f"Something went wrong while processing anatomy: {e}")
        rootlog.setLevel(logging.INFO)

    # Check if only freesurfer segmentation was desired and comply, if true
    if args.fsonly and args.fsonly.lower() == "true":
        rootlog.info(
            "Only freesurfer segmentation was desired - finished without errors."
        )
        exit()

    # copy freesurfer files to local subjects_dir
    try:
        segmentation = opj(FS_SUBJECTS_DIR, subject)
        target = opj(dfc.fanat, subject)
        if not os.path.isdir(target):
            os.mkdir(target)
        rootlog.info(
            f"Copying freesurfer segmentation {segmentation} to {target}")
        dfc._recursive_overwrite(segmentation, target)
    except Exception as e:
        rootlog.error(f"Couldn't copy freesurfer segmentation\n--> {e}.")

    # create source models
    sourcerer = Anatomist.SourceModeler(subjects_dir=dfc.fanat,
                                        subject=subject,
                                        spacing=spacing,
                                        n_jobs=n_jobs)
    sourcerer.calculate_source_models()

    # process raw fifs
    raws = glob.glob(input_folder + "/*.fif")
    raws = [f for f in raws if ject in f]
    epo_filename = opj(dfc.spikes, str(subject) + "-epo.fif")
    concatname = opj(os.path.dirname(raws[0]), str(subject) + "_concat.fif")

    def raw_processing_already_done():
        r = os.path.isfile(concatname)
        c = os.path.isfile(epo_filename)
        return r and c

    if not raw_processing_already_done():
        # parse list of appropriate raws
        rootlog.info(
            f"The following raw files were found for preprocessing:\n{raws}")
        prepper = u.RawPreprocessor()
        for run, rawfile in enumerate(raws):
            if "tsss" in rawfile and ject in rawfile and "-epo" not in rawfile:
                # --> search for matching eventfile and combine
                # str.strip would remove characters, not the suffix,
                # so slice the extension off instead
                rawname = rawfile[:-len(".fif")] + "_prep.fif"
                if "_prep" not in rawfile:
                    # epochs
                    epochs = prepper.raw_to_epoch(rawfile)
                    if epochs is not None:
                        epochs = epochs.load_data().filter(
                            l_freq=l_freq,
                            fir_design=fir_design,
                            h_freq=h_freq,
                            n_jobs=n_jobs)
                        epo_filename = rawfile[:-len(".fif")] + "-epo.fif"
                        epochs.save(epo_filename, overwrite=True)
                    # preprocessing
                    raw = mne.io.read_raw(rawfile,
                                          preload=False,
                                          on_split_missing="ignore")
                    raw = prepper.filter_raw(raw,
                                             l_freq=l_freq,
                                             fir_design=fir_design,
                                             h_freq=h_freq,
                                             n_jobs=n_jobs)
                    raw = prepper.resample_raw(raw,
                                               s_freq=s_freq,
                                               n_jobs=n_jobs)
                    #
                    # Artifacts
                    # ECG artifacts
                    # It's smarter to supervise this step (--> look at the topomaps!)
                    raw.load_data()
                    try:
                        ecg_projs, _ = mne.preprocessing.compute_proj_ecg(
                            raw,
                            n_grad=n_grad,
                            n_mag=n_mag,
                            n_eeg=n_eeg,
                            reject=None)
                        raw.add_proj(ecg_projs, remove_existing=False)
                        fig = mne.viz.plot_projs_topomap(ecg_projs,
                                                         info=raw.info,
                                                         show=False)
                        savename = os.path.join(dfc.fprep,
                                                "ECG_projs_Topomap.png")
                        fig.savefig(savename)
                    except Exception as e:
                        rootlog.error(
                            f"ECG - Artifact correction failed --> {e}")
                    # EOG artifacts
                    # It's a bad idea to do this in an automated step
                    try:
                        eog_evoked = mne.preprocessing.create_eog_epochs(
                            raw).average()
                        #eog_evoked.apply_baseline((None, None))
                        eog_projs, _ = mne.preprocessing.compute_proj_eog(
                            raw,
                            n_grad=n_grad,
                            n_mag=n_mag,
                            n_eeg=n_eeg,
                            n_jobs=n_jobs,
                            reject=None)
                        raw.add_proj(
                            eog_projs, remove_existing=False
                        )  # --> don't do this in the early stages - see documentation
                        figs = eog_evoked.plot_joint(show=False)
                        for idx, fig in enumerate(figs):
                            savename = os.path.join(
                                dfc.fprep, "EOG Topomap_" + str(idx) + ".png")
                            fig.savefig(savename)
                    except Exception as e:
                        rootlog.error(
                            f"EOG - Artifact correction failed --> {e}")
                    # save raw, store projs
                    all_projs = raw.info["projs"]
                    raw.save(rawname, overwrite=True)
                    del raw

        # concatenate epochs
        epo_filename = opj(dfc.spikes, str(subject) + "-epo.fif")
        if not os.path.isfile(epo_filename):
            epoch_files = glob.glob(input_folder + "/*-epo.fif")
            epoch_files = [f for f in epoch_files if ject in f]
            all_epochs = dict()
            rootlog.info("Concatenating epochs now...")
            for f in epoch_files:
                all_epochs[f] = mne.read_epochs(f)
            concat_epochs = mne.concatenate_epochs(
                [all_epochs[f] for f in epoch_files])
            concat_epochs.add_proj(all_projs, remove_existing=True)
            concat_epochs.apply_proj()
            rootlog.info(f"Saving concatenated epoch file as {epo_filename}")
            concat_epochs.save(epo_filename)

        # concatenate filtered and resampled files
        raws = glob.glob(input_folder + "/*.fif")
        raws = [f for f in raws if ject in f]
        raws = [f for f in raws if "_prep" in f]
        all_raws = dict()
        concatname = opj(os.path.dirname(raws[0]),
                         str(subject) + "_concat.fif")
        if not os.path.isfile(concatname):
            for r in raws:
                all_raws[r] = mne.io.read_raw(r, preload=False)
                all_raws[r].del_proj()
            rootlog.info(
                f"Concatenating the following (filtered and resampled) raw files: {raws}"
            )
            try:
                raw = mne.concatenate_raws(
                    [all_raws[r] for r in all_raws.keys()])
                rootlog.info(
                    "Rawfiles have successfully been concatenated....")
            except Exception as e:
                rootlog.error(
                    f"Failed trying to concatenate raw file\n {r} --> {e}")
                #print("Loading only first raw file!")
                #raw = mne.io.read_raw(raws[0])
            rootlog.info("Applying SSP projectors on concatenated file...")
            raw.add_proj(all_projs, remove_existing=True)
            raw.apply_proj()
            rootlog.info(f"Saving concatenated rawfile as {concatname}")
            raw.save(concatname)

        # Save in BIDS format
        derivatives_root = opj(bids_root, "derivatives")
        # meg
        bids_path = BIDSPath(subject=ject,
                             session="resting",
                             task="resting",
                             root=derivatives_root,
                             processing="concat")
        raw = mne.io.read_raw(concatname, preload=False)
        write_raw_bids(raw, bids_path, overwrite=True)

    # anatomy
    rootlog.info("Running dicom2nifti on MRI...")
    derivatives_root = opj(bids_root, "derivatives")
    # meg
    bids_path = BIDSPath(subject=ject,
                         session="resting",
                         task="resting",
                         root=bids_root,
                         processing="concat")
    nii = glob.glob(opj(input_folder, ject, "*.nii*"))

    try:
        for n in nii:
            write_anat(n, bids_path=bids_path, overwrite=True)
    except Exception as e:
        rootlog.error(f"Conversion of MRI to nifti failed --> {e}")

    # Create Dataset
    the_roots = [bids_root, derivatives_root]
    rootlog.info("Creating BIDS dataset...")
    for r in the_roots:
        make_dataset_description(r,
                                 name="CDK Epilepsy Dataset",
                                 data_license="closed",
                                 authors="Rudi Kreidenhuber",
                                 overwrite=True)

    # Coregistration --> this doesn't work with WSLg; from here on,
    # run on Windows if you are on a Windows machine
    transfile = opj(dfc.ftrans, subject + "-trans.fif")
    rootlog.info("Starting coregistration...")
    if os.path.isfile(transfile):
        rootlog.info(
            f"Skipping coregistration, because a transfile ({transfile}) already exists"
        )
    else:
        print(f"\n\n\n--> Transfile should be called: {transfile}\n\n\n")
        try:
            mne.gui.coregistration(
                subject=subject,
                subjects_dir=dfc.fanat,
                inst=bids_path,
                advanced_rendering=False)  # BIDS: inst=raw.filenames[0])
        except Exception:
            print("failed with bids_derivatives folder")
            rawfile = opj(dfc.fbase, "ses-resting", "meg", "*concat_meg.fif")
            rawfile = glob.glob(rawfile)[0]
            rootlog.info(
                f"Coregistration with BIDS-file failed, Rawfile used was: {rawfile}"
            )
            mne.gui.coregistration(subject=subject,
                                   subjects_dir=dfc.fanat,
                                   inst=rawfile,
                                   advanced_rendering=False)

    # frequency spectrum
    if do_frequencies:
        rootlog.info("Calculating frequency spectrum...")
        bem_sol = opj(dfc.fsrc, subject + "-3-layer-BEM-sol.fif")
        if not os.path.isfile(bem_sol) and use_single_shell_model:
            rootlog.warning("Working with a single shell head model")
            bem_sol = opj(dfc.fsrc, subject + "-single-shell-BEM-sol.fif")
        fwd_name = opj(dfc.fsrc, subject + "-fwd.fif")
        srcfilename = opj(dfc.fsrc, subject + "-" + spacing + "-src.fif")
        filebase = str(subject) + "_Freqs"
        all_stcs_filename = (filebase + '-stc-psd-MNE.pkl')
        all_stcs_filename = opj(dfc.freq, all_stcs_filename)
        sensor_psd_filename = (filebase + '-sensor-psd-MNE.pkl')
        sensor_psd_filename = opj(dfc.freq, sensor_psd_filename)
        if not os.path.isfile(all_stcs_filename) or not os.path.isfile(
                sensor_psd_filename
        ):  # so this should run only on the first file..
            # load again in case preprocessing didn't happen before
            concatname = opj(input_folder, str(subject) + "_concat.fif")
            raw = mne.io.read_raw(concatname, preload=True)
            if os.path.isfile(fwd_name):
                fwd = mne.read_forward_solution(fwd_name)
            else:
                fwd = mne.make_forward_solution(raw.info,
                                                src=srcfilename,
                                                bem=bem_sol,
                                                trans=transfile,
                                                meg=True,
                                                eeg=False,
                                                mindist=0.2,
                                                ignore_ref=False,
                                                n_jobs=n_jobs,
                                                verbose=True)
                mne.write_forward_solution(fwd_name, fwd)
            noise_cov = mne.compute_raw_covariance(raw,
                                                   method="empirical",
                                                   n_jobs=n_jobs)
            inv = mne.minimum_norm.make_inverse_operator(raw.info,
                                                         forward=fwd,
                                                         noise_cov=noise_cov,
                                                         loose="auto",
                                                         depth=0.8)
            snr = 3.
            stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd(
                raw,
                inv,
                lambda2=lambda2,
                method='MNE',
                fmin=1,
                fmax=45,
                n_fft=2048,
                n_jobs=n_jobs,
                return_sensor=True,
                verbose=True)
            pickle.dump(stc_psd, open(all_stcs_filename, "wb"))
            pickle.dump(sensor_psd, open(sensor_psd_filename, "wb"))
        else:
            stc_psd = pickle.load(open(all_stcs_filename, "rb"))
            sensor_psd = pickle.load(open(sensor_psd_filename, "rb"))
        # Visualization
        topos = dict()
        stcs = dict()
        topo_norm = sensor_psd.data.sum(axis=1, keepdims=True)
        stc_norm = stc_psd.sum()
        for band, limits in freq_bands.items():  # normalize...
            data = sensor_psd.copy().crop(*limits).data.sum(axis=1,
                                                            keepdims=True)
            topos[band] = mne.EvokedArray(100 * data / topo_norm,
                                          sensor_psd.info)
            stcs[band] = 100 * stc_psd.copy().crop(
                *limits).sum() / stc_norm.data
        brain = dict()
        x_hemi_freq = dict()
        mne.viz.set_3d_backend('pyvista')
        for band in freq_bands.keys():
            brain[band] = u.plot_freq_band_dors(stcs[band],
                                                band=band,
                                                subject=subject,
                                                subjects_dir=dfc.fanat,
                                                filebase=filebase)
            freqfilename3d = (filebase + '_' + band +
                              '_freq_topomap_3d_dors.png')
            freqfilename3d = os.path.join(dfc.freq, freqfilename3d)
            image = brain[band].save_image(freqfilename3d)
            brain_lh, brain_rh = u.plot_freq_band_lat(stcs[band],
                                                      band=band,
                                                      subject=subject,
                                                      subjects_dir=dfc.fanat,
                                                      filebase=filebase)
            freqfilename3d = (filebase + '_' + band +
                              '_freq_topomap_3d_lat_lh.png')
            freqfilename3d = os.path.join(dfc.freq, freqfilename3d)
            image = brain_lh.save_image(freqfilename3d)
            freqfilename3d = (filebase + '_' + band +
                              '_freq_topomap_3d_lat_rh.png')
            freqfilename3d = os.path.join(dfc.freq, freqfilename3d)
            image = brain_rh.save_image(freqfilename3d)
            brain_lh, brain_rh = u.plot_freq_band_med(stcs[band],
                                                      band=band,
                                                      subject=subject,
                                                      subjects_dir=dfc.fanat,
                                                      filebase=filebase)
            freqfilename3d = (filebase + '_' + band +
                              '_freq_topomap_3d_med_lh.png')
            freqfilename3d = os.path.join(dfc.freq, freqfilename3d)
            image = brain_lh.save_image(freqfilename3d)
            freqfilename3d = (filebase + '_' + band +
                              '_freq_topomap_3d_med_rh.png')
            freqfilename3d = os.path.join(dfc.freq, freqfilename3d)
            image = brain_rh.save_image(freqfilename3d)
            # Cross hemisphere comparison
            # make sure fsaverage_sym exists in local subjects dir:
            rootlog.info(
                f"Calculating cross hemisphere comparison for {band}.")
            target = os.path.join(dfc.fanat, "fsaverage_sym")
            if not os.path.isdir(target):
                # try to find it in $SUBJECTS_DIR and copy
                os_subj_dir = os.environ.get("SUBJECTS_DIR")
                fs_avg_sym_dir = os.path.join(os_subj_dir, "fsaverage_sym")
                u.recursive_overwrite(fs_avg_sym_dir, target)
            if not os.path.isdir(target):
                rootlog.error("fsaverage_sym not found - aborting")
                raise Exception
            mstc = stcs[band].copy()
            mstc = mne.compute_source_morph(mstc,
                                            subject,
                                            'fsaverage_sym',
                                            smooth=5,
                                            warn=False,
                                            subjects_dir=dfc.fanat).apply(mstc)
            morph = mne.compute_source_morph(mstc,
                                             'fsaverage_sym',
                                             'fsaverage_sym',
                                             spacing=mstc.vertices,
                                             warn=False,
                                             subjects_dir=dfc.fanat,
                                             xhemi=True,
                                             verbose='error')
            stc_xhemi = morph.apply(mstc)
            diff = mstc - stc_xhemi
            title = ('blue = RH; ' + subject + ' -Freq-x_hemi- ' + band)
            x_hemi_freq[band] = diff.plot(
                hemi='lh',
                subjects_dir=dfc.fanat,
                size=(1200, 800),
                time_label=title,
                add_data_kwargs=dict(time_label_size=10))
            freqfilename3d = (filebase + '_x_hemi_' + band + '.png')
            freqfilename3d = os.path.join(dfc.freq, freqfilename3d)
            image = x_hemi_freq[band].save_image(freqfilename3d)


    # Source localization
    rootlog.info("Now starting source localization...")
    epo_filename = opj(dfc.spikes, str(subject) + "-epo.fif")
    concat_epochs = mne.read_epochs(epo_filename)
    noise_cov_file = opj(dfc.spikes, "Spikes_noise_covariance.pkl")
    srcfilename = opj(dfc.fsrc, subject + "-" + spacing + "-src.fif")
    if not os.path.isfile(noise_cov_file):
        noise_cov = mne.compute_covariance(concat_epochs,
                                           tmax=-1.,
                                           method='auto',
                                           n_jobs=n_jobs,
                                           rank="full")
        pickle.dump(noise_cov, open(noise_cov_file, "wb"))
    else:
        with open(noise_cov_file, 'rb') as f:
            noise_cov = pickle.load(f)

    rootlog.info(
        f"The following events have been found: \n{concat_epochs.event_id.keys()}"
    )
    for event in concat_epochs.event_id.keys():
        eventname = str(event)
        if eventname == "ignore_me" or eventname == "AAA" or eventname.startswith(
                "."):
            rootlog.info(f"Omitting event {event}")
        else:
            try:
                rootlog.info(f"Now localizing event: {event}")
                e = concat_epochs[eventname].load_data().crop(
                    tmin=-0.5, tmax=0.5).average()
                e_folder = os.path.join(dfc.spikes, eventname)
                evoked_filename = opj(e_folder,
                                      ject + "_" + eventname + "-ave.fif")
                cp_folder = os.path.join(dfc.spikes, eventname, "custom_pics")
                cts_folder = os.path.join(dfc.spikes, eventname,
                                          "custom_time_series")
                gp_folder = os.path.join(dfc.spikes, eventname, "generic_pics")
                folders = [e_folder, cp_folder, cts_folder, gp_folder]
                if not os.path.isdir(e_folder):
                    for f in folders:
                        os.mkdir(f)
                e.save(evoked_filename)
                src = mne.read_source_spaces(srcfilename)
                bem_sol = opj(dfc.fsrc, subject + "-3-layer-BEM-sol.fif")
                if not os.path.isfile(bem_sol) and use_single_shell_model:
                    bem_sol = opj(dfc.fsrc,
                                  subject + "-single-shell-BEM-sol.fif")

                fwd_name = opj(dfc.fsrc, subject + "-fwd.fif")
                if os.path.isfile(fwd_name):
                    fwd = mne.read_forward_solution(fwd_name)
                else:
                    fwd = mne.make_forward_solution(e.info,
                                                    src=src,
                                                    bem=bem_sol,
                                                    trans=transfile,
                                                    meg=True,
                                                    eeg=False,
                                                    mindist=0.2,
                                                    ignore_ref=False,
                                                    n_jobs=n_jobs,
                                                    verbose=True)
                # inv for cortical surface
                inv = mne.minimum_norm.make_inverse_operator(
                    e.info,
                    forward=fwd,
                    noise_cov=noise_cov,
                    loose=0.2,
                    depth=0.8)
                # inv with volume source space
                vol_srcfilename = opj(dfc.fsrc, subject + "-vol-src.fif")
                src_vol = mne.read_source_spaces(vol_srcfilename)
                fwd_vol = mne.make_forward_solution(e.info,
                                                    src=src_vol,
                                                    bem=bem_sol,
                                                    trans=transfile,
                                                    meg=True,
                                                    eeg=False,
                                                    mindist=0.2,
                                                    ignore_ref=False,
                                                    n_jobs=n_jobs,
                                                    verbose=True)
                inv_vol = mne.minimum_norm.make_inverse_operator(
                    e.info,
                    forward=fwd_vol,
                    noise_cov=noise_cov,
                    loose=1,
                    depth=0.8)

                # Distributed source models
                for m in source_loc_methods:
                    stc_name = 'stc_' + m
                    if m == 'dSPM':
                        # calculate vector solution in volume source space
                        try:
                            rootlog.info(
                                "Now calculating dSPM vector solution...")
                            stc_name = mne.minimum_norm.apply_inverse(
                                e,
                                inv_vol,
                                lambda2,
                                method='dSPM',
                                pick_ori='vector')
                            surfer_kwargs = dict(
                                subjects_dir=dfc.fanat,  # hemi='split', 
                                clim=dict(kind='percent', lims=[90, 96,
                                                                99.85]),
                                views=['lat', 'med'],
                                colorbar=True,
                                initial_time=0,
                                time_unit='ms',
                                size=(1000, 800),
                                smoothing_steps=10)
                            brain = stc_name.plot(**surfer_kwargs)
                            label = str(ject + " - " + eventname +
                                        " - Vector solution")
                            brain.add_text(0.1,
                                           0.9,
                                           label,
                                           'title',
                                           font_size=10)
                            img_f_name = ('img_stc_' + ject + '_' + eventname +
                                          '_' + m + '.png')
                            img_f_name = os.path.join(gp_folder, img_f_name)
                            brain.save_image(img_f_name)
                            stc_f_name = ("stc_" + ject + '_' + eventname +
                                          '_' + m + ".h5")
                            stc_f_name = os.path.join(e_folder, stc_f_name)
                            stc_name = stc_name.crop(tmin=stc_tmin,
                                                     tmax=stc_tmax)
                            rootlog.info("Saving dSPM vector solution.")
                            stc_name.save(stc_f_name)
                        except Exception as ex:
                            rootlog.error(f"dSPM failed --> {ex}")
                    else:
                        stc_name = mne.minimum_norm.apply_inverse(
                            e, inv, lambda2, method=m, pick_ori=None)
                        surfer_kwargs = dict(hemi='split',
                                             subjects_dir=dfc.fanat,
                                             clim=dict(kind='percent',
                                                       lims=[90, 96, 99.85]),
                                             views=['lat', 'med'],
                                             colorbar=True,
                                             initial_time=0,
                                             time_unit='ms',
                                             size=(1000, 800),
                                             smoothing_steps=10)
                        brain = stc_name.plot(**surfer_kwargs)
                        label = str(ject + " - " + eventname + " - " + m)
                        brain.add_text(0.1, 0.9, label, 'title', font_size=10)
                        img_f_name = ('img_stc_' + ject + '_' + eventname +
                                      '_' + m + '.png')
                        img_f_name = os.path.join(gp_folder, img_f_name)
                        brain.save_image(img_f_name)
                        stc_f_name = ('stc_' + ject + '_' + eventname + '_' +
                                      m)
                        stc_f_name = os.path.join(e_folder, stc_f_name)
                        stc_name = stc_name.crop(tmin=stc_tmin, tmax=stc_tmax)
                        rootlog.info("Saving eLORETA.")
                        stc_name.save(stc_f_name)
                        if m == "eLORETA":
                            try:
                                rootlog.info(
                                    "Now calculating eLORETA with peaks...")
                                rh_peaks = u.get_peak_points(
                                    stc_name,
                                    hemi='rh',
                                    tmin=peaks_tmin,
                                    tmax=peaks_tmax,
                                    nr_points=peaks_nr_of_points,
                                    mode=peaks_mode)
                                lh_peaks = u.get_peak_points(
                                    stc_name,
                                    hemi='lh',
                                    tmin=peaks_tmin,
                                    tmax=peaks_tmax,
                                    nr_points=peaks_nr_of_points,
                                    mode=peaks_mode)
                                label = str(ject + " - " + eventname + " - " +
                                            m + " - max. activation points")
                                brain.add_text(0.1, 0.9, label,
                                               font_size=10)  #, 'title'
                                for p in rh_peaks:
                                    brain.add_foci(p,
                                                   color='green',
                                                   coords_as_verts=True,
                                                   hemi='rh',
                                                   scale_factor=0.6,
                                                   alpha=0.9)
                                for p in lh_peaks:
                                    brain.add_foci(p,
                                                   color='green',
                                                   coords_as_verts=True,
                                                   hemi='lh',
                                                   scale_factor=0.6,
                                                   alpha=0.9)
                                stc_f_name = ('stc_' + ject + '_' + eventname +
                                              '_' + m + "_with_peaks-ave")
                                stc_f_name = os.path.join(e_folder, stc_f_name)
                                stc_name.save(stc_f_name)
                                img_f_name = ('img_stc_' + ject + '_' +
                                              eventname + '_' + m +
                                              '_with_peaks.png')
                                img_f_name = os.path.join(
                                    gp_folder, img_f_name)
                                brain.save_image(img_f_name)
                            except Exception as ex:
                                rootlog.error(
                                    f"eLORETA with peaks failed --> {ex}")
                # Dipoles
                rootlog.info("Now calculating ECD.")
                try:
                    for start, stop in dip_times.values():
                        dip_epoch = e.copy().crop(start, stop).pick('meg')
                        ecd = mne.fit_dipole(dip_epoch,
                                             noise_cov,
                                             bem_sol,
                                             trans=transfile)[0]
                        best_idx = np.argmax(ecd.gof)
                        best_time = ecd.times[best_idx]
                        trans = mne.read_trans(transfile)
                        mri_pos = mne.head_to_mri(ecd.pos,
                                                  mri_head_t=trans,
                                                  subject=subject,
                                                  subjects_dir=dfc.fanat)
                        t1_file_name = os.path.join(dfc.fanat, subject, 'mri',
                                                    'T1.mgz')
                        stoptime = str(abs(int(stop * int(e.info["sfreq"]))))
                        if stoptime == "5":
                            stoptime = "05"
                        title = str(eventname + ' - ECD @ minus ' + stoptime +
                                    ' ms')
                        t1_fig = plot_anat(t1_file_name,
                                           cut_coords=mri_pos[0],
                                           title=title)
                        t1_f_name_pic = ('img_ecd_' + eventname + '_' +
                                         '_Dipol_' + stoptime + '.png')
                        t1_f_name_pic = os.path.join(e_folder, "generic_pics",
                                                     t1_f_name_pic)
                        t1_fig.savefig(t1_f_name_pic)
                        fig_3d = ecd.plot_locations(trans,
                                                    subject,
                                                    dfc.fanat,
                                                    mode="orthoview")
                        fig_3d_pic = ('img_3d_ecd_' + eventname + '_' +
                                      '_Dipol_' + stoptime + '.png')
                        fig_3d_pic = os.path.join(e_folder, "generic_pics",
                                                  fig_3d_pic)
                        fig_3d.savefig(fig_3d_pic)
                        plt.close("all")
                except Exception as ex:
                    rootlog.error(f"ECD calculation failed --> {ex}")
            except Exception as ex:
                rootlog.error(f"Source localization failed because of:\n {ex}")

    # Create report
    if do_report:
        reporter = Reporter.EpilepsyReportBuilder(
            derivatives_root=derivatives_root,
            subject=subject,
            extras_dir=extras_directory)
        reporter.create_report()

    # Last words
    logging.info("Finished SourceLocPipeline.")
    print("SourceLocPipeline completed!")
Example No. 9
import os.path as op
import shutil as sh

from mne_bids import (make_bids_basename, make_bids_folders,
                      make_dataset_description)

# `config`, `input_path`, `output_path` and `project_name` are defined
# elsewhere in the original script.

# eyetrack. note, the BEP for eyetracking isn't merged yet,
# but this is likely the naming convention for it
for subject, experiments in config.exp_list.items():
    subname = 'sub-{}'.format(subject)
    for ii, exp in enumerate(experiments, 1):
        if exp != 'n/a':
            bids_basename = make_bids_basename(subject=subject,
                                               run='{:02d}'.format(ii),
                                               task=project_name)
            bids_eyetrack = op.join(output_path, subname, 'eyetrack',
                                    bids_basename + '_eyetrack.edf')
            input_fname = op.join(input_path, subject, 'edf',
                                  '{}_{}.edf'.format(subject, exp))
            make_bids_folders(subject=subject,
                              kind='eyetrack',
                              output_path=output_path)
            sh.copyfile(input_fname, bids_eyetrack)

# make a dataset description
make_dataset_description(
    path=output_path,
    data_license='CC-BY',
    name=project_name,
    authors=[
        'Teon L Brooks', 'Laura Gwilliams', 'Alexandre Gramfort',
        'Alec Marantz'
    ],
    how_to_acknowledge='',
    funding=['NSF DGE-1342536 (TB)', 'Abu Dhabi Institute Grant G1001 (AM)'],
    references_and_links='',
    doi='')
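As a final check, the description just written can be read back; a minimal sketch mirroring the keys passed above (the demo path stands in for output_path):

import json
import os.path as op

with open(op.join('/tmp/bids_demo', 'dataset_description.json')) as f:
    desc = json.load(f)
# data_license is stored under the BIDS field name 'License'
print(desc['Name'], desc.get('License'), desc.get('Authors'))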