Example #1
def test_handle_kind():
    """Test the automatic extraction of kind from the data."""
    # Create a dummy raw
    n_channels = 1
    sampling_rate = 100
    data = random((n_channels, sampling_rate))
    channel_types = ['grad', 'eeg', 'ecog']
    expected_kinds = ['meg', 'eeg', 'ieeg']
    # check each channel type once; the ambiguous and "no type" cases follow below
    for chtype, kind in zip(channel_types, expected_kinds):
        info = mne.create_info(n_channels, sampling_rate, ch_types=[chtype])
        raw = mne.io.RawArray(data, info)
        assert _handle_kind(raw) == kind

    # if the situation is ambiguous (EEG and iEEG channels both), raise error
    with pytest.raises(ValueError, match='Both EEG and iEEG channels found'):
        info = mne.create_info(2, sampling_rate, ch_types=['eeg', 'ecog'])
        raw = mne.io.RawArray(random((2, sampling_rate)), info)
        _handle_kind(raw)

    # if we cannot find a proper channel type, we raise an error
    with pytest.raises(ValueError, match='Neither MEG/EEG/iEEG channels'):
        info = mne.create_info(n_channels, sampling_rate, ch_types=['misc'])
        raw = mne.io.RawArray(data, info)
        _handle_kind(raw)
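
The test above pins down the behaviour of _handle_kind without showing it. Below is a minimal sketch of that inference logic, consistent with the assertions above; the real mne-bids implementation may differ in wording and edge cases. It relies on MNE's channel-type containment check ('eeg' in raw), and the name _handle_kind_sketch is ours, not the library's.

import mne
from numpy.random import random


def _handle_kind_sketch(raw):
    """Infer the BIDS modality ('meg', 'eeg' or 'ieeg') from channel types (sketch)."""
    if 'eeg' in raw and ('ecog' in raw or 'seeg' in raw):
        raise ValueError('Both EEG and iEEG channels found in the data. '
                         'Please resolve the modality manually.')
    if 'meg' in raw:
        return 'meg'
    if 'ecog' in raw or 'seeg' in raw:
        return 'ieeg'
    if 'eeg' in raw:
        return 'eeg'
    raise ValueError('Neither MEG/EEG/iEEG channels found in data. '
                     'Please use raw.set_channel_types to set channel types.')


# Usage mirroring the test: a single 'grad' channel should map to 'meg'.
info = mne.create_info(1, 100, ch_types=['grad'])
raw = mne.io.RawArray(random((1, 100)), info)
assert _handle_kind_sketch(raw) == 'meg'
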
Example #2
def test_copyfile_kit():
    """Test copying and renaming KIT files to a new location."""
    output_path = _TempDir()
    data_path = op.join(base_path, 'kit', 'tests', 'data')
    raw_fname = op.join(data_path, 'test.sqd')
    hpi_fname = op.join(data_path, 'test_mrk.sqd')
    electrode_fname = op.join(data_path, 'test.elp')
    headshape_fname = op.join(data_path, 'test.hsp')
    subject_id = '01'
    session_id = '01'
    run = '01'
    acq = '01'
    task = 'testing'

    bids_basename = make_bids_basename(
        subject=subject_id, session=session_id, run=run, acquisition=acq,
        task=task)

    kit_bids_basename = bids_basename.copy().update(acquisition=None,
                                                    prefix=output_path)

    raw = mne.io.read_raw_kit(
        raw_fname, mrk=hpi_fname, elp=electrode_fname,
        hsp=headshape_fname)
    _, ext = _parse_ext(raw_fname, verbose=True)
    kind = _handle_kind(raw)
    bids_fname = str(bids_basename.copy().update(suffix=f'{kind}{ext}',
                                                 prefix=output_path))

    copyfile_kit(raw_fname, bids_fname, subject_id, session_id,
                 task, run, raw._init_kwargs)
    assert op.exists(bids_fname)
    _, ext = _parse_ext(hpi_fname, verbose=True)
    if ext == '.sqd':
        kit_bids_basename.suffix = 'markers.sqd'
        assert op.exists(kit_bids_basename)
    elif ext == '.mrk':
        kit_bids_basename.suffix = 'markers.mrk'
        assert op.exists(kit_bids_basename)

    if op.exists(electrode_fname):
        task, run, key = None, None, 'ELP'
        elp_ext = '.pos'
        elp_fname = make_bids_basename(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=key, suffix='headshape%s' % elp_ext,
            prefix=output_path)
        assert op.exists(elp_fname)

    if op.exists(headshape_fname):
        task, run, key = None, None, 'HSP'
        hsp_ext = '.pos'
        hsp_fname = make_bids_basename(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=key, suffix='headshape%s' % hsp_ext,
            prefix=output_path)
        assert op.exists(hsp_fname)
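
For orientation, here is a condensed sketch of the call this test exercises; the file paths are hypothetical and the import location of copyfile_kit is assumed.

import os.path as op

import mne
from mne_bids import make_bids_basename
from mne_bids.copyfiles import copyfile_kit  # import location assumed

bids_root = '/path/to/bids_root'  # hypothetical output directory
bids_basename = make_bids_basename(subject='01', session='01',
                                   task='testing', run='01')
raw = mne.io.read_raw_kit('rec.sqd', mrk='rec_mrk.sqd', elp='rec.elp',
                          hsp='rec.hsp')  # hypothetical KIT files
bids_fname = op.join(bids_root, str(bids_basename) + '_meg.sqd')

# Copies the recording and, as the assertions above check, writes the
# matching *_markers.* and acq-ELP/acq-HSP *_headshape.pos sidecars.
copyfile_kit('rec.sqd', bids_fname, '01', '01', 'testing', '01',
             raw._init_kwargs)
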
Example #3
def write_raw_bids(raw, bids_basename, output_path, events_data=None,
                   event_id=None, overwrite=False, verbose=True):
    """Walk over a folder of files and create BIDS compatible folder.

    .. warning:: The original files are simply copied over if the original
                 file format is BIDS-supported for that modality. Otherwise,
                 this function will convert to a BIDS-supported file format
                 while warning the user. For EEG and iEEG data, conversion will
                 be to BrainVision format; for MEG, conversion will be to FIF.

    Parameters
    ----------
    raw : instance of mne.io.Raw
        The raw data. It must be an instance of mne.io.Raw. The data must not
        be preloaded, i.e., raw.preload must be False.
    bids_basename : str
        The base filename of the BIDS compatible files. Typically, this can be
        generated using make_bids_basename.
        Example: `sub-01_ses-01_task-testing_acq-01_run-01`.
        This will write the following files in the correct subfolder of the
        output_path::

            sub-01_ses-01_task-testing_acq-01_run-01_meg.fif
            sub-01_ses-01_task-testing_acq-01_run-01_meg.json
            sub-01_ses-01_task-testing_acq-01_run-01_channels.tsv
            sub-01_ses-01_task-testing_acq-01_run-01_coordsystem.json

        and the following one if events_data is not None::

            sub-01_ses-01_task-testing_acq-01_run-01_events.tsv

        and add a line to the following files::

            participants.tsv
            scans.tsv

        Note that the modality 'meg' is automatically inferred from the raw
        object and extension '.fif' is copied from raw.filenames.
    output_path : str
        The path of the root of the BIDS compatible folder. The session and
        subject specific folders will be populated automatically by parsing
        bids_basename.
    events_data : str | array | None
        The events file. If a string, a path to the events file. If an array,
        the MNE events array (shape n_events, 3). If None, events will be
        inferred from the stim channel using `mne.find_events`.
    event_id : dict | None
        The event id dict used to create a 'trial_type' column in events.tsv.
    overwrite : bool
        Whether to overwrite existing files or data in files.
        Defaults to False.
        If overwrite is True, any existing files with the same BIDS parameters
        will be overwritten with the exception of the `participants.tsv` and
        `scans.tsv` files. For these files, parts of pre-existing data that
        match the current data will be replaced.
        If overwrite is False, no existing data will be overwritten or
        replaced.
    verbose : bool
        If verbose is True, this will print a snippet of the sidecar files. If
        False, no content will be printed.

    Returns
    -------
    output_path : str
        The path of the root of the BIDS compatible folder.

    Notes
    -----
    For the participants.tsv file, raw.info['subject_info'] should be
    updated and raw.info['meas_date'] should not be None in order to compute
    the age of the participant correctly.

    """
    if not check_version('mne', '0.17'):
        raise ValueError('Your version of MNE is too old. '
                         'Please update to 0.17 or newer.')

    if not isinstance(raw, BaseRaw):
        raise ValueError('raw must be an instance of BaseRaw, '
                         'got %s' % type(raw))

    if not hasattr(raw, 'filenames') or raw.filenames[0] is None:
        raise ValueError('raw.filenames is missing. Please set raw.filenames '
                         'as a list with the full path of the original raw '
                         'file.')

    if raw.preload is not False:
        raise ValueError('The data should not be preloaded.')

    raw = raw.copy()

    raw_fname = raw.filenames[0]
    if '.ds' in op.dirname(raw.filenames[0]):
        raw_fname = op.dirname(raw.filenames[0])
    # point to file containing header info for multifile systems
    raw_fname = raw_fname.replace('.eeg', '.vhdr')
    raw_fname = raw_fname.replace('.fdt', '.set')
    _, ext = _parse_ext(raw_fname, verbose=verbose)

    raw_orig = reader[ext](**raw._init_kwargs)
    assert_array_equal(raw.times, raw_orig.times,
                       "raw.times should not have changed since reading"
                       " in from the file. It may have been cropped.")

    params = _parse_bids_filename(bids_basename, verbose)
    subject_id, session_id = params['sub'], params['ses']
    acquisition, task, run = params['acq'], params['task'], params['run']
    kind = _handle_kind(raw)

    bids_fname = bids_basename + '_%s%s' % (kind, ext)

    # check whether the info provided indicates that the data is emptyroom
    # data
    emptyroom = False
    if subject_id == 'emptyroom' and task == 'noise':
        emptyroom = True
        # check the session date provided is consistent with the value in raw
        meas_date = raw.info.get('meas_date', None)
        if meas_date is not None:
            er_date = datetime.fromtimestamp(
                raw.info['meas_date'][0]).strftime('%Y%m%d')
            if er_date != session_id:
                raise ValueError("Date provided for session doesn't match "
                                 "session date.")

    data_path = make_bids_folders(subject=subject_id, session=session_id,
                                  kind=kind, output_path=output_path,
                                  overwrite=False, verbose=verbose)
    if session_id is None:
        ses_path = os.sep.join(data_path.split(os.sep)[:-1])
    else:
        ses_path = make_bids_folders(subject=subject_id, session=session_id,
                                     output_path=output_path, make_dir=False,
                                     overwrite=False, verbose=verbose)

    # create filenames
    scans_fname = make_bids_basename(
        subject=subject_id, session=session_id, suffix='scans.tsv',
        prefix=ses_path)
    participants_tsv_fname = make_bids_basename(prefix=output_path,
                                                suffix='participants.tsv')
    participants_json_fname = make_bids_basename(prefix=output_path,
                                                 suffix='participants.json')
    coordsystem_fname = make_bids_basename(
        subject=subject_id, session=session_id, acquisition=acquisition,
        suffix='coordsystem.json', prefix=data_path)
    sidecar_fname = make_bids_basename(
        subject=subject_id, session=session_id, task=task, run=run,
        acquisition=acquisition, suffix='%s.json' % kind, prefix=data_path)
    events_fname = make_bids_basename(
        subject=subject_id, session=session_id, task=task,
        acquisition=acquisition, run=run, suffix='events.tsv',
        prefix=data_path)
    channels_fname = make_bids_basename(
        subject=subject_id, session=session_id, task=task, run=run,
        acquisition=acquisition, suffix='channels.tsv', prefix=data_path)
    if ext not in ['.fif', '.ds', '.vhdr', '.edf', '.bdf', '.set', '.con',
                   '.sqd']:
        bids_raw_folder = bids_fname.split('.')[0]
        bids_fname = op.join(bids_raw_folder, bids_fname)

    # Read in Raw object and extract metadata from Raw object if needed
    orient = ORIENTATION.get(ext, 'n/a')
    unit = UNITS.get(ext, 'n/a')
    manufacturer = MANUFACTURERS.get(ext, 'n/a')

    # save all meta data
    _participants_tsv(raw, subject_id, participants_tsv_fname, overwrite,
                      verbose)
    _participants_json(participants_json_fname, True, verbose)
    _scans_tsv(raw, op.join(kind, bids_fname), scans_fname, overwrite, verbose)

    # TODO: Implement coordsystem.json and electrodes.tsv for EEG and iEEG
    if kind == 'meg' and not emptyroom:
        _coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname,
                          overwrite, verbose)

    events, event_id = _read_events(events_data, event_id, raw, ext)
    if events is not None and len(events) > 0 and not emptyroom:
        _events_tsv(events, raw, events_fname, event_id, overwrite, verbose)

    make_dataset_description(output_path, name=" ", verbose=verbose)
    _sidecar_json(raw, task, manufacturer, sidecar_fname, kind, overwrite,
                  verbose)
    _channels_tsv(raw, channels_fname, overwrite, verbose)

    # set the raw file name to now be the absolute path to ensure the files
    # are placed in the right location
    bids_fname = op.join(data_path, bids_fname)
    if os.path.exists(bids_fname) and not overwrite:
        raise FileExistsError('"%s" already exists. Please set '  # noqa: F821
                              'overwrite to True.' % bids_fname)
    _mkdir_p(os.path.dirname(bids_fname))

    if verbose:
        print('Copying data files to %s' % op.splitext(bids_fname)[0])

    convert = ext not in ALLOWED_EXTENSIONS[kind]
    # Copy the imaging data files
    if convert:
        if kind == 'meg':
            raise ValueError('Got file extension %s for MEG data, '
                             'expected one of %s'
                             % (ext, ALLOWED_EXTENSIONS['meg']))
        if verbose:
            warn('Converting data files to BrainVision format')
        if not check_version('pybv', '0.2'):
            raise ImportError('pybv >=0.2.0 is required for converting '
                              '%s files to BrainVision format' % ext)
        from pybv import write_brainvision
        events, event_id = events_from_annotations(raw)
        write_brainvision(raw.get_data(), raw.info['sfreq'],
                          raw.ch_names,
                          op.splitext(op.basename(bids_fname))[0],
                          op.dirname(bids_fname), events[:, [0, 2]],
                          resolution=1e-6)
    elif ext == '.fif':
        n_rawfiles = len(raw.filenames)
        if n_rawfiles > 1:
            split_naming = 'bids'
            raw.save(bids_fname, split_naming=split_naming, overwrite=True)
        else:
            # This ensures that single FIF files do not have the part param
            raw.save(bids_fname, split_naming='neuromag', overwrite=True)

    # CTF data is saved and renamed in a directory
    elif ext == '.ds':
        copyfile_ctf(raw_fname, bids_fname)
    # BrainVision is multifile, copy over all of them and fix pointers
    elif ext == '.vhdr':
        copyfile_brainvision(raw_fname, bids_fname)
    # EEGLAB .set might be accompanied by a .fdt - find out and copy it too
    elif ext == '.set':
        copyfile_eeglab(raw_fname, bids_fname)
    elif ext == '.pdf':
        copyfile_bti(raw_orig, op.join(data_path, bids_raw_folder))
    else:
        sh.copyfile(raw_fname, bids_fname)
    # KIT data requires the marker file to be copied over too
    if 'mrk' in raw._init_kwargs:
        hpi = raw._init_kwargs['mrk']
        acq_map = dict()
        if isinstance(hpi, list):
            if _get_mrk_meas_date(hpi[0]) > _get_mrk_meas_date(hpi[1]):
                raise ValueError('Markers provided in incorrect order.')
            _, marker_ext = _parse_ext(hpi[0])
            acq_map = dict(zip(['pre', 'post'], hpi))
        else:
            _, marker_ext = _parse_ext(hpi)
            acq_map[None] = hpi
        for key, value in acq_map.items():
            marker_fname = make_bids_basename(
                subject=subject_id, session=session_id, task=task, run=run,
                acquisition=key, suffix='markers%s' % marker_ext,
                prefix=data_path)
            sh.copyfile(value, marker_fname)

    return output_path
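
A minimal usage sketch of write_raw_bids, assuming make_bids_basename and write_raw_bids are importable from mne_bids; the FIF path and BIDS root are placeholders, and the basename mirrors the docstring example.

import mne
from mne_bids import make_bids_basename, write_raw_bids

raw = mne.io.read_raw_fif('/path/to/sample_raw.fif', preload=False)  # placeholder path
bids_basename = make_bids_basename(subject='01', session='01', task='testing',
                                   acquisition='01', run='01')
output_path = write_raw_bids(raw, bids_basename, '/path/to/bids_root',
                             overwrite=True, verbose=False)
# For FIF input this writes sub-01_ses-01_task-testing_acq-01_run-01_meg.fif
# plus the JSON/TSV sidecars listed in the docstring above.
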
Example #4
def main(
    bids_fname,
    bids_root,
    output_fname,
    reference,
    deriv_path=None,
    verbose=False,
    overwrite=False,
):
    """
    Run an Example EZTrack core analysis.

    Parameters
    ----------
    bids_fname : str
        The full basename of the BIDS file, as produced by make_bids_basename.
    bids_root : Union[str, Path]
        The base directory where the BIDS data are stored.
    output_fname : str
        The name of the output file.
    reference : str
        The channel referencing scheme; also used to name the derivatives
        subdirectory.
    deriv_path : str | None
        Where derivative outputs are written. If None, defaults to
        <bids_root>/derivatives/fragility/<reference>/<subject>.
    verbose : bool
        Whether to display output.
    overwrite : bool
        Whether to overwrite existing output.

    """
    # extract subject and acquisition entities from the BIDS filename
    params = _parse_bids_filename(bids_fname, verbose=verbose)
    acquisition = params["acq"]
    subject = params["sub"]

    # load the data
    raw = read_raw_bids(bids_fname, bids_root, verbose=verbose)

    # drop channels marked as bad
    raw.load_data()
    bad_chs = raw.info["bads"]
    raw = raw.drop_channels(bad_chs)

    # determine kind from the raw input
    kind = _handle_kind(raw)
    # only keep EEG/SEEG channels
    pick_dict = {acquisition: True}
    raw = raw.pick_types(**pick_dict)

    # preprocess the data using preprocess pipeline
    if kind == "eeg":
        raw = preprocess_eeg(raw, bad_chs=[])
    elif kind == "ieeg":
        raw = preprocess_ieeg(raw, bad_chs=[])

    # validation checks on raw data
    validate_raw_metadata(raw)

    # run fragility analysis
    if deriv_path is None:
        deriv_path = Path(
            Path(bids_root) / "derivatives" / "fragility" / reference / subject
        )
    fragility_results, metadata = analyze_data(
        raw,
        deriv_path,
        output_fname,
        reference=reference,
        overwrite=overwrite,
        verbose=verbose,
    )
    pertmats, adjmats, delvecs_array = fragility_results  # extract results from tuple

    # run validation checks on fragility output
    validate_eztrack_result(pertmats, metadata)

    # draw heatmap
    main_draw_heatmap(deriv_path, output_fname)

    return deriv_path
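
A hypothetical invocation of main; the filename, BIDS root, output name, and reference label below are placeholders rather than values from a real dataset.

from pathlib import Path

# Placeholder inputs: an iEEG recording already stored in a BIDS tree.
bids_fname = 'sub-01_ses-01_task-monitor_acq-seeg_run-01_ieeg.edf'
bids_root = Path('/path/to/bids_root')

deriv_path = main(
    bids_fname,
    bids_root,
    output_fname='sub-01_fragility.npz',  # placeholder output name
    reference='monopolar',                # assumed reference label
    verbose=True,
    overwrite=False,
)
# With deriv_path=None, results land in
# <bids_root>/derivatives/fragility/monopolar/01 by default.
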
Example #5
    def convert_to_bids(
        edf_fpath,
        bids_root,
        bids_basename,
        coords_fpath=None,
        excluded_contacts=None,
        eog_contacts=None,
        misc_contacts=None,
        overwrite=False,
        line_freq=60.0,
    ):
        """
        Convert the passed EDF file into the BIDS format.

        # TODO:
        - Clean up how write_raw_bids is called
        - Eliminate redundant writing/reading using TemporaryDirectory

        Parameters
        ----------
        edf_fpath : Union[str, os.PathLike]
            The location of the EDF file.
        bids_root : Union[str, os.PathLike]
            The base directory for the newly created BIDS files.
        bids_basename : str
            The base name of the new data files.
        coords_fpath : Union[str, os.PathLike] | None
            Optional path to a text file of electrode coordinates, one
            "<name> <x> <y> <z>" entry per line (in mm), used to build a
            montage.
        excluded_contacts : list
            Contacts to be excluded from conversion.
        eog_contacts : list
            Contacts to be annotated as EOG.
        misc_contacts : list
            Contacts to be annotated as misc.
        overwrite : bool
            Whether to overwrite an existing converted file.
        line_freq : float
            Power line frequency, stored in raw.info["line_freq"].

        Returns
        -------
        bids_root : str
            The BIDS root returned by the final write_raw_bids call.

        """
        if excluded_contacts is None:
            excluded_contacts = ["-", ""]
        raw = mne.io.read_raw_edf(
            edf_fpath,
            preload=False,
            verbose="ERROR",
            exclude=excluded_contacts,
            eog=eog_contacts,
            misc=misc_contacts,
        )
        if line_freq is not None:
            raw.info["line_freq"] = line_freq

        anonymize_dict = None
        # {
        #         "daysback": 10000,
        #         "keep_his": True,
        #     }

        # extract parameters from bids_basename
        params = _parse_bids_filename(bids_basename, True)
        subject, session = params["sub"], params["ses"]
        acquisition, kind = params["acq"], params["kind"]
        task = params["task"]

        # read in the events from the EDF file
        events_data, events_id = mne.events_from_annotations(raw)
        print(events_data, events_id)
        channel_scrub = ChannelScrub

        # convert the channel types based on acquisition if necessary
        if acquisition is not None:
            ch_modality_map = {ch: acquisition for ch in raw.ch_names}
            raw.set_channel_types(ch_modality_map)
            ch_type_mapping = channel_scrub.label_channel_types(raw.ch_names)
            raw.set_channel_types(ch_type_mapping)

        # reformat channel text if necessary
        channel_scrub.channel_text_scrub(raw)

        # look for bad channels that are obvious
        channel_names = raw.ch_names
        bad_channels = channel_scrub.look_for_bad_channels(channel_names)
        bad_channels_dict = {
            bad: f"Scrubbed channels containing markers {', '.join(BAD_MARKERS)}"
            for bad in bad_channels
        }
        raw.info["bads"] = bad_channels

        if coords_fpath:
            ch_pos = dict()
            with open(coords_fpath, "r") as fp:
                # strip off the newline character
                lines = [line.rstrip("\n") for line in fp]

                for line in lines:
                    ch_name = line.split(" ")[0]
                    coord = line.split(" ")[1:]
                    ch_pos[ch_name] = [float(x) for x in coord]
            unit = "mm"
            if unit != "m":
                ch_pos = {
                    ch_name: np.divide(coord, 1000)
                    for ch_name, coord in ch_pos.items()
                }
            montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                                    coord_frame="head")

            # TODO: remove. purely for testing scenario
            # ch_names = raw.ch_names
            # elec = np.random.random_sample((len(ch_names), 3))  # assume in mm
            # elec = elec / 1000  # convert to meters
            # montage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),
            #                                         coord_frame='head')
        else:
            montage = None

        if montage is not None:
            if not isinstance(montage, mne.channels.DigMontage):
                raise TypeError("Montage passed in should be of type: "
                                "`mne.channels.DigMontage`.")
            raw.set_montage(montage)
            print("Set montage: ")
            print(len(raw.info["ch_names"]))
            print(raw.info["dig"])
            print(raw)

        # actually perform write_raw_bids
        bids_root = write_raw_bids(
            raw,
            bids_basename,
            bids_root,
            events_data=events_data,
            event_id=events_id,
            overwrite=overwrite,
            # anonymize=anonymize_dict,
            verbose=False,
        )

        # save a fif copy and reload it
        kind = _handle_kind(raw)
        fif_data_path = make_bids_folders(
            subject=subject,
            session=session,
            kind=kind,
            output_path=bids_root,
            overwrite=False,
            verbose=True,
        )

        bids_fname = bids_basename + f"_{kind}.fif"
        deriv_bids_root = os.path.join(bids_root, "derivatives")

        print("Should be saving for: ", bids_fname)
        with tempfile.TemporaryDirectory() as tmp_bids_root:
            raw.save(os.path.join(tmp_bids_root, bids_fname),
                     overwrite=overwrite)
            raw = mne.io.read_raw_fif(os.path.join(tmp_bids_root, bids_fname))

            print(raw, bids_basename)
            print(raw.filenames)
            _, ext = _parse_ext(raw.filenames[0])
            print(ext)
            # actually perform write_raw_bids
            bids_root = write_raw_bids(
                raw,
                bids_basename,
                deriv_bids_root,
                events_data=events_data,
                event_id=events_id,
                overwrite=overwrite,
                # anonymize=anonymize_dict,
                verbose=False,
            )
        return bids_root
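
A sketch of calling convert_to_bids (shown on a hypothetical BidsConverter class, since the indentation above implies a method); all paths are placeholders, and the basename is a plain entity string so it can be concatenated with the kind suffix inside the function.

# Placeholder basename carrying the entities parsed inside convert_to_bids.
bids_basename = 'sub-01_ses-01_task-monitor_acq-seeg_run-01'

new_root = BidsConverter.convert_to_bids(  # class name is hypothetical
    edf_fpath='/path/to/recording.edf',
    bids_root='/path/to/bids_root',
    bids_basename=bids_basename,
    coords_fpath='/path/to/electrode_coords.txt',  # optional "name x y z" file in mm
    line_freq=60.0,
    overwrite=True,
)
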