Example #1
def test_copyfile_kit(tmpdir):
    """Test copying and renaming KIT files to a new location."""
    output_path = str(tmpdir)
    data_path = op.join(base_path, 'kit', 'tests', 'data')
    raw_fname = op.join(data_path, 'test.sqd')
    hpi_fname = op.join(data_path, 'test_mrk.sqd')
    electrode_fname = op.join(data_path, 'test.elp')
    headshape_fname = op.join(data_path, 'test.hsp')
    subject_id = '01'
    session_id = '01'
    run = '01'
    acq = '01'
    task = 'testing'

    raw = mne.io.read_raw_kit(
        raw_fname, mrk=hpi_fname, elp=electrode_fname,
        hsp=headshape_fname)
    _, ext = _parse_ext(raw_fname, verbose=True)
    datatype = _handle_datatype(raw)

    bids_path = BIDSPath(
        subject=subject_id, session=session_id, run=run, acquisition=acq,
        task=task)
    kit_bids_path = bids_path.copy().update(acquisition=None,
                                            datatype=datatype,
                                            root=output_path)
    bids_fname = str(bids_path.copy().update(datatype=datatype,
                                             suffix=datatype,
                                             extension=ext,
                                             root=output_path))

    copyfile_kit(raw_fname, bids_fname, subject_id, session_id,
                 task, run, raw._init_kwargs)
    assert op.exists(bids_fname)
    _, ext = _parse_ext(hpi_fname, verbose=True)
    if ext == '.sqd':
        kit_bids_path.update(suffix='markers', extension='.sqd')
        assert op.exists(kit_bids_path)
    elif ext == '.mrk':
        kit_bids_path.update(suffix='markers', extension='.mrk')
        assert op.exists(kit_bids_path)

    if op.exists(electrode_fname):
        task, run, key = None, None, 'ELP'
        elp_ext = '.pos'
        elp_fname = BIDSPath(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=key, suffix='headshape', extension=elp_ext,
            datatype='meg', root=output_path)
        assert op.exists(elp_fname)

    if op.exists(headshape_fname):
        task, run, key = None, None, 'HSP'
        hsp_ext = '.pos'
        hsp_fname = BIDSPath(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=key, suffix='headshape', extension=hsp_ext,
            datatype='meg', root=output_path)
        assert op.exists(hsp_fname)
Example #2
def copyfile_kit(src, dest, subject_id, session_id,
                 task, run, _init_kwargs):
    """Copy and rename KIT files to a new location.

    Parameters
    ----------
    src : str
        Path to the source raw .con or .sqd file.
    dest : str
        Path to the destination of the new BIDS file.
    subject_id : str | None
        The subject ID. Corresponds to "sub".
    session_id : str | None
        The session identifier. Corresponds to "ses".
    task : str | None
        The task identifier. Corresponds to "task".
    run : int | None
        The run number. Corresponds to "run".
    _init_kwargs : dict
        The raw object's initialization keyword arguments, used to locate
        the marker and headshape files.

    """
    # create parent directories in case they do not exist yet
    _mkdir_p(op.dirname(dest))

    # KIT data requires the marker file to be copied over too
    sh.copyfile(src, dest)
    data_path = op.split(dest)[0]
    datatype = 'meg'
    if 'mrk' in _init_kwargs:
        hpi = _init_kwargs['mrk']
        acq_map = dict()
        if isinstance(hpi, list):
            if _get_mrk_meas_date(hpi[0]) > _get_mrk_meas_date(hpi[1]):
                raise ValueError('Markers provided in incorrect order.')
            _, marker_ext = _parse_ext(hpi[0])
            acq_map = dict(zip(['pre', 'post'], hpi))
        else:
            _, marker_ext = _parse_ext(hpi)
            acq_map[None] = hpi
        for key, value in acq_map.items():
            marker_path = BIDSPath(
                subject=subject_id, session=session_id, task=task, run=run,
                acquisition=key, suffix='markers', extension=marker_ext,
                datatype=datatype)
            sh.copyfile(value, op.join(data_path, marker_path.basename))
    for acq in ['elp', 'hsp']:
        if acq in _init_kwargs:
            position_file = _init_kwargs[acq]
            task, run, acq = None, None, acq.upper()
            position_ext = '.pos'
            position_path = BIDSPath(
                subject=subject_id, session=session_id, task=task, run=run,
                acquisition=acq, suffix='headshape', extension=position_ext,
                datatype=datatype)
            sh.copyfile(position_file,
                        op.join(data_path, position_path.basename))
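
A minimal usage sketch (hypothetical paths; `raw._init_kwargs` is how the test in Example #1 recovers the marker, electrode, and headshape arguments that were passed to the reader):

import mne

# Hypothetical KIT recording with its companion files.
raw = mne.io.read_raw_kit('data/test.sqd', mrk='data/test_mrk.sqd',
                          elp='data/test.elp', hsp='data/test.hsp')

# Copy the .sqd file plus markers and head points into a BIDS tree.
copyfile_kit('data/test.sqd',
             'bids_root/sub-01/meg/sub-01_task-rest_meg.sqd',
             subject_id='01', session_id=None, task='rest', run=None,
             _init_kwargs=raw._init_kwargs)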
Example #3
def copyfile_eeglab(src, dest):
    """Copy a EEGLAB files to a new location and adjust pointer to '.fdt' file.

    Some EEGLAB .set files come with a .fdt binary file that contains the data.
    When moving a .set file, we need to check for an associated .fdt file and
    move it to an appropriate location as well as update an internal pointer
    within the .set file.

    Parameters
    ----------
    src : str
        Path to the source raw .set file.
    dest : str
        Path to the destination of the new .set file.

    """
    # Get extension of the EEGLAB file
    fname_src, ext_src = _parse_ext(src)
    fname_dest, ext_dest = _parse_ext(dest)
    if ext_src != ext_dest:
        raise ValueError('Need to move data with same extension'
                         f' but got {ext_src}, {ext_dest}')

    # Extract matlab struct "EEG" from EEGLAB file
    mat = loadmat(src,
                  squeeze_me=False,
                  chars_as_strings=False,
                  mat_dtype=False,
                  struct_as_record=True)
    if 'EEG' not in mat:
        raise ValueError(f'Could not find "EEG" field in {src}')
    eeg = mat['EEG']

    # If the data field is a string, it points to a .fdt file in src dir
    data = eeg[0][0]['data']
    # data is a char array; crude check that its last four characters
    # contain those of '.fdt', i.e. the .set links to a binary data file
    if all([item in data[0, -4:] for item in '.fdt']):
        head, tail = op.split(src)
        fdt_pointer = ''.join(data.tolist()[0])
        fdt_path = op.join(head, fdt_pointer)
        fdt_name, fdt_ext = _parse_ext(fdt_path)
        if fdt_ext != '.fdt':
            raise IOError('Expected extension .fdt for linked data but found'
                          f' {fdt_ext}')

        # Copy the fdt file and give it a new name
        sh.copyfile(fdt_path, fname_dest + '.fdt')

        # Now adjust the pointer in the set file
        head, tail = op.split(fname_dest + '.fdt')
        mat['EEG'][0][0]['data'] = tail
        savemat(dest, mat, appendmat=False)

    # If no .fdt file, simply copy the .set file, no modifications necessary
    else:
        sh.copyfile(src, dest)
Example #4
def _get_brainvision_paths(vhdr_path):
    """Get the .eeg and .vmrk file paths from a BrainVision header file.

    Parameters
    ----------
    vhdr_path : str
        Path to the header file.

    Returns
    -------
    paths : tuple
        Paths to the .eeg file at index 0 and the .vmrk file at index 1 of
        the returned tuple.

    """
    fname, ext = _parse_ext(vhdr_path)
    if ext != '.vhdr':
        raise ValueError(f'Expecting file ending in ".vhdr",'
                         f' but got {ext}')

    # Header file seems fine
    # extract encoding from brainvision header file, or default to utf-8
    enc = _get_brainvision_encoding(vhdr_path)

    # ... and read it
    with open(vhdr_path, 'r', encoding=enc) as f:
        lines = f.readlines()

    # Try to find data file .eeg
    eeg_file_match = re.search(r'DataFile=(.*\.eeg)', ' '.join(lines))
    if not eeg_file_match:
        raise ValueError('Could not find a .eeg file link in'
                         f' {vhdr_path}')
    else:
        eeg_file = eeg_file_match.groups()[0]

    # Try to find marker file .vmrk
    vmrk_file_match = re.search(r'MarkerFile=(.*\.vmrk)', ' '.join(lines))
    if not vmrk_file_match:
        raise ValueError('Could not find a .vmrk file link in'
                         f' {vhdr_path}')
    else:
        vmrk_file = vmrk_file_match.groups()[0]

    # Make sure we are dealing with file names as is customary, not paths
    # Paths are problematic when copying the files to another system. Instead,
    # always use the file name and keep the file triplet in the same directory
    assert os.sep not in eeg_file
    assert os.sep not in vmrk_file

    # Assert the paths exist
    head, tail = op.split(vhdr_path)
    eeg_file_path = op.join(head, eeg_file)
    vmrk_file_path = op.join(head, vmrk_file)
    assert op.exists(eeg_file_path)
    assert op.exists(vmrk_file_path)

    # Return the paths
    return (eeg_file_path, vmrk_file_path)
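
A quick usage sketch, assuming a complete BrainVision triplet under a hypothetical data/ directory:

# The .eeg and .vmrk siblings must sit next to the header file.
eeg_path, vmrk_path = _get_brainvision_paths('data/sub-01_task-rest.vhdr')
print(eeg_path)   # data/sub-01_task-rest.eeg
print(vmrk_path)  # data/sub-01_task-rest.vmrk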
Example #5
def test_parse_ext():
    """Test the file extension extraction."""
    f = 'sub-05_task-matchingpennies.vhdr'
    fname, ext = _parse_ext(f)
    assert fname == 'sub-05_task-matchingpennies'
    assert ext == '.vhdr'

    # Test for case where no extension: assume BTI format
    f = 'sub-01_task-rest'
    fname, ext = _parse_ext(f)
    assert fname == f
    assert ext == '.pdf'

    # Get a .nii.gz file
    f = 'sub-01_task-rest.nii.gz'
    fname, ext = _parse_ext(f)
    assert fname == 'sub-01_task-rest'
    assert ext == '.nii.gz'
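
The test pins down three behaviors: ordinary extensions are split off, a missing extension is treated as BTi data (reported as '.pdf'), and '.nii.gz' counts as one compound extension. A minimal sketch consistent with those cases (not the actual mne-bids implementation) could look like this:

import os.path as op

def _parse_ext_sketch(raw_fname):
    """Split a file name into (stem, extension), BIDS-style."""
    fname, ext = op.splitext(str(raw_fname))
    if fname.endswith('.nii') and ext == '.gz':
        # Treat .nii.gz as a single compound extension.
        fname, ext = fname[:-len('.nii')], '.nii.gz'
    elif not ext:
        # No extension: assume a BTi/4D recording, conventionally '.pdf'.
        ext = '.pdf'
    return fname, ext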
Example #6
def _replace_ext(fname, ext, verbose=False):
    """Replace the extension of the fname with the passed extension."""
    if verbose:
        print(f"Trying to replace {fname} with extension {ext}")

    fname, _ext = _parse_ext(fname, verbose=verbose)
    if not ext.startswith("."):
        ext = "." + ext

    return fname + ext
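
For example (hypothetical file names), swapping a BrainVision header extension for a sibling extension:

_replace_ext('sub-01_task-rest.vhdr', 'eeg')    # -> 'sub-01_task-rest.eeg'
_replace_ext('sub-01_task-rest.vhdr', '.vmrk')  # -> 'sub-01_task-rest.vmrk'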
Example #7
def _add_desc_to_bids_fname(bids_fname, description, verbose: bool = True):
    if "desc" in str(bids_fname):
        return bids_fname

    bids_fname, ext = _parse_ext(bids_fname, verbose)

    # split by the datatype
    datatype = bids_fname.split("_")[-1]
    source_bids_fname = bids_fname.split(f"_{datatype}")[0]

    result_fname = f"{source_bids_fname}_desc-{description}_{datatype}{ext}"
    return result_fname
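
A usage sketch; the desc entity is spliced in just before the trailing datatype suffix:

_add_desc_to_bids_fname('sub-01_task-rest_eeg.vhdr', description='cleaned')
# -> 'sub-01_task-rest_desc-cleaned_eeg.vhdr'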
Example #8
def _read_raw(raw_fpath,
              electrode=None,
              hsp=None,
              hpi=None,
              allow_maxshield=False,
              config=None,
              **kwargs):
    """Read a raw file into MNE, making inferences based on extension."""
    _, ext = _parse_ext(raw_fpath)

    # KIT systems
    if ext in ['.con', '.sqd']:
        raw = io.read_raw_kit(raw_fpath,
                              elp=electrode,
                              hsp=hsp,
                              mrk=hpi,
                              preload=False,
                              **kwargs)

    # BTi systems
    elif ext == '.pdf':
        raw = io.read_raw_bti(raw_fpath,
                              config_fname=config,
                              head_shape_fname=hsp,
                              preload=False,
                              **kwargs)

    elif ext == '.fif':
        raw = reader[ext](raw_fpath, allow_maxshield, **kwargs)

    elif ext in ['.ds', '.vhdr', '.set', '.edf', '.bdf', '.EDF']:
        raw_fpath = Path(raw_fpath)
        # handle EDF extension upper/lower casing
        if ext == '.edf' and not raw_fpath.exists():
            raw_fpath = raw_fpath.with_suffix('.EDF')
        elif ext == '.EDF' and not raw_fpath.exists():
            raw_fpath = raw_fpath.with_suffix('.edf')
        raw = reader[ext](raw_fpath, **kwargs)

    # MEF and NWB are allowed, but not yet implemented
    elif ext in ['.mef', '.nwb']:
        raise ValueError(f'Got "{ext}" as extension. This is an allowed '
                         f'extension but there is no IO support for this '
                         f'file format yet.')

    # No supported data found ...
    # ---------------------------
    else:
        raise ValueError(f'Raw file name extension must be one '
                         f'of {ALLOWED_DATATYPE_EXTENSIONS}\n'
                         f'Got {ext}')
    return raw
Example #9
def _read_raw(raw_path,
              electrode=None,
              hsp=None,
              hpi=None,
              allow_maxshield=False,
              config_path=None,
              **kwargs):
    """Read a raw file into MNE, making inferences based on extension."""
    _, ext = _parse_ext(raw_path)

    # KIT systems
    if ext in ['.con', '.sqd']:
        raw = io.read_raw_kit(raw_path,
                              elp=electrode,
                              hsp=hsp,
                              mrk=hpi,
                              preload=False,
                              **kwargs)

    # BTi systems
    elif ext == '.pdf':
        raw = io.read_raw_bti(
            pdf_fname=str(raw_path),  # FIXME MNE should accept Path!
            config_fname=str(config_path),  # FIXME MNE should accept Path!
            head_shape_fname=hsp,
            preload=False,
            **kwargs)

    elif ext == '.fif':
        raw = reader[ext](raw_path, allow_maxshield, **kwargs)

    elif ext in ['.ds', '.vhdr', '.set', '.edf', '.bdf', '.EDF', '.snirf']:
        if (ext == '.snirf'
                and not check_version('mne', '1.0')):  # pragma: no cover
            raise RuntimeError(
                'fNIRS support in MNE-BIDS requires MNE-Python version 1.0')
        raw_path = Path(raw_path)
        raw = reader[ext](raw_path, **kwargs)

    # MEF and NWB are allowed, but not yet implemented
    elif ext in ['.mef', '.nwb']:
        raise ValueError(f'Got "{ext}" as extension. This is an allowed '
                         f'extension but there is no IO support for this '
                         f'file format yet.')

    # No supported data found ...
    # ---------------------------
    else:
        raise ValueError(f'Raw file name extension must be one '
                         f'of {ALLOWED_DATATYPE_EXTENSIONS}\n'
                         f'Got {ext}')
    return raw
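
A usage sketch with hypothetical paths; the reader is dispatched purely on the file extension, and KIT recordings take their companion files explicitly:

raw_kit = _read_raw('data/rec.sqd', electrode='data/rec.elp',
                    hsp='data/rec.hsp', hpi='data/rec_mrk.sqd')
raw_fif = _read_raw('data/rec_raw.fif', allow_maxshield=True)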
Example #10
    def save(self, fpath: Union[str, Path]):
        """Save ResultInfo as a JSON dictionary.
        Parameters
        ----------
        fpath : str | pathlib.Path
            The filepath to save the ResultInfo data structure as JSON.
        """
        fname, ext = _parse_ext(fpath, verbose=False)
        if ext != ".json":
            raise RuntimeError("Saving Result Info metadata "
                               f"as {ext} is not supported. "
                               "Please use .json")

        with open(fpath, "w") as fout:
            json.dump(self, fout, cls=NumpyEncoder, indent=4, sort_keys=True)
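
A brief usage sketch; `result_info` stands in for an existing ResultInfo instance, and the output path (which must end in .json) is hypothetical:

result_info.save('derivatives/sub-01_resultinfo.json')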
Example #11
def _summarize_channels_tsv(root, scans_fpaths, verbose=True):
    """Summarize channels.tsv data in BIDS root directory.

    Currently, summarizes all REQUIRED components of channels
    data, and some RECOMMENDED and OPTIONAL components.

    Parameters
    ----------
    root : str | pathlib.Path
        The path of the root of the BIDS compatible folder.
    scans_fpaths : list
        A list of all *_scans.tsv files in ``root``. The summary
        will occur for all scans listed in the *_scans.tsv files.
    verbose : bool
        Set verbose output to true or false.

    Returns
    -------
    template_dict : dict
        A dictionary of values for various template strings.
    """
    root = Path(root)

    # keep track of channel type, status
    ch_status_count = {'bad': [], 'good': []}
    ch_count = []

    # loop through each scan
    for scan_fpath in scans_fpaths:
        # load in the scans.tsv file
        # and read metadata for each scan
        scans_tsv = _from_tsv(scan_fpath)
        scans = scans_tsv['filename']
        for scan in scans:
            # summarize metadata of recordings
            bids_path, _ = _parse_ext(scan)
            datatype = op.dirname(scan)
            if datatype not in ['meg', 'eeg', 'ieeg']:
                continue

            # convert to BIDS Path
            params = get_entities_from_fname(bids_path)
            bids_path = BIDSPath(root=root, **params)

            # XXX: improve to allow emptyroom
            if bids_path.subject == 'emptyroom':
                continue

            channels_fname = _find_matching_sidecar(bids_path=bids_path,
                                                    suffix='channels',
                                                    extension='.tsv')

            # summarize channels.tsv
            channels_tsv = _from_tsv(channels_fname)
            for status in ch_status_count.keys():
                ch_status = [
                    ch for ch in channels_tsv['status'] if ch == status
                ]
                ch_status_count[status].append(len(ch_status))
            ch_count.append(len(channels_tsv['name']))

    # create summary template strings for status
    template_dict = {
        'mean_chs': np.mean(ch_count),
        'std_chs': np.std(ch_count),
        'mean_good_chs': np.mean(ch_status_count['good']),
        'std_good_chs': np.std(ch_status_count['good']),
        'mean_bad_chs': np.mean(ch_status_count['bad']),
        'std_bad_chs': np.std(ch_status_count['bad']),
    }
    for key, val in template_dict.items():
        template_dict[key] = round(val, 2)
    return template_dict
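
A usage sketch, assuming the *_scans.tsv files have already been collected from a hypothetical BIDS root, e.g. with a glob:

from pathlib import Path

root = Path('bids_root')  # hypothetical BIDS root
scans_fpaths = sorted(root.rglob('*_scans.tsv'))
summary = _summarize_channels_tsv(root, scans_fpaths)
print(summary['mean_chs'], summary['std_chs'])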
Example #12
def _summarize_sidecar_json(root, scans_fpaths, verbose=True):
    """Summarize scans in BIDS root directory.

    Parameters
    ----------
    root : str | pathlib.Path
        The path of the root of the BIDS compatible folder.
    scans_fpaths : list
        A list of all *_scans.tsv files in ``root``. The summary
        will occur for all scans listed in the *_scans.tsv files.
    verbose : bool
        Set verbose output to true or false.

    Returns
    -------
    template_dict : dict
        A dictionary of values for various template strings.
    """
    n_scans = 0
    powerlinefreqs, sfreqs = set(), set()
    manufacturers = set()
    length_recordings = []

    # loop through each scan
    for scan_fpath in scans_fpaths:
        # load in the scans.tsv file
        # and read metadata for each scan
        scans_tsv = _from_tsv(scan_fpath)
        scans = scans_tsv['filename']
        for scan in scans:
            # summarize metadata of recordings
            bids_path, ext = _parse_ext(scan)
            datatype = op.dirname(scan)
            if datatype not in ALLOWED_DATATYPES:
                continue

            n_scans += 1

            # convert to BIDS Path
            params = get_entities_from_fname(bids_path)
            bids_path = BIDSPath(root=root, **params)

            # XXX: improve to allow emptyroom
            if bids_path.subject == 'emptyroom':
                continue

            sidecar_fname = _find_matching_sidecar(bids_path=bids_path,
                                                   suffix=datatype,
                                                   extension='.json')
            with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin:
                sidecar_json = json.load(fin)

            # aggregate metadata from each scan
            # REQUIRED kwargs
            sfreq = sidecar_json['SamplingFrequency']
            powerlinefreq = str(sidecar_json['PowerLineFrequency'])
            software_filters = sidecar_json.get('SoftwareFilters')
            if not software_filters:
                software_filters = 'n/a'

            # RECOMMENDED kwargs
            manufacturer = sidecar_json.get('Manufacturer', 'n/a')
            record_duration = sidecar_json.get('RecordingDuration', 'n/a')

            sfreqs.add(str(np.round(sfreq, 2)))
            powerlinefreqs.add(str(powerlinefreq))
            if manufacturer != 'n/a':
                manufacturers.add(manufacturer)
            length_recordings.append(record_duration)

    # XXX: length summary is only allowed if no 'n/a' was found
    if any([dur == 'n/a' for dur in length_recordings]):
        length_recordings = None

    template_dict = {
        'n_scans': n_scans,
        'manufacturer': list(manufacturers),
        'sfreq': sfreqs,
        'powerlinefreq': powerlinefreqs,
        'software_filters': software_filters,
        'length_recordings': length_recordings,
    }
    return template_dict
Example #13
def copyfile_brainvision(vhdr_src, vhdr_dest, anonymize=None, verbose=False):
    """Copy a BrainVision file triplet to a new location and repair links.

    The BrainVision file format consists of three files: .vhdr, .eeg, and .vmrk
    The .eeg and .vmrk files associated with the .vhdr file will be given names
    as in `vhdr_dest` with adjusted extensions. Internal file pointers will be
    fixed.

    Parameters
    ----------
    vhdr_src : str
        The src path of the .vhdr file to be copied.
    vhdr_dest : str
        The destination path of the .vhdr file.
    anonymize : dict | None
        If None (default), no anonymization is performed.
        If dict, data will be anonymized depending on the keys provided with
        the dict: `daysback` is a required key, `keep_his` is an optional key.

        `daysback` : int
            Number of days by which to move back the recording date in time.
            In studies with multiple subjects the relative recording date
            differences between subjects can be kept by using the same number
            of `daysback` for all subject anonymizations. `daysback` should be
            large enough to shift the date prior to 1925 to conform with BIDS
            anonymization rules.

        `keep_his` : bool
            By default (False), all subject information next to the recording
            date will be overwritten as well. If True, keep subject information
            apart from the recording date.

    verbose : bool
        Determine whether results should be logged. Defaults to False.

    See Also
    --------
    mne.io.anonymize_info

    """
    # Get extension of the BrainVision file
    fname_src, ext_src = _parse_ext(vhdr_src)
    fname_dest, ext_dest = _parse_ext(vhdr_dest)
    if ext_src != ext_dest:
        raise ValueError(f'Need to move data with same extension'
                         f' but got "{ext_src}", "{ext_dest}"')

    eeg_file_path, vmrk_file_path = _get_brainvision_paths(vhdr_src)

    # extract encoding from brainvision header file, or default to utf-8
    enc = _get_brainvision_encoding(vhdr_src, verbose)

    # Copy data .eeg ... no links to repair
    sh.copyfile(eeg_file_path, fname_dest + '.eeg')

    # Write new header and marker files, fixing the file pointer links
    # For that, we need to replace an old "basename" with a new one
    # assuming that all .eeg, .vhdr, .vmrk share one basename
    __, basename_src = op.split(fname_src)
    assert basename_src + '.eeg' == op.split(eeg_file_path)[-1]
    assert basename_src + '.vmrk' == op.split(vmrk_file_path)[-1]
    __, basename_dest = op.split(fname_dest)
    search_lines = ['DataFile=' + basename_src + '.eeg',
                    'MarkerFile=' + basename_src + '.vmrk']

    with open(vhdr_src, 'r', encoding=enc) as fin:
        with open(vhdr_dest, 'w', encoding=enc) as fout:
            for line in fin.readlines():
                if line.strip() in search_lines:
                    line = line.replace(basename_src, basename_dest)
                fout.write(line)

    with open(vmrk_file_path, 'r', encoding=enc) as fin:
        with open(fname_dest + '.vmrk', 'w', encoding=enc) as fout:
            for line in fin.readlines():
                if line.strip() in search_lines:
                    line = line.replace(basename_src, basename_dest)
                fout.write(line)

    if anonymize is not None:
        raw = read_raw_brainvision(vhdr_src, preload=False, verbose=0)
        daysback, keep_his = _check_anonymize(anonymize, raw, '.vhdr')
        raw.info = anonymize_info(raw.info, daysback=daysback,
                                  keep_his=keep_his)
        _anonymize_brainvision(fname_dest + '.vhdr',
                               date=raw.info['meas_date'])

    if verbose:
        for ext in ['.eeg', '.vhdr', '.vmrk']:
            _, fname = os.path.split(fname_dest + ext)
            dirname = op.dirname(op.realpath(vhdr_dest))
            print(f'Created "{fname}" in "{dirname}".')
        if anonymize:
            print('Anonymized all dates in VHDR and VMRK.')
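
A usage sketch with anonymization (hypothetical paths); `daysback` shifts the recording date as described in the docstring:

copyfile_brainvision('data/orig.vhdr',
                     'bids_root/sub-01/eeg/sub-01_task-rest_eeg.vhdr',
                     anonymize=dict(daysback=35000, keep_his=False),
                     verbose=True)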
Example #14
def copyfile_edf(src, dest, anonymize=None):
    """Copy an EDF, EDF+, or BDF file to a new location, optionally anonymize.

    .. warning:: EDF/EDF+/BDF files contain two fields for recording dates:
                 A generic "startdate" field that supports only 2-digit years,
                 and a "Startdate" field as part of the "local recording
                 identification", which supports 4-digit years.
                 If you want to anonymize your file, MNE-BIDS will set the
                 "startdate" field to 85 (i.e., 1985), the earliest possible
                 date for that field. However, the "Startdate" field in the
                 file's "local recording identification" and the date in the
                 session's corresponding ``scans.tsv`` will be set correctly
                 according to the argument provided to the ``anonymize``
                 parameter. Note that it is possible that not all EDF/EDF+/BDF
                 reading software parses the accurate recording date, and
                 that for some reading software, the wrong year (1985) may
                 be parsed.

    Parameters
    ----------
    src : str | pathlib.Path
        The source path of the .edf or .bdf file to be copied.
    dest : str | pathlib.Path
        The destination path of the .edf or .bdf file.
    anonymize : dict | None
        If None (default), no anonymization is performed.
        If dict, data will be anonymized depending on the keys provided with
        the dict: `daysback` is a required key, `keep_his` is an optional key.

        `daysback` : int
            Number of days by which to move back the recording date in time.
            In studies with multiple subjects the relative recording date
            differences between subjects can be kept by using the same number
            of `daysback` for all subject anonymizations. `daysback` should be
            large enough to shift the date prior to 1925 to conform with BIDS
            anonymization rules. Due to limitations of the EDF/BDF format, the
            year of the anonymized date will always be set to 1985 in the
            'startdate' field of the file. The correctly-shifted year will be
            written to the 'local recording identification' region of the
            file header, which may not be parsed by all EDF/EDF+/BDF reading
            software.

        `keep_his` : bool
            By default (False), all subject information next to the recording
            date will be overwritten as well. If True, keep subject information
            apart from the recording date. Participant names and birthdates
            will always be anonymized if present, regardless of this setting.

    See Also
    --------
    mne.io.anonymize_info
    copyfile_brainvision
    copyfile_bti
    copyfile_ctf
    copyfile_eeglab
    copyfile_kit

    """
    # Ensure source & destination extensions are the same
    fname_src, ext_src = _parse_ext(src)
    fname_dest, ext_dest = _parse_ext(dest)
    if ext_src != ext_dest:
        raise ValueError(f'Need to move data with same extension, '
                         f' but got "{ext_src}" and "{ext_dest}"')

    # Copy data prior to any anonymization
    sh.copyfile(src, dest)

    # Anonymize EDF/BDF data, if requested
    if anonymize is not None:
        if ext_src == '.bdf':
            raw = read_raw_bdf(dest, preload=False, verbose=0)
        elif ext_src == '.edf':
            raw = read_raw_edf(dest, preload=False, verbose=0)
        else:
            raise ValueError('Unsupported file type ({0})'.format(ext_src))

        # Get subject info, recording info, and recording date
        with open(dest, 'rb') as f:
            f.seek(8)  # id_info field starts 8 bytes in
            id_info = f.read(80).decode('ascii').rstrip()
            rec_info = f.read(80).decode('ascii').rstrip()

        # Parse metadata from file
        if len(id_info) == 0 or len(id_info.split(' ')) != 4:
            id_info = "X X X X"
        if len(rec_info) == 0 or len(rec_info.split(' ')) != 5:
            rec_info = "Startdate X X X X"
        pid, sex, birthdate, name = id_info.split(' ')
        start_date, admin_code, tech, equip = rec_info.split(' ')[1:5]

        # Try to anonymize the recording date
        daysback, keep_his = _check_anonymize(anonymize, raw, '.edf')
        anonymize_info(raw.info, daysback=daysback, keep_his=keep_his)
        start_date = '01-JAN-1985'
        meas_date = '01.01.85'

        # Anonymize ID info and write to file
        if keep_his:
            # Always remove participant birthdate and name to be safe
            id_info = [pid, sex, "X", "X"]
            rec_info = ["Startdate", start_date, admin_code, tech, equip]
        else:
            id_info = ["0", "X", "X", "X"]
            rec_info = ["Startdate", start_date, "X",
                        "mne-bids_anonymize", "X"]
        with open(dest, 'r+b') as f:
            f.seek(8)  # id_info field starts 8 bytes in
            f.write(bytes(" ".join(id_info).ljust(80), 'ascii'))
            f.write(bytes(" ".join(rec_info).ljust(80), 'ascii'))
            f.write(bytes(meas_date, 'ascii'))
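
A usage sketch (hypothetical paths); source and destination must share the same extension:

copyfile_edf('data/orig.edf',
             'bids_root/sub-01/eeg/sub-01_task-rest_eeg.edf',
             anonymize=dict(daysback=35000))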
Example #15
def get_head_mri_trans(bids_path,
                       extra_params=None,
                       t1_bids_path=None,
                       fs_subject=None,
                       fs_subjects_dir=None,
                       *,
                       kind=None,
                       verbose=None):
    """Produce transformation matrix from MEG and MRI landmark points.

    Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar
    files of (i) the MEG and (ii) the T1-weighted MRI data. The two sets of
    points will then be used to calculate a transformation matrix from head
    coordinates to MRI coordinates.

    .. note:: The MEG and MRI data need **not** necessarily be stored in the
              same session or even in the same BIDS dataset. See the
              ``t1_bids_path`` parameter for details.

    Parameters
    ----------
    bids_path : BIDSPath
        The path of the electrophysiology recording. If ``datatype`` and
        ``suffix`` are not present, they will be set to ``'meg'``, and a
        warning will be raised.

        .. versionchanged:: 0.10
           A warning is raised if ``datatype`` or ``suffix`` are not set.
    extra_params : None | dict
        Extra parameters to be passed to :func:`mne.io.read_raw` when reading
        the MEG file.
    t1_bids_path : BIDSPath | None
        If ``None`` (default), will try to discover the T1-weighted MRI file
        based on the name and location of the MEG recording specified via the
        ``bids_path`` parameter. Alternatively, you can explicitly specify which
        T1-weighted MRI scan to use for extraction of MRI landmarks. To do
        that, pass a :class:`mne_bids.BIDSPath` pointing to the scan.
        Use this parameter e.g. if the T1 scan was recorded during a different
        session than the MEG. It is even possible to point to a T1 image stored
        in an entirely different BIDS dataset than the MEG data.
    fs_subject : str
        The subject identifier used for FreeSurfer.

        .. versionchanged:: 0.10
           Does not default anymore to ``bids_path.subject`` if ``None``.
    fs_subjects_dir : path-like | None
        The FreeSurfer subjects directory. If ``None``, defaults to the
        ``SUBJECTS_DIR`` environment variable.

        .. versionadded:: 0.8
    kind : str | None
        The suffix of the anatomical landmark names in the JSON sidecar.
        A suffix might be present e.g. to distinguish landmarks between
        sessions. If provided, should not include a leading underscore ``_``.
        For example, if the landmark names in the JSON sidecar file are
        ``LPA_ses-1``, ``RPA_ses-1``, ``NAS_ses-1``, you should pass
        ``'ses-1'`` here.
        If ``None``, no suffix is appended, the landmarks named
        ``Nasion`` (or ``NAS``), ``LPA``, and ``RPA`` will be used.

        .. versionadded:: 0.10
    %(verbose)s

    Returns
    -------
    trans : mne.transforms.Transform
        The data transformation matrix from head to MRI coordinates.
    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    if not isinstance(bids_path, BIDSPath):
        raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
                           'instantiate using mne_bids.BIDSPath().')

    # check root available
    meg_bids_path = bids_path.copy()
    del bids_path
    if meg_bids_path.root is None:
        raise ValueError('The root of the "bids_path" must be set. '
                         'Please use `bids_path.update(root="<root>")` '
                         'to set the root of the BIDS folder to read.')

    # if the bids_path is underspecified, only get info for MEG data
    if meg_bids_path.datatype is None:
        meg_bids_path.datatype = 'meg'
        warn(
            'bids_path did not have a datatype set. Assuming "meg". This '
            'will raise an exception in the future.',
            module='mne_bids',
            category=DeprecationWarning)
    if meg_bids_path.suffix is None:
        meg_bids_path.suffix = 'meg'
        warn(
            'bids_path did not have a suffix set. Assuming "meg". This '
            'will raise an exception in the future.',
            module='mne_bids',
            category=DeprecationWarning)

    # Get the sidecar file for MRI landmarks
    t1w_bids_path = ((meg_bids_path if t1_bids_path is None else
                      t1_bids_path).copy().update(datatype='anat',
                                                  suffix='T1w',
                                                  task=None))
    t1w_json_path = _find_matching_sidecar(bids_path=t1w_bids_path,
                                           extension='.json',
                                           on_error='ignore')
    del t1_bids_path

    if t1w_json_path is not None:
        t1w_json_path = Path(t1w_json_path)

    if t1w_json_path is None or not t1w_json_path.exists():
        raise FileNotFoundError(
            f'Did not find T1w JSON sidecar file, tried location: '
            f'{t1w_json_path}')
    for extension in ('.nii', '.nii.gz'):
        t1w_path_candidate = t1w_json_path.with_suffix(extension)
        if t1w_path_candidate.exists():
            t1w_bids_path = get_bids_path_from_fname(fname=t1w_path_candidate)
            break

    if not t1w_bids_path.fpath.exists():
        raise FileNotFoundError(
            f'Did not find T1w recording file, tried location: '
            f'{t1w_path_candidate.name.replace(".nii.gz", "")}[.nii, .nii.gz]')

    # Get MRI landmarks from the JSON sidecar
    t1w_json = json.loads(t1w_json_path.read_text(encoding='utf-8'))
    mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())

    # landmarks array: rows: [LPA, NAS, RPA]; columns: [x, y, z]
    suffix = f"_{kind}" if kind is not None else ""
    mri_landmarks = np.full((3, 3), np.nan)
    for landmark_name, coords in mri_coords_dict.items():
        if landmark_name.upper() == ('LPA' + suffix).upper():
            mri_landmarks[0, :] = coords
        elif landmark_name.upper() == ('RPA' + suffix).upper():
            mri_landmarks[2, :] = coords
        elif (landmark_name.upper() == ('NAS' + suffix).upper()
              or landmark_name.lower() == ('nasion' + suffix).lower()):
            mri_landmarks[1, :] = coords
        else:
            continue

    if np.isnan(mri_landmarks).any():
        raise RuntimeError(
            f'Could not extract fiducial points from T1w sidecar file: '
            f'{t1w_json_path}\n\n'
            f'The sidecar file SHOULD contain a key '
            f'"AnatomicalLandmarkCoordinates" pointing to an '
            f'object with the keys "LPA", "NAS", and "RPA". '
            f'Yet, the following structure was found:\n\n'
            f'{mri_coords_dict}')

    # The MRI landmarks are in "voxels". We need to convert them to the
    # Neuromag RAS coordinate system in order to compare them with MEG
    # landmarks. See also: `mne_bids.write.write_anat`
    if fs_subject is None:
        warn(
            'Passing "fs_subject=None" has been deprecated and will raise '
            'an error in future versions. Please explicitly specify the '
            'FreeSurfer subject name.', DeprecationWarning)
        fs_subject = f'sub-{meg_bids_path.subject}'

    fs_subjects_dir = get_subjects_dir(fs_subjects_dir, raise_error=False)
    fs_t1_path = Path(fs_subjects_dir) / fs_subject / 'mri' / 'T1.mgz'
    if not fs_t1_path.exists():
        raise ValueError(
            f"Could not find {fs_t1_path}. Consider running FreeSurfer's "
            f"'recon-all` for subject {fs_subject}.")
    fs_t1_mgh = nib.load(str(fs_t1_path))
    t1_nifti = nib.load(str(t1w_bids_path.fpath))

    # Convert to MGH format to access vox2ras method
    t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)

    # convert to scanner RAS
    mri_landmarks = apply_trans(t1_mgh.header.get_vox2ras(), mri_landmarks)

    # convert to FreeSurfer T1 voxels (same scanner RAS as T1)
    mri_landmarks = apply_trans(fs_t1_mgh.header.get_ras2vox(), mri_landmarks)

    # now extract transformation matrix and put back to RAS coordinates of MRI
    vox2ras_tkr = fs_t1_mgh.header.get_vox2ras_tkr()
    mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
    mri_landmarks = mri_landmarks * 1e-3

    # Get MEG landmarks from the raw file
    _, ext = _parse_ext(meg_bids_path)
    if extra_params is None:
        extra_params = dict()
    if ext == '.fif':
        extra_params['allow_maxshield'] = True

    raw = read_raw_bids(bids_path=meg_bids_path, extra_params=extra_params)

    if (raw.get_montage() is None or raw.get_montage().get_positions() is None
            or any([
                raw.get_montage().get_positions()[fid_key] is None
                for fid_key in ('nasion', 'lpa', 'rpa')
            ])):
        raise RuntimeError(
            f'Could not extract fiducial points from ``raw`` file: '
            f'{meg_bids_path}\n\n'
            f'The ``raw`` file SHOULD contain digitization points '
            'for the nasion and left and right pre-auricular points '
            'but none were found')
    pos = raw.get_montage().get_positions()
    meg_landmarks = np.asarray((pos['lpa'], pos['nasion'], pos['rpa']))

    # Given the two sets of points, fit the transform
    trans_fitted = fit_matched_points(src_pts=meg_landmarks,
                                      tgt_pts=mri_landmarks)
    trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
    return trans
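
A usage sketch, assuming a BIDS dataset at a hypothetical root and a completed FreeSurfer reconstruction for the subject:

bids_path = BIDSPath(subject='01', session='01', task='rest', run='01',
                     datatype='meg', suffix='meg', root='bids_root')
trans = get_head_mri_trans(bids_path, fs_subject='sub-01',
                           fs_subjects_dir='subjects_dir')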
Example #16
def get_head_mri_trans(bids_path, extra_params=None, t1_bids_path=None):
    """Produce transformation matrix from MEG and MRI landmark points.

    Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar
    files of (i) the MEG and (ii) the T1-weighted MRI data. The two sets of
    points will then be used to calculate a transformation matrix from head
    coordinates to MRI coordinates.

    .. note:: The MEG and MRI data need **not** necessarily be stored in the
              same session or even in the same BIDS dataset. See the
              ``t1_bids_path`` parameter for details.

    Parameters
    ----------
    bids_path : mne_bids.BIDSPath
        The path of the MEG recording.
    extra_params : None | dict
        Extra parameters to be passed to :func:`mne.io.read_raw` when reading
        the MEG file.
    t1_bids_path : mne_bids.BIDSPath | None
        If ``None`` (default), will try to discover the T1-weighted MRI file
        based on the name and location of the MEG recording specified via the
        ``bids_path`` parameter. Alternatively, you can explicitly specify which
        T1-weighted MRI scan to use for extraction of MRI landmarks. To do
        that, pass a :class:`mne_bids.BIDSPath` pointing to the scan.
        Use this parameter e.g. if the T1 scan was recorded during a different
        session than the MEG. It is even possible to point to a T1 image stored
        in an entirely different BIDS dataset than the MEG data.

        .. versionadded:: 0.8

    Returns
    -------
    trans : mne.transforms.Transform
        The data transformation matrix from head to MRI coordinates.
    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    if not isinstance(bids_path, BIDSPath):
        raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
                           'instantiate using mne_bids.BIDSPath().')

    # check root available
    meg_bids_path = bids_path.copy()
    del bids_path
    if meg_bids_path.root is None:
        raise ValueError('The root of the "bids_path" must be set. '
                         'Please use `bids_path.update(root="<root>")` '
                         'to set the root of the BIDS folder to read.')

    # only get this for MEG data
    meg_bids_path.update(datatype='meg', suffix='meg')

    # Get the sidecar file for MRI landmarks
    if t1_bids_path is None:
        t1w_json_path = _find_matching_sidecar(meg_bids_path,
                                               suffix='T1w',
                                               extension='.json')
    else:
        t1_bids_path = t1_bids_path.copy().update(suffix='T1w',
                                                  datatype='anat')
        t1w_json_path = _find_matching_sidecar(t1_bids_path,
                                               suffix='T1w',
                                               extension='.json')

    # Get MRI landmarks from the JSON sidecar
    with open(t1w_json_path, 'r', encoding='utf-8') as f:
        t1w_json = json.load(f)
    mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())

    # landmarks array: rows: [LPA, NAS, RPA]; columns: [x, y, z]
    mri_landmarks = np.full((3, 3), np.nan)
    for landmark_name, coords in mri_coords_dict.items():
        if landmark_name.upper() == 'LPA':
            mri_landmarks[0, :] = coords
        elif landmark_name.upper() == 'RPA':
            mri_landmarks[2, :] = coords
        elif (landmark_name.upper() == 'NAS'
              or landmark_name.lower() == 'nasion'):
            mri_landmarks[1, :] = coords
        else:
            continue

    if np.isnan(mri_landmarks).any():
        raise RuntimeError(
            f'Could not extract fiducial points from T1w sidecar file: '
            f'{t1w_json_path}\n\n'
            f'The sidecar file SHOULD contain a key '
            f'"AnatomicalLandmarkCoordinates" pointing to an '
            f'object with the keys "LPA", "NAS", and "RPA". '
            f'Yet, the following structure was found:\n\n'
            f'{mri_coords_dict}')

    # The MRI landmarks are in "voxels". We need to convert them to the
    # Neuromag RAS coordinate system in order to compare them with the MEG
    # landmarks; see also: `mne_bids.write.write_anat`
    t1w_path = t1w_json_path.replace('.json', '.nii')
    if not op.exists(t1w_path):
        t1w_path += '.gz'  # perhaps it is .nii.gz? ... else raise an error
    if not op.exists(t1w_path):
        raise RuntimeError(
            'Could not find the T1 weighted MRI associated '
            'with "{}". Tried: "{}" but it does not exist.'.format(
                t1w_json_path, t1w_path))
    t1_nifti = nib.load(t1w_path)
    # Convert to MGH format to access vox2ras method
    t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)

    # now extract transformation matrix and put back to RAS coordinates of MRI
    vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()
    mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
    mri_landmarks = mri_landmarks * 1e-3

    # Get MEG landmarks from the raw file
    _, ext = _parse_ext(meg_bids_path)
    if extra_params is None:
        extra_params = dict()
    if ext == '.fif':
        extra_params['allow_maxshield'] = True

    raw = read_raw_bids(bids_path=meg_bids_path, extra_params=extra_params)
    meg_coords_dict = _extract_landmarks(raw.info['dig'])
    meg_landmarks = np.asarray((meg_coords_dict['LPA'], meg_coords_dict['NAS'],
                                meg_coords_dict['RPA']))

    # Given the two sets of points, fit the transform
    trans_fitted = fit_matched_points(src_pts=meg_landmarks,
                                      tgt_pts=mri_landmarks)
    trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
    return trans
Example #17
def get_head_mri_trans(bids_path, extra_params=None):
    """Produce transformation matrix from MEG and MRI landmark points.

    Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar
    files of (i) the MEG and (ii) the T1 weighted MRI data. The two sets of
    points will then be used to calculate a transformation matrix from head
    coordinates to MRI coordinates.

    Parameters
    ----------
    bids_path : mne_bids.BIDSPath
        The path of the recording for which to retrieve the transformation. The
        :class:`mne_bids.BIDSPath` instance passed here **must** have the
        ``.root`` attribute set.
    extra_params : None | dict
        Extra parameters to be passed to MNE read_raw_* functions when reading
        the landmarks from the MEG file.
        If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.

    Returns
    -------
    trans : mne.transforms.Transform
        The data transformation matrix from head to MRI coordinates.

    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    if not isinstance(bids_path, BIDSPath):
        raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
                           'instantiate using mne_bids.BIDSPath().')

    # check root available
    bids_path = bids_path.copy()
    bids_root = bids_path.root
    if bids_root is None:
        raise ValueError('The root of the "bids_path" must be set. '
                         'Please use `bids_path.update(root="<root>")` '
                         'to set the root of the BIDS folder to read.')
    # only get this for MEG data
    bids_path.update(datatype='meg')

    # Get the sidecar file for MRI landmarks
    bids_fname = bids_path.update(suffix='meg', root=bids_root)
    t1w_json_path = _find_matching_sidecar(bids_fname,
                                           suffix='T1w',
                                           extension='.json')

    # Get MRI landmarks from the JSON sidecar
    with open(t1w_json_path, 'r', encoding='utf-8-sig') as f:
        t1w_json = json.load(f)
    mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())
    mri_landmarks = np.asarray(
        (mri_coords_dict.get('LPA', np.nan),
         mri_coords_dict.get('NAS', np.nan),
         mri_coords_dict.get('RPA', np.nan)))
    if np.isnan(mri_landmarks).any():
        raise RuntimeError(
            'Could not parse T1w sidecar file: "{}"\n\n'
            'The sidecar file MUST contain a key '
            '"AnatomicalLandmarkCoordinates" pointing to a '
            'dict with keys "LPA", "NAS", "RPA". '
            'Yet, the following structure was found:\n\n"{}"'.format(
                t1w_json_path, t1w_json))

    # The MRI landmarks are in "voxels". We need to convert them to the
    # Neuromag RAS coordinate system in order to compare them with the MEG
    # landmarks; see also: `mne_bids.write.write_anat`
    t1w_path = t1w_json_path.replace('.json', '.nii')
    if not op.exists(t1w_path):
        t1w_path += '.gz'  # perhaps it is .nii.gz? ... else raise an error
    if not op.exists(t1w_path):
        raise RuntimeError(
            'Could not find the T1 weighted MRI associated '
            'with "{}". Tried: "{}" but it does not exist.'.format(
                t1w_json_path, t1w_path))
    t1_nifti = nib.load(t1w_path)
    # Convert to MGH format to access vox2ras method
    t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)

    # now extract transformation matrix and put back to RAS coordinates of MRI
    vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()
    mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
    mri_landmarks = mri_landmarks * 1e-3

    # Get MEG landmarks from the raw file
    _, ext = _parse_ext(bids_fname)
    if extra_params is None:
        extra_params = dict()
        if ext == '.fif':
            extra_params = dict(allow_maxshield=True)

    raw = read_raw_bids(bids_path=bids_path, extra_params=extra_params)
    meg_coords_dict = _extract_landmarks(raw.info['dig'])
    meg_landmarks = np.asarray((meg_coords_dict['LPA'], meg_coords_dict['NAS'],
                                meg_coords_dict['RPA']))

    # Given the two sets of points, fit the transform
    trans_fitted = fit_matched_points(src_pts=meg_landmarks,
                                      tgt_pts=mri_landmarks)
    trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
    return trans
Example #18
def copyfile_eeglab(src, dest):
    """Copy a EEGLAB files to a new location and adjust pointer to '.fdt' file.

    Some EEGLAB .set files come with a .fdt binary file that contains the data.
    When moving a .set file, we need to check for an associated .fdt file and
    move it to an appropriate location as well as update an internal pointer
    within the .set file.

    Parameters
    ----------
    src : str | pathlib.Path
        Path to the source raw .set file.
    dest : str | pathlib.Path
        Path to the destination of the new .set file.

    See Also
    --------
    copyfile_brainvision
    copyfile_bti
    copyfile_ctf
    copyfile_edf
    copyfile_kit

    """
    if not mne.utils.check_version('scipy', '1.5.0'):  # pragma: no cover
        raise ImportError('SciPy >=1.5.0 is required for handling EEGLAB data.')

    # Get extension of the EEGLAB file
    _, ext_src = _parse_ext(src)
    fname_dest, ext_dest = _parse_ext(dest)
    if ext_src != ext_dest:
        raise ValueError(f'Need to move data with same extension'
                         f' but got {ext_src}, {ext_dest}')

    # Load the EEG struct
    uint16_codec = None
    eeg = loadmat(file_name=src, simplify_cells=True,
                  appendmat=False, uint16_codec=uint16_codec)
    oldstyle = False
    if 'EEG' in eeg:
        eeg = eeg['EEG']
        oldstyle = True

    if isinstance(eeg['data'], str):
        # If the data field is a string, it points to a .fdt file in src dir
        fdt_fname = eeg['data']
        assert fdt_fname.endswith('.fdt')
        head, tail = op.split(src)
        fdt_path = op.join(head, fdt_fname)

        # Copy the .fdt file and give it a new name
        sh.copyfile(fdt_path, fname_dest + '.fdt')

        # Now adjust the pointer in the .set file
        head, tail = op.split(fname_dest + '.fdt')
        eeg['data'] = tail

        # Save the EEG dictionary as a Matlab struct again
        mdict = dict(EEG=eeg) if oldstyle else eeg
        savemat(file_name=dest, mdict=mdict, appendmat=False)
    else:
        # If no .fdt file, simply copy the .set file, no modifications
        # necessary
        sh.copyfile(src, dest)
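
A usage sketch (hypothetical paths); if the .set file references an .fdt sibling, that file is copied too and the internal pointer is rewritten:

copyfile_eeglab('data/orig.set',
                'bids_root/sub-01/eeg/sub-01_task-rest_eeg.set')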