Example #1
def _write_coordsystem_json(
    fname: str,
    unit: str,
    img_fname: str = None,
    overwrite: bool = True,
    verbose: bool = True,
):
    system_description = (
        "FreeSurfer Coordinate System derived from the CT, or T1 MRI scan."
    )
    processing_description = "SEEK-algorithm (thresholding, cylindrical clustering and post-processing), or manual labeling of contacts using FieldTrip Toolbox."

    if img_fname is not None:
        # load in image and determine coordinate system
        img = nb.load(img_fname)
        axcodes = nb.orientations.aff2axcodes(img.affine)
        coordsystem_name = "".join(axcodes)
    else:
        warnings.warn(
            "Image filename not passed in... Defaulting to MRI coordinate system."
        )
        coordsystem_name = "MRI"

    fid_json = {
        # basename() would fail on None, so fall back to "n/a"
        "IntendedFor": (os.path.basename(img_fname)
                        if img_fname is not None else "n/a"),
        "iEEGCoordinateSystem": coordsystem_name,  # MRI, Pixels, or ACPC
        "iEEGCoordinateUnits": unit,  # m (MNE), mm, cm , or pixels
        "iEEGCoordinateSystemDescription": system_description,
        "iEEGCoordinateProcessingDescription": processing_description,
        "iEEGCoordinateProcessingReference": "See DOI: https://zenodo.org/record/3542307#.XoYF9tNKhZI",
    }
    _write_json(fname, fid_json, overwrite, verbose)

    return fname
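# A minimal usage sketch (not from the source): assumes nibabel is importable
# as `nb` and that a hypothetical CT volume "sub-01_ct.nii.gz" exists, from
# whose affine the coordinate-system name (e.g. "RAS") is derived.
_write_coordsystem_json(
    fname="sub-01_coordsystem.json",  # hypothetical output path
    unit="mm",
    img_fname="sub-01_ct.nii.gz",     # hypothetical CT image
)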
Example #2
def test_handle_info_reading():
    """Test reading information from a BIDS sidecar.json file."""
    bids_root = _TempDir()

    # read in a USA dataset, so the line frequency should be 60 Hz
    raw = mne.io.read_raw_fif(raw_fname)
    raw.info['line_freq'] = 60

    # write copy of raw with line freq of 60
    # bids basename and fname
    bids_basename = make_bids_basename(subject='01',
                                       session='01',
                                       task='audiovisual',
                                       run='01')
    kind = "meg"
    bids_fname = bids_basename + '_{}.fif'.format(kind)
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)

    # find sidecar JSON fname
    sidecar_fname = _find_matching_sidecar(bids_fname,
                                           bids_root,
                                           '{}.json'.format(kind),
                                           allow_fail=True)

    # assert that we get the same line frequency set
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 60

    # 2. if line frequency is not set in raw file, then default to sidecar
    raw.info['line_freq'] = None
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    _update_sidecar(sidecar_fname, "PowerLineFrequency", 55)
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 55

    # make a copy of the sidecar in "derivatives/"
    # to check that we make sure we always get the right sidecar
    # in addition, it should not break the sidecar reading
    # in `read_raw_bids`
    deriv_dir = op.join(bids_root, "derivatives")
    sidecar_copy = op.join(deriv_dir, op.basename(sidecar_fname))
    os.mkdir(deriv_dir)
    with open(sidecar_fname, "r") as fin:
        sidecar_json = json.load(fin)
        sidecar_json["PowerLineFrequency"] = 45
    _write_json(sidecar_copy, sidecar_json)
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 55

    # 3. if line frequency is set in raw file, but not sidecar
    raw.info['line_freq'] = 60
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    _update_sidecar(sidecar_fname, "PowerLineFrequency", "n/a")
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 60

    # 4. assert that we get an error when sidecar json doesn't match
    _update_sidecar(sidecar_fname, "PowerLineFrequency", 55)
    with pytest.raises(ValueError, match="Line frequency in sidecar json"):
        raw = mne_bids.read_raw_bids(bids_fname, bids_root)
Example #3
def _participants_json(fname, overwrite=False, verbose=True):
    """Create participants.json for non-default columns in accompanying TSV.

    Parameters
    ----------
    fname : str
        Filename to save the participants.json to.
    overwrite : bool
        Defaults to False.
        Whether to overwrite the existing data in the file.
        If there is already data for the given `fname` and overwrite is False,
        an error will be raised.
    verbose : bool
        Set verbose output to true or false.

    """
    cols = OrderedDict()
    cols['participant_id'] = {'Description': 'Unique participant identifier'}
    cols['age'] = {'Description': 'Age of the participant at time of testing',
                   'Units': 'years'}
    cols['sex'] = {'Description': 'Biological sex of the participant',
                   'Levels': {'F': 'female', 'M': 'male'}}

    _write_json(fname, cols, overwrite, verbose)

    return fname
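# A minimal usage sketch (not from the source): writes the default column
# descriptions next to an existing participants.tsv; the path is hypothetical.
_participants_json("bids_root/participants.json", overwrite=True, verbose=True)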
Example #4
def test_handle_chpi_reading(tmp_path):
    """Test reading of cHPI information."""
    raw = _read_raw_fif(raw_fname_chpi, allow_maxshield=True)
    root = tmp_path / 'chpi'
    root.mkdir()
    bids_path = BIDSPath(subject='01', session='01',
                         task='audiovisual', run='01',
                         root=root, datatype='meg')
    bids_path = write_raw_bids(raw, bids_path)

    raw_read = read_raw_bids(bids_path)
    assert raw_read.info['hpi_subsystem'] is not None

    # cause conflicts between cHPI info in sidecar and raw data
    meg_json_path = bids_path.copy().update(suffix='meg', extension='.json')
    with open(meg_json_path, 'r', encoding='utf-8') as f:
        meg_json_data = json.load(f)

    # cHPI frequency mismatch
    meg_json_data_freq_mismatch = meg_json_data.copy()
    meg_json_data_freq_mismatch['HeadCoilFrequency'][0] = 123
    _write_json(meg_json_path, meg_json_data_freq_mismatch, overwrite=True)

    with pytest.warns(RuntimeWarning, match='Defaulting to .* mne.Raw object'):
        raw_read = read_raw_bids(bids_path)

    # cHPI "off" according to sidecar, but present in the data
    meg_json_data_chpi_mismatch = meg_json_data.copy()
    meg_json_data_chpi_mismatch['ContinuousHeadLocalization'] = False
    _write_json(meg_json_path, meg_json_data_chpi_mismatch, overwrite=True)

    raw_read = read_raw_bids(bids_path)
    assert raw_read.info['hpi_subsystem'] is None
    assert raw_read.info['hpi_meas'] == []
Example #5
    def write_participants_json(self, participants_dict):
        """
        Write a passed dict object to the participants json location.

        Parameters
        ----------
        participants_dict : dict
            The updated dictionary for the participants.json file.

        """
        outfile = self.participantsjson_fpath
        _write_json(outfile, participants_dict, overwrite=True)
Example #6
    def write_sidecar_json(self, sidecar_dict):
        """
        Write a passed dict object to the sidecar json location.

        Parameters
        ----------
        sidecar_dict : dict
            The data to write to the file.

        """
        outfile = self.sidecarjson_fpath
        _write_json(outfile, sidecar_dict, overwrite=True)
Example #7
def test_handle_info_reading():
    """Test reading information from a BIDS sidecar.json file."""
    bids_root = _TempDir()

    # read in a USA dataset, so the line frequency should be 60 Hz
    raw = _read_raw_fif(raw_fname)

    # write copy of raw with line freq of 60
    # bids basename and fname
    bids_path = BIDSPath(subject='01',
                         session='01',
                         task='audiovisual',
                         run='01',
                         root=bids_root)
    suffix = "meg"
    bids_fname = bids_path.copy().update(suffix=suffix, extension='.fif')
    write_raw_bids(raw, bids_path, overwrite=True)

    # find sidecar JSON fname
    bids_fname.update(datatype=suffix)
    sidecar_fname = _find_matching_sidecar(bids_fname,
                                           suffix=suffix,
                                           extension='.json')

    # assert that we get the same line frequency set
    raw = read_raw_bids(bids_path=bids_path)
    assert raw.info['line_freq'] == 60

    # 2. if line frequency is not set in raw file, then ValueError
    raw.info['line_freq'] = None
    with pytest.raises(ValueError, match="PowerLineFrequency .* required"):
        write_raw_bids(raw, bids_path, overwrite=True)

    # make a copy of the sidecar in "derivatives/"
    # to check that we make sure we always get the right sidecar
    # in addition, it should not break the sidecar reading
    # in `read_raw_bids`
    deriv_dir = op.join(bids_root, "derivatives")
    sidecar_copy = op.join(deriv_dir, op.basename(sidecar_fname))
    os.mkdir(deriv_dir)
    with open(sidecar_fname, "r", encoding='utf-8') as fin:
        sidecar_json = json.load(fin)
        sidecar_json["PowerLineFrequency"] = 45
    _write_json(sidecar_copy, sidecar_json)
    raw = read_raw_bids(bids_path=bids_path)
    assert raw.info['line_freq'] == 60

    # 3. assert that we get an error when sidecar json doesn't match
    _update_sidecar(sidecar_fname, "PowerLineFrequency", 55)
    with pytest.raises(ValueError, match="Line frequency in sidecar json"):
        raw = read_raw_bids(bids_path=bids_path)
        assert raw.info['line_freq'] == 55
Example #8
def _coordsystem_json(raw,
                      unit,
                      orient,
                      manufacturer,
                      fname,
                      overwrite=False,
                      verbose=True):
    """Create a coordsystem.json file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    unit : str
        Units to be used in the coordsystem specification.
    orient : str
        Used to define the coordinate system for the head coils.
    manufacturer : str
        Used to define the coordinate system for the MEG sensors.
    fname : str
        Filename to save the coordsystem.json to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.

    """
    dig = raw.info['dig']
    coords = _extract_landmarks(dig)
    hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}
    if hpi:
        for ident in hpi.keys():
            coords['coil%d' % ident] = hpi[ident]['r'].tolist()

    coord_frame = {d['coord_frame'] for d in dig}
    if len(coord_frame) > 1:
        err = 'All HPI and Fiducials must be in the same coordinate frame.'
        raise ValueError(err)

    fid_json = {
        'MEGCoordinateSystem': manufacturer,
        'MEGCoordinateUnits': unit,  # XXX validate this
        'HeadCoilCoordinates': coords,
        'HeadCoilCoordinateSystem': orient,
        'HeadCoilCoordinateUnits': unit  # XXX validate this
    }

    _write_json(fname, fid_json, overwrite, verbose)

    return fname
Example #9
def _update_sidecar(sidecar_fname, key, val):
    """Update a sidecar JSON file with a given key/value pair.

    Parameters
    ----------
    sidecar_fname : str | os.PathLike
        Full path to the sidecar JSON file.
    key : str
        The key in the sidecar JSON file, e.g. "PowerLineFrequency".
    val : str
        The value to assign to ``key`` in the sidecar JSON file.
    """
    with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin:
        sidecar_json = json.load(fin)
    sidecar_json[key] = val
    _write_json(sidecar_fname, sidecar_json, overwrite=True)
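# A minimal usage sketch (not from the source): sets PowerLineFrequency to
# 60 Hz in a hypothetical sidecar file; any JSON-serializable `val` works.
_update_sidecar("sub-01_ses-01_task-rest_eeg.json", "PowerLineFrequency", 60)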
Example #10
def _get_sidecar_json_update_file(_get_bids_test_dir):
    """Return path to a sidecar JSON updating file."""
    bids_root = _get_bids_test_dir
    sample_scripts = op.join(bids_root, 'sourcedata')
    sidecar_fpath = op.join(sample_scripts, 'sidecarjson_update.json')
    _mkdir_p(sample_scripts)

    update_json = {
        'InstitutionName': 'mne-bids',
        'InstitutionAddress': 'Internet',
        'MEGChannelCount': 300,
        'MEGREFChannelCount': 6,
        'SEEGChannelCount': 0,
    }
    _write_json(sidecar_fpath, update_json, overwrite=True)

    return sidecar_fpath
Example #11
    def save_project(self):
        """
        Save the project information to a JSON file.

        Parameters
        ----------
        None

        Returns
        -------
        None

        """
        result_file = self.name + "_results.json"
        result_file_path = os.path.join(self.results_folder, result_file)
        _write_json(
            result_file_path, self.automagic_final, overwrite=True, verbose=True
        )
Example #12
def init_dataset():
    """Prepare the pipeline directory in /derivatives.
    """
    os.makedirs(config.deriv_root, exist_ok=True)

    # Write a dataset_description.json for the pipeline
    ds_json = dict()
    ds_json['Name'] = config.PIPELINE_NAME + ' outputs'
    ds_json['BIDSVersion'] = BIDS_VERSION
    ds_json['PipelineDescription'] = {
        'Name': config.PIPELINE_NAME,
        'Version': config.VERSION,
        'CodeURL': config.CODE_URL,
    }
    ds_json['SourceDatasets'] = {
        'URL': 'n/a',
    }

    fname = op.join(config.deriv_root, 'dataset_description.json')
    _write_json(fname, ds_json, overwrite=True)
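# A minimal usage sketch (not from the source): `config` and BIDS_VERSION are
# module-level names read by init_dataset(); the values below are stand-ins.
from types import SimpleNamespace

BIDS_VERSION = '1.7.0'  # assumed placeholder, not from the source
config = SimpleNamespace(deriv_root='derivatives/pipeline',
                         PIPELINE_NAME='my-pipeline',
                         VERSION='0.1',
                         CODE_URL='https://example.com/pipeline')
init_dataset()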
Example #13
    def save_all_files(self, results, fig1, fig2):
        """
        Save results dictionary and figures to results path

        Parameters
        ----------
        results : dict
            Dictionary of pipeline outputs, containing the "automagic"
            results and the "preprocessed" MNE Raw object.
        fig1:
            Figure of ??

        fig2:
            Figure of ??

        Returns
        -------
        None

        """
        main_result_file = results["automagic"]
        result_filename = self.unique_name + "_results.json"
        result_file_overall = os.path.join(self.result_path, result_filename)
        _write_json(result_file_overall,
                    main_result_file,
                    overwrite=True,
                    verbose=True)
        processed = results["preprocessed"]
        processed_filename = self.unique_name + "_raw.fif"
        processed_file_overall = os.path.join(self.result_path,
                                              processed_filename)
        processed.save(processed_file_overall, overwrite=True)

        plt.figure(fig1.number)
        fig1_name = self.unique_name + ".png"
        fig1_name_overall = os.path.join(self.result_path, fig1_name)
        plt.savefig(fig1_name_overall, dpi=200)
        plt.figure(fig2.number)
        fig2_name = self.unique_name + "_orig.png"
        fig2_name_overall = os.path.join(self.result_path, fig2_name)
        plt.savefig(fig2_name_overall, dpi=100)
Example #14
def init_dataset(cfg) -> None:
    """Prepare the pipeline directory in /derivatives.
    """
    msg = "Initializing output directories."
    logger.info(**gen_log_kwargs(message=msg))

    cfg.deriv_root.mkdir(exist_ok=True, parents=True)

    # Write a dataset_description.json for the pipeline
    ds_json = dict()
    ds_json['Name'] = cfg.PIPELINE_NAME + ' outputs'
    ds_json['BIDSVersion'] = BIDS_VERSION
    ds_json['PipelineDescription'] = {
        'Name': cfg.PIPELINE_NAME,
        'Version': cfg.VERSION,
        'CodeURL': cfg.CODE_URL,
    }
    ds_json['SourceDatasets'] = {
        'URL': 'n/a',
    }

    fname = cfg.deriv_root / 'dataset_description.json'
    _write_json(fname, ds_json, overwrite=True)
Example #15
def update_sidecar_json(bids_path, entries, verbose=True):
    """Update sidecar files using a dictionary or JSON file.

    Will update the metadata fields inside the file defined by
    ``bids_path.fpath`` according to ``entries``. If a field does not
    exist in the corresponding sidecar file, it will be created; if it
    already exists, its value will be updated.

    For example, if ``InstitutionName`` is not defined in
    the sidecar json file, then trying to update
    ``InstitutionName`` to ``Martinos Center`` will update
    the sidecar json file to have ``InstitutionName`` as
    ``Martinos Center``.

    Parameters
    ----------
    bids_path : BIDSPath
        The set of paths to update. The :class:`mne_bids.BIDSPath` instance
        passed here **must** have the ``.root`` attribute set. The
        ``.datatype`` attribute **may** be set. If ``.datatype`` is
        not set and only one data type (e.g., only EEG or MEG data)
        is present in the dataset, it will be
        selected automatically. This must uniquely identify
        an existing file path, else an error will be raised.
    entries : dict | str | pathlib.Path
        A dictionary, or JSON file that defines the
        sidecar fields and corresponding values to be updated to.
    verbose : bool
        The verbosity level.

    Notes
    -----
    This function can only update JSON files.

    Sidecar JSON files include files such as ``*_ieeg.json``,
    ``*_coordsystem.json``, ``*_scans.json``, etc.

    You should double-check that your update dictionary is correct for
    the corresponding sidecar JSON file: this function performs a
    dictionary update of the sidecar fields with the passed-in
    dictionary, overwriting any information that was previously there.

    Raises
    ------
    RuntimeError
        If the specified ``bids_path.fpath`` cannot be found
        in the dataset.

    RuntimeError
        If the ``bids_path.fpath`` does not have ``.json``
        extension.

    Examples
    --------
    >>> # update sidecar json file
    >>> bids_path = BIDSPath(root='./', subject='001', session='001',
    ...                      task='test', run='01', suffix='ieeg',
    ...                      extension='.json')
    >>> entries = {'PowerLineFrequency': 50}
    >>> update_sidecar_json(bids_path, entries)
    >>> # update sidecar coordsystem json file
    >>> bids_path = BIDSPath(root='./', subject='001', session='001',
    ...                      suffix='coordsystem', extension='.json')
    >>> entries = {'iEEGCoordinateSystem': 'Other'}
    >>> update_sidecar_json(bids_path, entries)
    """
    # get all matching json files
    bids_path = bids_path.copy()
    if bids_path.extension != '.json':
        raise RuntimeError('Only works for ".json" files. The '
                           'BIDSPath object passed in has '
                           f'{bids_path.extension} extension.')

    # get the file path
    fpath = bids_path.fpath
    if not fpath.exists():
        raise RuntimeError(f'Sidecar file does not exist for {fpath}.')

    # sidecar update either from file, or as dictionary
    if isinstance(entries, dict):
        sidecar_tmp = entries
    else:
        with open(entries, 'r') as tmp_f:
            sidecar_tmp = json.load(tmp_f, object_pairs_hook=OrderedDict)

    if verbose:
        logger.debug(sidecar_tmp)
        logger.debug(f'Updating {fpath}...')

    # load in sidecar filepath
    with open(fpath, 'r') as tmp_f:
        sidecar_json = json.load(tmp_f, object_pairs_hook=OrderedDict)

    # update sidecar JSON file with the fields passed in
    sidecar_json.update(**sidecar_tmp)

    # write back the sidecar JSON
    _write_json(fpath, sidecar_json, overwrite=True, verbose=verbose)
Example #16
            orig_time = dat.info['meas_date']
        events = mne.pick_events(events, include=list(event_map.values()))
        annot_new = mne.annotations_from_events(
            events=events, sfreq=dat.info['sfreq'], orig_time=orig_time,
            event_desc=event_name_map, verbose=False
        )
        dat.set_annotations(annot_new)
    except (ValueError, RuntimeError):
        print("   * Unable to find any valid triggers, skipping...\n")
        continue
            
    # Actually write out BIDS data
    write_raw_bids(dat, bids_path, verbose=False)
                   
    # Update sidecar files with correct metadata
    json_path = BIDSPath(subject=study_id, task=taskname, suffix='eeg',
                         extension='.json', root=bids_root)
    with open(json_path.fpath, 'r') as tmp_f:
        sidecar_json = json.load(tmp_f)
    file_info = metadata.copy()
    file_info.update(**sidecar_json)
    for k in metadata.keys():
        if metadata[k] is not None:
            file_info[k] = metadata[k]
    _write_json(json_path.fpath, file_info, overwrite=True, verbose=False)
    

print("\n\n#################################")    
print("### BIDS conversion complete! ###")
print("#################################\n")                   
Example #17
def test_handle_info_reading(tmpdir):
    """Test reading information from a BIDS sidecar JSON file."""
    # read in a USA dataset, so the line frequency should be 60 Hz
    raw = _read_raw_fif(raw_fname)

    # write copy of raw with line freq of 60
    # bids basename and fname
    bids_path = BIDSPath(subject='01',
                         session='01',
                         task='audiovisual',
                         run='01',
                         root=tmpdir)
    suffix = "meg"
    bids_fname = bids_path.copy().update(suffix=suffix, extension='.fif')
    write_raw_bids(raw, bids_path, overwrite=True)

    # find sidecar JSON fname
    bids_fname.update(datatype=suffix)
    sidecar_fname = _find_matching_sidecar(bids_fname,
                                           suffix=suffix,
                                           extension='.json')

    # assert that we get the same line frequency set
    raw = read_raw_bids(bids_path=bids_path)
    assert raw.info['line_freq'] == 60

    # setting line_freq to None should produce 'n/a' in the JSON sidecar
    raw.info['line_freq'] = None
    write_raw_bids(raw, bids_path, overwrite=True)
    raw = read_raw_bids(bids_path=bids_path)
    assert raw.info['line_freq'] is None

    with open(sidecar_fname, 'r', encoding='utf-8') as fin:
        sidecar_json = json.load(fin)
    assert sidecar_json["PowerLineFrequency"] == 'n/a'

    # 2. if line frequency is not set in raw file, then ValueError
    del raw.info['line_freq']
    with pytest.raises(ValueError, match="PowerLineFrequency .* required"):
        write_raw_bids(raw, bids_path, overwrite=True)

    # check that there are "Extra points" in raw.info['dig'] if
    # DigitizedHeadPoints is set to True, and none otherwise
    n_dig_points = 0
    for dig_point in raw.info['dig']:
        if dig_point['kind'] == FIFF.FIFFV_POINT_EXTRA:
            n_dig_points += 1
    if sidecar_json['DigitizedHeadPoints']:
        assert n_dig_points > 0
    else:
        assert n_dig_points == 0

    # check that DigitizedLandmarks is set to True if any of NAS/LPA/RPA
    # are present in raw.info['dig'], and to False otherwise
    landmark_present = False
    for dig_point in raw.info['dig']:
        if dig_point['kind'] in [
                FIFF.FIFFV_POINT_LPA, FIFF.FIFFV_POINT_RPA,
                FIFF.FIFFV_POINT_NASION
        ]:
            landmark_present = True
            break
    if landmark_present:
        assert sidecar_json['DigitizedLandmarks'] is True
    else:
        assert sidecar_json['DigitizedLandmarks'] is False

    # make a copy of the sidecar in "derivatives/"
    # to check that we make sure we always get the right sidecar
    # in addition, it should not break the sidecar reading
    # in `read_raw_bids`
    raw.info['line_freq'] = 60
    write_raw_bids(raw, bids_path, overwrite=True)
    deriv_dir = tmpdir.mkdir("derivatives")
    sidecar_copy = deriv_dir / op.basename(sidecar_fname)
    with open(sidecar_fname, "r", encoding='utf-8') as fin:
        sidecar_json = json.load(fin)
        sidecar_json["PowerLineFrequency"] = 45
    _write_json(sidecar_copy, sidecar_json)
    raw = read_raw_bids(bids_path=bids_path)
    assert raw.info['line_freq'] == 60

    # 3. assert that we get an error when sidecar json doesn't match
    _update_sidecar(sidecar_fname, "PowerLineFrequency", 55)
    with pytest.warns(RuntimeWarning, match="Defaulting to .* sidecar JSON"):
        raw = read_raw_bids(bids_path=bids_path)
        assert raw.info['line_freq'] == 55
Example #18
def test_get_head_mri_trans(tmpdir):
    """Test getting a trans object from BIDS data."""
    import nibabel as nib

    event_id = {
        'Auditory/Left': 1,
        'Auditory/Right': 2,
        'Visual/Left': 3,
        'Visual/Right': 4,
        'Smiley': 5,
        'Button': 32
    }
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')
    subjects_dir = op.join(data_path, 'subjects')

    # Drop unknown events.
    events = mne.read_events(events_fname)
    events = events[events[:, 2] != 0]

    # Write it to BIDS
    raw = _read_raw_fif(raw_fname)
    bids_path = _bids_path.copy().update(root=tmpdir)
    write_raw_bids(raw,
                   bids_path,
                   events_data=events,
                   event_id=event_id,
                   overwrite=False)

    # We cannot recover trans if no MRI has yet been written
    with pytest.raises(RuntimeError, match='Did not find any T1w'):
        estimated_trans = get_head_mri_trans(bids_path=bids_path,
                                             fs_subject='sample',
                                             fs_subjects_dir=subjects_dir)

    # Write some MRI data and supply a `trans` so that a sidecar gets written
    trans = mne.read_trans(raw_fname.replace('_raw.fif', '-trans.fif'))

    # Get the T1 weighted MRI data file ... test write_anat with a nibabel
    # image instead of a file path
    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
    t1w_mgh = nib.load(t1w_mgh)

    landmarks = get_anat_landmarks(t1w_mgh,
                                   raw.info,
                                   trans,
                                   fs_subject='sample',
                                   fs_subjects_dir=subjects_dir)
    t1w_bids_path = write_anat(t1w_mgh,
                               bids_path=bids_path,
                               landmarks=landmarks,
                               verbose=True)
    anat_dir = bids_path.directory

    # Try to get trans back through fitting points
    estimated_trans = get_head_mri_trans(bids_path=bids_path,
                                         fs_subject='sample',
                                         fs_subjects_dir=subjects_dir)

    assert trans['from'] == estimated_trans['from']
    assert trans['to'] == estimated_trans['to']
    assert_almost_equal(trans['trans'], estimated_trans['trans'])

    # provoke an error by introducing NaNs into MEG coords
    raw.info['dig'][0]['r'] = np.full(3, np.nan)
    sh.rmtree(anat_dir)
    bad_landmarks = get_anat_landmarks(t1w_mgh, raw.info, trans, 'sample',
                                       op.join(data_path, 'subjects'))
    write_anat(t1w_mgh, bids_path=t1w_bids_path, landmarks=bad_landmarks)
    with pytest.raises(RuntimeError, match='AnatomicalLandmarkCoordinates'):
        estimated_trans = get_head_mri_trans(bids_path=t1w_bids_path,
                                             fs_subject='sample',
                                             fs_subjects_dir=subjects_dir)

    # test we are permissive for different casings of landmark names in the
    # sidecar, and also accept "nasion" instead of just "NAS"
    raw = _read_raw_fif(raw_fname)
    write_raw_bids(raw,
                   bids_path,
                   events_data=events,
                   event_id=event_id,
                   overwrite=True)  # overwrite with new acq
    t1w_bids_path = write_anat(t1w_mgh,
                               bids_path=bids_path,
                               landmarks=landmarks,
                               overwrite=True)

    t1w_json_fpath = t1w_bids_path.copy().update(extension='.json').fpath
    with t1w_json_fpath.open('r', encoding='utf-8') as f:
        t1w_json = json.load(f)

    coords = t1w_json['AnatomicalLandmarkCoordinates']
    coords['lpa'] = coords['LPA']
    coords['Rpa'] = coords['RPA']
    coords['Nasion'] = coords['NAS']
    del coords['LPA'], coords['RPA'], coords['NAS']

    _write_json(t1w_json_fpath, t1w_json, overwrite=True)

    estimated_trans = get_head_mri_trans(bids_path=bids_path,
                                         fs_subject='sample',
                                         fs_subjects_dir=subjects_dir)
    assert_almost_equal(trans['trans'], estimated_trans['trans'])

    # Test t1_bids_path parameter
    #
    # Case 1: different BIDS roots
    meg_bids_path = _bids_path.copy().update(root=tmpdir / 'meg_root')
    t1_bids_path = _bids_path.copy().update(root=tmpdir / 'mri_root')
    raw = _read_raw_fif(raw_fname)

    write_raw_bids(raw, bids_path=meg_bids_path)
    landmarks = get_anat_landmarks(t1w_mgh,
                                   raw.info,
                                   trans,
                                   fs_subject='sample',
                                   fs_subjects_dir=subjects_dir)
    write_anat(t1w_mgh, bids_path=t1_bids_path, landmarks=landmarks)
    read_trans = get_head_mri_trans(bids_path=meg_bids_path,
                                    t1_bids_path=t1_bids_path,
                                    fs_subject='sample',
                                    fs_subjects_dir=subjects_dir)
    assert np.allclose(trans['trans'], read_trans['trans'])

    # Case 2: different sessions
    raw = _read_raw_fif(raw_fname)
    meg_bids_path = _bids_path.copy().update(root=tmpdir / 'session_test',
                                             session='01')
    t1_bids_path = meg_bids_path.copy().update(session='02')

    write_raw_bids(raw, bids_path=meg_bids_path)
    write_anat(t1w_mgh, bids_path=t1_bids_path, landmarks=landmarks)
    read_trans = get_head_mri_trans(bids_path=meg_bids_path,
                                    t1_bids_path=t1_bids_path,
                                    fs_subject='sample',
                                    fs_subjects_dir=subjects_dir)
    assert np.allclose(trans['trans'], read_trans['trans'])

    # Test that incorrect subject directory throws error
    with pytest.raises(ValueError, match='Could not find'):
        estimated_trans = get_head_mri_trans(bids_path=bids_path,
                                             fs_subject='bad',
                                             fs_subjects_dir=subjects_dir)
Example #19
def make_dataset_description(path, name=None, data_license=None,
                             authors=None, acknowledgements=None,
                             how_to_acknowledge=None, funding=None,
                             references_and_links=None, doi=None,
                             verbose=False):
    """Create json for a dataset description.

    BIDS datasets may have one or more fields; this function allows you to
    specify which you wish to include in the description. See the BIDS
    documentation for information about what each field means.

    Parameters
    ----------
    path : str
        A path to a folder where the description will be created.
    name : str | None
        The name of this BIDS dataset.
    data_license : str | None
        The license under which this dataset is published.
    authors : list | str | None
        List of individuals who contributed to the creation/curation of the
        dataset. Must be a list of strings (e.g., ['a', 'b', 'c']) or a
        single comma-separated string (e.g., 'a, b, c').
    acknowledgements : list | str | None
        Either a str acknowledging individuals who contributed to the
        creation/curation of this dataset OR a list of the individuals'
        names as str.
    how_to_acknowledge : list | str | None
        Either a str describing how to acknowledge this dataset OR a list of
        publications that should be cited.
    funding : list | str | None
        List of sources of funding (e.g., grant numbers). Must be a list of
        strings (e.g., ['a', 'b', 'c']) or a single comma-separated string
        (e.g., 'a, b, c').
    references_and_links : list | str | None
        List of references to publications that contain information on the
        dataset, or links. Must be a list of strings (e.g., ['a', 'b', 'c'])
        or a single comma-separated string (e.g., 'a, b, c').
    doi : str | None
        The DOI for the dataset.

    Notes
    -----
    The required field BIDSVersion will be automatically filled by mne_bids.

    """
    # Put potential string input into list of strings
    if isinstance(authors, str):
        authors = authors.split(', ')
    if isinstance(funding, str):
        funding = funding.split(', ')
    if isinstance(references_and_links, str):
        references_and_links = references_and_links.split(', ')

    fname = op.join(path, 'dataset_description.json')
    description = OrderedDict([('Name', name),
                               ('BIDSVersion', BIDS_VERSION),
                               ('License', data_license),
                               ('Authors', authors),
                               ('Acknowledgements', acknowledgements),
                               ('HowToAcknowledge', how_to_acknowledge),
                               ('Funding', funding),
                               ('ReferencesAndLinks', references_and_links),
                               ('DatasetDOI', doi)])
    pop_keys = [key for key, val in description.items() if val is None]
    for key in pop_keys:
        description.pop(key)
    _write_json(fname, description, overwrite=True, verbose=verbose)
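# A minimal usage sketch (not from the source): creates a hypothetical
# dataset_description.json; a comma-separated string is split into a list.
make_dataset_description(path='bids_root', name='My dataset',
                         authors='Alice, Bob', data_license='CC-BY-4.0')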
Example #20
def _sidecar_json(raw, task, manufacturer, fname, kind, overwrite=False,
                  verbose=True):
    """Create a sidecar json file depending on the kind and save it.

    The sidecar json file provides meta data about the data of a certain kind.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    task : str
        Name of the task the data is based on.
    manufacturer : str
        Manufacturer of the acquisition system. For MEG also used to define the
        coordinate system for the MEG sensors.
    fname : str
        Filename to save the sidecar json to.
    kind : str
        Type of the data as in ALLOWED_KINDS.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false. Defaults to true.

    """
    sfreq = raw.info['sfreq']
    powerlinefrequency = raw.info.get('line_freq', None)
    if powerlinefrequency is None:
        warn('No line frequency found, defaulting to 50 Hz')
        powerlinefrequency = 50

    if isinstance(raw, BaseRaw):
        rec_type = 'continuous'
    elif isinstance(raw, Epochs):
        rec_type = 'epoched'
    else:
        rec_type = 'n/a'

    # determine whether any channels have to be ignored:
    n_ignored = len([ch_name for ch_name in
                     IGNORED_CHANNELS.get(manufacturer, list()) if
                     ch_name in raw.ch_names])
    # all ignored channels are trigger channels at the moment...

    n_megchan = len([ch for ch in raw.info['chs']
                     if ch['kind'] == FIFF.FIFFV_MEG_CH])
    n_megrefchan = len([ch for ch in raw.info['chs']
                        if ch['kind'] == FIFF.FIFFV_REF_MEG_CH])
    n_eegchan = len([ch for ch in raw.info['chs']
                     if ch['kind'] == FIFF.FIFFV_EEG_CH])
    n_ecogchan = len([ch for ch in raw.info['chs']
                      if ch['kind'] == FIFF.FIFFV_ECOG_CH])
    n_seegchan = len([ch for ch in raw.info['chs']
                      if ch['kind'] == FIFF.FIFFV_SEEG_CH])
    n_eogchan = len([ch for ch in raw.info['chs']
                     if ch['kind'] == FIFF.FIFFV_EOG_CH])
    n_ecgchan = len([ch for ch in raw.info['chs']
                     if ch['kind'] == FIFF.FIFFV_ECG_CH])
    n_emgchan = len([ch for ch in raw.info['chs']
                     if ch['kind'] == FIFF.FIFFV_EMG_CH])
    n_miscchan = len([ch for ch in raw.info['chs']
                      if ch['kind'] == FIFF.FIFFV_MISC_CH])
    n_stimchan = len([ch for ch in raw.info['chs']
                      if ch['kind'] == FIFF.FIFFV_STIM_CH]) - n_ignored

    # Define modality-specific JSON dictionaries
    ch_info_json_common = [
        ('TaskName', task),
        ('Manufacturer', manufacturer),
        ('PowerLineFrequency', powerlinefrequency),
        ('SamplingFrequency', sfreq),
        ('SoftwareFilters', 'n/a'),
        ('RecordingDuration', raw.times[-1]),
        ('RecordingType', rec_type)]
    ch_info_json_meg = [
        ('DewarPosition', 'n/a'),
        ('DigitizedLandmarks', False),
        ('DigitizedHeadPoints', False),
        ('MEGChannelCount', n_megchan),
        ('MEGREFChannelCount', n_megrefchan)]
    ch_info_json_eeg = [
        ('EEGReference', 'n/a'),
        ('EEGGround', 'n/a'),
        ('EEGPlacementScheme', _infer_eeg_placement_scheme(raw)),
        ('Manufacturer', manufacturer)]
    ch_info_json_ieeg = [
        ('iEEGReference', 'n/a'),
        ('ECOGChannelCount', n_ecogchan),
        ('SEEGChannelCount', n_seegchan)]
    ch_info_ch_counts = [
        ('EEGChannelCount', n_eegchan),
        ('EOGChannelCount', n_eogchan),
        ('ECGChannelCount', n_ecgchan),
        ('EMGChannelCount', n_emgchan),
        ('MiscChannelCount', n_miscchan),
        ('TriggerChannelCount', n_stimchan)]

    # Stitch together the complete JSON dictionary
    ch_info_json = ch_info_json_common
    if kind == 'meg':
        append_kind_json = ch_info_json_meg
    elif kind == 'eeg':
        append_kind_json = ch_info_json_eeg
    elif kind == 'ieeg':
        append_kind_json = ch_info_json_ieeg
    else:
        raise ValueError('Unexpected "kind": {}. '
                         'Use one of: {}'.format(kind, ALLOWED_KINDS))

    ch_info_json += append_kind_json
    ch_info_json += ch_info_ch_counts
    ch_info_json = OrderedDict(ch_info_json)

    _write_json(fname, ch_info_json, overwrite, verbose)

    return fname
Example #21
def write_anat(bids_root, subject, t1w, session=None, acquisition=None,
               raw=None, trans=None, deface=False, overwrite=False,
               verbose=False):
    """Put anatomical MRI data into a BIDS format.

    Given a BIDS directory and a T1 weighted MRI scan for a certain subject,
    format the MRI scan to be in BIDS format and put it into the correct
    location in the bids_dir. If a transformation matrix is supplied, a
    sidecar JSON file will be written for the T1 weighted data.

    Parameters
    ----------
    bids_root : str
        Path to root of the BIDS folder
    subject : str
        Subject label as in 'sub-<label>', for example: '01'
    t1w : str | nibabel image object
        Path to a T1 weighted MRI scan of the subject. Can be in any format
        readable by nibabel. Can also be a nibabel image object of a T1
        weighted MRI scan. Will be written as a .nii.gz file.
    session : str | None
        The session for `t1w`. Corresponds to "ses".
    acquisition : str | None
        The acquisition parameters for `t1w`. Corresponds to "acq".
    raw : instance of Raw | None
        The raw data of `subject` corresponding to `t1w`. If `raw` is None,
        `trans` has to be None as well
    trans : instance of mne.transforms.Transform | str | None
        The transformation matrix from head coordinates to MRI coordinates. Can
        also be a string pointing to a .trans file containing the
        transformation matrix. If None, no sidecar JSON file will be written
        for `t1w`
    deface : bool | dict
        If False, no defacing is performed.
        If True, deface with default parameters.
        `trans` and `raw` must not be `None` if True.
        If dict, accepts the following keys:
            `inset`: how far back in millimeters to start defacing
                     relative to the nasion (default 20)
            `theta`: is the angle of the defacing shear in degrees relative
                     to the normal to the plane passing through the anatomical
                     landmarks (default 35).
    overwrite : bool
        Whether to overwrite existing files or data in files.
        Defaults to False.
        If overwrite is True, any existing files with the same BIDS parameters
        will be overwritten with the exception of the `participants.tsv` and
        `scans.tsv` files. For these files, parts of pre-existing data that
        match the current data will be replaced.
        If overwrite is False, no existing data will be overwritten or
        replaced.
    verbose : bool
        If verbose is True, this will print a snippet of the sidecar files. If
        False, no content will be printed.

    Returns
    -------
    anat_dir : str
        Path to the anatomical scan in the `bids_dir`

    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    if deface and (trans is None or raw is None):
        raise ValueError('Both `raw` and `trans` must be provided to '
                         'deface the T1')

    # Make directory for anatomical data
    anat_dir = op.join(bids_root, 'sub-{}'.format(subject))
    # Session is optional
    if session is not None:
        anat_dir = op.join(anat_dir, 'ses-{}'.format(session))
    anat_dir = op.join(anat_dir, 'anat')
    if not op.exists(anat_dir):
        os.makedirs(anat_dir)

    # Try to read our T1 file and convert to MGH representation
    if isinstance(t1w, str):
        t1w = nib.load(t1w)
    elif type(t1w) not in nib.all_image_classes:
        raise ValueError('`t1w` must be a path to a T1 weighted MRI data file '
                         ', or a nibabel image object, but it is of type '
                         '"{}"'.format(type(t1w)))

    t1w = nib.Nifti1Image(t1w.dataobj, t1w.affine)
    # XYZT_UNITS = NIFTI_UNITS_MM (10 in binary or 2 in decimal)
    # seems to be the default for NIfTI files
    # https://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/xyzt_units.html
    if t1w.header['xyzt_units'] == 0:
        t1w.header['xyzt_units'] = np.array(10, dtype='uint8')

    # Now give the NIfTI file a BIDS name and write it to the BIDS location
    t1w_basename = make_bids_basename(subject=subject, session=session,
                                      acquisition=acquisition, prefix=anat_dir,
                                      suffix='T1w.nii.gz')

    # Check if we have necessary conditions for writing a sidecar JSON
    if trans is not None:

        # get trans and ensure it is from head to MRI
        trans, _ = _get_trans(trans, fro='head', to='mri')

        if not isinstance(raw, BaseRaw):
            raise ValueError('`raw` must be specified if `trans` is not None')

        # Prepare to write the sidecar JSON
        # extract MEG landmarks
        coords_dict = _extract_landmarks(raw.info['dig'])
        meg_landmarks = np.asarray((coords_dict['LPA'],
                                    coords_dict['NAS'],
                                    coords_dict['RPA']))

        # Transform MEG landmarks into MRI space, adjust units by * 1e3
        mri_landmarks = apply_trans(trans, meg_landmarks, move=True) * 1e3

        # Get landmarks in voxel space, using the mgh version of our T1 data
        t1_mgh = nib.MGHImage(t1w.dataobj, t1w.affine)
        vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()
        ras2vox_tkr = np.linalg.inv(vox2ras_tkr)
        mri_landmarks = apply_trans(ras2vox_tkr, mri_landmarks)  # in vox

        # Write sidecar.json
        t1w_json = dict()
        t1w_json['AnatomicalLandmarkCoordinates'] = \
            {'LPA': list(mri_landmarks[0, :]),
             'NAS': list(mri_landmarks[1, :]),
             'RPA': list(mri_landmarks[2, :])}
        fname = t1w_basename.replace('.nii.gz', '.json')
        if op.isfile(fname) and not overwrite:
            raise IOError('Wanted to write a file but it already exists and '
                          '`overwrite` is set to False. File: "{}"'
                          .format(fname))
        _write_json(fname, t1w_json, overwrite, verbose)

        if deface:
            t1w = _deface(t1w, mri_landmarks, deface, trans, raw)

    # Save anatomical data
    if op.exists(t1w_basename):
        if overwrite:
            os.remove(t1w_basename)
        else:
            raise IOError('Wanted to write a file but it already exists and '
                          '`overwrite` is set to False. File: "{}"'
                          .format(t1w_basename))

    nib.save(t1w, t1w_basename)

    return anat_dir
Example #22
def _write_coordsystem_json(*,
                            raw,
                            unit,
                            hpi_coord_system,
                            sensor_coord_system,
                            fname,
                            datatype,
                            overwrite=False):
    """Create a coordsystem.json file and save it.

    Parameters
    ----------
    raw : mne.io.Raw
        The data as MNE-Python Raw object.
    unit : str
        Units to be used in the coordsystem specification,
        as in BIDS_COORDINATE_UNITS.
    hpi_coord_system : str
        Name of the coordinate system for the head coils.
    sensor_coord_system : str | tuple of str
        Name of the coordinate system for the sensor positions.
        If a tuple of strings, should be in the form:
        ``(BIDS coordinate frame, MNE coordinate frame)``.
    fname : str
        Filename to save the coordsystem.json to.
    datatype : str
        Type of the data recording. Can be ``meg``, ``eeg``,
        ``ieeg``, or ``nirs``.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.

    """
    if raw.get_montage() is None:
        dig = list()
        coords = dict()
    else:
        montage = raw.get_montage()
        pos = montage.get_positions()
        dig = list() if montage.dig is None else montage.dig
        coords = dict(
            NAS=list() if pos['nasion'] is None else pos['nasion'].tolist(),
            LPA=list() if pos['lpa'] is None else pos['lpa'].tolist(),
            RPA=list() if pos['rpa'] is None else pos['rpa'].tolist())

    # get the coordinate frame description; a tuple is given as
    # (BIDS coordinate frame, MNE coordinate frame)
    if isinstance(sensor_coord_system, tuple):
        sensor_coord_system, _ = sensor_coord_system
    sensor_coord_system_descr = BIDS_COORD_FRAME_DESCRIPTIONS.get(
        sensor_coord_system.lower(), "n/a")

    # create the coordinate json data structure based on 'datatype'
    if datatype == 'meg':
        hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}
        if hpi:
            for ident in hpi.keys():
                coords['coil%d' % ident] = hpi[ident]['r'].tolist()

        fid_json = {
            'MEGCoordinateSystem': sensor_coord_system,
            'MEGCoordinateUnits': unit,  # XXX validate this
            'MEGCoordinateSystemDescription': sensor_coord_system_descr,
            'HeadCoilCoordinates': coords,
            'HeadCoilCoordinateSystem': hpi_coord_system,
            'HeadCoilCoordinateUnits': unit,  # XXX validate this
            'AnatomicalLandmarkCoordinates': coords,
            'AnatomicalLandmarkCoordinateSystem': sensor_coord_system,
            'AnatomicalLandmarkCoordinateUnits': unit
        }
    elif datatype == 'eeg':
        fid_json = {
            'EEGCoordinateSystem': sensor_coord_system,
            'EEGCoordinateUnits': unit,
            'EEGCoordinateSystemDescription': sensor_coord_system_descr,
            'AnatomicalLandmarkCoordinates': coords,
            'AnatomicalLandmarkCoordinateSystem': sensor_coord_system,
            'AnatomicalLandmarkCoordinateUnits': unit,
        }
    elif datatype == "ieeg":
        fid_json = {
            # (Other, Pixels, ACPC)
            'iEEGCoordinateSystem': sensor_coord_system,
            'iEEGCoordinateSystemDescription': sensor_coord_system_descr,
            'iEEGCoordinateUnits': unit,  # m (MNE), mm, cm, or pixels
        }
    elif datatype == "nirs":
        fid_json = {
            'NIRSCoordinateSystem': sensor_coord_system,
            'NIRSCoordinateSystemDescription': sensor_coord_system_descr,
            'NIRSCoordinateUnits': unit,
        }

    # Note that a coordsystem.json file shared within a session is
    # currently the same across all runs, so overwrite is always set
    # to True below.
    # XXX: improve later when BIDS is updated
    # If a coordsystem.json already exists, check that the contents match.
    if Path(fname).exists() and not overwrite:
        with open(fname, 'r', encoding='utf-8-sig') as fin:
            coordsystem_dict = json.load(fin)
        if fid_json != coordsystem_dict:
            raise RuntimeError(
                f'Trying to write coordsystem.json, but it already '
                f'exists at {fname} and the contents do not match. '
                f'You must differentiate this coordsystem.json file '
                f'from the existing one, or set "overwrite" to True.')
    _write_json(fname, fid_json, overwrite=True)
Example #23
def test_get_head_mri_trans(tmp_path):
    """Test getting a trans object from BIDS data."""
    import nibabel as nib

    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
                'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')
    subjects_dir = op.join(data_path, 'subjects')

    # Drop unknown events.
    events = mne.read_events(events_fname)
    events = events[events[:, 2] != 0]

    # Write it to BIDS
    raw = _read_raw_fif(raw_fname)
    bids_path = _bids_path.copy().update(
        root=tmp_path, datatype='meg', suffix='meg'
    )
    write_raw_bids(raw, bids_path, events_data=events, event_id=event_id,
                   overwrite=False)

    # We cannot recover trans if no MRI has yet been written
    with pytest.raises(FileNotFoundError, match='Did not find'):
        estimated_trans = get_head_mri_trans(
            bids_path=bids_path, fs_subject='sample',
            fs_subjects_dir=subjects_dir)

    # Write some MRI data and supply a `trans` so that a sidecar gets written
    trans = mne.read_trans(raw_fname.replace('_raw.fif', '-trans.fif'))

    # Get the T1 weighted MRI data file ... test write_anat with a nibabel
    # image instead of a file path
    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
    t1w_mgh = nib.load(t1w_mgh)

    landmarks = get_anat_landmarks(
        t1w_mgh, raw.info, trans, fs_subject='sample',
        fs_subjects_dir=subjects_dir)
    t1w_bids_path = bids_path.copy().update(
        datatype='anat', suffix='T1w'
    )
    t1w_bids_path = write_anat(
        t1w_mgh, bids_path=t1w_bids_path, landmarks=landmarks, verbose=True
    )
    anat_dir = t1w_bids_path.directory

    # Try to get trans back through fitting points
    estimated_trans = get_head_mri_trans(
        bids_path=bids_path, fs_subject='sample', fs_subjects_dir=subjects_dir)

    assert trans['from'] == estimated_trans['from']
    assert trans['to'] == estimated_trans['to']
    assert_almost_equal(trans['trans'], estimated_trans['trans'])

    # provoke an error by introducing NaNs into MEG coords
    raw.info['dig'][0]['r'] = np.full(3, np.nan)
    sh.rmtree(anat_dir)
    bad_landmarks = get_anat_landmarks(t1w_mgh, raw.info, trans, 'sample',
                                       op.join(data_path, 'subjects'))
    write_anat(t1w_mgh, bids_path=t1w_bids_path, landmarks=bad_landmarks)
    with pytest.raises(RuntimeError, match='AnatomicalLandmarkCoordinates'):
        estimated_trans = get_head_mri_trans(bids_path=t1w_bids_path,
                                             fs_subject='sample',
                                             fs_subjects_dir=subjects_dir)

    # test raw with no fiducials to provoke error
    t1w_bids_path = write_anat(  # put back
        t1w_mgh, bids_path=t1w_bids_path, landmarks=landmarks, overwrite=True
    )
    montage = raw.get_montage()
    montage.remove_fiducials()
    raw_test = raw.copy()
    raw_test.set_montage(montage)
    raw_test.save(bids_path.fpath, overwrite=True)

    with pytest.raises(RuntimeError, match='Could not extract fiducial'):
        get_head_mri_trans(bids_path=bids_path, fs_subject='sample',
                           fs_subjects_dir=subjects_dir)

    # test we are permissive for different casings of landmark names in the
    # sidecar, and also accept "nasion" instead of just "NAS"
    raw = _read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_path, events_data=events, event_id=event_id,
                   overwrite=True)  # overwrite with new acq
    t1w_bids_path = write_anat(
        t1w_mgh, bids_path=t1w_bids_path, landmarks=landmarks, overwrite=True
    )

    t1w_json_fpath = t1w_bids_path.copy().update(extension='.json').fpath
    with t1w_json_fpath.open('r', encoding='utf-8') as f:
        t1w_json = json.load(f)

    coords = t1w_json['AnatomicalLandmarkCoordinates']
    coords['lpa'] = coords['LPA']
    coords['Rpa'] = coords['RPA']
    coords['Nasion'] = coords['NAS']
    del coords['LPA'], coords['RPA'], coords['NAS']

    _write_json(t1w_json_fpath, t1w_json, overwrite=True)

    estimated_trans = get_head_mri_trans(
        bids_path=bids_path,
        fs_subject='sample', fs_subjects_dir=subjects_dir)
    assert_almost_equal(trans['trans'], estimated_trans['trans'])

    # Test t1_bids_path parameter
    #
    # Case 1: different BIDS roots
    meg_bids_path = _bids_path.copy().update(
        root=tmp_path / 'meg_root', datatype='meg', suffix='meg'
    )
    t1_bids_path = _bids_path.copy().update(
        root=tmp_path / 'mri_root', task=None, run=None
    )
    raw = _read_raw_fif(raw_fname)

    write_raw_bids(raw, bids_path=meg_bids_path)
    landmarks = get_anat_landmarks(
        t1w_mgh, raw.info, trans, fs_subject='sample',
        fs_subjects_dir=subjects_dir)
    write_anat(t1w_mgh, bids_path=t1_bids_path, landmarks=landmarks)
    read_trans = get_head_mri_trans(
        bids_path=meg_bids_path, t1_bids_path=t1_bids_path,
        fs_subject='sample', fs_subjects_dir=subjects_dir)
    assert np.allclose(trans['trans'], read_trans['trans'])

    # Case 2: different sessions
    raw = _read_raw_fif(raw_fname)
    meg_bids_path = _bids_path.copy().update(
        root=tmp_path / 'session_test', session='01', datatype='meg',
        suffix='meg'
    )
    t1_bids_path = meg_bids_path.copy().update(
        session='02', task=None, run=None, datatype='anat', suffix='T1w'
    )

    write_raw_bids(raw, bids_path=meg_bids_path)
    write_anat(t1w_mgh, bids_path=t1_bids_path, landmarks=landmarks)
    read_trans = get_head_mri_trans(
        bids_path=meg_bids_path, t1_bids_path=t1_bids_path,
        fs_subject='sample', fs_subjects_dir=subjects_dir)
    assert np.allclose(trans['trans'], read_trans['trans'])

    # Test that incorrect subject directory throws error
    with pytest.raises(ValueError, match='Could not find'):
        estimated_trans = get_head_mri_trans(
            bids_path=bids_path, fs_subject='bad',
            fs_subjects_dir=subjects_dir)

    # Case 3: write with suffix for kind
    landmarks2 = landmarks.copy()
    landmarks2.dig[0]['r'] *= -1
    landmarks2.save(tmp_path / 'landmarks2.fif')
    landmarks2 = tmp_path / 'landmarks2.fif'
    write_anat(t1w_mgh, bids_path=t1_bids_path, overwrite=True,
               deface=True,
               landmarks={"coreg": landmarks, "deface": landmarks2})
    read_trans1 = get_head_mri_trans(
        bids_path=meg_bids_path, t1_bids_path=t1_bids_path,
        fs_subject='sample', fs_subjects_dir=subjects_dir,
        kind="coreg")
    assert np.allclose(trans['trans'], read_trans1['trans'])
    read_trans2 = get_head_mri_trans(
        bids_path=meg_bids_path, t1_bids_path=t1_bids_path,
        fs_subject='sample', fs_subjects_dir=subjects_dir,
        kind="deface")
    assert not np.allclose(trans['trans'], read_trans2['trans'])

    # Test we're respecting existing suffix & data type
    # The following path is supposed to mimic a derivative generated by the
    # MNE-BIDS-Pipeline.
    #
    # XXX We MAY want to revise this once the BIDS-Pipeline produces more
    # BIDS-compatible output, e.g. including `channels.tsv` files for written
    # Raw data etc.
    raw = _read_raw_fif(raw_fname)
    deriv_root = tmp_path / 'derivatives' / 'mne-bids-pipeline'
    electrophys_path = (
        deriv_root / 'sub-01' / 'eeg' / 'sub-01_task-av_proc-filt_raw.fif'
    )
    electrophys_path.parent.mkdir(parents=True)
    raw.save(electrophys_path)

    electrophys_bids_path = BIDSPath(
        subject='01', task='av', datatype='eeg', processing='filt',
        suffix='raw', extension='.fif', root=deriv_root,
        check=False
    )
    t1_bids_path = _bids_path.copy().update(
        root=tmp_path / 'mri_root', task=None, run=None
    )
    with pytest.warns(RuntimeWarning, match='Did not find any channels.tsv'):
        get_head_mri_trans(
            bids_path=electrophys_bids_path,
            t1_bids_path=t1_bids_path,
            fs_subject='sample',
            fs_subjects_dir=subjects_dir
        )

    # bids_path without datatype is deprecated
    bids_path = electrophys_bids_path.copy().update(datatype=None)
    with pytest.raises(FileNotFoundError):  # default location is all wrong!
        with pytest.warns(DeprecationWarning, match='no datatype'):
            get_head_mri_trans(
                bids_path=bids_path,
                t1_bids_path=t1_bids_path,
                fs_subject='sample',
                fs_subjects_dir=subjects_dir
            )

    # bids_path without suffix is deprecated
    bids_path = electrophys_bids_path.copy().update(suffix=None)
    with pytest.raises(FileNotFoundError):  # default location is all wrong!
        with pytest.warns(DeprecationWarning, match='no datatype'):
            get_head_mri_trans(
                bids_path=bids_path,
                t1_bids_path=t1_bids_path,
                fs_subject='sample',
                fs_subjects_dir=subjects_dir
            )

    # Should fail for an unsupported coordinate frame
    raw = _read_raw_fif(raw_fname)
    bids_root = tmp_path / 'unsupported_coord_frame'
    bids_path = BIDSPath(
        subject='01', task='av', datatype='meg', suffix='meg',
        extension='.fif', root=bids_root
    )
    t1_bids_path = _bids_path.copy().update(
        root=tmp_path / 'mri_root', task=None, run=None
    )
    write_raw_bids(raw=raw, bids_path=bids_path, verbose=False)
Example #24
0
def _coordsystem_json(unit,
                      sensor_coord_system,
                      intended_for,
                      fname,
                      overwrite=False,
                      verbose=True):
    """Create a coordsystem.json file and save it.

    Parameters
    ----------
    unit : str
        Units to be used in the coordsystem specification,
        as in BIDS_COORDINATE_UNITS.
    sensor_coord_system : str
        Name of the coordinate system for the sensor positions.
    intended_for : str | None
        Filename of the image the sensor positions are defined relative to.
        Required if ``sensor_coord_system`` is ``"Image"``.
    fname : str
        Filename to save the coordsystem.json to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.
    """
    if unit not in BIDS_COORDINATE_UNITS:
        raise ValueError(f"Units of the sensor positions must be one "
                         f"of {BIDS_COORDINATE_UNITS}. You passed in "
                         f"{unit}.")

    if sensor_coord_system == "Image" and intended_for is None:
        raise RuntimeError(
            'If coordsystem is "Image", then img_fname '
            "must be passed in as the filename of the corresponding "
            "image the data is in. For example")

    # get the coordinate frame description
    sensor_coord_system_descr = COORD_FRAME_DESCRIPTIONS.get(
        sensor_coord_system.lower(), "n/a")
    if sensor_coord_system.lower() == 'tkras':
        sensor_coord_system = 'Other'
        sensor_coord_system_descr = 'FreeSurfer T1w surface tkras ' \
                                    'space.'

    if sensor_coord_system == "Other" and verbose:
        print("Using the `Other` keyword for the CoordinateSystem field. "
              "Please specify the CoordinateSystemDescription field manually.")

    if sensor_coord_system == "Other" and sensor_coord_system_descr == "n/a":
        raise RuntimeError(
            'If coordsystem is "Other", then coordsystem_description '
            "must be passed in describing the coordinate system "
            "in Free-form text. May also include a link to a "
            "documentation page or paper describing the system in "
            "greater detail.")

    processing_description = (
        "SEEK-algorithm (thresholding, cylindrical clustering and post-processing), "
        "or manual labeling of contacts using FieldTrip Toolbox.")

    # create the coordinate json data structure based on 'datatype'
    fid_json = {
        "IntendedFor": str(intended_for),
        "iEEGCoordinateSystem": sensor_coord_system,  # Other, Pixels, or ACPC
        "iEEGCoordinateSystemDescription": sensor_coord_system_descr,
        "iEEGCoordinateUnits": unit,  # m (MNE), mm, cm, or pixels
        "iEEGCoordinateProcessingDescription": processing_description,
        "iEEGCoordinateProcessingReference":
            "See DOI: https://zenodo.org/record/3542307#.XoYF9tNKhZI "
            "and FieldTrip Toolbox: doi:10.1155/2011/156869",
    }

    # note that any coordsystem.json file shared within sessions
    # will be the same across all runs (currently). So
    # overwrite is set to True always
    # XXX: improve later when BIDS is updated
    # check that there already exists a coordsystem.json
    if Path(fname).exists() and not overwrite:
        with open(fname, "r", encoding="utf-8-sig") as fin:
            coordsystem_dict = json.load(fin)
        if fid_json != coordsystem_dict:
            raise RuntimeError(
                f"Trying to write coordsystem.json, but it already "
                f"exists at {fname} and the contents do not match. "
                f"You must differentiate this coordsystem.json file "
                f'from the existing one, or set "overwrite" to True.')
    _write_json(fname, fid_json, overwrite=True, verbose=verbose)
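A minimal call sketch for the helper above; the filenames are illustrative placeholders, and it assumes the module-level BIDS_COORDINATE_UNITS, COORD_FRAME_DESCRIPTIONS, and _write_json defined elsewhere in this file:

# Hypothetical usage; paths do not refer to a real dataset.
_coordsystem_json(
    unit="mm",                         # must be one of BIDS_COORDINATE_UNITS
    sensor_coord_system="ACPC",
    intended_for="sub-01_T1w.nii.gz",  # image the coordinates are relative to
    fname="sub-01_coordsystem.json",
    overwrite=True,
)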
Example #25
0
def _coordsystem_json(*, raw, unit, hpi_coord_system, sensor_coord_system,
                      fname, datatype, overwrite=False, verbose=True):
    """Create a coordsystem.json file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    unit : str
        Units to be used in the coordsystem specification,
        as in BIDS_COORDINATE_UNITS.
    hpi_coord_system : str
        Name of the coordinate system for the head coils.
    sensor_coord_system : str
        Name of the coordinate system for the sensor positions.
    fname : str
        Filename to save the coordsystem.json to.
    datatype : str
        Type of the data recording. Can be ``meg``, ``eeg``,
        or ``ieeg``.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.

    """
    dig = raw.info['dig']
    if dig is None:
        dig = []
    coords = _extract_landmarks(dig)

    coord_frame = {d['coord_frame'] for d in dig}
    if len(coord_frame) > 1:
        raise ValueError('All HPI, electrodes, and fiducials must be in the '
                         'same coordinate frame. Found: "{}"'
                         .format(coord_frame))

    # get the coordinate frame description
    sensor_coord_system_descr = (COORD_FRAME_DESCRIPTIONS
                                 .get(sensor_coord_system.lower(), "n/a"))
    if sensor_coord_system == 'Other' and verbose:
        print('Using the `Other` keyword for the CoordinateSystem field. '
              'Please specify the CoordinateSystemDescription field manually.')

    # create the coordinate json data structure based on 'datatype'
    if datatype == 'meg':
        hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}
        if hpi:
            for ident in hpi.keys():
                coords['coil%d' % ident] = hpi[ident]['r'].tolist()

        fid_json = {
            'MEGCoordinateSystem': sensor_coord_system,
            'MEGCoordinateUnits': unit,  # XXX validate this
            'MEGCoordinateSystemDescription': sensor_coord_system_descr,
            'HeadCoilCoordinates': coords,
            'HeadCoilCoordinateSystem': hpi_coord_system,
            'HeadCoilCoordinateUnits': unit  # XXX validate this
        }
    elif datatype == 'eeg':
        fid_json = {
            'EEGCoordinateSystem': sensor_coord_system,
            'EEGCoordinateUnits': unit,
            'EEGCoordinateSystemDescription': sensor_coord_system_descr,
            'AnatomicalLandmarkCoordinates': coords,
            'AnatomicalLandmarkCoordinateSystem': sensor_coord_system,
            'AnatomicalLandmarkCoordinateUnits': unit,
        }
    elif datatype == "ieeg":
        fid_json = {
            # (Other, Pixels, ACPC)
            'iEEGCoordinateSystem': sensor_coord_system,
            'iEEGCoordinateSystemDescription': sensor_coord_system_descr,
            'iEEGCoordinateUnits': unit,  # m (MNE), mm, cm , or pixels
        }

    _write_json(fname, fid_json, overwrite, verbose)
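A sketch exercising the ieeg branch of this variant; the Raw object is a synthetic stand-in with no digitization, so `coords` stays empty and only the iEEG* fields end up in the JSON:

import numpy as np
import mne

# One fake ECoG channel, no montage attached (info['dig'] stays None).
info = mne.create_info(['LTP1'], sfreq=1000., ch_types='ecog')
raw = mne.io.RawArray(np.zeros((1, 1000)), info)
_coordsystem_json(raw=raw, unit='mm', hpi_coord_system='n/a',
                  sensor_coord_system='ACPC',
                  fname='sub-01_coordsystem.json', datatype='ieeg')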
Example #26
0
def update_anat_landmarks(
    bids_path, landmarks, *, fs_subject=None, fs_subjects_dir=None,
    kind=None, on_missing='raise', verbose=None
):
    """Update the anatomical landmark coordinates of an MRI scan.

    This will change the ``AnatomicalLandmarkCoordinates`` entry in the
    respective JSON sidecar file, or create it if it doesn't exist.

    Parameters
    ----------
    bids_path : BIDSPath
        Path of the MR image.
    landmarks : mne.channels.DigMontage | path-like
        An :class:`mne.channels.DigMontage` instance with coordinates for the
        nasion and left and right pre-auricular points in MRI voxel
        coordinates. Alternatively, the path to a ``*-fiducials.fif`` file as
        produced by the MNE-Python coregistration GUI or via
        :func:`mne.io.write_fiducials`.

        .. note:: :func:`mne_bids.get_anat_landmarks` provides a convenient and
                  reliable way to generate the landmark coordinates in the
                  required coordinate system.

        .. note:: If ``path-like``, ``fs_subject`` and ``fs_subjects_dir``
                  must be provided as well.

        .. versionchanged:: 0.10
           Added support for ``path-like`` input.
    fs_subject : str | None
        The subject identifier used for FreeSurfer. Must be provided if
        ``landmarks`` is ``path-like``; otherwise, it will be ignored.
    fs_subjects_dir : path-like | None
        The FreeSurfer subjects directory. If ``None``, defaults to the
        ``SUBJECTS_DIR`` environment variable. Must be provided if
        ``landmarks`` is ``path-like``; otherwise, it will be ignored.
    kind : str | None
        The suffix of the anatomical landmark names in the JSON sidecar.
        A suffix might be present e.g. to distinguish landmarks between
        sessions. If provided, should not include a leading underscore ``_``.
        For example, if the landmark names in the JSON sidecar file are
        ``LPA_ses-1``, ``RPA_ses-1``, ``NAS_ses-1``, you should pass
        ``'ses-1'`` here.
        If ``None``, no suffix is appended, the landmarks named
        ``Nasion`` (or ``NAS``), ``LPA``, and ``RPA`` will be used.

        .. versionadded:: 0.10
    on_missing : 'ignore' | 'warn' | 'raise'
        How to behave if the specified landmarks cannot be found in the MRI
        JSON sidecar file.

        .. versionadded:: 0.10
    %(verbose)s

    Notes
    -----
    .. versionadded:: 0.8
    """
    _validate_type(item=bids_path, types=BIDSPath, item_name='bids_path')
    _validate_type(
        item=landmarks, types=(DigMontage, 'path-like'), item_name='landmarks'
    )
    _check_on_missing(on_missing)

    # Do some path verifications and fill in some gaps the users might have
    # left (datatype and extension)
    # XXX We could be more stringent (and less user-friendly) and insist on a
    # XXX full specification of all parts of the BIDSPath, thoughts?
    bids_path_mri = bids_path.copy()
    if bids_path_mri.datatype is None:
        bids_path_mri.datatype = 'anat'

    if bids_path_mri.datatype != 'anat':
        raise ValueError(
            f'Can only operate on "anat" MRI data, but the provided bids_path '
            f'points to: {bids_path_mri.datatype}')

    if bids_path_mri.suffix is None:
        raise ValueError('Please specify the "suffix" entity of the provided '
                         'bids_path.')
    elif bids_path_mri.suffix not in ('T1w', 'FLASH'):
        raise ValueError(
            f'Can only operate on "T1w" and "FLASH" images, but the bids_path '
            f'suffix indicates: {bids_path_mri.suffix}')

    valid_extensions = ('.nii', '.nii.gz')
    tried_paths = []
    file_exists = False
    if bids_path_mri.extension is None:
        # No extension was provided, start searching …
        for extension in valid_extensions:
            bids_path_mri.extension = extension
            tried_paths.append(bids_path_mri.fpath)

            if bids_path_mri.fpath.exists():
                file_exists = True
                break
    else:
        # An extension was provided
        tried_paths.append(bids_path_mri.fpath)
        if bids_path_mri.fpath.exists():
            file_exists = True

    if not file_exists:
        raise ValueError(
            f'Could not find an MRI scan. Please check the provided '
            f'bids_path. Tried the following filenames: '
            f'{", ".join([p.name for p in tried_paths])}')

    if not isinstance(landmarks, DigMontage):  # it's pathlike
        if fs_subject is None:
            raise ValueError(
                'You must provide the "fs_subject" parameter when passing the '
                'path to fiducials'
            )
        landmarks = _get_landmarks_from_fiducials_file(
            bids_path=bids_path,
            fname=landmarks,
            fs_subject=fs_subject,
            fs_subjects_dir=fs_subjects_dir
        )

    positions = landmarks.get_positions()
    coord_frame = positions['coord_frame']
    if coord_frame != 'mri_voxel':
        raise ValueError(
            f'The landmarks must be specified in MRI voxel coordinates, but '
            f'provided DigMontage is in "{coord_frame}"')

    # Extract the cardinal points
    name_to_coords_map = {
        'LPA': positions['lpa'],
        'NAS': positions['nasion'],
        'RPA': positions['rpa']
    }

    # Check if coordinates for any cardinal point are missing, and convert to
    # a list so we can easily store the data in JSON format
    missing_points = []
    for name, coords in name_to_coords_map.items():
        if coords is None:
            missing_points.append(name)
        else:
            # Funnily, np.float64 is JSON-serializable, while np.float32 is not!
            # Thus, cast to float64 to avoid issues (which e.g. may arise when
            # fiducials were read from disk!)
            name_to_coords_map[name] = list(coords.astype('float64'))

    if missing_points:
        raise ValueError(
            f'The provided DigMontage did not contain all required cardinal '
            f'points (nasion and left and right pre-auricular points). The '
            f'following points are missing: '
            f'{", ".join(missing_points)}')

    bids_path_json = bids_path.copy().update(extension='.json')
    if not bids_path_json.fpath.exists():  # Must exist before we can update it
        _write_json(bids_path_json.fpath, dict())

    mri_json = json.loads(bids_path_json.fpath.read_text(encoding='utf-8'))
    if 'AnatomicalLandmarkCoordinates' not in mri_json:
        _on_missing(
            on_missing=on_missing,
            msg=f'No AnatomicalLandmarkCoordinates section found in '
                f'{bids_path_json.fpath.name}',
            error_klass=KeyError
        )
        mri_json['AnatomicalLandmarkCoordinates'] = dict()

    for name, coords in name_to_coords_map.items():
        if kind is not None:
            name = f'{name}_{kind}'

        if name not in mri_json['AnatomicalLandmarkCoordinates']:
            _on_missing(
                on_missing=on_missing,
                msg=f'Anatomical landmark not found in '
                    f'{bids_path_json.fpath.name}: {name}',
                error_klass=KeyError
            )

        mri_json['AnatomicalLandmarkCoordinates'][name] = coords

    update_sidecar_json(bids_path=bids_path_json, entries=mri_json)
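For orientation, a hedged usage sketch of update_anat_landmarks; the BIDS root and voxel coordinates below are illustrative assumptions, not values from a real dataset:

import mne
from mne_bids import BIDSPath, update_anat_landmarks

bids_path = BIDSPath(subject='01', root='/data/bids', datatype='anat',
                     suffix='T1w')
# Landmarks must be in MRI voxel coordinates (enforced above); the numbers
# here are placeholders.
landmarks = mne.channels.make_dig_montage(
    nasion=[128., 50., 128.],
    lpa=[50., 128., 128.],
    rpa=[206., 128., 128.],
    coord_frame='mri_voxel',
)
update_anat_landmarks(bids_path, landmarks)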
Example #27
0
def _write_coordsystem_json(*,
                            raw,
                            unit,
                            hpi_coord_system,
                            sensor_coord_system,
                            fname,
                            datatype,
                            overwrite=False,
                            verbose=True):
    """Create a coordsystem.json file and save it.

    Parameters
    ----------
    raw : mne.io.Raw
        The data as MNE-Python Raw object.
    unit : str
        Units to be used in the coordsystem specification,
        as in BIDS_COORDINATE_UNITS.
    hpi_coord_system : str
        Name of the coordinate system for the head coils.
    sensor_coord_system : str | tuple of str
        Name of the coordinate system for the sensor positions.
        If a tuple of strings, should be in the form:
        ``(BIDS coordinate frame, MNE coordinate frame)``.
    fname : str
        Filename to save the coordsystem.json to.
    datatype : str
        Type of the data recording. Can be ``meg``, ``eeg``,
        or ``ieeg``.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.

    """
    dig = raw.info['dig']
    if dig is None:
        dig = []

    coord_frame = {d['coord_frame'] for d in dig}
    if len(coord_frame) > 1:
        raise ValueError(
            'All HPI, electrodes, and fiducials must be in the '
            'same coordinate frame. Found: "{}"'.format(coord_frame))

    # get the coordinate frame description
    # `sensor_coord_system` may be a (BIDS frame, MNE frame) tuple; a plain
    # string has no MNE equivalent and fails to unpack into two names.
    try:
        sensor_coord_system, sensor_coord_system_mne = sensor_coord_system
    except ValueError:
        sensor_coord_system_mne = "n/a"
    sensor_coord_system_descr = (BIDS_COORD_FRAME_DESCRIPTIONS.get(
        sensor_coord_system.lower(), "n/a"))
    if sensor_coord_system == 'Other':
        if verbose:
            msg = ('Using the `Other` keyword for the CoordinateSystem field. '
                   'Please specify the CoordinateSystemDescription field '
                   'manually.')
            logger.info(msg)
        sensor_coord_system_descr = (BIDS_COORD_FRAME_DESCRIPTIONS.get(
            sensor_coord_system_mne.lower(), "n/a"))
    coords = _extract_landmarks(dig)
    # create the coordinate json data structure based on 'datatype'
    if datatype == 'meg':
        landmarks = dict(coords)
        hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}
        if hpi:
            for ident in hpi.keys():
                coords['coil%d' % ident] = hpi[ident]['r'].tolist()

        fid_json = {
            'MEGCoordinateSystem': sensor_coord_system,
            'MEGCoordinateUnits': unit,  # XXX validate this
            'MEGCoordinateSystemDescription': sensor_coord_system_descr,
            'HeadCoilCoordinates': coords,
            'HeadCoilCoordinateSystem': hpi_coord_system,
            'HeadCoilCoordinateUnits': unit,  # XXX validate this
            'AnatomicalLandmarkCoordinates': landmarks,
            'AnatomicalLandmarkCoordinateSystem': sensor_coord_system,
            'AnatomicalLandmarkCoordinateUnits': unit
        }
    elif datatype == 'eeg':
        fid_json = {
            'EEGCoordinateSystem': sensor_coord_system,
            'EEGCoordinateUnits': unit,
            'EEGCoordinateSystemDescription': sensor_coord_system_descr,
            'AnatomicalLandmarkCoordinates': coords,
            'AnatomicalLandmarkCoordinateSystem': sensor_coord_system,
            'AnatomicalLandmarkCoordinateUnits': unit,
        }
    elif datatype == "ieeg":
        fid_json = {
            # (Other, Pixels, ACPC)
            'iEEGCoordinateSystem': sensor_coord_system,
            'iEEGCoordinateSystemDescription': sensor_coord_system_descr,
            'iEEGCoordinateUnits': unit,  # m (MNE), mm, cm , or pixels
        }

    # note that any coordsystem.json file shared within sessions
    # will be the same across all runs (currently). So
    # overwrite is set to True always
    # XXX: improve later when BIDS is updated
    # check that there already exists a coordsystem.json
    if Path(fname).exists() and not overwrite:
        with open(fname, 'r', encoding='utf-8-sig') as fin:
            coordsystem_dict = json.load(fin)
        if fid_json != coordsystem_dict:
            raise RuntimeError(
                f'Trying to write coordsystem.json, but it already '
                f'exists at {fname} and the contents do not match. '
                f'You must differentiate this coordsystem.json file '
                f'from the existing one, or set "overwrite" to True.')
    _write_json(fname, fid_json, overwrite=True, verbose=verbose)
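A sketch of the (BIDS frame, MNE frame) tuple form accepted by sensor_coord_system; the Raw object is a synthetic stand-in with a standard montage so info['dig'] is populated:

import numpy as np
import mne

info = mne.create_info(['Fz', 'Cz', 'Pz'], sfreq=100., ch_types='eeg')
raw = mne.io.RawArray(np.zeros((3, 100)), info)
raw.set_montage('standard_1005')  # digitization ends up in the 'head' frame

# If the BIDS name were 'Other', the description would fall back to the MNE
# frame ('head') given as the second tuple element.
_write_coordsystem_json(raw=raw, unit='m', hpi_coord_system='n/a',
                        sensor_coord_system=('CapTrak', 'head'),
                        fname='sub-01_coordsystem.json', datatype='eeg')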
Example #28
0
    def interpolate(self):
        """Interpolate bad channels and update the block's rating info.

        Returns
        -------
        results : dict
            The interpolated data under ``"preprocessed"`` and the updated
            automagic metadata under ``"automagic"``.
        """
        result_filename = self.unique_name + "_results.json"
        result_file_overall = os.path.join(self.result_path, result_filename)
        processed_filename = self.unique_name + "_raw.fif"
        processed_file_overall = os.path.join(self.result_path,
                                              processed_filename)
        if os.path.isfile(result_file_overall) and os.path.isfile(
                processed_file_overall):
            eeg = _read_raw(processed_file_overall)
            with open(result_file_overall) as json_file:
                automagic = json.load(json_file)
        else:
            raise ValueError("The block has not been preprocessed yet.")
        interpolate_chans = self.to_be_interpolated
        if not interpolate_chans:
            raise ValueError(
                "The block is rated to be interpolated but no channels "
                "were chosen.")
        if (self.params == [] or "interpolation_params" not in self.params
                or self.params["interpolation_params"] == []):
            default_params = self.config["default_params"]
            interpolation_params = default_params["interpolation_params"]
        else:
            interpolation_params = self.params["interpolation_params"]
        eeg.load_data()
        eeg.info["dig"] = mne.channels.make_standard_montage(self.montage)
        interpolated = eeg.interpolate_bads(
        )  # (origin=interpolation_params['origin'])

        overall_thresh = self.project.quality_thresholds["overall_thresh"]
        time_thresh = self.project.quality_thresholds["time_thresh"]
        chan_thresh = self.project.quality_thresholds["chan_thresh"]
        apply_common_avg = self.project.quality_thresholds["apply_common_avg"]
        quality_scores = calcQuality(
            interpolated.get_data(),
            interpolate_chans,
            overall_thresh,
            time_thresh,
            chan_thresh,
            apply_common_avg,
        )
        update_to_be_stored = {
            "rate": "not rated",
            "is_manually_rated": False,
            "to_be_interpolated": [],
            "final_bad_chans": interpolate_chans,
            "is_interpolated": True,
            "quality_scores": quality_scores,
        }
        self.update_rating(update_to_be_stored)
        automagic.update({
            "interpolation": {
                "channels": interpolate_chans,
                "params": interpolation_params,
            },
            "quality_scores": self.quality_scores,
            "rate": self.rate,
        })
        results = {"preprocessed": interpolated, "automagic": automagic}
        self.write_log(automagic)
        automagic.update({
            "to_be_interpolated": self.to_be_interpolated,
            "rate": self.rate,
            "quality_scores": self.quality_scores,
            "is_manually_rated": self.is_manually_rated,
            "is_interpolated": self.is_interpolated,
        })
        main_result_file = results["automagic"]
        result_filename = self.unique_name + "_results.json"
        result_file_overall = os.path.join(self.result_path, result_filename)
        _write_json(result_file_overall,
                    main_result_file,
                    overwrite=True,
                    verbose=True)
        processed = results["preprocessed"]
        processed.info["dig"] = None
        processed_filename = self.unique_name + "_raw.fif"
        processed_file_overall = os.path.join(self.result_path,
                                              processed_filename)
        processed.save(processed_file_overall, overwrite=True)
        return results
Example #29
0
def run_maxwell_filter(subject, session=None):
    print("Processing subject: %s" % subject)

    # Construct the search path for the data file. `sub` is mandatory
    subject_path = op.join('sub-{}'.format(subject))
    # `session` is optional
    if session is not None:
        subject_path = op.join(subject_path, 'ses-{}'.format(session))

    subject_path = op.join(subject_path, config.kind)
    data_dir = op.join(config.bids_root, subject_path)

    for run_idx, run in enumerate(config.runs):

        bids_basename = make_bids_basename(subject=subject,
                                           session=session,
                                           task=config.task,
                                           acquisition=config.acq,
                                           run=run,
                                           processing=config.proc,
                                           recording=config.rec,
                                           space=config.space
                                           )
        # Find the data file
        search_str = op.join(data_dir, bids_basename) + '_' + config.kind + '*'
        fnames = sorted(glob.glob(search_str))
        fnames = [f for f in fnames
                  if op.splitext(f)[1] in mne_bids_readers]

        if len(fnames) == 1:
            bids_fpath = fnames[0]
        elif len(fnames) == 0:
            raise ValueError('Could not find input data file matching: '
                             '"{}"'.format(search_str))
        elif len(fnames) > 1:
            raise ValueError('Expected to find a single input data file: '
                             '"{}" but found:\n\n{}'
                             .format(search_str, fnames))

        if run_idx == 0:  # XXX does this work when no runs are specified?
            # Prepare the pipeline directory in /derivatives
            deriv_path = op.join(config.bids_root, 'derivatives',
                                 config.PIPELINE_NAME)
            fpath_out = op.join(deriv_path, subject_path)
            if not op.exists(fpath_out):
                os.makedirs(fpath_out)

            # Write a dataset_description.json for the pipeline
            ds_json = dict()
            ds_json['Name'] = config.PIPELINE_NAME + ' outputs'
            ds_json['BIDSVersion'] = BIDS_VERSION
            ds_json['PipelineDescription'] = {
                'Name': config.PIPELINE_NAME,
                'Version': config.VERSION,
                'CodeURL': config.CODE_URL,
            }
            ds_json['SourceDatasets'] = {
                'URL': 'n/a',
            }

            fname = op.join(deriv_path, 'dataset_description.json')
            _write_json(fname, ds_json, overwrite=True, verbose=True)

        # read_raw_bids automatically
        # - populates bad channels using the BIDS channels.tsv
        # - sets channels types according to BIDS channels.tsv `type` column
        # - sets raw.annotations using the BIDS events.tsv
        _, bids_fname = op.split(bids_fpath)
        raw = read_raw_bids(bids_fname, config.bids_root)

        # XXX hack to deal with dates that fif files cannot handle
        if config.daysback is not None:
            raw.anonymize(daysback=config.daysback)

        if config.crop is not None:
            raw.crop(*config.crop)

        raw.load_data()
        if hasattr(raw, 'fix_mag_coil_types'):
            raw.fix_mag_coil_types()

        if config.use_maxwell_filter:
            print('Applying maxwell filter.')

            # Warn if no bad channels are set before Maxfilter
            if raw.info['bads'] is None or len(raw.info['bads']) == 0:
                print('\n Warning: Found no bad channels. \n ')

            if run_idx == 0:
                destination = raw.info['dev_head_t']

            if config.mf_st_duration:
                print('    st_duration=%d' % (config.mf_st_duration,))

            raw_sss = mne.preprocessing.maxwell_filter(
                raw,
                calibration=config.mf_cal_fname,
                cross_talk=config.mf_ctc_fname,
                st_duration=config.mf_st_duration,
                origin=config.mf_head_origin,
                destination=destination)

            # Prepare a name to save the data
            raw_fname_out = op.join(fpath_out, bids_basename + '_sss_raw.fif')
            raw_sss.save(raw_fname_out, overwrite=True)

            if config.plot:
                # plot maxfiltered data
                raw_sss.plot(n_channels=50, butterfly=True)

        else:
            print('Not applying maxwell filter.\n'
                  'If you wish to apply it set config.use_maxwell_filter=True')
            # Prepare a name to save the data
            raw_fname_out = op.join(fpath_out, bids_basename +
                                    '_nosss_raw.fif')
            raw.save(raw_fname_out, overwrite=True)

            if config.plot:
                # plot raw data
                raw.plot(n_channels=50, butterfly=True)
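One detail worth noting in the script above: the first run's dev_head_t is captured as destination, so every subsequent run is projected to the same head position. A minimal sketch of that idea, assuming raws is a list of already-loaded Raw objects:

import mne

def maxwell_filter_aligned(raws):
    """Maxwell-filter each run, realigned to the first run's head position."""
    destination = raws[0].info['dev_head_t']
    return [mne.preprocessing.maxwell_filter(raw, destination=destination)
            for raw in raws]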
Example #30
0
def run_filter(subject, run=None, session=None):
    """Filter data from a single subject."""
    print('\nProcessing subject: {}\n{}'.format(subject,
                                                '-' * (20 + len(subject))))

    # Construct the search path for the data file. `sub` is mandatory
    subject_path = op.join('sub-{}'.format(subject))
    # `session` is optional
    if session is not None:
        subject_path = op.join(subject_path, 'ses-{}'.format(session))

    subject_path = op.join(subject_path, config.kind)
    data_dir = op.join(config.bids_root, subject_path)

    bids_basename = make_bids_basename(subject=subject,
                                       session=session,
                                       task=config.task,
                                       acquisition=config.acq,
                                       run=run,
                                       processing=config.proc,
                                       recording=config.rec,
                                       space=config.space)

    # Find the data file
    search_str = op.join(data_dir, bids_basename) + '_' + config.kind + '*'
    fnames = sorted(glob.glob(search_str))
    fnames = [f for f in fnames if op.splitext(f)[1] in mne_bids_readers]

    if len(fnames) == 1:
        bids_fpath = fnames[0]
    elif len(fnames) == 0:
        raise ValueError('Could not find input data file matching: '
                         '"{}"'.format(search_str))
    elif len(fnames) > 1:
        raise ValueError('Expected to find a single input data file: '
                         '"{}" but found:\n\n{}'.format(search_str, fnames))

    # read_raw_bids automatically
    # - populates bad channels using the BIDS channels.tsv
    # - sets channels types according to BIDS channels.tsv `type` column
    # - sets raw.annotations using the BIDS events.tsv
    _, bids_fname = op.split(bids_fpath)
    raw = read_raw_bids(bids_fname, config.bids_root)

    if config.crop is not None:
        raw.crop(*config.crop)

    raw.load_data()

    # Band-pass the data channels (MEG and EEG)
    print('Filtering data between {} and {} (Hz)'.format(
        config.l_freq, config.h_freq))

    raw.filter(config.l_freq,
               config.h_freq,
               l_trans_bandwidth=config.l_trans_bandwidth,
               h_trans_bandwidth=config.h_trans_bandwidth,
               filter_length='auto',
               phase='zero',
               fir_window='hamming',
               fir_design='firwin')

    if config.resample_sfreq:
        print('Resampling data to {:.1f} Hz'.format(config.resample_sfreq))

        raw.resample(config.resample_sfreq, npad='auto')

    # Prepare the pipeline directory in /derivatives
    deriv_path = op.join(config.bids_root, 'derivatives', config.PIPELINE_NAME)
    fpath_out = op.join(deriv_path, subject_path)
    if not op.exists(fpath_out):
        os.makedirs(fpath_out)

        # Write a dataset_description.json for the pipeline
        ds_json = dict()
        ds_json['Name'] = config.PIPELINE_NAME + ' outputs'
        ds_json['BIDSVersion'] = BIDS_VERSION
        ds_json['PipelineDescription'] = {
            'Name': config.PIPELINE_NAME,
            'Version': config.VERSION,
            'CodeURL': config.CODE_URL,
        }
        ds_json['SourceDatasets'] = {
            'URL': 'n/a',
        }

        fname = op.join(deriv_path, 'dataset_description.json')
        _write_json(fname, ds_json, overwrite=True, verbose=True)

    # Prepare a name to save the data
    fname_out = op.join(fpath_out, bids_basename + '_filt_raw.fif')
    raw.save(fname_out, overwrite=True)

    if config.plot:
        # plot raw data
        raw.plot(n_channels=50, butterfly=True)

        # plot power spectral density
        raw.plot_psd(area_mode='range',
                     tmin=10.0,
                     tmax=100.0,
                     fmin=0.,
                     fmax=50.,
                     average=True)