Example #1
def write_dig_bids(
    fname: BIDSPath,
    root,
    ch_names: List,
    ch_coords: List,
    unit: str,
    coord_system: str,
    intended_for: Optional[Union[str, Path]] = None,
    sizes: Optional[List] = None,
    groups: Optional[List] = None,
    hemispheres: Optional[List] = None,
    manufacturers: Optional[List] = None,
    overwrite: bool = False,
    verbose: bool = True,
):
    """Write iEEG-BIDS coordinates and related files to disc.

    Parameters
    ----------
    fname : str | BIDSPath
        The BIDS filename; only used to parse the BIDS entities.
    root : str
        The root of the BIDS dataset.
    ch_names : list
        The names of the iEEG channels.
    ch_coords : list
        The xyz coordinates of the iEEG channels, one per channel name.
    unit : str
        The unit of the coordinates (e.g. ``'mm'``).
    coord_system : str
        The coordinate system of the coordinates.
    intended_for : str | pathlib.Path | None
        The image file the coordinates are relative to. Must be defined
        if ``coord_system`` is ``'Other'``.
    sizes : list | None
        Optional contact sizes, one per channel.
    groups : list | None
        Optional electrode groups, one per channel.
    hemispheres : list | None
        Optional hemispheres, one per channel.
    manufacturers : list | None
        Optional manufacturers, one per channel.
    overwrite : bool
        Whether to overwrite existing files.
    verbose : bool
        Verbosity.
    """
    # check that per-channel inputs match the number of channel names
    _checklen = len(ch_names)
    if not all(len(_check) == _checklen for _check in [ch_names, ch_coords]):
        raise ValueError(
            "Number of channel names should match "
            "number of coordinates passed in. "
            f"{len(ch_names)} names and {len(ch_coords)} coords passed in.")

    for name, _check in zip(
        ["size", "group", "hemisphere", "manufacturer"],
        [sizes, groups, hemispheres, manufacturers],
    ):
        if _check is not None:
            if len(_check) != _checklen:
                raise ValueError(
                    f"Number of {name}s ({len(_check)}) should match "
                    f"the number of channel names passed in "
                    f"({len(ch_names)} names).")

    if intended_for is None and coord_system == "Other":
        raise ValueError('"intended_for" must be defined if '
                         '"coord_system" is "Other".')
    if intended_for is None:
        intended_for = "n/a"

    # check that filename adheres to BIDS naming convention
    entities = get_entities_from_fname(fname, on_error="raise")

    # build the electrodes.tsv and coordsystem.json file paths
    datatype = "ieeg"
    elecs_fname = BIDSPath(**entities, datatype=datatype, root=root)
    elecs_fname.update(suffix="electrodes", extension=".tsv")
    coordsys_fname = elecs_fname.copy().update(suffix="coordsystem",
                                               extension=".json")

    # make parent directories
    Path(elecs_fname).parent.mkdir(exist_ok=True, parents=True)

    # write the coordsystem json file
    _coordsystem_json(
        unit,
        coord_system,
        intended_for,
        fname=coordsys_fname,
        overwrite=overwrite,
        verbose=verbose,
    )

    # write basic electrodes tsv file
    _electrodes_tsv(
        ch_names=ch_names,
        ch_coords=ch_coords,
        fname=elecs_fname,
        overwrite=overwrite,
        verbose=verbose,
    )

    for name, _check in zip(
        ["size", "group", "hemisphere", "manufacturer"],
        [sizes, groups, hemispheres, manufacturers],
    ):
        if _check is not None:
            # write this additional data now to electrodes.tsv
            elec_df = pd.read_csv(elecs_fname, delimiter="\t")
            elec_df[name] = _check
            elec_df.to_csv(elecs_fname, index=False, sep="\t")

            # for groups, these need to match inside the channels data
            if name == "group":
                chs_fname = elecs_fname.copy().update(suffix="channels")
                chs_df = pd.read_csv(chs_fname, delimiter="\t")
                chs_df[name] = _check
                chs_df.to_csv(chs_fname, index=False, sep="\t")
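
A minimal usage sketch, assuming ``write_dig_bids`` and ``mne_bids`` are importable; the BIDS root, filename, and channel data below are hypothetical:

# Hypothetical usage sketch: the paths, channel names, and coordinates
# are made up for illustration.
ch_names = ["G1", "G2", "G3"]
ch_coords = [
    [-48.5, 18.2, 40.1],
    [-50.1, 20.3, 38.7],
    [-51.9, 22.0, 37.2],
]
write_dig_bids(
    fname="sub-01_ses-01_task-rest_run-01_ieeg.edf",  # parsed for BIDS entities
    root="/data/bids_dataset",
    ch_names=ch_names,
    ch_coords=ch_coords,
    unit="mm",
    coord_system="ACPC",
    sizes=[4.0, 4.0, 4.0],  # optional per-channel contact sizes
    overwrite=True,
)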
Example #2
def write_annotations(annot_df: pd.DataFrame,
                      fname: Union[str, Path],
                      intended_for: str,
                      root: Path,
                      description: Optional[str] = None) -> None:
    """Write annotations dataframe to disc.

    Parameters
    ----------
    annot_df : pd.DataFrame
        The annotations DataFrame.
    fname : str | pathlib.Path
        The BIDS filename to write annotations to.
    intended_for : str | pathlib.Path | BIDSPath
        The ``IntendedFor`` BIDS keyword corresponding to the
        ``Raw`` file that the Annotations were created from.
    root : str | pathlib.Path
        The root of the BIDS dataset.
    description : str | None
        The description of the Annotations file. If None (default),
        will describe it as HFO events detected using mne-hfo.
    """
    fname, _ = os.path.splitext(str(fname))
    fname = Path(fname)
    tsv_fname = fname.with_suffix('.tsv')
    json_fname = fname.with_suffix('.json')

    if description is None:
        description = 'HFO annotated events detected using ' \
                      'mne-hfo algorithms.'

    # error check that the IntendedFor file exists
    entities = get_entities_from_fname(intended_for)
    _, ext = os.path.splitext(intended_for)
    # write the correct extension for BrainVision
    if ext == '.eeg':
        ext = '.vhdr'
    intended_for_path = BIDSPath(**entities, extension=ext, root=root)
    if not intended_for_path.fpath.exists():
        raise RuntimeError(f'The intended for raw dataset '
                           f'does not exist at {intended_for_path}. '
                           f'Please make sure it does.')

    # make sure parent directories exist
    tsv_fname.parent.mkdir(parents=True, exist_ok=True)

    # write the dataframe itself as a tsv file
    annot_df.to_csv(tsv_fname, sep='\t', index=False)

    # create annotations json
    annot_json = {
        'Description': description,
        'IntendedFor': intended_for_path.basename,
        'Author': 'mne-hfo',
        'LabelDescription': {
            'hfo_<ch_name>':
                'Generic HFO detected at the channel.',
            'ripple_<ch_name>':
                'Ripple HFO detected at the channel.',
            'fastripple_<ch_name>':
                'Fast ripple HFO detected at the channel.',
            'frandr_<ch_name>':
                'Fast ripple and ripple HFO co-occurrence '
                'at the channel.'
        },
    }
    with open(json_fname, 'w', encoding='utf-8') as fout:
        json.dump(annot_json, fout, ensure_ascii=False, indent=4)
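
A usage sketch, assuming an annotations DataFrame with BIDS-style onset/duration columns; the filenames and BIDS root are hypothetical:

# Hypothetical usage sketch: the DataFrame contents and paths are
# illustrative, and the IntendedFor raw file must already exist on disk.
from pathlib import Path

import pandas as pd

annot_df = pd.DataFrame({
    "onset": [12.3, 45.6],         # seconds
    "duration": [0.08, 0.05],      # seconds
    "label": ["hfo_A1", "hfo_A2"],
})
write_annotations(
    annot_df,
    fname="sub-01_ses-01_task-rest_annotations.tsv",
    intended_for="sub-01_ses-01_task-rest_ieeg.vhdr",
    root=Path("/data/bids_dataset"),
)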
Example #3
def read_annotations(fname: Union[str, Path],
                     root: Optional[Path] = None) -> pd.DataFrame:
    """Read annotations.tsv Derivative file.

    Annotations are part of the BIDS-Derivatives for Common
    Electrophysiological derivatives [1].

    Parameters
    ----------
    fname : str | pathlib.Path
        The BIDS file path for the ``*annotations.tsv|json`` files.
    root : str | pathlib.Path | None
        The root of the BIDS dataset. If None (default), will try
        to infer the BIDS root from the ``fname`` argument.

    Returns
    -------
    annot_tsv : pd.DataFrame
        The DataFrame for the annotations.tsv with extra columns appended
        to make sense of the sample data.

    References
    ----------
    .. [1] https://docs.google.com/document/d/1PmcVs7vg7Th-cGC-UrX8rAhKUHIzOI-uIOh69_mvdlw/edit#  # noqa
    """
    fname, ext = os.path.splitext(str(fname))
    fname = Path(fname)
    tsv_fname = fname.with_suffix('.tsv')
    json_fname = fname.with_suffix('.json')

    if root is None:
        fpath = fname

        # walk up the tree until the 'derivatives' folder is found
        while fpath != fpath.parent:
            if fpath.name == 'derivatives':
                break
            fpath = fpath.parent

        # once derivatives is found, then
        # BIDS root is its parent
        root = fpath.parent

    # read the annotations.tsv file
    annot_tsv = pd.read_csv(tsv_fname, delimiter='\t')

    # read the annotations.json file
    with open(json_fname, 'r') as fin:
        annot_json = json.load(fin)

    # extract the sample freq
    raw_rel_fpath = annot_json['IntendedFor']
    entities = get_entities_from_fname(raw_rel_fpath)
    raw_fpath = BIDSPath(**entities,
                         datatype='ieeg',
                         extension=Path(raw_rel_fpath).suffix,
                         root=root)
    if not raw_fpath.fpath.exists():
        raise RuntimeError(f'No raw dataset found at {raw_fpath}. '
                           f'Please pass the correct "root" kwarg.')

    # read data
    raw = read_raw_bids(raw_fpath)
    sfreq = raw.info['sfreq']

    # create sample column
    annot_tsv['sample'] = annot_tsv['onset'] * sfreq
    return annot_tsv
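
A usage sketch reading back a written annotations derivative; the derivatives path below is hypothetical, and ``root`` is inferred from it:

# Hypothetical usage sketch: assumes the *annotations.tsv|json pair and
# the IntendedFor raw dataset exist under the BIDS root.
annot_df = read_annotations(
    "/data/bids_dataset/derivatives/sub-01/ses-01/ieeg/"
    "sub-01_ses-01_task-rest_annotations.tsv"
)
print(annot_df[["onset", "duration", "sample"]].head())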
Example #4
def _load_patient_dict(datadir, kind="ieeg", expname='sliced', verbose=True):
    """Load from datadir, sliced datasets as a dictionary <subject>: <list of datasets>."""
    # subjects whose datasets were saved with an average reference
    pats_to_avg = [
        "umf002",
        "umf004",
        "jh103",
        "ummc005",
        "ummc007",
        "ummc008",
        "ummc009",
        "pt8",
        "pt10",
        "pt11",
        "pt12",
        "pt16",
        "pt17",
        "la00",
        "la01",
        "la02",
        "la03",
        "la04",
        "la05",
        "la06",
        "la07",
        "la08",
        "la10",
        "la11",
        "la12",
        "la13",
        "la15",
        "la16",
        "la20",
        "la21",
        "la22",
        "la23",
        "la24",
        "la27",
        "la28",
        "la29",
        "la31",
        "nl01",
        "nl02",
        "nl03",
        "nl04",
        "nl05",
        "nl06",
        "nl07",
        "nl08",
        "nl09",
        "nl13",
        "nl14",
        "nl15",
        "nl16",
        "nl18",
        "nl21",
        "nl23",
        "nl24",
        "tvb1",
        "tvb2",
        "tvb5",
        "tvb7",
        "tvb8",
        "tvb11",
        "tvb12",
        "tvb14",
        "tvb17",
        "tvb18",
        "tvb19",
        "tvb23",
        "tvb27",
        "tvb28",
        "tvb29",
    ]

    patient_result_dict = collections.defaultdict(list)
    num_datasets = 0

    # get all files inside experiment
    trimmed_npz_fpaths = list(datadir.rglob("*npz"))

    # get a hashmap of all subjects
    subjects_map = {}
    for fpath in trimmed_npz_fpaths:
        params = get_entities_from_fname(
            os.path.basename(fpath).split(f"{expname}-")[1])
        subjects_map[params["subject"]] = 1

    if verbose:
        print(f'Got {len(subjects_map)} subjects')

    # loop through each subject
    subject_list = natsorted(subjects_map.keys())
    for subject in subject_list:
        if subject in pats_to_avg:
            reference = "average"
        else:
            reference = "monopolar"
        subjdir = Path(datadir / reference / kind)
        fpaths = list(subjdir.glob(f"*sub-{subject}_*npz"))

        # load in each subject's data
        for fpath in fpaths:
            # load in the data and append to the patient dictionary data struct
            with np.load(fpath, allow_pickle=True) as data_dict:
                data_dict = data_dict["data_dict"].item()
                patient_result_dict[subject].append(data_dict)

            num_datasets += 1

    if verbose:
        print("Got ", num_datasets, " datasets.")
        print("Got ", len(patient_result_dict), " patients")
        print(patient_result_dict.keys())

    return patient_result_dict
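
A usage sketch; the data directory and its reference/kind subfolder layout are assumptions for illustration, not given by the source:

# Hypothetical usage sketch: datadir is expected to contain
# <reference>/<kind>/*.npz files as the function's glob implies.
from pathlib import Path

datadir = Path("/data/experiments/sliced")
patient_dict = _load_patient_dict(datadir, kind="ieeg", expname="sliced")
for subject, datasets in patient_dict.items():
    print(subject, len(datasets))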
Example #5
def append_original_fname_to_scans(
    orig_fname: str,
    bids_root: Union[str, Path],
    bids_fname: str,
    overwrite: bool = True,
    verbose: bool = True,
):
    """Append the original filename to *scans.tsv in BIDS data structure.

    This will also create a sidecar *scans.json file alongside to document
    a description of the added column in the scans.tsv file.

    Parameters
    ----------
    orig_fname : str
        The original base filename that will be added into the
        'original_filename' column.
    bids_root : str | Path
        The root to the BIDS dataset.
    bids_fname : str | BIDSPath
        The BIDS filename of the BIDSified dataset. This should
        correspond to a specific 'filename' in the *scans.tsv file.
    overwrite : bool
        Whether or not to overwrite the row.
    verbose : bool
        Verbosity.
    """
    # create a BIDS path object noting that you only need
    # subject and session to define the *scans.tsv file
    entities = get_entities_from_fname(bids_fname)
    bids_path = BIDSPath(subject=entities["subject"],
                         session=entities["session"], root=bids_root)
    scans_fpath = bids_path.copy().update(suffix="scans", extension=".tsv")

    # make sure the path actually exists
    if not scans_fpath.fpath.exists():
        raise OSError(
            f"Scans.tsv file {scans_fpath} does not "
            f"exist. Please check the path to ensure it is "
            f"valid."
        )
    scans_tsv = _from_tsv(scans_fpath)

    # new filenames
    filenames = scans_tsv["filename"]
    ind = [i for i, fname in enumerate(filenames) if str(bids_fname) in fname]

    if len(ind) > 1:  # pragma: no cover
        msg = (
            "This should not happen. All scans should "
            "be uniquely identifiable from scans.tsv file. "
            "The current scans file has these filenames: "
            f"{filenames}."
        )
        logger.exception(msg)
        raise RuntimeError(msg)
    if len(ind) == 0:
        msg = (
            f"No filename, {bids_fname} found. "
            f"Scans.tsv has these files: {filenames}."
        )
        logger.exception(msg)
        raise RuntimeError(msg)

    # write scans.json
    scans_json_path = _replace_ext(scans_fpath, "json")
    scans_json = {
        "original_filename": "The original filename of the converted BIDs dataset. "
        "Provides possibly ictal/interictal, asleep/awake and "
        "clinical seizure grouping (i.e. SZ2PG, etc.)."
    }
    _write_json(scans_json_path, scans_json, overwrite=True, verbose=verbose)

    # write in original filename
    if "original_filename" not in scans_tsv.keys():
        scans_tsv["original_filename"] = ["n/a"] * len(filenames)
    if scans_tsv["original_filename"][ind[0]] == "n/a" or overwrite:
        scans_tsv["original_filename"][ind[0]] = orig_fname
    else:
        logger.warning(
            "Original filename has already been written here. "
            f"Skipping for {bids_fname}. It is written as "
            f"{scans_tsv['original_filename'][ind[0]]}."
        )
        return

    # write the scans out
    _to_tsv(scans_tsv, scans_fpath)
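
A usage sketch; the filenames are illustrative, and the scans.tsv row matching ``bids_fname`` must already exist in the BIDS dataset:

# Hypothetical usage sketch: records the pre-conversion filename in
# the 'original_filename' column of sub-01/ses-01's scans.tsv.
append_original_fname_to_scans(
    orig_fname="patient01_seizure2.edf",
    bids_root="/data/bids_dataset",
    bids_fname="sub-01_ses-01_task-rest_run-01_ieeg.vhdr",
)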
Example #6
def _handle_mni_trans(
    elec_coords,
    img_fpath: Union[str, Path],
    subjects_dir: Union[str, Path, None],
    revert_mni: bool,
    verbose: bool = True,
):
    """Handle FreeSurfer MRI <-> MNI voxels."""
    entities = get_entities_from_fname(img_fpath)
    subject = entities.get("subject")
    if subject is None:
        raise RuntimeError(
            f"Could not interpret the subject from "
            f"IntendedFor Image filepath {img_fpath}. "
            f"This file path is possibly not named "
            f"according to BIDS."
        )

    # Try to get Norig and Torig
    # (i.e. vox_ras_t and vox_mri_t, respectively)
    subj_fs_dir = Path(subjects_dir) / subject  # type: ignore
    if not op.exists(subj_fs_dir):
        subject = f"sub-{subject}"
        subj_fs_dir = Path(subjects_dir) / subject  # type: ignore

    path = op.join(subj_fs_dir, "mri", "orig.mgz")  # type: ignore
    if not op.isfile(path):
        path = op.join(subj_fs_dir, "mri", "T1.mgz")  # type: ignore
    if not op.isfile(path):
        raise IOError("mri not found: %s" % path)
    _, _, mri_ras_t, _, _ = _read_mri_info(path, units="mm")

    # get the intended affine transform from vox -> RAS
    img = nb.load(img_fpath)
    # voxel -> xyz
    intended_affine = img.affine

    # NOTE: a sanity check that ``intended_affine`` (vox -> RAS of the
    # intended image) matches the FreeSurfer transform ``mri_ras_t`` is
    # intentionally left disabled here.

    # read mri voxel of T1.mgz -> MNI tal xfm
    mri_mni_t = read_talxfm(subject=subject, subjects_dir=subjects_dir, verbose=verbose)

    # make sure these are in mm
    mri_to_mni_aff = mri_mni_t["trans"] * 1000.0

    # if reversing MNI, invert affine transform
    # else keep the same as read in
    if revert_mni:
        affine = np.linalg.inv(mri_to_mni_aff)
    else:
        affine = mri_to_mni_aff

    # apply the (possibly inverted) MRI <-> MNI affine to the coordinates
    elec_coords = apply_affine(affine, elec_coords)

    return elec_coords
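
A usage sketch; the coordinates, image path, and FreeSurfer directory are hypothetical. With ``revert_mni=False`` the coordinates are mapped toward MNI space; with ``revert_mni=True`` the inverse transform is applied:

# Hypothetical usage sketch: assumes a FreeSurfer reconstruction for
# sub-01 exists under subjects_dir, including mri/orig.mgz (or T1.mgz)
# and the talairach.xfm needed by read_talxfm.
import numpy as np

elec_coords = np.array([
    [10.0, -20.0, 30.0],
    [12.0, -22.0, 28.0],
])
mni_coords = _handle_mni_trans(
    elec_coords,
    img_fpath="sub-01_T1w.nii.gz",    # BIDS-named image, parsed for subject
    subjects_dir="/data/freesurfer",  # FreeSurfer SUBJECTS_DIR
    revert_mni=False,
)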