Example #1
def test_parse_file_entities():
    filename = '/sub-03_ses-07_run-4_desc-bleargh_sekret.nii.gz'

    # Test with entities taken from bids config
    target = {'subject': '03', 'session': '07', 'run': 4, 'suffix': 'sekret'}
    assert target == parse_file_entities(filename, config='bids')
    config = Config.load('bids')
    assert target == parse_file_entities(filename, config=[config])

    # Test with entities taken from bids and derivatives config
    target = {'subject': '03', 'session': '07', 'run': 4, 'suffix': 'sekret',
              'desc': 'bleargh'}
    assert target == parse_file_entities(filename)
    assert target == parse_file_entities(filename, config=['bids', 'derivatives'])

    # Test with list of Entities
    entities = [
        Entity('subject', "[/\\\\]sub-([a-zA-Z0-9]+)"),
        Entity('run', "[_/\\\\]run-0*(\\d+)", dtype=int),
        Entity('suffix', "[._]*([a-zA-Z0-9]*?)\\.[^/\\\\]+$"),
        Entity('desc', "desc-([a-zA-Z0-9]+)"),
    ]
    # Leave out session to distinguish from previous test target
    target = {'subject': '03', 'run': 4, 'suffix': 'sekret', 'desc': 'bleargh'}
    assert target == parse_file_entities(filename, entities=entities)
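For reference, a minimal standalone call of the function under test, assuming pybids is installed; whether an 'extension' entity is also returned depends on the pybids version (compare Example #3 below).

from bids.layout import parse_file_entities

filename = '/sub-03_ses-07_run-4_desc-bleargh_sekret.nii.gz'
print(parse_file_entities(filename))
# {'subject': '03', 'session': '07', 'run': 4, 'suffix': 'sekret', 'desc': 'bleargh', ...}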
Example #2
    def _run_interface(self, runtime):
        import json
        import os
        import os.path as op
        import pkg_resources
        from bids.layout import parse_file_entities
        from bids.layout.writing import build_path
        # Note: copy() and isdefined() come from module-level imports in the
        # original interface module.

        deriv_cfg = pkg_resources.resource_string(
            "nibetaseries", op.join("data", "derivatives.json"))
        deriv_patterns = json.loads(
            deriv_cfg.decode('utf-8'))['fmriprep_path_patterns']

        subject_entities = parse_file_entities(self.inputs.source_file)
        betaseries_entities = parse_file_entities(self.inputs.in_file)
        # hotfix: expose the 'desc' entity under the 'description' key
        # expected by the path patterns
        betaseries_entities['description'] = betaseries_entities['desc']

        subject_entities.update(betaseries_entities)

        out_file = build_path(subject_entities, deriv_patterns)

        if not out_file:
            raise ValueError("the provided entities do not make a valid file")

        base_directory = runtime.cwd
        if isdefined(self.inputs.base_directory):
            base_directory = os.path.abspath(self.inputs.base_directory)

        out_path = op.join(base_directory, self.out_path_base, out_file)

        os.makedirs(op.dirname(out_path), exist_ok=True)

        # copy the file to the output directory
        copy(self.inputs.in_file, out_path)

        self._results['out_file'] = out_path

        return runtime
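The deriv_patterns loaded above are passed straight to build_path. A minimal sketch of that call with a hand-written pattern (illustrative only, not the pattern shipped in derivatives.json):

from bids.layout.writing import build_path

entities = {'subject': '01', 'task': 'rest', 'desc': 'preproc',
            'suffix': 'bold', 'extension': 'nii.gz'}
pattern = ['sub-{subject}/func/sub-{subject}_task-{task}_desc-{desc}_{suffix}.{extension}']
print(build_path(entities, pattern))
# sub-01/func/sub-01_task-rest_desc-preproc_bold.nii.gz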
Example #3
def test_parse_file_entities():
    filename = '/sub-03_ses-07_run-4_desc-bleargh_sekret.nii.gz'

    # Test with entities taken from bids config
    target = {
        'subject': '03',
        'session': '07',
        'run': 4,
        'suffix': 'sekret',
        'extension': 'nii.gz'
    }
    assert target == parse_file_entities(filename, config='bids')
    config = Config.load('bids')
    assert target == parse_file_entities(filename, config=[config])

    # Test with entities taken from bids and derivatives config
    target = {
        'subject': '03',
        'session': '07',
        'run': 4,
        'suffix': 'sekret',
        'desc': 'bleargh',
        'extension': 'nii.gz'
    }
    assert target == parse_file_entities(filename)
    assert target == parse_file_entities(filename,
                                         config=['bids', 'derivatives'])

    # Test with list of Entities
    entities = [
        Entity('subject', "[/\\\\]sub-([a-zA-Z0-9]+)"),
        Entity('run', "[_/\\\\]run-0*(\\d+)", dtype=int),
        Entity('suffix', "[._]*([a-zA-Z0-9]*?)\\.[^/\\\\]+$"),
        Entity('desc', "desc-([a-zA-Z0-9]+)"),
    ]
    # Leave out session to distinguish from previous test target
    target = {'subject': '03', 'run': 4, 'suffix': 'sekret', 'desc': 'bleargh'}
    assert target == parse_file_entities(filename, entities=entities)
Example #4
 def _run_interface(self, runtime):
     bids_dir = self.inputs.bids_dir
     in_file = self.inputs.in_file
     if bids_dir is not None:
         try:
             in_file = str(Path(in_file).relative_to(bids_dir))
         except ValueError:
             pass
     params = parse_file_entities(in_file)
     self._results = {
         key: params.get(key, Undefined)
         for key in _BIDSInfoOutputSpec().get().keys()
     }
     return runtime
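Making in_file relative to the dataset root before parsing avoids picking up spurious entities from directories above the BIDS tree. A standalone sketch of that step (paths are hypothetical):

from pathlib import Path
from bids.layout import parse_file_entities

bids_dir = '/data/ds000001'                                         # hypothetical root
in_file = '/data/ds000001/sub-01/func/sub-01_task-rest_bold.nii.gz'
try:
    in_file = str(Path(in_file).relative_to(bids_dir))
except ValueError:
    pass                                   # file does not live under bids_dir
print(parse_file_entities(in_file))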
Example #5
    def __attrs_post_init__(self):
        """Validate metadata and additional checks."""
        self.entities = parse_file_entities(str(self.path))
        self.suffix = self.entities.pop("suffix")
        extension = self.entities.pop("extension").lstrip(".")

        # Automatically fill metadata in when possible
        # TODO: implement BIDS hierarchy of metadata
        if self.find_meta:
            sidecar = Path(str(self.path).replace(extension, "json"))
            if sidecar.is_file():
                _meta = self.metadata or {}
                self.metadata = loads(sidecar.read_text())
                self.metadata.update(_meta)

        # Attempt to infer a bids_root folder
        relative_path = relative_to_root(self.path)
        self.bids_root = (Path(str(self.path)[:-len(str(relative_path))])
                          if str(relative_path) != str(self.path) else None)

        if self.suffix in ("T1w", "T2w"):
            self.metadata["TotalReadoutTime"] = 0.0

        # Check for REQUIRED metadata (depends on suffix)
        if self.suffix in ("bold", "dwi", "epi", "sbref"):
            if "PhaseEncodingDirection" not in self.metadata:
                raise MetadataError(
                    f"Missing 'PhaseEncodingDirection' for <{self.path}>.")

            from .utils.epimanip import get_trt

            try:
                get_trt(self.metadata, in_file=self.path)
            except ValueError as exc:
                raise MetadataError(
                    f"Missing readout timing information for <{self.path}>."
                ) from exc

        elif self.suffix == "fieldmap" and "Units" not in self.metadata:
            raise MetadataError(f"Missing 'Units' for <{self.path}>.")

        elif self.suffix == "phasediff" and ("EchoTime1" not in self.metadata
                                             or "EchoTime2"
                                             not in self.metadata):
            raise MetadataError(
                f"Missing 'EchoTime1' and/or 'EchoTime2' for <{self.path}>.")

        elif self.suffix in ("phase1", "phase2") and ("EchoTime"
                                                      not in self.metadata):
            raise MetadataError(f"Missing 'EchoTime' for <{self.path}>.")
Example #6
def parse_file_entities_with_pipelines(
        filename,
        entities=None,
        config=None,
        include_unmatched=False) -> t.Dict[str, str]:
    """
    bids.extract_pipelines_from_path extended with ability to
    """
    et_dict = parse_file_entities(filename, entities, config,
                                  include_unmatched)
    pipeline = extract_pipeline_from_path(filename)
    if pipeline:
        et_dict['pipeline'] = pipeline
    return et_dict
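A hypothetical call to the helper defined above; the derivatives-style path and the exact behaviour of extract_pipeline_from_path are assumptions:

entities = parse_file_entities_with_pipelines(
    'derivatives/fmriprep/sub-01/func/sub-01_task-rest_desc-preproc_bold.nii.gz')
# Expected: the usual BIDS entities plus a 'pipeline' key (e.g. 'fmriprep')
# whenever extract_pipeline_from_path detects one.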
Example #7
def bind_epi_fmaps(epi_fmap_jsons, bold_jsons, t_bold):
    """
    SE-EPI fieldmap binding

    :param epi_fmap_jsons:
    :param bold_jsons:
    :param t_bold:
    :return: 
    """

    # Get list of SE-EPI directions
    dirs = []
    for fname in epi_fmap_jsons:
        ents = parse_file_entities(fname)
        if 'direction' in ents:
            dirs.append(ents['direction'])
    pedirs = np.unique(dirs)

    # Loop over phase encoding directions
    for pedir in pedirs:

        print('    Scanning for dir-{} SE-EPI fieldmaps'.format(pedir))

        # List of JSONS with current PE direction
        pedir_jsons = [fname for fname in epi_fmap_jsons if pedir in fname]

        # Create list for storing IntendedFor lists
        intended_for = [[] for ic in range(len(pedir_jsons))]

        # Get SE-EPI fmap acquisition times
        t_epi_fmap = np.array([acqtime_mins(fname) for fname in pedir_jsons])

        # Find the closest fieldmap in time to each BOLD series
        for ic, bold_json in enumerate(bold_jsons):

            # Time difference between all fieldmaps in this direction and current BOLD series
            dt = np.abs(t_bold[ic] - t_epi_fmap)

            # Index of closest fieldmap to this BOLD series
            idx = np.argmin(dt)

            # Add this BOLD series image name to list for this fmap
            intended_for[idx].append(bids_intended_name(bold_json))

        # Replace IntendedFor field in fmap JSON file
        for fc, json_fname in enumerate(pedir_jsons):
            info = read_json(json_fname)
            info['IntendedFor'] = intended_for[fc]
            write_json(json_fname, info, overwrite=True)
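read_json and write_json are project helpers; the net effect on each fieldmap sidecar is to rewrite its IntendedFor list. A minimal stdlib sketch of that update (filenames are hypothetical):

import json

json_fname = 'sub-01/ses-01/fmap/sub-01_ses-01_dir-AP_epi.json'    # hypothetical
with open(json_fname) as fobj:
    info = json.load(fobj)
info['IntendedFor'] = ['ses-01/func/sub-01_ses-01_task-rest_bold.nii.gz']
with open(json_fname, 'w') as fobj:
    json.dump(info, fobj, indent=4)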
Example #8
 def _run_interface(self, runtime):
     if exists(self.inputs.fmri_prep):
         img = load(self.inputs.fmri_prep)
         smoothed = smooth_img(img, fwhm=6)
         entities = parse_file_entities(self.inputs.fmri_prep)
         output_path = join(
             self.inputs.output_directory,
             build_path(entities, self.smooth_file_pattern, False))
         assert not exists(
             output_path), f"Smoothing is run twice at {output_path}"
         save(smoothed, output_path)
         self._results['fmri_smoothed'] = output_path
     elif self.inputs.is_file_mandatory:
         raise FileNotFoundError(
             f"Mandatory fMRI image file does not exist (input arg {self.inputs.fmri_prep})"
         )
     return runtime
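A minimal sketch of the smooth-then-rename pattern above, assuming nilearn and pybids are installed; the path pattern is illustrative, not the interface's smooth_file_pattern:

from os.path import join
from nilearn.image import smooth_img
from bids.layout import parse_file_entities
from bids.layout.writing import build_path

fmri_prep = 'sub-01/func/sub-01_task-rest_desc-preproc_bold.nii.gz'  # hypothetical
entities = parse_file_entities(fmri_prep)
entities['extension'] = '.nii.gz'      # normalize; older pybids omits the leading dot
pattern = ['sub-{subject}_task-{task}_desc-smoothed_{suffix}{extension}']
output_path = join('output_dir', build_path(entities, pattern, strict=False))
smooth_img(fmri_prep, fwhm=6).to_filename(output_path)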
Example #9
    def _run_interface(self, runtime):
        # Ready the output folder
        base_directory = runtime.cwd
        if isdefined(self.inputs.base_directory):
            base_directory = self.inputs.base_directory
        base_directory = Path(base_directory).absolute()
        out_path = base_directory / self.out_path_base
        out_path.mkdir(exist_ok=True, parents=True)

        # Ensure we have a list
        in_file = listify(self.inputs.in_file)

        # Read in the dictionary of metadata
        if isdefined(self.inputs.meta_dict):
            meta = self.inputs.meta_dict
            # inputs passed in construction take priority
            meta.update(self._metadata)
            self._metadata = meta

        # Initialize entities with those from the source file.
        in_entities = [
            parse_file_entities(str(relative_to_root(source_file)))
            for source_file in self.inputs.source_file
        ]
        out_entities = {
            k: v
            for k, v in in_entities[0].items() if all(
                ent.get(k) == v for ent in in_entities[1:])
        }
        for drop_entity in listify(self.inputs.dismiss_entities or []):
            out_entities.pop(drop_entity, None)

        # Override extension with that of the input file(s)
        out_entities["extension"] = [
            # _splitext does not accept .surf.gii (for instance)
            "".join(Path(orig_file).suffixes).lstrip(".")
            for orig_file in in_file
        ]

        compress = listify(self.inputs.compress) or [None]
        if len(compress) == 1:
            compress = compress * len(in_file)
        for i, ext in enumerate(out_entities["extension"]):
            if compress[i] is not None:
                ext = regz.sub("", ext)
                out_entities["extension"][
                    i] = f"{ext}.gz" if compress[i] else ext

        # Override entities with those set as inputs
        for key in self._allowed_entities:
            value = getattr(self.inputs, key)
            if value is not None and isdefined(value):
                out_entities[key] = value

        # Clean up native resolution with space
        if out_entities.get("resolution") == "native" and out_entities.get(
                "space"):
            out_entities.pop("resolution", None)

        if len(set(out_entities["extension"])) == 1:
            out_entities["extension"] = out_entities["extension"][0]

        # Insert custom (non-BIDS) entities from allowed_entities.
        custom_entities = set(out_entities.keys()) - set(BIDS_DERIV_ENTITIES)
        patterns = BIDS_DERIV_PATTERNS
        if custom_entities:
            # Example: f"{key}-{{{key}}}" -> "task-{task}"
            custom_pat = "_".join(f"{key}-{{{key}}}"
                                  for key in sorted(custom_entities))
            patterns = [
                pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix")))
                for pat in patterns
            ]

        # Prepare SimpleInterface outputs object
        self._results["out_file"] = []
        self._results["compression"] = []
        self._results["fixed_hdr"] = [False] * len(in_file)

        dest_files = build_path(out_entities, path_patterns=patterns)
        if not dest_files:
            raise ValueError(
                f"Could not build path with entities {out_entities}.")

        # Make sure the interpolated values are embedded in a list, and check
        dest_files = listify(dest_files)
        if len(in_file) != len(dest_files):
            raise ValueError(f"Input files ({len(in_file)}) not matched "
                             f"by interpolated patterns ({len(dest_files)}).")

        for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)):
            out_file = out_path / dest_file
            out_file.parent.mkdir(exist_ok=True, parents=True)
            self._results["out_file"].append(str(out_file))
            self._results["compression"].append(
                _copy_any(orig_file, str(out_file)))

            is_nifti = out_file.name.endswith(
                (".nii", ".nii.gz")) and not out_file.name.endswith(
                    (".dtseries.nii", ".dtseries.nii.gz"))
            data_dtype = self.inputs.data_dtype or DEFAULT_DTYPES[
                self.inputs.suffix]
            if is_nifti and any((self.inputs.check_hdr, data_dtype)):
                # Do not use mmap; if we need to access the data at all, it will be to
                # rewrite, risking a BusError
                nii = nb.load(out_file, mmap=False)

                if self.inputs.check_hdr:
                    hdr = nii.header
                    curr_units = tuple([
                        None if u == "unknown" else u
                        for u in hdr.get_xyzt_units()
                    ])
                    curr_codes = (int(hdr["qform_code"]),
                                  int(hdr["sform_code"]))

                    # Default to mm, use sec if data type is bold
                    units = (
                        curr_units[0] or "mm",
                        "sec" if out_entities["suffix"] == "bold" else None,
                    )
                    xcodes = (1, 1)  # Derivative in its original scanner space
                    if self.inputs.space:
                        xcodes = ((4, 4) if self.inputs.space
                                  in STANDARD_SPACES else (2, 2))

                    if curr_codes != xcodes or curr_units != units:
                        self._results["fixed_hdr"][i] = True
                        hdr.set_qform(nii.affine, xcodes[0])
                        hdr.set_sform(nii.affine, xcodes[1])
                        hdr.set_xyzt_units(*units)

                        # Rewrite file with new header
                        overwrite_header(nii, out_file)

                if data_dtype == "source":  # match source dtype
                    try:
                        data_dtype = nb.load(
                            self.inputs.source_file[0]).get_data_dtype()
                    except Exception:
                        LOGGER.warning(
                            f"Could not get data type of file {self.inputs.source_file[0]}"
                        )
                        data_dtype = None

                if data_dtype:
                    if self.inputs.check_hdr:
                        # load updated NIfTI
                        nii = nb.load(out_file, mmap=False)
                    data_dtype = np.dtype(data_dtype)
                    orig_dtype = nii.get_data_dtype()
                    if orig_dtype != data_dtype:
                        LOGGER.warning(
                            f"Changing {out_file} dtype from {orig_dtype} to {data_dtype}"
                        )
                        # coerce dataobj to new data dtype
                        if np.issubdtype(data_dtype, np.integer):
                            new_data = np.rint(nii.dataobj).astype(data_dtype)
                        else:
                            new_data = np.asanyarray(nii.dataobj,
                                                     dtype=data_dtype)
                        # and set header to match
                        nii.set_data_dtype(data_dtype)
                        nii = nii.__class__(new_data, nii.affine, nii.header)
                        nii.to_filename(out_file)

        if len(self._results["out_file"]) == 1:
            meta_fields = self.inputs.copyable_trait_names()
            self._metadata.update({
                k: getattr(self.inputs, k)
                for k in meta_fields if k not in self._static_traits
            })
            if self._metadata:
                out_file = Path(self._results["out_file"][0])
                # 1.3.x hack
                # For dtseries, we have been generating weird non-BIDS JSON files.
                # We can safely keep producing them to avoid breaking derivatives, but
                # only the existing keys should keep going into them.
                if out_file.name.endswith(".dtseries.nii"):
                    legacy_metadata = {}
                    for key in ("grayordinates", "space", "surface",
                                "surface_density", "volume"):
                        if key in self._metadata:
                            legacy_metadata[key] = self._metadata.pop(key)
                    if legacy_metadata:
                        sidecar = out_file.parent / f"{_splitext(str(out_file))[0]}.json"
                        sidecar.write_text(
                            dumps(legacy_metadata, sort_keys=True, indent=2))
                # The future: the extension is the first . and everything after
                sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json"
                sidecar.write_text(
                    dumps(self._metadata, sort_keys=True, indent=2))
                self._results["out_meta"] = str(sidecar)
        return runtime
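One non-obvious step above is how custom (non-BIDS) entities get spliced into the stock derivative path patterns right before the {suffix} placeholder. A self-contained sketch of that string manipulation (the pattern here is shortened; the real BIDS_DERIV_PATTERNS are longer):

pattern = "sub-{subject}/{datatype}/sub-{subject}_{suffix}{extension}"
custom_entities = {"echo", "task"}
custom_pat = "_".join(f"{key}-{{{key}}}" for key in sorted(custom_entities))
patched = pattern.replace("_{suffix", "_".join(("", custom_pat, "{suffix")))
print(patched)
# sub-{subject}/{datatype}/sub-{subject}_echo-{echo}_task-{task}_{suffix}{extension}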
Example #10
def demo_rsHRF(input_file,
               mask_file,
               output_dir,
               para,
               p_jobs,
               file_type=".nii",
               mode="bids",
               wiener=False,
               temporal_mask=[]):
    # book-keeping w.r.t parameter values
    if 'localK' not in para or para['localK'] is None:
        if para['TR'] <= 2:
            para['localK'] = 1
        else:
            para['localK'] = 2
    # creating the output-directory if not already present
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # for four-dimensional input
    if mode != 'time-series':
        # identical handling for 'bids', 'bids w/ atlas' and other modes
        name = input_file.split('/')[-1].split('.')[0]
        v1 = spm_dep.spm.spm_vol(input_file)
        if mask_file is not None:
            # identical handling for 'bids' and other modes
            mask_name = mask_file.split('/')[-1].split('.')[0]
            v = spm_dep.spm.spm_vol(mask_file)
            if file_type == ".nii" or file_type == ".nii.gz":
                brain = spm_dep.spm.spm_read_vols(v)
            else:
                brain = v.agg_data().flatten(order='F')
            if  ((file_type == ".nii" or file_type == ".nii.gz") and \
                    v1.header.get_data_shape()[:-1] != v.header.get_data_shape()) or \
                ((file_type == ".gii" or file_type == ".gii.gz") and \
                    v1.agg_data().shape[0]!= v.agg_data().shape[0]):
                raise ValueError('Inconsistency in input-mask dimensions' +
                                 '\n\tinput_file == ' + name + file_type +
                                 '\n\tmask_file == ' + mask_name + file_type)
            else:
                if file_type == ".nii" or file_type == ".nii.gz":
                    data = v1.get_data()
                else:
                    data = v1.agg_data()
        else:
            print('No atlas provided! Generating mask file...')
            if file_type == ".nii" or file_type == ".nii.gz":
                data = v1.get_data()
                brain = np.nanvar(data.reshape(-1, data.shape[3]), -1, ddof=0)
            else:
                data = v1.agg_data()
                brain = np.nanvar(data, -1, ddof=0)
            print('Done')
        voxel_ind = np.where(brain > 0)[0]
        mask_shape = data.shape[:-1]
        nobs = data.shape[-1]
        data1 = np.reshape(data, (-1, nobs), order='F').T
        bold_sig = stats.zscore(data1[:, voxel_ind], ddof=1)
    # for time-series input
    else:
        name = input_file.split('/')[-1].split('.')[0]
        data1 = (np.loadtxt(input_file, delimiter=","))
        if data1.ndim == 1:
            data1 = np.expand_dims(data1, axis=1)
        nobs = data1.shape[0]
        bold_sig = stats.zscore(data1, ddof=1)
    if len(temporal_mask) > 0 and len(temporal_mask) != nobs:
        raise ValueError('Inconsistency in temporal_mask dimensions.\n' +
                         'Size of mask: ' + str(len(temporal_mask)) + '\n' +
                         'Size of time-series: ' + str(nobs))
    bold_sig = np.nan_to_num(bold_sig)
    bold_sig_deconv = processing. \
                      rest_filter. \
                      rest_IdealFilter(bold_sig, para['TR'], para['passband_deconvolve'])
    bold_sig = processing. \
               rest_filter. \
               rest_IdealFilter(bold_sig, para['TR'], para['passband'])
    data_deconv = np.zeros(bold_sig.shape)
    event_number = np.zeros((1, bold_sig.shape[1]))
    print('Retrieving HRF ...')
    # Estimate HRF for the fourier / hanning / gamma / canonical basis functions
    if not (para['estimation'] == 'sFIR' or para['estimation'] == 'FIR'):
        bf = basis_functions.basis_functions.get_basis_function(
            bold_sig.shape, para)
        beta_hrf, event_bold = utils.hrf_estimation.compute_hrf(bold_sig,
                                                                para,
                                                                temporal_mask,
                                                                p_jobs,
                                                                bf=bf)
        hrfa = np.dot(bf, beta_hrf[np.arange(0, bf.shape[1]), :])
    # Estimate HRF for FIR and sFIR
    else:
        para['T'] = 1
        beta_hrf, event_bold = utils.hrf_estimation.compute_hrf(
            bold_sig, para, temporal_mask, p_jobs)
        hrfa = beta_hrf[:-1, :]
    nvar = hrfa.shape[1]
    PARA = np.zeros((3, nvar))
    for voxel_id in range(nvar):
        hrf1 = hrfa[:, voxel_id]
        PARA[:, voxel_id] = \
            parameters.wgr_get_parameters(hrf1, para['TR'] / para['T'])
    print('Done')
    print('Deconvolving HRF ...')
    if para['T'] > 1:
        hrfa_TR = signal.resample_poly(hrfa, 1, para['T'])
    else:
        hrfa_TR = hrfa
    for voxel_id in range(nvar):
        hrf = hrfa_TR[:, voxel_id]
        if not wiener:
            H = np.fft.fft(np.append(hrf, np.zeros(
                (nobs - max(hrf.shape), 1))),
                           axis=0)
            M = np.fft.fft(bold_sig_deconv[:, voxel_id])
            data_deconv[:, voxel_id] = \
                np.fft.ifft(H.conj() * M / (H * H.conj() + .1*np.mean((H * H.conj()))))
        else:
            data_deconv[:,
                        voxel_id] = iterative_wiener_deconv.rsHRF_iterative_wiener_deconv(
                            bold_sig_deconv[:, voxel_id], hrf)
        event_number[:, voxel_id] = np.amax(event_bold[voxel_id].shape)
    print('Done')
    print('Saving Output ...')
    # setting the output-path
    if mode == 'bids' or mode == 'bids w/ atlas':
        layout_output = BIDSLayout(output_dir)
        entities = parse_file_entities(input_file)
        sub_save_dir = layout_output.build_path(entities).rsplit('/', 1)[0]
    else:
        sub_save_dir = output_dir
    if not os.path.isdir(sub_save_dir):
        os.makedirs(sub_save_dir, exist_ok=True)
    dic = {'para': para, 'hrfa': hrfa, 'event_bold': event_bold, 'PARA': PARA}
    ext = '_hrf.mat'
    if mode == "time-series":
        dic["event_number"] = event_number
        dic["data_deconv"] = data_deconv
        ext = '_hrf_deconv.mat'
    name = name.rsplit('_bold', 1)[0]
    sio.savemat(os.path.join(sub_save_dir, name + ext), dic)
    HRF_para_str = ['height', 'T2P', 'FWHM']
    if mode != "time-series":
        mask_data = np.zeros(mask_shape).flatten(order='F')
        for i in range(3):
            fname = os.path.join(sub_save_dir, name + '_' + HRF_para_str[i])
            mask_data[voxel_ind] = PARA[i, :]
            mask_data = mask_data.reshape(mask_shape, order='F')
            spm_dep.spm.spm_write_vol(v1, mask_data, fname, file_type)
            mask_data = mask_data.flatten(order='F')
        fname = os.path.join(sub_save_dir, name + '_eventnumber')
        mask_data[voxel_ind] = event_number
        mask_data = mask_data.reshape(mask_shape, order='F')
        spm_dep.spm.spm_write_vol(v1, mask_data, fname, file_type)
        mask_data = np.zeros(data.shape)
        dat3 = np.zeros(data.shape[:-1]).flatten(order='F')
        for i in range(nobs):
            fname = os.path.join(sub_save_dir, name + '_deconv')
            dat3[voxel_ind] = data_deconv[i, :]
            dat3 = dat3.reshape(data.shape[:-1], order='F')
            if file_type == ".nii" or file_type == ".nii.gz":
                mask_data[:, :, :, i] = dat3
            else:
                mask_data[:, i] = dat3
            dat3 = dat3.flatten(order='F')
        spm_dep.spm.spm_write_vol(v1, mask_data, fname, file_type)
    pos = 0
    while pos < hrfa_TR.shape[1]:
        if np.any(hrfa_TR[:, pos]):
            break
        pos += 1
    event_plot = lil_matrix((1, nobs))
    if event_bold.size:
        event_plot[:, event_bold[pos]] = 1
    else:
        print("No Events Detected!")
        return 0
    event_plot = np.ravel(event_plot.toarray())
    plt.figure()
    plt.plot(para['TR'] * np.arange(1,
                                    np.amax(hrfa_TR[:, pos].shape) + 1),
             hrfa_TR[:, pos],
             linewidth=1)
    plt.xlabel('time (s)')
    plt.savefig(os.path.join(sub_save_dir, name + '_hrf_plot.png'))
    plt.figure()
    plt.plot(para['TR'] * np.arange(1, nobs + 1),
             np.nan_to_num(stats.zscore(bold_sig[:, pos], ddof=1)),
             linewidth=1)
    plt.plot(para['TR'] * np.arange(1, nobs + 1),
             np.nan_to_num(stats.zscore(data_deconv[:, pos], ddof=1)),
             color='r',
             linewidth=1)
    markerline, stemlines, baseline = \
        plt.stem(para['TR'] * np.arange(1, nobs + 1), event_plot)
    plt.setp(baseline, 'color', 'k', 'markersize', 1)
    plt.setp(stemlines, 'color', 'k')
    plt.setp(markerline, 'color', 'k', 'markersize', 3, 'marker', 'd')
    plt.legend(['BOLD', 'Deconvolved BOLD', 'Events'], loc='best')
    plt.xlabel('time (s)')
    plt.savefig(os.path.join(sub_save_dir, name + '_deconvolution_plot.png'))
    print('Done')
    return 0
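For the BIDS modes, the output directory is derived by rebuilding a BIDS-style path inside output_dir from the parsed entities. A minimal sketch, assuming output_dir already contains a valid BIDS skeleton (BIDSLayout normally requires a dataset_description.json):

import os
from bids import BIDSLayout
from bids.layout import parse_file_entities

output_dir = '/data/outputs'                                        # hypothetical
input_file = '/data/bids/sub-01/func/sub-01_task-rest_bold.nii.gz'  # hypothetical
layout_output = BIDSLayout(output_dir)
entities = parse_file_entities(input_file)
sub_save_dir = os.path.dirname(layout_output.build_path(entities))
os.makedirs(sub_save_dir, exist_ok=True)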
Example #11
def bind_fmaps(bids_subj_dir):
    """
    Bind nearest fieldmap in time to each functional series for this subject

    :param bids_subj_dir: string
        BIDS subject directory (sub-*)
    """

    print('  Subject {}'.format(os.path.basename(bids_subj_dir)))

    sess_dirs = glob(os.path.join(bids_subj_dir, 'ses-*'))

    # Session loop
    for sess_dir in sess_dirs:

        print('    Session {}'.format(os.path.basename(sess_dir)))

        # Get list of BOLD fMRI JSON sidecars
        bold_jsons = glob(os.path.join(sess_dir, 'func', '*task-*_bold.json'))

        # Get acquisition times for all BOLD and fieldmap series
        t_bold = np.array([acqtime_mins(fname) for fname in bold_jsons])

        # Get list of all SE-EPI fieldmaps
        epi_fmap_jsons = glob(
            os.path.join(sess_dir, 'fmap', '*_dir-*_epi.json'))

        if not epi_fmap_jsons:
            print('* No SE-EPI fieldmaps found - skipping this subject/series')
            break

        # Get list of SE-EPI directions
        dirs = []
        for fname in epi_fmap_jsons:
            ents = parse_file_entities(fname)
            if 'direction' in ents:
                dirs.append(ents['direction'])
        fmap_dirs = np.unique(dirs)

        # Loop over directions
        for fmap_dir in fmap_dirs:

            print('    Scanning for dir-{} SE-EPI fieldmaps'.format(fmap_dir))

            # New SE-EPI fmap list for this direction
            fmap_dir_jsons = glob(
                os.path.join(sess_dir, 'fmap',
                             '*_dir-{}*_epi.json'.format(fmap_dir)))

            # Create list for storing IntendedFor lists
            intended_for = [[] for ic in range(len(fmap_dir_jsons))]

            # Get SE-EPI fmap acquisition times
            t_epi_fmap = np.array(
                [acqtime_mins(fname) for fname in fmap_dir_jsons])

            # Find the closest fieldmap in time to each BOLD series
            for ic, bold_json in enumerate(bold_jsons):

                # Time difference between all fieldmaps in this direction and current BOLD series
                dt = np.abs(t_bold[ic] - t_epi_fmap)

                # Index of closest fieldmap to this BOLD series
                idx = np.argmin(dt)

                # Add this BOLD series image name to list for this fmap
                intended_for[idx].append(bids_intended_name(bold_json))

            # Replace IntendedFor field in fmap JSON file
            for fc, json_fname in enumerate(fmap_dir_jsons):
                info = read_json(json_fname)
                info['IntendedFor'] = intended_for[fc]
                write_json(json_fname, info, overwrite=True)