def __init__(self, *, mask=None, resample=False, memory_limit=None, **kwargs):
    """Initialize the transformer.

    Parameters
    ----------
    mask : img_like or masker or None, optional
        Mask to use; passed through ``get_masker`` when provided. Default is None.
    resample : :obj:`bool`, optional
        Whether to resample images. Default is False.
    memory_limit : optional
        Memory limit setting, stored as-is. Default is None.
    **kwargs
        Keyword arguments prefixed with ``resample__`` are passed to the
        resampling call; any others are ignored with a warning.
    """
    if mask is not None:
        mask = get_masker(mask)
    self.masker = mask

    self.resample = resample
    self.memory_limit = memory_limit

    # defaults for resampling images (nilearn's defaults do not work well)
    self._resample_kwargs = {"clip": True, "interpolation": "linear"}

    # Identify any kwargs destined for resampling
    resample_kwargs = {k: v for k, v in kwargs.items() if k.startswith("resample__")}

    # Flag any extraneous kwargs. Filter by key rather than set arithmetic on
    # items(), which raises TypeError for unhashable values (e.g., lists).
    other_kwargs = {k: v for k, v in kwargs.items() if k not in resample_kwargs}
    if other_kwargs:
        # Logger.warn is a deprecated alias; warning() is the supported method.
        LGR.warning(f"Unused keyword arguments found: {tuple(other_kwargs.items())}")

    # Strip the "resample__" prefix and update the default resampling parameters
    resample_kwargs = {k.split("resample__")[1]: v for k, v in resample_kwargs.items()}
    self._resample_kwargs.update(resample_kwargs)
def __init__(self, kernel_transformer, *, mask=None, **kwargs):
    """Initialize the estimator with a kernel transformer and optional mask.

    Parameters
    ----------
    kernel_transformer : :obj:`~nimare.meta.kernel.KernelTransformer` class or instance
        Kernel transformer (class or instance) validated via ``_check_type``.
    mask : img_like or masker or None, optional
        Mask to use; passed through ``get_masker`` when provided. Default is None.
    **kwargs
        Keyword arguments prefixed with ``kernel__`` are passed to the kernel
        transformer's constructor; any others are ignored with a warning.
    """
    if mask is not None:
        mask = get_masker(mask)
    self.masker = mask

    # Identify any kwargs destined for the kernel transformer
    kernel_args = {k: v for k, v in kwargs.items() if k.startswith("kernel__")}

    # Flag any extraneous kwargs. Filter by key rather than set arithmetic on
    # items(), which raises TypeError for unhashable values (e.g., lists).
    other_kwargs = {k: v for k, v in kwargs.items() if k not in kernel_args}
    if other_kwargs:
        # Logger.warn is a deprecated alias; warning() is the supported method.
        LGR.warning(f"Unused keyword arguments found: {tuple(other_kwargs.items())}")

    # Strip the "kernel__" prefix and validate/instantiate the kernel transformer
    kernel_args = {k.split("kernel__")[1]: v for k, v in kernel_args.items()}
    kernel_transformer = _check_type(kernel_transformer, KernelTransformer, **kernel_args)
    self.kernel_transformer = kernel_transformer
def masker(self, mask):
    """Set the masker, warning if its space disagrees with the current one.

    The incoming value is normalized through ``get_masker``. If a masker is
    already set and its affine differs from the new one, a warning is logged
    but the assignment still proceeds.
    """
    new_masker = get_masker(mask)
    if hasattr(self, "masker"):
        old_affine = self.masker.mask_img.affine
        new_affine = new_masker.mask_img.affine
        if not np.array_equal(old_affine, new_affine):
            # This message does not have an associated effect,
            # since matrix indices are calculated as necessary
            LGR.warning(
                "New masker does not match old masker. Space is assumed to be the same."
            )
    self.__masker = new_masker
def test_kernel_peaks(testdata_cbma, tmp_path_factory, kern, res, param, return_type, kwargs):
    """Peak/COMs of kernel maps should match the foci fed in (assuming focus isn't masked out).

    Notes
    -----
    Remember that dataframe --> dataset won't work.
    Only testing dataset --> dataset with ALEKernel because it takes a while.
    Test on multiple template resolutions.
    """
    work_dir = tmp_path_factory.mktemp("test_kernel_peaks")
    testdata_cbma.update_path(work_dir)

    study_id = "pain_03.nidm-1"

    # Ignoring resolution until we support 0.8.1
    # template = load_mni152_brain_mask(resolution=res)
    template = load_mni152_brain_mask()
    masker = get_masker(template)

    # Convert the study's focus coordinates from mm to voxel indices.
    coords = testdata_cbma.coordinates
    focus_xyz = coords.loc[coords["id"] == study_id, ["x", "y", "z"]]
    expected_ijk = mm2vox(focus_xyz, masker.mask_img.affine)
    expected_ijk = np.squeeze(expected_ijk.astype(int))

    if param == "dataframe":
        input_ = testdata_cbma.coordinates.copy()
    elif param == "dataset":
        input_ = testdata_cbma.copy()

    kern_instance = kern(**kwargs)
    output = kern_instance.transform(input_, masker, return_type=return_type)

    # Extract the voxelwise kernel map for the study of interest.
    if return_type == "image":
        kern_data = output[0].get_fdata()
    elif return_type == "array":
        kern_data = np.squeeze(masker.inverse_transform(output[:1, :]).get_fdata())
    else:
        img_file = output.images.loc[
            output.images["id"] == study_id, kern_instance.image_type
        ].values[0]
        kern_data = nib.load(img_file).get_fdata()

    if isinstance(kern_instance, kernel.ALEKernel):
        # ALE maps have a single maximum at the focus.
        peak = np.array(np.where(kern_data == np.max(kern_data))).T
    elif isinstance(kern_instance, (kernel.MKDAKernel, kernel.KDAKernel)):
        # Sphere kernels are flat, so use the center of mass instead.
        peak = np.array(center_of_mass(kern_data)).astype(int).T
    else:
        raise Exception(f"A {type(kern_instance)}? Why?")

    assert np.array_equal(expected_ijk, np.squeeze(peak))
def __init__(self, *args, **kwargs):
    """Initialize masker, resampling flags, and resampling parameters from kwargs.

    Recognized keys: ``mask`` (passed through ``get_masker`` if not None),
    ``resample`` (default False), ``memory_limit`` (default None), and any
    ``resample__*`` keys, which override the resampling defaults.
    """
    mask = kwargs.get("mask")
    self.masker = get_masker(mask) if mask is not None else mask

    self.resample = kwargs.get("resample", False)
    self.memory_limit = kwargs.get("memory_limit", None)

    # defaults for resampling images (nilearn's defaults do not work well)
    resample_params = {"clip": True, "interpolation": "linear"}
    overrides = {}
    for key, value in kwargs.items():
        if key.startswith("resample__"):
            overrides[key.split("resample__")[1]] = value
    resample_params.update(overrides)
    self._resample_kwargs = resample_params
def __init__(
    self,
    masker,
    kernel_transformer=MKDAKernel,
    feature_group=None,
    features=None,
    **kwargs,
):
    """Initialize the decoder with a masker and a kernel transformer.

    ``kernel__``-prefixed keyword arguments are forwarded to the kernel
    transformer's constructor via ``_check_type``.
    """
    self.masker = get_masker(masker)

    # Collect kernel-specific kwargs, stripping the "kernel__" prefix.
    kernel_args = {}
    for key, value in kwargs.items():
        if key.startswith("kernel__"):
            kernel_args[key.split("kernel__")[1]] = value

    # Get kernel transformer (validated/instantiated as needed)
    self.kernel_transformer = _check_type(kernel_transformer, KernelTransformer, **kernel_args)
    self.feature_group = feature_group
    self.features = features
    self.frequency_threshold = 0
def __init__(self, df, img):
    """Store the coordinates DataFrame and build a masker from the image."""
    self.masker = get_masker(img)
    self.coordinates = df
def transform_images(images_df, target, masker, metadata_df=None, out_dir=None, overwrite=False):
    """Generate images of a given type from other image types and write out to files.

    .. versionchanged:: 0.0.9

        * [ENH] Add overwrite option to transform_images

    .. versionadded:: 0.0.4

    Parameters
    ----------
    images_df : :class:`pandas.DataFrame`
        DataFrame with paths to images for studies in Dataset.
    target : {'z', 'p', 'beta', 'varcope'}
        Target data type.
    masker : :class:`~nilearn.input_data.NiftiMasker` or similar
        Masker used to define orientation and resolution of images.
        Specific voxels defined in mask will not be used, and a new masker
        with _all_ voxels in acquisition matrix selected will be created.
    metadata_df : :class:`pandas.DataFrame` or :obj:`None`, optional
        DataFrame with metadata. Rows in this DataFrame must match those in
        ``images_df``, including the ``'id'`` column.
    out_dir : :obj:`str` or :obj:`None`, optional
        Path to output directory. If None, use folder containing first image
        for each study in ``images_df``.
    overwrite : :obj:`bool`, optional
        Whether to overwrite existing files or not. Default is False.

    Returns
    -------
    images_df : :class:`pandas.DataFrame`
        DataFrame with paths to new images added.
    """
    # Work on a copy so the caller's DataFrame is not mutated.
    images_df = images_df.copy()

    valid_targets = {"z", "p", "beta", "varcope"}
    if target not in valid_targets:
        raise ValueError(
            f"Target type {target} not supported. Must be one of: {', '.join(valid_targets)}"
        )

    mask_img = masker.mask_img
    # Build a masker that selects *every* voxel in the acquisition matrix
    # (see the masker parameter note in the docstring above).
    new_mask = np.ones(mask_img.shape, int)
    new_mask = nib.Nifti1Image(new_mask, mask_img.affine, header=mask_img.header)
    new_masker = get_masker(new_mask)

    # Encode voxel resolution in the output filename (e.g., "2.0x2.0x2.0").
    res = masker.mask_img.header.get_zooms()
    res = "x".join([str(r) for r in res])

    if target not in images_df.columns:
        # No column for this target yet: generate for every study.
        target_ids = images_df["id"].values
    else:
        # Only generate for studies missing an image of the target type.
        target_ids = images_df.loc[images_df[target].isnull(), "id"]

    for id_ in target_ids:
        row = images_df.loc[images_df["id"] == id_].iloc[0]

        # Determine output filename, if file can be generated
        if out_dir is None:
            # Default to the directory holding the study's first existing image file.
            options = [r for r in row.values if isinstance(r, str) and op.isfile(r)]
            id_out_dir = op.dirname(options[0])
        else:
            id_out_dir = out_dir
        new_file = op.join(id_out_dir, f"{id_}_{res}_{target}.nii.gz")

        # Grab columns with actual values
        available_data = row[~row.isnull()].to_dict()
        if metadata_df is not None:
            # Merge in study metadata, without overriding image-derived values.
            metadata_row = metadata_df.loc[metadata_df["id"] == id_].iloc[0]
            metadata = metadata_row[~metadata_row.isnull()].to_dict()
            for k, v in metadata.items():
                if k not in available_data.keys():
                    available_data[k] = v

        # Get converted data
        img = resolve_transforms(target, available_data, new_masker)
        if img is not None:
            if overwrite or not op.isfile(new_file):
                img.to_filename(new_file)
            else:
                LGR.debug("Image already exists. Not overwriting.")

            # Record the path even when we skipped writing (the file exists).
            images_df.loc[images_df["id"] == id_, target] = new_file
        else:
            # Transform could not be resolved from the available data.
            images_df.loc[images_df["id"] == id_, target] = None

    return images_df
def __init__(self, estimator, mask, maps=None):
    """Deep-copy the estimator, build a masker from ``mask``, and store maps.

    When ``maps`` is None (or otherwise falsy), an empty dict is used.
    """
    self.estimator = copy.deepcopy(estimator)
    self.masker = get_masker(mask)
    self.maps = maps if maps else {}