Example #1
    def _collect_ma_maps(self,
                         coords_key="coordinates",
                         maps_key="ma_maps",
                         fname_idx=0):
        """Collect modeled activation maps from Estimator inputs.

        Parameters
        ----------
        coords_key : :obj:`str`, optional
            Key to Estimator.inputs_ dictionary containing coordinates DataFrame.
            This key should **always** be present.
        maps_key : :obj:`str`, optional
            Key to Estimator.inputs_ dictionary containing list of MA map files.
            This key should only be present if the kernel transformer was already fitted to the
            input Dataset.
        fname_idx : :obj:`int`, optional
            When the Estimator is set with ``memory_limit`` as a string,
            there is a ``memmap_filenames`` attribute that is a list of filenames or Nones.
            This parameter specifies which item in that list should be used for a memory-mapped
            array. Default is 0.

        Returns
        -------
        ma_maps : :obj:`numpy.ndarray` or :obj:`numpy.memmap`
            2D numpy array of shape (n_studies, n_voxels) with MA values.
            This will be a memmap if MA maps have been pre-generated.
        """
        if maps_key in self.inputs_.keys():
            LGR.debug(f"Loading pre-generated MA maps ({maps_key}).")
            if self.memory_limit:
                # perform transform on chunks of the input maps
                ma_maps = _safe_transform(
                    self.inputs_[maps_key],
                    masker=self.masker,
                    memory_limit=self.memory_limit,
                    memfile=self.memmap_filenames[fname_idx],
                )
            else:
                ma_maps = self.masker.transform(self.inputs_[maps_key])
        else:
            LGR.debug(f"Generating MA maps from coordinates ({coords_key}).")
            ma_maps = self.kernel_transformer.transform(
                self.inputs_[coords_key],
                masker=self.masker,
                return_type="array",
            )
        return ma_maps
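For orientation, here is a hedged sketch of how a CBMA Estimator subclass might call this helper from its own ``_fit`` method. The key names mirror the defaults above; the mean across studies is purely illustrative and stands in for whatever statistic a real estimator would compute.

    def _fit(self, dataset):
        """Illustrative only: collect MA maps and reduce them to one summary map."""
        self.dataset = dataset
        self.masker = self.masker or dataset.masker

        # (n_studies, n_voxels) array, or a memmap if MA maps were pre-generated
        ma_values = self._collect_ma_maps(coords_key="coordinates", maps_key="ma_maps")

        # Placeholder reduction across studies; real estimators compute their own statistic here
        stat_values = ma_values.mean(axis=0)
        return stat_values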
Example #2
    def _fit(self, dataset):
        """Collect sets of maps from the Dataset corresponding to each requested feature.

        Parameters
        ----------
        dataset : :obj:`~nimare.dataset.Dataset`
            Dataset for which to run meta-analyses to generate maps.

        Attributes
        ----------
        masker : :class:`~nilearn.input_data.NiftiMasker` or similar
            Masker from dataset
        features_ : :obj:`list`
            Reduced list of features
        images_ : :obj:`dict`
            Masked meta-analytic maps, keyed by feature name. Each value is a
            2D (study, voxel) array of masked image data for that feature's studies.
        """
        self.masker = dataset.masker

        images_ = {}
        for feature in self.features_:
            feature_ids = dataset.get_studies_by_label(
                labels=[feature], label_threshold=self.frequency_threshold)
            selected_ids = sorted(
                list(set(feature_ids).intersection(self.inputs_["id"])))
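            # Map the selected study IDs back to their positions in inputs_["id"],
            # so the matching entries can be pulled from inputs_["images"] below.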
            selected_id_idx = [
                i_id for i_id, id_ in enumerate(self.inputs_["id"])
                if id_ in selected_ids
            ]
            test_imgs = [
                img for i_img, img in enumerate(self.inputs_["images"])
                if i_img in selected_id_idx
            ]
            if len(test_imgs):
                feature_arr = _safe_transform(
                    test_imgs,
                    self.masker,
                    memory_limit=self.memory_limit,
                    memfile=None,
                )
                images_[feature] = feature_arr
            else:
                LGR.info(f"Skipping feature '{feature}'. No images found.")
        # reduce features again
        self.features_ = [f for f in self.features_ if f in images_.keys()]
        self.images_ = images_
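The snippet does not show which class owns this method, but the per-feature image collection resembles NiMARE's CorrelationDistributionDecoder. The usage below is a hedged sketch under that assumption; the dataset filename and feature labels are made up.

from nimare.dataset import Dataset
from nimare.decode.continuous import CorrelationDistributionDecoder

dset = Dataset("neurosynth_dataset.json")   # assumed: a Dataset with images and annotations
decoder = CorrelationDistributionDecoder(
    features=["pain", "memory"],            # assumed labels; any annotation labels would do
    frequency_threshold=0.001,
)
decoder.fit(dset)                           # validates inputs, then calls _fit as shown above
print(decoder.features_)                    # features for which images were actually found
print(decoder.images_["pain"].shape)        # (n_studies_with_label, n_voxels)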
Example #3
    def transform(self, dataset, masker=None, return_type="image"):
        """Generate modeled activation images for each Contrast in dataset.

        Parameters
        ----------
        dataset : :obj:`~nimare.dataset.Dataset` or :obj:`pandas.DataFrame`
            Dataset for which to make images. Can be a DataFrame if necessary.
        masker : img_like or None, optional
            Mask to apply to MA maps. Required if ``dataset`` is a DataFrame.
            If None (and ``dataset`` is a Dataset), the Dataset's masker attribute will be used.
            Default is None.
        return_type : {'sparse', 'array', 'image', 'dataset'}, optional
            Whether to return a sparse array of MA values ('sparse'), a dense numpy
            array ('array'), a list of niimgs ('image'), or a Dataset with MA images
            saved as files ('dataset').
            Default is 'image'.

        Returns
        -------
        imgs : (C x V) :class:`numpy.ndarray` or :obj:`list` of :class:`nibabel.Nifti1Image` \
               or :class:`~nimare.dataset.Dataset`
            If return_type is 'sparse', a 4D sparse array of shape (E, X, Y, Z),
            where E is the number of unique experiments and (X, Y, Z) matches the
            shape of the mask image.
            If return_type is 'array', a 2D numpy array (C x V), where C is
            contrast and V is voxel.
            If return_type is 'image', a list of modeled activation images
            (one for each of the Contrasts in the input dataset).
            If return_type is 'dataset', a new Dataset object with modeled
            activation images saved to files and referenced in the
            Dataset.images attribute.

        Attributes
        ----------
        filename_pattern : str
            Filename pattern for MA maps that will be saved by the transformer.
        image_type : str
            Name of the corresponding column in the Dataset.images DataFrame.
        """
        if return_type not in ("sparse", "array", "image", "dataset"):
            raise ValueError(
                'Argument "return_type" must be "sparse", "array", "image", or "dataset".'
            )

        if isinstance(dataset, pd.DataFrame):
            assert (
                masker is not None
            ), "Argument 'masker' must be provided if dataset is a DataFrame."
            mask = masker.mask_img
            coordinates = dataset
            assert (
                return_type != "dataset"
            ), "Input dataset must be a Dataset if return_type='dataset'."

            # Calculate IJK. We must assume the masker is in the same space as the
            # coordinates, although its affine may differ from the one used for the
            # original IJK values.
            coordinates[["i", "j", "k"]] = mm2vox(dataset[["x", "y", "z"]],
                                                  mask.affine)
        else:
            masker = dataset.masker if not masker else masker
            mask = masker.mask_img
            coordinates = dataset.coordinates.copy()

            # Determine MA map filenames. Must happen after parameters are set.
            self._infer_names(affine=md5(mask.affine).hexdigest())

            # Check for existing MA maps
            # Use coordinates to get IDs instead of Dataset.ids because of a possible
            # mismatch between the full Dataset and the contrasts that have coordinates.
            if self.image_type in dataset.images.columns:
                files = dataset.get_images(ids=coordinates["id"].unique(),
                                           imtype=self.image_type)
                if all(f is not None for f in files):
                    LGR.debug("Files already exist. Using them.")
                    if return_type == "array":
                        masked_data = _safe_transform(files, masker)
                        return masked_data
                    elif return_type == "image":
                        return [nib.load(f) for f in files]
                    elif return_type == "dataset":
                        return dataset.copy()

            # Calculate IJK
            if not np.array_equal(mask.affine, dataset.masker.mask_img.affine):
                LGR.warning(
                    "Mask affine does not match Dataset affine. Assuming same space."
                )

            coordinates[["i", "j", "k"]] = mm2vox(coordinates[["x", "y", "z"]],
                                                  mask.affine)

            # Add any metadata the Transformer might need to the coordinates DataFrame
            # This approach is probably inferior to one which uses a _required_inputs attribute
            # (like the MetaEstimators), but it should work just fine as long as individual
            # requirements are written in here.
            if (hasattr(self, "sample_size") and (self.sample_size is None)
                    and ("sample_size" not in coordinates.columns)):
                coordinates = _add_metadata_to_dataframe(
                    dataset,
                    coordinates,
                    metadata_field="sample_sizes",
                    target_column="sample_size",
                    filter_func=np.mean,
                )

        # Generate the MA maps if they weren't already available as images
        if return_type == "array":
            mask_data = mask.get_fdata().astype(bool)
        elif return_type == "image":
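            # Match the mask dtype to the kernel's "value" attribute (when defined)
            # so the output images keep that data type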
            dtype = type(self.value) if hasattr(self, "value") else float
            mask_data = mask.get_fdata().astype(dtype)
        elif return_type == "dataset":
            if dataset.basepath is None:
                raise ValueError(
                    "Dataset output path is not set. Set the path with Dataset.update_path()."
                )
            elif not os.path.isdir(dataset.basepath):
                raise ValueError(
                    "Output directory does not exist. Set the path to an existing folder with "
                    "Dataset.update_path().")
            dataset = dataset.copy()

        transformed_maps = self._transform(mask, coordinates)

        if return_type == "sparse":
            return transformed_maps[0]

        imgs = []
        # Loop over exp ids since sparse._coo.core.COO is not iterable
        for i_exp, id_ in enumerate(transformed_maps[1]):
            if isinstance(transformed_maps[0][i_exp], sparse._coo.core.COO):
                kernel_data = transformed_maps[0][i_exp].todense()
            else:
                kernel_data = transformed_maps[0][i_exp]

            if return_type == "array":
                img = kernel_data[mask_data]
                imgs.append(img)
            elif return_type == "image":
                kernel_data *= mask_data
                img = nib.Nifti1Image(kernel_data, mask.affine)
                imgs.append(img)
            elif return_type == "dataset":
                img = nib.Nifti1Image(kernel_data, mask.affine)
                out_file = os.path.join(dataset.basepath,
                                        self.filename_pattern.format(id=id_))
                img.to_filename(out_file)
                dataset.images.loc[dataset.images["id"] == id_,
                                   self.image_type] = out_file

        del kernel_data, transformed_maps

        if return_type == "array":
            return np.vstack(imgs)
        elif return_type == "image":
            return imgs
        elif return_type == "dataset":
            # Replace NaNs with Nones
            dataset.images[self.image_type] = dataset.images[
                self.image_type].where(
                    dataset.images[self.image_type].notnull(), None)
            # Reassigning through the images property triggers its setter, which
            # infers relative paths for the newly written files.
            dataset.images = dataset.images
            return dataset
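To ground the return_type options, here is a short usage sketch with one of NiMARE's kernel transformers. The kernel choice, dataset filename, and output directory are assumptions, but the call pattern follows the signature above.

import tempfile

from nimare.dataset import Dataset
from nimare.meta.kernel import MKDAKernel

dset = Dataset("dataset_with_coordinates.json")  # assumed: a coordinate-based Dataset file
kernel = MKDAKernel(r=10)

ma_images = kernel.transform(dset, return_type="image")  # list of Nifti1Image, one per contrast
ma_array = kernel.transform(dset, return_type="array")   # (n_contrasts, n_voxels) ndarray

# return_type="dataset" writes MA maps to disk, so an existing output path must be set first
dset.update_path(tempfile.mkdtemp())
dset_with_maps = kernel.transform(dset, return_type="dataset")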