def features_summary(
    self: HasGetItemProtocol,
    layer: str,
    feature_name: str = "summary",
    channels: Optional[Channel_t] = None,
    quantiles: Sequence[float] = (0.9, 0.5, 0.1),
) -> Feature_t:
    """
    Calculate summary statistics of image channels.

    Parameters
    ----------
    %(img_layer)s
    %(feature_name)s
    %(channels)s
    quantiles
        Quantiles that are computed.

    Returns
    -------
    Returns features with the following keys for each channel `c` in ``channels``:

        - ``'{feature_name}_ch-{c}_quantile-{q}'`` - the quantile features for each quantile `q` in ``quantiles``.
        - ``'{feature_name}_ch-{c}_mean'`` - the mean.
        - ``'{feature_name}_ch-{c}_std'`` - the standard deviation.
    """
    layer = self._get_layer(layer)

    quantiles = _assert_non_empty_sequence(quantiles, name="quantiles")
    channels = _get_channels(self[layer], channels)
    channels = _assert_non_empty_sequence(channels, name="channels")

    features = {}
    for c in channels:
        for q in quantiles:
            features[f"{feature_name}_ch-{c}_quantile-{q}"] = np.quantile(self[layer][:, :, c], q)
        features[f"{feature_name}_ch-{c}_mean"] = np.mean(self[layer][:, :, c].values)
        features[f"{feature_name}_ch-{c}_std"] = np.std(self[layer][:, :, c].values)

    return features
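
# Hedged usage sketch for `features_summary`: `img` is assumed to be a
# `squidpy.im.ImageContainer` (it satisfies `HasGetItemProtocol`); the layer name
# "image" and the channel tuple are hypothetical placeholders.
def _example_features_summary(img: "ImageContainer") -> Feature_t:
    feats = features_summary(img, layer="image", channels=(0, 1, 2), quantiles=(0.1, 0.5, 0.9))
    # e.g. feats["summary_ch-0_mean"] then holds the mean intensity of channel 0
    return feats
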
def features_histogram(
    self: HasGetItemProtocol,
    layer: str,
    feature_name: str = "histogram",
    channels: Optional[Channel_t] = None,
    bins: int = 10,
    v_range: Optional[Tuple[int, int]] = None,
) -> Feature_t:
    """
    Compute histogram counts of color channel values.

    Returns one feature per bin and channel.

    Parameters
    ----------
    %(img_layer)s
    %(feature_name)s
    %(channels)s
    bins
        Number of binned value intervals.
    v_range
        Range on which values are binned. If `None`, use the whole image range.

    Returns
    -------
    Returns features with the following keys for each channel `c` in ``channels``:

        - ``'{feature_name}_ch-{c}_bin-{i}'`` - the histogram counts for each bin `i` in ``bins``.
    """
    layer = self._get_layer(layer)
    channels = _get_channels(self[layer], channels)
    channels = _assert_non_empty_sequence(channels, name="channels")

    # if v_range is None, use the whole-image range
    if v_range is None:
        v_range = np.min(self[layer].values), np.max(self[layer].values)

    features = {}
    for c in channels:
        hist, _ = np.histogram(self[layer][:, :, c], bins=bins, range=v_range, weights=None, density=False)
        for i, count in enumerate(hist):
            features[f"{feature_name}_ch-{c}_bin-{i}"] = count

    return features
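
# Hedged usage sketch for `features_histogram`: the layer name and value range are
# hypothetical; `img` is assumed to be a `squidpy.im.ImageContainer`.
def _example_features_histogram(img: "ImageContainer") -> Feature_t:
    # 16 bins over the fixed range [0, 255]; omit `v_range` to bin over the whole-image range instead.
    return features_histogram(img, layer="image", bins=16, v_range=(0, 255))
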
def moran(
    adata: AnnData,
    connectivity_key: str = Key.obsp.spatial_conn(),
    genes: Optional[Union[str, Sequence[str]]] = None,
    transformation: Literal["r", "B", "D", "U", "V"] = "r",
    n_perms: int = 1000,
    corr_method: Optional[str] = "fdr_bh",
    layer: Optional[str] = None,
    seed: Optional[int] = None,
    copy: bool = False,
    n_jobs: Optional[int] = None,
    backend: str = "loky",
    show_progress_bar: bool = True,
) -> Optional[pd.DataFrame]:
    """
    Calculate Moran’s I Global Autocorrelation Statistic.

    Parameters
    ----------
    %(adata)s
    %(conn_key)s
    genes
        List of gene names, as stored in :attr:`anndata.AnnData.var_names`, used to compute Moran's I
        statistics :cite:`pysal`.

        If `None`, it's computed for genes in :attr:`anndata.AnnData.var` ``['highly_variable']``, if present.
        Otherwise, it's computed for all genes.
    transformation
        Transformation to be used, as reported in :class:`esda.Moran`. Default is `"r"`, row-standardized.
    %(n_perms)s
    %(corr_method)s
    layer
        Layer in :attr:`anndata.AnnData.layers` to use. If `None`, use :attr:`anndata.AnnData.X`.
    %(seed)s
    %(copy)s
    %(parallelize)s

    Returns
    -------
    If ``copy = True``, returns a :class:`pandas.DataFrame` with the following keys:

        - `'I'` - Moran's I statistic.
        - `'pval_sim'` - p-value based on permutations.
        - `'VI_sim'` - variance of `'I'` from permutations.
        - `'pval_sim_{{corr_method}}'` - the corrected p-values if ``corr_method != None``.

    Otherwise, modifies the ``adata`` with the following key:

        - :attr:`anndata.AnnData.uns` ``['moranI']`` - the above mentioned dataframe.
    """
    if esda is None or libpysal is None:
        raise ImportError("Please install `esda` and `libpysal` as `pip install esda libpysal`.")

    _assert_positive(n_perms, name="n_perms")
    _assert_connectivity_key(adata, connectivity_key)

    if genes is None:
        if "highly_variable" in adata.var.columns:
            genes = adata[:, adata.var.highly_variable.values].var_names.values
        else:
            genes = adata.var_names.values
    genes = _assert_non_empty_sequence(genes, name="genes")

    n_jobs = _get_n_cores(n_jobs)
    start = logg.info(f"Calculating for `{len(genes)}` genes using `{n_jobs}` core(s)")

    w = _set_weight_class(adata, key=connectivity_key)  # init weights
    df = parallelize(
        _moran_helper,
        collection=genes,
        extractor=pd.concat,
        use_ixs=True,
        n_jobs=n_jobs,
        backend=backend,
        show_progress_bar=show_progress_bar,
    )(adata=adata, weights=w, transformation=transformation, permutations=n_perms, layer=layer, seed=seed)

    if corr_method is not None:
        _, pvals_adj, _, _ = multipletests(df["pval_sim"].values, alpha=0.05, method=corr_method)
        df[f"pval_sim_{corr_method}"] = pvals_adj

    df.sort_values(by="I", ascending=False, inplace=True)

    if copy:
        logg.info("Finish", time=start)
        return df

    _save_data(adata, attr="uns", key="moranI", data=df, time=start)
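
# Hedged usage sketch for `moran`: assumes spatial neighbors were already computed
# (e.g. via `squidpy.gr.spatial_neighbors(adata)`, which fills the default
# connectivity key) and that the listed gene names exist in `adata.var_names`;
# both gene names below are hypothetical.
def _example_moran(adata: AnnData) -> Optional[pd.DataFrame]:
    df = moran(adata, genes=["gene_a", "gene_b"], n_perms=100, seed=0, copy=True)
    # df is sorted by Moran's I; columns include 'I', 'pval_sim', 'VI_sim' and 'pval_sim_fdr_bh'
    return df
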
def spatial_autocorr(
    adata: AnnData,
    connectivity_key: str = Key.obsp.spatial_conn(),
    genes: Optional[Union[str, Sequence[str]]] = None,
    mode: Literal["moran", "geary"] = SpatialAutocorr.MORAN.s,  # type: ignore[assignment]
    transformation: bool = True,
    n_perms: Optional[int] = None,
    two_tailed: bool = False,
    corr_method: Optional[str] = "fdr_bh",
    layer: Optional[str] = None,
    seed: Optional[int] = None,
    use_raw: bool = False,
    copy: bool = False,
    n_jobs: Optional[int] = None,
    backend: str = "loky",
    show_progress_bar: bool = True,
) -> Optional[pd.DataFrame]:
    """
    Calculate Global Autocorrelation Statistic (Moran’s I or Geary's C).

    See :cite:`pysal` for reference.

    Parameters
    ----------
    %(adata)s
    %(conn_key)s
    genes
        List of gene names, as stored in :attr:`anndata.AnnData.var_names`, used to compute global
        spatial autocorrelation statistic.

        If `None`, it's computed for genes in :attr:`anndata.AnnData.var` ``['highly_variable']``, if present.
        Otherwise, it's computed for all genes.
    mode
        Mode of score calculation:

            - `{sp.MORAN.s!r}` - `Moran's I autocorrelation <https://en.wikipedia.org/wiki/Moran%27s_I>`_.
            - `{sp.GEARY.s!r}` - `Geary's C autocorrelation <https://en.wikipedia.org/wiki/Geary%27s_C>`_.
    transformation
        If `True`, weights in :attr:`anndata.AnnData.obsp` ``['{key}']`` are row-normalized,
        advised for analytic p-value calculation.
    %(n_perms)s
        If `None`, only p-values under normality assumption are computed.
    two_tailed
        If `True`, p-values are two-tailed, otherwise they are one-tailed.
    %(corr_method)s
    layer
        Layer in :attr:`anndata.AnnData.layers` to use. If `None`, use :attr:`anndata.AnnData.X`.
    %(seed)s
    %(copy)s
    %(parallelize)s

    Returns
    -------
    If ``copy = True``, returns a :class:`pandas.DataFrame` with the following keys:

        - `'I' or 'C'` - Moran's I or Geary's C statistic.
        - `'pval_norm'` - p-value under normality assumption.
        - `'var_norm'` - variance of `'score'` under normality assumption.
        - `'{{p_val}}_{{corr_method}}'` - the corrected p-values if ``corr_method != None``.

    If ``n_perms != None``, additionally returns the following columns:

        - `'pval_z_sim'` - p-value based on standard normal approximation from permutations.
        - `'pval_sim'` - p-value based on permutations.
        - `'var_sim'` - variance of `'score'` from permutations.

    Otherwise, modifies the ``adata`` with the following keys:

        - :attr:`anndata.AnnData.uns` ``['moranI']`` - the above mentioned dataframe, if ``mode = {sp.MORAN.s!r}``.
        - :attr:`anndata.AnnData.uns` ``['gearyC']`` - the above mentioned dataframe, if ``mode = {sp.GEARY.s!r}``.
""" _assert_connectivity_key(adata, connectivity_key) if genes is None: if "highly_variable" in adata.var.columns: genes = adata[:, adata.var.highly_variable.values].var_names.values else: genes = adata.var_names.values genes = _assert_non_empty_sequence(genes, name="genes") mode = SpatialAutocorr(mode) # type: ignore[assignment] if TYPE_CHECKING: assert isinstance(mode, SpatialAutocorr) params = { "mode": mode.s, "transformation": transformation, "two_tailed": two_tailed } if mode == SpatialAutocorr.MORAN: params["func"] = _morans_i params["stat"] = "I" params["expected"] = -1.0 / (adata.shape[0] - 1) # expected score params["ascending"] = False elif mode == SpatialAutocorr.GEARY: params["func"] = _gearys_c params["stat"] = "C" params["expected"] = 1.0 params["ascending"] = True else: raise NotImplementedError(f"Mode `{mode}` is not yet implemented.") n_jobs = _get_n_cores(n_jobs) vals = _get_obs_rep(adata[:, genes], use_raw=use_raw, layer=layer).T g = adata.obsp[connectivity_key].copy() # row-normalize if transformation: normalize(g, norm="l1", axis=1, copy=False) score = params["func"](g, vals) start = logg.info( f"Calculating {mode}'s statistic for `{n_perms}` permutations using `{n_jobs}` core(s)" ) if n_perms is not None: _assert_positive(n_perms, name="n_perms") perms = np.arange(n_perms) score_perms = parallelize( _score_helper, collection=perms, extractor=np.concatenate, use_ixs=True, n_jobs=n_jobs, backend=backend, show_progress_bar=show_progress_bar, )(mode=mode, g=g, vals=vals, seed=seed) else: score_perms = None with np.errstate(divide="ignore"): pval_results = _p_value_calc(score, score_perms, g, params) results = {params["stat"]: score} results.update(pval_results) df = pd.DataFrame(results, index=genes) if corr_method is not None: for pv in filter(lambda x: "pval" in x, df.columns): _, pvals_adj, _, _ = multipletests(df[pv].values, alpha=0.05, method=corr_method) df[f"{pv}_{corr_method}"] = pvals_adj df.sort_values(by=params["stat"], ascending=params["ascending"], inplace=True) if copy: logg.info("Finish", time=start) return df _save_data(adata, attr="uns", key=params["mode"] + params["stat"], data=df, time=start)
def centrality_scores(
    adata: AnnData,
    cluster_key: str,
    score: Optional[Union[str, Sequence[str]]] = None,
    legend_kwargs: Mapping[str, Any] = MappingProxyType({}),
    palette: Palette_t = None,
    figsize: Optional[Tuple[float, float]] = None,
    dpi: Optional[int] = None,
    save: Optional[Union[str, Path]] = None,
    **kwargs: Any,
) -> None:
    """
    Plot centrality scores.

    The centrality scores are computed by :func:`squidpy.gr.centrality_scores`.

    Parameters
    ----------
    %(adata)s
    %(cluster_key)s
    score
        Centrality scores to plot. If `None`, plot all scores computed by :func:`squidpy.gr.centrality_scores`.
    legend_kwargs
        Keyword arguments for :func:`matplotlib.pyplot.legend`.
    %(cat_plotting)s

    Returns
    -------
    %(plotting_returns)s
    """
    _assert_categorical_obs(adata, key=cluster_key)
    df = _get_data(adata, cluster_key=cluster_key, func_name="centrality_scores").copy()

    legend_kwargs = dict(legend_kwargs)
    if "loc" not in legend_kwargs:
        legend_kwargs["loc"] = "center left"
        legend_kwargs.setdefault("bbox_to_anchor", (1, 0.5))

    scores = df.columns.values
    df[cluster_key] = df.index.values

    clusters = adata.obs[cluster_key].cat.categories
    palette = _get_palette(adata, cluster_key=cluster_key, categories=clusters) if palette is None else palette

    score = scores if score is None else score
    score = _assert_non_empty_sequence(score, name="centrality scores")
    score = sorted(_get_valid_values(score, scores))

    fig, axs = plt.subplots(1, len(score), figsize=figsize, dpi=dpi, constrained_layout=True)
    axs = np.ravel(axs)  # make into iterable
    for g, ax in zip(score, axs):
        sns.scatterplot(
            x=g,
            y=cluster_key,
            data=df,
            hue=cluster_key,
            hue_order=clusters,
            palette=palette,
            ax=ax,
            **kwargs,
        )
        ax.set_title(str(g).replace("_", " ").capitalize())
        ax.set_xlabel("value")
        ax.set_yticks([])
        ax.legend(**legend_kwargs)

    if save is not None:
        save_fig(fig, path=save)
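
# Hedged usage sketch for the `centrality_scores` plot: assumes the scores were
# already computed with `squidpy.gr.centrality_scores` and that `adata.obs` has a
# categorical column; the key "cluster" is a hypothetical placeholder.
def _example_plot_centrality(adata: AnnData) -> None:
    import squidpy as sq

    sq.gr.centrality_scores(adata, cluster_key="cluster")
    centrality_scores(adata, cluster_key="cluster", figsize=(10, 4))
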
def co_occurrence(
    adata: AnnData,
    cluster_key: str,
    palette: Palette_t = None,
    clusters: Optional[Union[str, Sequence[str]]] = None,
    figsize: Optional[Tuple[float, float]] = None,
    dpi: Optional[int] = None,
    save: Optional[Union[str, Path]] = None,
    legend_kwargs: Mapping[str, Any] = MappingProxyType({}),
    **kwargs: Any,
) -> None:
    """
    Plot co-occurrence probability ratio for each cluster.

    The co-occurrence is computed by :func:`squidpy.gr.co_occurrence`.

    Parameters
    ----------
    %(adata)s
    %(cluster_key)s
    clusters
        Cluster instances for which to plot conditional probability.
    %(cat_plotting)s
    legend_kwargs
        Keyword arguments for :func:`matplotlib.pyplot.legend`.
    kwargs
        Keyword arguments for :func:`seaborn.lineplot`.

    Returns
    -------
    %(plotting_returns)s
    """
    _assert_categorical_obs(adata, key=cluster_key)
    occurrence_data = _get_data(adata, cluster_key=cluster_key, func_name="co_occurrence")

    legend_kwargs = dict(legend_kwargs)
    if "loc" not in legend_kwargs:
        legend_kwargs["loc"] = "center left"
        legend_kwargs.setdefault("bbox_to_anchor", (1, 0.5))

    out = occurrence_data["occ"]
    interval = occurrence_data["interval"][1:]
    categories = adata.obs[cluster_key].cat.categories

    clusters = categories if clusters is None else clusters
    clusters = _assert_non_empty_sequence(clusters, name="clusters")
    clusters = sorted(_get_valid_values(clusters, categories))

    palette = _get_palette(adata, cluster_key=cluster_key, categories=categories) if palette is None else palette

    fig, axs = plt.subplots(
        1,
        len(clusters),
        figsize=(5 * len(clusters), 5) if figsize is None else figsize,
        dpi=dpi,
        constrained_layout=True,
    )
    axs = np.ravel(axs)  # make into iterable

    for g, ax in zip(clusters, axs):
        idx = np.where(categories == g)[0][0]
        df = pd.DataFrame(out[idx, :, :].T, columns=categories).melt(var_name=cluster_key, value_name="probability")
        df["distance"] = np.tile(interval, len(categories))

        sns.lineplot(
            x="distance",
            y="probability",
            data=df,
            dashes=False,
            hue=cluster_key,
            hue_order=categories,
            palette=palette,
            ax=ax,
            **kwargs,
        )
        ax.legend(**legend_kwargs)
        ax.set_title(rf"$\frac{{p(exp|{g})}}{{p(exp)}}$")
        ax.set_ylabel("value")

    if save is not None:
        save_fig(fig, path=save)
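
# Hedged usage sketch for the `co_occurrence` plot: assumes
# `squidpy.gr.co_occurrence` was run for the same `cluster_key`; the key "cluster"
# and the cluster name "cluster_a" are hypothetical placeholders.
def _example_plot_co_occurrence(adata: AnnData) -> None:
    import squidpy as sq

    sq.gr.co_occurrence(adata, cluster_key="cluster")
    co_occurrence(adata, cluster_key="cluster", clusters=["cluster_a"], figsize=(5, 5))
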
def features_segmentation(
    self: HasGetItemProtocol,
    label_layer: str,
    intensity_layer: Optional[str] = None,
    feature_name: str = "segmentation",
    channels: Optional[Channel_t] = None,
    props: Sequence[str] = ("label", "area", "mean_intensity"),
) -> Feature_t:
    """
    Calculate segmentation features using :func:`skimage.measure.regionprops`.

    Features are calculated using ``label_layer``, a cell segmentation of ``intensity_layer``,
    resulting from calling e.g. :func:`squidpy.im.segment`.

    Depending on the specified parameters, mean and std of the requested props are returned.
    For the `'label'` feature, the number of labels is returned, i.e. the number of cells in this image.

    Parameters
    ----------
    label_layer
        Name of the image layer used to calculate the non-intensity properties.
    intensity_layer
        Name of the image layer used to calculate the intensity properties.
    %(feature_name)s
    %(channels)s
        Only relevant for features that use the ``intensity_layer``.
    props
        Segmentation features that are calculated. See `properties` in :func:`skimage.measure.regionprops_table`.
        Each feature is calculated for each segment (e.g., nucleus) and mean and std values are returned,
        except for `'centroid'` and `'label'`. Valid options are:

            - `'area'` - number of pixels of segment.
            - `'bbox_area'` - number of pixels of bounding box area of segment.
            - `'centroid'` - centroid coordinates of segment.
            - `'convex_area'` - number of pixels in convex hull of segment.
            - `'eccentricity'` - eccentricity of ellipse with same second moments as segment.
            - `'equivalent_diameter'` - diameter of a circle with the same area as segment.
            - `'euler_number'` - Euler characteristic of segment.
            - `'extent'` - ratio of pixels in segment to its bounding box.
            - `'feret_diameter_max'` - longest distance between points around convex hull of segment.
            - `'filled_area'` - number of pixels of segment with all holes filled in.
            - `'label'` - number of segments.
            - `'major_axis_length'` - length of major axis of ellipse with same second moments as segment.
            - `'max_intensity'` - maximum intensity of ``intensity_layer`` in segment.
            - `'mean_intensity'` - mean intensity of ``intensity_layer`` in segment.
            - `'min_intensity'` - min intensity of ``intensity_layer`` in segment.
            - `'minor_axis_length'` - length of minor axis of ellipse with same second moments as segment.
            - `'orientation'` - angle of major axis of ellipse with same second moments as segment.
            - `'perimeter'` - perimeter of segment using 4-connectivity.
            - `'perimeter_crofton'` - perimeter of segment approximated by the Crofton formula.
            - `'solidity'` - ratio of pixels in the segment to the convex hull of the segment.

    Returns
    -------
    Returns features with the following keys:

        - ``'{feature_name}_label'`` - if `'label'` is in ``props``.
        - ``'{feature_name}_centroid'`` - if `'centroid'` is in ``props``.
        - ``'{feature_name}_{p}_mean'`` - mean for each non-intensity property `p` in ``props``.
        - ``'{feature_name}_{p}_std'`` - standard deviation for each non-intensity property `p` in ``props``.
        - ``'{feature_name}_ch-{c}_{p}_mean'`` - mean for each intensity property `p` in ``props``
          and channel `c` in ``channels``.
        - ``'{feature_name}_ch-{c}_{p}_std'`` - standard deviation for each intensity property `p` in ``props``
          and channel `c` in ``channels``.
""" def convert_to_full_image_coordinates(x: np.ndarray, y: np.ndarray) -> np.ndarray: if not len(y): return np.array([[]], dtype=np.float64) if self.data.attrs.get("mask_circle", False): if self.data.dims["y"] != self.data.dims["x"]: raise ValueError( f"Crop is not a square: `{self.data.dims}`.") c = self.data.dims["x"] // 2 # center mask = (x - c)**2 + (y - c)**2 <= c**2 y = y[mask] x = x[mask] if not len(y): return np.array( [[]], dtype=np.float64) # because of masking, should not happen coord = self.data.attrs.get( Key.img.coords, CropCoords(x0=0, y0=0, x1=self.data.dims["x"], y1=self.data.dims["y"] )) # fall back to default (i.e no crop) coordinates padding = self.data.attrs.get( Key.img.padding, _NULL_PADDING) # fallback to no padding y_slc, x_slc = coord.to_image_coordinates(padding).slice # relative coordinates y = (y - np.min(y)) / (np.max(y) - np.min(y)) x = (x - np.min(x)) / (np.max(x) - np.min(x)) # coordinates in the uncropped image y = coord.slice[0].start + (y_slc.stop - y_slc.start) * y x = coord.slice[1].start + (x_slc.stop - x_slc.start) * x return np.c_[x, y] # type: ignore[no-any-return] label_layer = self._get_layer(label_layer) props = _assert_non_empty_sequence(props, name="properties") for prop in props: if prop not in _valid_seg_prop: raise ValueError( f"Invalid property `{prop}`. Valid properties are `{_valid_seg_prop}`." ) no_intensity_props = [p for p in props if "intensity" not in p] intensity_props = [p for p in props if "intensity" in p] if len(intensity_props): if intensity_layer is None: raise ValueError( "Please specify `intensity_layer` if using intensity properties." ) channels = _get_channels(self[intensity_layer], channels) channels = _assert_non_empty_sequence(channels, name="channels") else: channels = () features: Dict[str, Any] = {} # calculate features that do not depend on the intensity image tmp_features = skimage.measure.regionprops_table( self[label_layer].values[:, :, 0], properties=no_intensity_props) for p in no_intensity_props: if p == "label": features[f"{feature_name}_{p}"] = len(tmp_features["label"]) elif p == "centroid": features[ f"{feature_name}_centroid"] = convert_to_full_image_coordinates( tmp_features["centroid-0"], tmp_features["centroid-1"]) else: features[f"{feature_name}_{p}_mean"] = np.mean(tmp_features[p]) features[f"{feature_name}_{p}_std"] = np.std(tmp_features[p]) # calculate features that depend on the intensity image for c in channels: if TYPE_CHECKING: assert isinstance(intensity_layer, str) tmp_features = skimage.measure.regionprops_table( self[label_layer].values[:, :, 0], intensity_image=self[intensity_layer].values[:, :, c], properties=props, ) for p in intensity_props: features[f"{feature_name}_ch-{c}_{p}_mean"] = np.mean( tmp_features[p]) features[f"{feature_name}_ch-{c}_{p}_std"] = np.std( tmp_features[p]) return features
def features_texture(
    self: HasGetItemProtocol,
    layer: str,
    feature_name: str = "texture",
    channels: Optional[Channel_t] = None,
    props: Sequence[str] = ("contrast", "dissimilarity", "homogeneity", "correlation", "ASM"),
    distances: Sequence[int] = (1,),
    angles: Sequence[float] = (0, np.pi / 4, np.pi / 2, 3 * np.pi / 4),
) -> Feature_t:
    """
    Calculate texture features.

    A grey level co-occurrence matrix (`GLCM <https://en.wikipedia.org/wiki/Co-occurrence_matrix>`_) is computed
    for different combinations of distance and angle.

    The distance defines the pixel difference of co-occurrence. The angles define the directions along which
    we check for co-occurrence. The GLCM includes the number of times that grey-level :math:`j` occurs
    at a distance :math:`d` and at an angle theta from grey-level :math:`i`.

    Parameters
    ----------
    %(img_layer)s
    %(feature_name)s
    %(channels)s
    props
        Texture features that are calculated, see the `prop` argument in :func:`skimage.feature.greycoprops`.
    distances
        The `distances` argument in :func:`skimage.feature.greycomatrix`.
    angles
        The `angles` argument in :func:`skimage.feature.greycomatrix`.

    Returns
    -------
    Returns features with the following keys for each channel `c` in ``channels``:

        - ``'{feature_name}_ch-{c}_{p}_dist-{dist}_angle-{a}'`` - the GLCM properties, for each `p` in ``props``,
          `d` in ``distances`` and `a` in ``angles``.

    Notes
    -----
    If the image is not of type :class:`numpy.uint8`, it will be converted.
    """
    layer = self._get_layer(layer)

    props = _assert_non_empty_sequence(props, name="properties")
    angles = _assert_non_empty_sequence(angles, name="angles")
    distances = _assert_non_empty_sequence(distances, name="distances")

    channels = _get_channels(self[layer], channels)
    channels = _assert_non_empty_sequence(channels, name="channels")

    arr = self[layer][..., channels].values
    if not np.issubdtype(arr.dtype, np.uint8):
        arr = img_as_ubyte(arr, force_copy=False)  # values must be in [0, 255]

    features = {}
    for c in channels:
        comatrix = greycomatrix(arr[..., c], distances=distances, angles=angles, levels=256)
        for p in props:
            tmp_features = greycoprops(comatrix, prop=p)
            for d_idx, dist in enumerate(distances):
                for a_idx, a in enumerate(angles):
                    features[f"{feature_name}_ch-{c}_{p}_dist-{dist}_angle-{a:.2f}"] = tmp_features[d_idx, a_idx]

    return features
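
# Hedged usage sketch for `features_texture`: a single distance and two angles keep
# the number of GLCM features small; the layer name "image" is a hypothetical placeholder.
def _example_features_texture(img: "ImageContainer") -> Feature_t:
    return features_texture(
        img,
        layer="image",
        props=("contrast", "correlation"),
        distances=(1,),
        angles=(0, np.pi / 2),
    )
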
def generate_spot_crops(
    self,
    adata: AnnData,
    library_id: Optional[str] = None,
    spatial_key: str = Key.obsm.spatial,
    spot_scale: float = 1.0,
    obs_names: Optional[Iterable[Any]] = None,
    as_array: Union[str, bool] = False,
    return_obs: bool = False,
    **kwargs: Any,
) -> Union[
    Iterator["ImageContainer"],
    Iterator[np.ndarray],
    Iterator[Tuple[np.ndarray, ...]],
    Iterator[Dict[str, np.ndarray]],
]:
    """
    Iterate over :attr:`adata.obs_names` and extract crops.

    Implemented for 10X spatial datasets.

    Parameters
    ----------
    %(adata)s
    library_id
        Key in :attr:`anndata.AnnData.uns` ``['{spatial_key}']`` used to get the spot diameter.
    %(spatial_key)s
    spot_scale
        Scaling factor for the spot diameter. Larger values mean more context.
    obs_names
        Observations from :attr:`adata.obs_names` for which to generate the crops. If `None`, all names are used.
    %(as_array)s
    return_obs
        Whether to also yield names from ``obs_names``.
    kwargs
        Keyword arguments for :meth:`crop_center`.

    Yields
    ------
    If ``return_obs = True``, yields a :class:`tuple` ``(crop, obs_name)``. Otherwise, yields just the crops.
    The type of the crops depends on ``as_array``.
    """
    self._assert_not_empty()
    _assert_positive(spot_scale, name="scale")
    _assert_spatial_basis(adata, spatial_key)
    library_id = Key.uns.library_id(adata, spatial_key=spatial_key, library_id=library_id)

    if obs_names is None:
        obs_names = adata.obs_names
    obs_names = _assert_non_empty_sequence(obs_names, name="observations")
    adata = adata[obs_names, :]

    spatial = adata.obsm[spatial_key][:, :2]
    diameter = adata.uns[spatial_key][library_id]["scalefactors"]["spot_diameter_fullres"]
    radius = int(round(diameter // 2 * spot_scale))

    for i, obs in enumerate(adata.obs_names):
        crop = self.crop_center(y=spatial[i][1], x=spatial[i][0], radius=radius, **kwargs)
        crop.data.attrs[Key.img.obs] = obs
        crop = crop._maybe_as_array(as_array)

        yield (crop, obs) if return_obs else crop
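
# Hedged usage sketch for `generate_spot_crops`: assumes a 10X Visium-style `adata`
# whose `adata.uns[spatial_key]` carries the spot diameter, and an `ImageContainer`
# `img` aligned to the same coordinates.
def _example_generate_spot_crops(img: "ImageContainer", adata: AnnData) -> list:
    # With `as_array=False` (the default) each yielded crop is itself an ImageContainer.
    return [(obs, crop) for crop, obs in generate_spot_crops(img, adata, spot_scale=1.5, return_obs=True)]
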