def log_norm(
    data: MultimodalData,
    norm_count: float = 1e5,
    backup_matrix: str = "raw.X",
) -> None:
    """Normalize each cell by total counts, and then apply the natural logarithm to the data.

    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Use current selected modality in data, which should contain one RNA expression matrix.
    norm_count: ``float``, optional, default: ``1e5``.
        Total counts of one cell after normalization.
    backup_matrix: ``str``, optional, default: ``raw.X``.
        The key name of the backup count matrix, usually the raw counts.

    Returns
    -------
    ``None``

    Update ``data.X`` with the count matrix after log-normalization. In addition, back up the original count matrix as ``backup_matrix``.

    In case of rerunning normalization while ``backup_matrix`` already exists, use ``backup_matrix`` instead of ``data.X`` for normalization.

    Examples
    --------
    >>> pg.log_norm(data)
    """
    if isinstance(data, MultimodalData):
        data = data.current_data()

    assert data.get_modality() == "rna"
    if backup_matrix not in data.list_keys():
        data.add_matrix(backup_matrix, data.X)
        data.X = data.X.astype(np.float32)  # force copy
    else:
        # The case of rerunning log_norm. Use backup matrix as source.
        data.X = data.get_matrix(backup_matrix).astype(np.float32)  # force copy
        logger.warning("Rerun log-normalization. Use the raw counts in backup instead.")

    data.obs["scale"] = normalize_by_count(data.X, data.var["robust"].values, norm_count, True)
    data.uns["norm_count"] = norm_count
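# --- Usage sketch (illustrative, not part of the library) ---------------------
# A minimal example of the rerun behavior documented above: the first call backs
# up the raw counts under "raw.X"; a later call renormalizes from that backup
# instead of from the already-transformed data.X. Assumes `data` is a
# pegasusio.MultimodalData whose current modality is RNA and whose
# var["robust"] column exists (e.g. after pg.identify_robust_genes).
def _log_norm_rerun_example(data):
    import pegasus as pg

    pg.log_norm(data)                  # backs up raw counts, normalizes to 1e5, applies log
    pg.log_norm(data, norm_count=1e4)  # rerun: starts from "raw.X", not from data.X
    assert data.uns["norm_count"] == 1e4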
def log_norm(data: MultimodalData, norm_count: float = 1e5, backup_matrix: str = "raw.X") -> None:
    """Normalize each cell by total counts, and then apply the natural logarithm to the data.

    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Use current selected modality in data, which should contain one RNA expression matrix.
    norm_count: ``float``, optional, default: ``1e5``.
        Total counts of one cell after normalization.
    backup_matrix: ``str``, optional, default: ``raw.X``.
        The key under which to back up the original count matrix.

    Returns
    -------
    ``None``

    Update ``data.X`` with the count matrix after log-normalization. In addition, back up the original count matrix as ``backup_matrix``.

    Examples
    --------
    >>> pg.log_norm(data)
    """
    if isinstance(data, MultimodalData):
        data = data.current_data()

    assert data.get_modality() == "rna"
    data.add_matrix(backup_matrix, data.X)
    data.X = data.X.astype(np.float32)  # force copy

    from pegasus.cylib.fast_utils import normalize_by_count

    data.obs["scale"] = normalize_by_count(data.X, data.var["robust"].values, norm_count, True)
    data.uns["norm_count"] = norm_count
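# --- Illustrative sketch (assumption-laden, not part of the library) ----------
# A rough NumPy/SciPy rendition of what `normalize_by_count` is assumed to do:
# derive a per-cell scale factor from the counts of robust genes only, scale
# every matrix entry, and optionally log1p-transform. The real Cython kernel
# operates in place on the sparse matrix and may differ in details such as
# edge-case handling and the exact definition of the returned scale.
def _normalize_by_count_sketch(X, robust, norm_count, log_transform):
    import numpy as np
    import scipy.sparse as sp

    X = sp.csr_matrix(X, dtype=np.float32)
    totals = np.asarray(X[:, robust].sum(axis=1)).ravel()  # per-cell totals over robust genes
    scale = norm_count / totals                            # assumed direction of the scale factor
    X.data *= np.repeat(scale, np.diff(X.indptr))          # scale each CSR row's nonzeros
    if log_transform:
        X.data = np.log1p(X.data)                          # natural log of (1 + x)
    return X, scale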
def infer_doublets(
    data: MultimodalData,
    channel_attr: Optional[str] = None,
    clust_attr: Optional[str] = None,
    min_cell: Optional[int] = 100,
    expected_doublet_rate: Optional[float] = None,
    sim_doublet_ratio: Optional[float] = 2.0,
    n_prin_comps: Optional[int] = 30,
    robust: Optional[bool] = False,
    k: Optional[int] = None,
    n_jobs: Optional[int] = -1,
    alpha: Optional[float] = 0.05,
    random_state: Optional[int] = 0,
    plot_hist: Optional[str] = "dbl",
) -> None:
    """Infer doublets using a Scrublet-like strategy. [Li20-2]_

    This function must be called after clustering.

    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Annotated data matrix with rows for cells and columns for genes.

    channel_attr: ``str``, optional, default: None
        Attribute indicating sample channels. If set, calculate scrublet-like doublet scores per channel.

    clust_attr: ``str``, optional, default: None
        Attribute indicating cluster labels. If set, estimate the proportion of doublets in each cluster and its statistical significance.

    min_cell: ``int``, optional, default: 100
        Minimum number of cells per sample required to calculate doublet scores. For samples with fewer than 'min_cell' cells, doublet score calculation will be skipped.

    expected_doublet_rate: ``float``, optional, default: ``None``
        The expected doublet rate for the experiment. By default, calculate the expected rate based on the number of cells from the 10x multiplet rate table.

    sim_doublet_ratio: ``float``, optional, default: ``2.0``
        The ratio between synthetic doublets and observed cells.

    n_prin_comps: ``int``, optional, default: ``30``
        Number of principal components.

    robust: ``bool``, optional, default: ``False``
        If true, use 'arpack' instead of 'randomized' for large matrices (i.e. max(X.shape) > 500 and n_components < 0.8 * min(X.shape)).

    k: ``int``, optional, default: ``None``
        Number of observed cell neighbors. If None, k = round(0.5 * sqrt(number of observed cells)). Total neighbors k_adj = round(k * (1.0 + sim_doublet_ratio)).

    n_jobs: ``int``, optional, default: ``-1``
        Number of threads to use. If ``-1``, use all available threads.

    alpha: ``float``, optional, default: ``0.05``
        FDR significance level for the cluster-level Fisher's exact test.

    random_state: ``int``, optional, default: ``0``
        Random seed for reproducing results.

    plot_hist: ``str``, optional, default: ``dbl``
        If not None, plot diagnostic histograms using ``plot_hist`` as the prefix. If `channel_attr` is None, ``plot_hist.png`` is generated; otherwise, ``plot_hist.channel_name.png`` files are generated.

    Returns
    -------
    ``None``

    Update ``data.obs``:
        * ``data.obs['doublet_score']``: The calculated doublet scores.
        * ``data.obs['pred_dbl']``: Predicted singlet/doublet types.

    Update ``data.uns``:
        * ``data.uns['pred_dbl_cluster']``: Only generated if 'clust_attr' is not None. This is a dataframe with two columns, 'Cluster' and 'Qval'. Only clusters with significantly more doublets than expected will be recorded here.

    Examples
    --------
    >>> pg.infer_doublets(data, channel_attr = 'Channel', clust_attr = 'Annotation')
    """
    assert data.get_modality() == "rna"
    try:
        rawX = data.get_matrix("raw.X")
    except ValueError:
        raise ValueError("Cannot detect the raw count matrix raw.X; stop inferring doublets!")

    if_plot = plot_hist is not None
    if channel_attr is None:
        if data.shape[0] >= min_cell:
            fig = _run_scrublet(
                data,
                expected_doublet_rate=expected_doublet_rate,
                sim_doublet_ratio=sim_doublet_ratio,
                n_prin_comps=n_prin_comps,
                robust=robust,
                k=k,
                n_jobs=n_jobs,
                random_state=random_state,
                plot_hist=if_plot,
            )
            if if_plot:
                fig.savefig(f"{plot_hist}.png")
        else:
            logger.warning(
                f"Data has {data.shape[0]} < {min_cell} cells and thus doublet score calculation is skipped!"
            )
            data.obs["doublet_score"] = 0.0
            data.obs["pred_dbl"] = False
    else:
        from pandas.api.types import is_categorical_dtype
        from pegasus.tools import identify_robust_genes, log_norm, highly_variable_features

        assert is_categorical_dtype(data.obs[channel_attr])
        genome = data.get_genome()
        modality = data.get_modality()
        channels = data.obs[channel_attr].cat.categories

        dbl_score = np.zeros(data.shape[0], dtype=np.float32)
        pred_dbl = np.zeros(data.shape[0], dtype=np.bool_)
        thresholds = {}
        for channel in channels:
            # Generate a new unidata object for the channel
            idx = np.where(data.obs[channel_attr] == channel)[0]
            if idx.size >= min_cell:
                unidata = UnimodalData(
                    {"barcodekey": data.obs_names[idx]},
                    {"featurekey": data.var_names},
                    {"X": rawX[idx]},
                    {"genome": genome, "modality": modality},
                )
                # Identify robust genes, log-normalize the counts and select the top 2,000 highly variable features
                identify_robust_genes(unidata)
                log_norm(unidata)
                highly_variable_features(unidata)
                # Calculate doublet scores for this channel
                fig = _run_scrublet(
                    unidata,
                    name=channel,
                    expected_doublet_rate=expected_doublet_rate,
                    sim_doublet_ratio=sim_doublet_ratio,
                    n_prin_comps=n_prin_comps,
                    robust=robust,
                    k=k,
                    n_jobs=n_jobs,
                    random_state=random_state,
                    plot_hist=if_plot,
                )
                if if_plot:
                    fig.savefig(f"{plot_hist}.{channel}.png")

                dbl_score[idx] = unidata.obs["doublet_score"].values
                pred_dbl[idx] = unidata.obs["pred_dbl"].values
                thresholds[channel] = unidata.uns["doublet_threshold"]
            else:
                logger.warning(
                    f"Channel {channel} has {idx.size} < {min_cell} cells and thus doublet score calculation is skipped!"
                )

        data.obs["doublet_score"] = dbl_score
        data.obs["pred_dbl"] = pred_dbl
        data.uns["doublet_thresholds"] = thresholds

    if clust_attr is not None:
        data.uns["pred_dbl_cluster"] = _identify_doublets_fisher(
            data.obs[clust_attr].values, data.obs["pred_dbl"].values, alpha=alpha
        )

    logger.info('Doublets are predicted!')
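# --- Usage sketch (illustrative, not part of the library) ---------------------
# Calling infer_doublets and inspecting the outputs documented above. The
# attribute names 'Channel' and 'Annotation' are placeholders for whatever the
# dataset actually uses.
def _infer_doublets_example(data):
    import pegasus as pg

    pg.infer_doublets(data, channel_attr="Channel", clust_attr="Annotation", plot_hist="dbl")
    print(data.obs["doublet_score"].describe())  # per-cell doublet scores
    print(data.obs["pred_dbl"].value_counts())   # predicted singlets vs. doublets
    print(data.uns["pred_dbl_cluster"])          # present only because clust_attr was set
    print(data.uns["doublet_thresholds"])        # per-channel cutoffs (channel branch only)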
def infer_doublets(
    data: MultimodalData,
    channel_attr: Optional[str] = None,
    clust_attr: Optional[str] = None,
    raw_mat_key: Optional[str] = 'counts',
    min_cell: Optional[int] = 100,
    expected_doublet_rate: Optional[float] = None,
    sim_doublet_ratio: Optional[float] = 2.0,
    n_prin_comps: Optional[int] = 30,
    k: Optional[int] = None,
    n_jobs: Optional[int] = -1,
    alpha: Optional[float] = 0.05,
    random_state: Optional[int] = 0,
    plot_hist: Optional[str] = "sample",
    manual_correction: Optional[str] = None,
) -> None:
    """Infer doublets by first calculating Scrublet-like [Wolock18]_ doublet scores and then smartly determining an appropriate doublet score cutoff [Li20-2]_ .

    This function should be called after clustering if clust_attr is not None. In this case, we will test if each cluster is significantly enriched for doublets using Fisher's exact test.

    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Annotated data matrix with rows for cells and columns for genes.

    channel_attr: ``str``, optional, default: None
        Attribute indicating sample channels. If set, calculate scrublet-like doublet scores per channel.

    clust_attr: ``str``, optional, default: None
        Attribute indicating cluster labels. If set, estimate the proportion of doublets in each cluster and its statistical significance.

    raw_mat_key: ``str``, optional, default: ``counts``
        The key name of the raw count matrix.

    min_cell: ``int``, optional, default: 100
        Minimum number of cells per sample required to calculate doublet scores. For samples with fewer than 'min_cell' cells, doublet score calculation will be skipped.

    expected_doublet_rate: ``float``, optional, default: ``None``
        The expected doublet rate for the experiment. By default, calculate the expected rate based on the number of cells from the 10x multiplet rate table.

    sim_doublet_ratio: ``float``, optional, default: ``2.0``
        The ratio between synthetic doublets and observed cells.

    n_prin_comps: ``int``, optional, default: ``30``
        Number of principal components.

    k: ``int``, optional, default: ``None``
        Number of observed cell neighbors. If None, k = round(0.5 * sqrt(number of observed cells)). Total neighbors k_adj = round(k * (1.0 + sim_doublet_ratio)).

    n_jobs: ``int``, optional, default: ``-1``
        Number of threads to use. If ``-1``, use all physical CPU cores.

    alpha: ``float``, optional, default: ``0.05``
        FDR significance level for the cluster-level Fisher's exact test.

    random_state: ``int``, optional, default: ``0``
        Random seed for reproducing results.

    plot_hist: ``str``, optional, default: ``sample``
        If not None, plot diagnostic histograms using ``plot_hist`` as the prefix. If `channel_attr` is None, ``plot_hist.dbl.png`` is generated; otherwise, ``plot_hist.channel_name.dbl.png`` files are generated. Each figure consists of 4 panels showing histograms of doublet scores for observed cells (panel 1, density in log scale), simulated doublets (panel 2, density in log scale), a KDE plot (panel 3) and a signed curvature plot (panel 4) of log doublet scores for simulated doublets. Each plot contains two dashed lines. The red dashed line represents the theoretical cutoff (calculated based on the number of cells and the 10x doublet table) and the black dashed line represents the cutoff inferred from the data.

    manual_correction: ``str``, optional, default: ``None``
        Use human guidance to correct the doublet threshold for certain channels. This is a string representing a comma-separated list; each item in the list represents one sample, with the sample name and the correction guide separated by ':'. The only correction guide supported is 'peak', which means cut at the center of the peak. If only one sample is available, use '' as the sample name.

    Returns
    -------
    ``None``

    Update ``data.obs``:
        * ``data.obs['doublet_score']``: The calculated doublet scores.
        * ``data.obs['pred_dbl']``: Predicted singlet/doublet types.

    Update ``data.uns``:
        * ``data.uns['pred_dbl_cluster']``: Only generated if 'clust_attr' is not None. This is a dataframe with two columns, 'Cluster' and 'Qval'. Only clusters with significantly more doublets than expected will be recorded here.

    Examples
    --------
    >>> pg.infer_doublets(data, channel_attr = 'Channel', clust_attr = 'Annotation')
    """
    assert data.get_modality() == "rna"
    try:
        rawX = data.get_matrix(raw_mat_key)
    except ValueError:
        raise ValueError(f"Cannot detect the raw count matrix {raw_mat_key}; stop inferring doublets!")

    if_plot = plot_hist is not None

    mancor = {}
    if manual_correction is not None:
        for item in manual_correction.split(','):
            name, action = item.split(':')
            mancor[name] = action

    if channel_attr is None:
        if data.shape[0] >= min_cell:
            fig = _run_scrublet(
                data,
                raw_mat_key,
                expected_doublet_rate=expected_doublet_rate,
                sim_doublet_ratio=sim_doublet_ratio,
                n_prin_comps=n_prin_comps,
                k=k,
                n_jobs=n_jobs,
                random_state=random_state,
                plot_hist=if_plot,
                manual_correction=mancor.get('', None),
            )
            if if_plot:
                fig.savefig(f"{plot_hist}.dbl.png")
        else:
            logger.warning(
                f"Data has {data.shape[0]} < {min_cell} cells and thus doublet score calculation is skipped!"
            )
            data.obs["doublet_score"] = 0.0
            data.obs["pred_dbl"] = False
    else:
        from pandas.api.types import is_categorical_dtype
        from pegasus.tools import identify_robust_genes, log_norm, highly_variable_features

        assert is_categorical_dtype(data.obs[channel_attr])
        genome = data.get_genome()
        modality = data.get_modality()
        channels = data.obs[channel_attr].cat.categories

        dbl_score = np.zeros(data.shape[0], dtype=np.float32)
        pred_dbl = np.zeros(data.shape[0], dtype=np.bool_)
        thresholds = {}
        for channel in channels:
            # Generate a new unidata object for the channel
            idx = np.where(data.obs[channel_attr] == channel)[0]
            if idx.size >= min_cell:
                unidata = UnimodalData(
                    {"barcodekey": data.obs_names[idx]},
                    {"featurekey": data.var_names},
                    {"counts": rawX[idx]},
                    {"genome": genome, "modality": modality},
                    cur_matrix="counts",
                )
                # Identify robust genes, log-normalize the counts and select the top 2,000 highly variable features
                identify_robust_genes(unidata)
                log_norm(unidata)
                highly_variable_features(unidata)
                # Calculate doublet scores for this channel
                fig = _run_scrublet(
                    unidata,
                    raw_mat_key,
                    name=channel,
                    expected_doublet_rate=expected_doublet_rate,
                    sim_doublet_ratio=sim_doublet_ratio,
                    n_prin_comps=n_prin_comps,
                    k=k,
                    n_jobs=n_jobs,
                    random_state=random_state,
                    plot_hist=if_plot,
                    manual_correction=mancor.get(channel, None),
                )
                if if_plot:
                    fig.savefig(f"{plot_hist}.{channel}.dbl.png")

                dbl_score[idx] = unidata.obs["doublet_score"].values
                pred_dbl[idx] = unidata.obs["pred_dbl"].values
                thresholds[channel] = unidata.uns["doublet_threshold"]
            else:
                logger.warning(
                    f"Channel {channel} has {idx.size} < {min_cell} cells and thus doublet score calculation is skipped!"
                )

        data.obs["doublet_score"] = dbl_score
        data.obs["pred_dbl"] = pred_dbl
        data.uns["doublet_thresholds"] = thresholds

    if clust_attr is not None:
        data.uns["pred_dbl_cluster"] = _identify_doublets_fisher(
            data.obs[clust_attr].values, data.obs["pred_dbl"].values, alpha=alpha
        )

    logger.info('Doublets are predicted!')
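# --- Usage sketch (illustrative, not part of the library) ---------------------
# The `manual_correction` string is a comma-separated list of `sample:action`
# pairs; 'peak' is the only supported action. Channel names below are placeholders.
def _manual_correction_example(data):
    import pegasus as pg

    # Override the inferred cutoff for two channels, cutting at the peak center:
    pg.infer_doublets(
        data,
        channel_attr="Channel",
        clust_attr="Annotation",
        manual_correction="sampleA:peak,sampleB:peak",
    )
    # Single-sample form (no channel_attr): the empty string names the sample, i.e.
    # pg.infer_doublets(data, manual_correction=":peak")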