Example No. 1
import numpy as np
from Scribe.information_estimators import mi  # kNN-based MI estimator from Scribe-py

def pool_mi(x, y, k):
    # keep only positions where both vectors are finite, then reshape each
    # value into a one-element list, the input format expected by Scribe's mi()
    mask = np.logical_and(np.isfinite(x), np.isfinite(y))
    x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]

    return mi(x, y, k)
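A minimal usage sketch (not part of the original example), assuming Scribe-py is installed so that `mi` can be imported; the vectors here are synthetic and only illustrate how non-finite entries are masked before the kNN-based MI estimate:

x = np.random.rand(200)
y = x + 0.1 * np.random.rand(200)  # y is strongly related to x
x[::25] = np.nan                   # inject some missing values

# pool_mi drops the NaN positions and reshapes the rest for Scribe's mi()
print(pool_mi(x, y, 5))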
Example No. 2
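# Module-level dependencies assumed by this excerpt (not shown in the snippet):
# numpy as np, pandas as pd, tqdm, itertools, ThreadPool from multiprocessing.pool,
# isspmatrix / issparse from scipy.sparse, and the helpers flatten and
# einsum_correlation from the surrounding package.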
def coexp_measure_mat(
    adata,
    TFs=None,
    Targets=None,
    guide_keys=None,
    t0_key="spliced",
    t1_key="velocity",
    normalize=True,
    drop_zero_cells=True,
    skip_mi=True,
    cores=1,
    copy=False,
):
    """Infer causal networks with dynamics-coupled single cells measurements.
    Network inference is a insanely challenging problem which has a long history and that none of the existing
    algorithms work well. However, it's quite possible that one or more of the algorithms could work if only they were
    given enough data. Single-cell RNA-seq is exciting because it provides a ton of data. Somewhat surprisingly, just
    having a lot of single-cell RNA-seq data won't make causal inference work well. We need a fundamentally better type
    of measurement that couples information across cells and across time points. Experimental improvements are coming
    now, and whether they are sufficient to power methods like Scribe is important future work. For example, the recent
    developed computational algorithm (La Manno et al. 2018) estimates the levels of new (unspliced) versus mature
    (spliced) transcripts from single-cell RNA-seq data for free. Moreover, exciting experimental approaches, like
    single cell SLAM-seq methods (Hendriks et al. 2018; Erhard et al. 2019; Cao, Zhou, et al. 2019) are recently
    developed that measures the transcriptome of two time points of the same cells. Datasets generated from those
    methods will provide improvements of causal network inference as we comprehensively demonstrated from the manuscript
    . This function take advantages of those datasets to infer the causal networks. We note that those technological
    advance may be still not sufficient, radically different methods, for example something like highly multiplexed live
    imaging that can record many genes may be needed.

    Arguments
    ---------
    adata: `anndata`
        Annotated data matrix.
    TFs: `List` or `None` (default: None)
        The list of transcription factors that will be used for causal network inference.
    Targets: `List` or `None` (default: None)
        The list of target genes that will be used for causal network inference.
    guide_keys: `List` (default: None)
        The key of the CRISPR-guides, stored as a column in the .obs attribute. This argument is useful
        for identifying the knockout or knockin genes for a perturb-seq experiment. Currently not used.
    t0_key: `str` (default: spliced)
        The key corresponding to the transcriptome of the initial time point, for example spliced RNAs from RNA
        velocity data or old RNA from scSLAM-seq data.
    t1_key: `str` (default: velocity)
        The key corresponding to the transcriptome of the next time point, for example unspliced RNAs (or the
        estimated velocity, see Fig 6 of the Scribe preprint) from RNA velocity data or new RNA from scSLAM-seq data.
    normalize: `bool`
        Whether to scale the expression or velocity values into the 0 to 1 range before calculating causal networks.
    drop_zero_cells: `bool` (Default: True)
        Whether to drop cells that have zero expression for either the potential regulator or the potential target.
        This can sharpen the relationship between potential regulators and targets and speed up the calculation, but
        at the risk of ignoring strong inhibitory effects from certain regulators on their targets.
    skip_mi: `bool` (default: True)
        Whether to skip the time-consuming mutual information calculation and only compute the Pearson correlation.
    cores: `int` (default: 1)
        Number of cores used for the mutual information calculation. If cores is greater than 1, a thread pool is
        used to parallelize the calculation across targets.
    copy: `bool`
        Whether to return a copy of the adata object or just update adata in place.
    Returns
    ---------
        An updated AnnData object with the inferred causal network stored as a matrix under the key `pearson` (and,
        when `skip_mi` is False, the mutual information matrix under `mi`) in the `.uns` attribute.
    """

    try:
        from Scribe.information_estimators import mi
    except ImportError:
        raise ImportError(
            "You need to install the package `Scribe`."
            "Please install from https://github.com/aristoteleo/Scribe-py."
            "Also check our paper: "
            "https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
        )

    if TFs is None:
        TFs = adata.var_names.tolist()
    else:
        TFs = adata.var_names.intersection(TFs).tolist()
        if len(TFs) == 0:
            raise Exception(
                "The adata object has no gene names from .var_name that intersects with the TFs list you provided"
            )

    if Targets is None:
        Targets = adata.var_names.tolist()
    else:
        Targets = adata.var_names.intersection(Targets).tolist()
        if len(Targets) == 0:
            raise Exception(
                "The adata object has no gene names from .var_name that intersect with the Targets list you provided"
            )

    if guide_keys is not None:
        guides = np.unique(adata.obs[guide_keys].tolist())
        guides = np.setdiff1d(guides, ["*", "nan", "neg"])

        idx_var = [vn in guides for vn in adata.var_names]
        idx_var = np.argwhere(idx_var)
        guides = adata.var_names.values[idx_var.flatten()].tolist()

    # support sparse matrix:
    genes = TFs + Targets
    genes = np.unique(genes)
    t0_df = (pd.DataFrame(adata[:, genes].layers[t0_key].todense(),
                          index=adata.obs_names,
                          columns=genes) if isspmatrix(adata.layers[t0_key])
             else pd.DataFrame(adata[:, genes].layers[t0_key],
                               index=adata.obs_names,
                               columns=genes))

    t1_df = (pd.DataFrame(adata[:, genes].layers[t1_key].todense(),
                          index=adata.obs_names,
                          columns=genes) if isspmatrix(adata.layers[t1_key])
             else pd.DataFrame(adata[:, genes].layers[t1_key],
                               index=adata.obs_names,
                               columns=genes))

    t1_df[pd.isna(t1_df)] = 0  # set NaN value to 0

    if normalize:
        t0_df = (t0_df - t0_df.min()) / (t0_df.max() - t0_df.min())
        t1_df = (t1_df - t1_df.min()) / (t1_df.max() - t1_df.min())

    # rows correspond to potential regulators (TFs), columns to potential targets
    pearson_mat = np.zeros((len(TFs), len(Targets)))
    mi_mat = np.zeros((len(TFs), len(Targets)))

    for g_a_ind, g_a in tqdm(
        enumerate(TFs),
        desc="Calculating Pearson correlation (and mutual information) from each TF to each potential target",
    ):
        for g_b_ind, g_b in enumerate(Targets):
            x, y = t0_df.loc[:, g_a].values, t1_df.loc[:, g_b].values
            x, y = flatten(x), flatten(y)

            mask = np.logical_and(np.isfinite(x), np.isfinite(y))
            pearson_mat[g_a_ind, g_b_ind] = einsum_correlation(
                x[None, mask], y[mask], type="pearson"
            )[0]
            x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]

            if not skip_mi and cores == 1:
                k = min(5, int(adata.n_obs / 5 + 1))
                mi_mat[g_a_ind, g_b_ind] = mi(x, y, k=k)

        if not skip_mi and cores > 1:
            k = min(5, int(adata.n_obs / 5 + 1))

            def pool_mi(x, y, k):
                # mask out non-finite entries and reshape each value into a
                # one-element list, the input format expected by Scribe's mi()
                mask = np.logical_and(np.isfinite(x), np.isfinite(y))
                x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]

                return mi(x, y, k)

            # pair the current regulator's t0 expression with every target's t1 expression
            x = flatten(t0_df.loc[:, g_a].values)
            X = np.repeat(x[None, :], len(Targets), axis=0)
            Y = t1_df.loc[:, Targets].values.T

            pool = ThreadPool(cores)
            res = pool.starmap(pool_mi, zip(X, Y, itertools.repeat(k)))
            pool.close()
            pool.join()
            mi_mat[g_a_ind, :] = res

    adata.uns["pearson"] = pd.DataFrame(pearson_mat,
                                        index=genes,
                                        columns=genes)
    if not skip_mi:
        adata.uns["mi"] = pd.DataFrame(mi_mat, index=genes, columns=genes)

    return adata if copy else None
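A minimal usage sketch (not from the source), assuming the `anndata` and `Scribe` packages are installed and that the excerpt's module-level dependencies (see the comment above the function) are in place; the AnnData object and its `spliced`/`velocity` layers are synthetic:

import anndata
import numpy as np

n_cells, genes = 100, ["g0", "g1", "g2", "g3"]
spliced = np.random.poisson(5, size=(n_cells, len(genes))).astype(float)
velocity = np.random.normal(0, 1, size=(n_cells, len(genes)))

adata = anndata.AnnData(X=spliced.copy(), layers={"spliced": spliced, "velocity": velocity})
adata.var_names = genes

# Pearson correlation only (skip_mi defaults to True); the TF-by-target matrix
# is stored in adata.uns["pearson"].
coexp_measure_mat(adata, TFs=["g0", "g1"], Targets=genes)
print(adata.uns["pearson"])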
Example No. 3
def mutual_inform(adata, genes, layer_x, layer_y, cores=1):
    """Calculate mutual information (as well as pearson correlation) of genes between two different layers.

    Parameters
    ----------
        adata: :class:`~anndata.AnnData`.
            adata object that will be used for mutual information calculation.
        genes: `List`
            Gene names from the adata object that will be used for the mutual information calculation.
        layer_x: `str`
            The first layer key from the adata object that will be used for the mutual information calculation.
        layer_y: `str`
            The second layer key from the adata object that will be used for the mutual information calculation.
        cores: `int` (default: 1)
            Number of cores used for the MI calculation. If cores is set to be greater than 1, a thread pool will be
            used to parallelize the calculation.

    Returns
    -------
        An updated adata object with two new columns (`mi`, `pearson`) in `.var` that contain the mutual information
        and Pearson correlation of the input genes.
    """

    try:
        import Scribe
    except ImportError:
        raise ImportError(
            "You need to install the package `Scribe`."
            "Plelease install from https://github.com/aristoteleo/Scribe-py."
            "Also check our paper: "
            "https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
        )

    from Scribe.information_estimators import mi

    adata.var['mi'], adata.var['pearson'] = np.nan, np.nan

    mi_vec, pearson = np.zeros(len(genes)), np.zeros(len(genes))
    X, Y = adata[:, genes].layers[layer_x], adata[:, genes].layers[layer_y]
    X, Y = X.A if issparse(X) else X, Y.A if issparse(Y) else Y
    X, Y = X.T, Y.T  # transpose to genes x cells so that X[i] / Y[i] select a gene

    k = min(5, int(adata.n_obs / 5 + 1))
    if cores == 1:
        for i in tqdm(
            range(len(genes)),
            desc=f"calculating mutual information between {layer_x} and {layer_y} data",
        ):
            x, y = X[i], Y[i]
            mask = np.logical_and(np.isfinite(x), np.isfinite(y))
            pearson[i] = einsum_correlation(x[None, mask], y[mask], type="pearson")[0]
            x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]

            mi_vec[i] = mi(x, y, k=k)
    else:
        for i in tqdm(
            range(len(genes)),
            desc=f"calculating mutual information between {layer_x} and {layer_y} data",
        ):
            x, y = X[i], Y[i]
            mask = np.logical_and(np.isfinite(x), np.isfinite(y))
            pearson[i] = einsum_correlation(x[None, mask], y[mask], type="pearson")[0]

        def pool_mi(x, y, k):
            mask = np.logical_and(np.isfinite(x), np.isfinite(y))
            x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]

            return mi(x, y, k)

        pool = ThreadPool(cores)
        res = pool.starmap(pool_mi, zip(X, Y, itertools.repeat(k)))
        pool.close()
        pool.join()
        mi_vec = np.array(res)

    adata.var.loc[genes, 'mi'] = mi_vec
    adata.var.loc[genes, 'pearson'] = pearson
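A minimal usage sketch (not from the source), again on a synthetic AnnData object; it assumes `anndata` and `Scribe` are installed and that the helpers used by the excerpt (e.g. `einsum_correlation`) are importable in its module:

import anndata
import numpy as np

genes = ["a", "b", "c"]
x_layer = np.random.rand(200, len(genes))
y_layer = x_layer + 0.05 * np.random.rand(200, len(genes))  # strongly coupled layers

adata = anndata.AnnData(X=x_layer.copy(), layers={"spliced": x_layer, "unspliced": y_layer})
adata.var_names = genes

# single-core path: per-gene Pearson correlation and kNN-based MI between the two layers
mutual_inform(adata, genes, layer_x="spliced", layer_y="unspliced", cores=1)
print(adata.var[["mi", "pearson"]])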
Example No. 4
def coexp_measure(adata, genes, layer_x, layer_y, cores=1, skip_mi=True):
    """Calculate co-expression measures, including mutual information (MI), pearson correlation, etc. of genes between
    two different layers.

    Parameters
    ----------
        adata: :class:`~anndata.AnnData`.
            adata object that will be used for mutual information calculation.
        genes: `List`
            Gene names from the adata object that will be used for mutual information calculation.
        layer_x: `str`
            The first key of the layer from the adata object that will be used for mutual information calculation.
        layer_y: `str`
            The second key of the layer from the adata object that will be used for mutual information calculation.
        cores: `int` (default: 1)
            Number of cores used for the MI calculation. If cores is set to be greater than 1, a thread pool will be
            used to parallelize the calculation. `cores` only applies to the MI calculation.
        skip_mi: `bool` (default: `True`)
            Whether to skip the mutual information calculation step which is time-consuming.

    Returns
    -------
        An updated adata object with a new `pearson` column in `.var` (and, if `skip_mi` is False, an `mi` column)
        that contains the Pearson correlation (and mutual information) of the input genes.
    """

    try:
        from Scribe.information_estimators import mi
    except ImportError:
        raise ImportError(
            "You need to install the package `Scribe`."
            "Plelease install from https://github.com/aristoteleo/Scribe-py."
            "Also check our paper: "
            "https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
        )

    adata.var["mi"], adata.var["pearson"] = np.nan, np.nan

    if not skip_mi:
        mi_vec = np.zeros(len(genes))
    pearson = np.zeros(len(genes))

    X, Y = adata[:, genes].layers[layer_x], adata[:, genes].layers[layer_y]
    X, Y = X.A if issparse(X) else X, Y.A if issparse(Y) else Y
    X, Y = X.T, Y.T  # transpose to genes x cells so that X[i] / Y[i] select a gene

    k = min(5, int(adata.n_obs / 5 + 1))
    if cores == 1:
        for i in tqdm(
            range(len(genes)),
            desc=f"calculating mutual information between {layer_x} and {layer_y} data",
        ):
            x, y = X[i], Y[i]
            mask = np.logical_and(np.isfinite(x), np.isfinite(y))
            pearson[i] = einsum_correlation(x[None, mask], y[mask], type="pearson")[0]
            x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]

            if not skip_mi:
                mi_vec[i] = mi(x, y, k=k)
    else:
        for i in tqdm(
            range(len(genes)),
            desc=f"calculating mutual information between {layer_x} and {layer_y} data",
        ):
            x, y = X[i], Y[i]
            mask = np.logical_and(np.isfinite(x), np.isfinite(y))
            pearson[i] = einsum_correlation(x[None, mask], y[mask], type="pearson")[0]

        if not skip_mi:

            def pool_mi(x, y, k):
                mask = np.logical_and(np.isfinite(x), np.isfinite(y))
                x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]

                return mi(x, y, k)

            pool = ThreadPool(cores)
            res = pool.starmap(pool_mi, zip(X, Y, itertools.repeat(k)))
            pool.close()
            pool.join()
            mi_vec = np.array(res)

    if not skip_mi:
        adata.var.loc[genes, "mi"] = mi_vec
    adata.var.loc[genes, "pearson"] = pearson