Example #1
def multinomial(table: biom.Table,
                metadata: Metadata,
                formula: str,
                training_column: str = None,
                num_random_test_examples: int = 10,
                epoch: int = 10,
                batch_size: int = 5,
                beta_prior: float = 1,
                learning_rate: float = 0.1,
                clipnorm: float = 10,
                min_sample_count: int = 10,
                min_feature_count: int = 10,
                summary_interval: int = 60) -> (pd.DataFrame):

    # load metadata and tables
    metadata = metadata.to_dataframe()

    # match them
    table, metadata, design = match_and_filter(table, metadata, formula,
                                               training_column,
                                               num_random_test_examples,
                                               min_sample_count,
                                               min_feature_count)

    # convert to dense representation
    dense_table = table.to_dataframe().to_dense().T

    # split up training and testing
    trainX, testX, trainY, testY = split_training(dense_table, metadata,
                                                  design, training_column,
                                                  num_random_test_examples)

    model = MultRegression(learning_rate=learning_rate,
                           clipnorm=clipnorm,
                           beta_mean=beta_prior,
                           batch_size=batch_size,
                           save_path=None)
    with tf.Graph().as_default(), tf.Session() as session:
        model(session, trainX, trainY, testX, testY)

        model.fit(epoch=epoch,
                  summary_interval=summary_interval,
                  checkpoint_interval=None)

    md_ids = np.array(design.columns)
    obs_ids = table.ids(axis='observation')

    beta_ = clr(clr_inv(np.hstack((np.zeros((model.p, 1)), model.B))))

    beta_ = pd.DataFrame(
        beta_.T,
        columns=md_ids,
        index=obs_ids,
    )
    return beta_
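A minimal invocation sketch for the multinomial function above. The toy table, sample/feature IDs, and the 'treatment' covariate are made up for illustration, and the call assumes the function and its songbird/TensorFlow 1.x dependencies are importable in the current environment.

import numpy as np
import pandas as pd
import biom
from qiime2 import Metadata

# Toy inputs: 8 features x 20 samples of counts plus one categorical covariate.
rng = np.random.RandomState(0)
sample_ids = ['S%d' % i for i in range(20)]
feature_ids = ['F%d' % i for i in range(8)]
table = biom.Table(rng.poisson(20, size=(8, 20)), feature_ids, sample_ids)
metadata = Metadata(pd.DataFrame({'treatment': rng.choice(['a', 'b'], size=20)},
                                 index=pd.Index(sample_ids, name='id')))

# Returns one row per feature and one column per design-matrix term.
differentials = multinomial(table, metadata, formula='treatment',
                            epoch=5, batch_size=3)
print(differentials.head())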
Example #2
def ols_regression(output_dir: str, table: pd.DataFrame, tree: skbio.TreeNode,
                   metadata: Metadata, formula: str) -> None:

    if np.any(table.var(axis=0) == 0):
        message = ('Detected zero variance balances - '
                   'double check your table for unobserved features.')
        raise UserWarning(message)

    res = ols(table=table, metadata=metadata.to_dataframe(), formula=formula)
    res.fit()

    ols_summary(output_dir, res, tree)
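A usage sketch for ols_regression. The balance table, the tree (whose internal node names must match the balance columns), the 'ph' covariate, and the output directory are all placeholders; inside QIIME 2 the framework normally supplies output_dir for visualizers.

import os
import numpy as np
import pandas as pd
import skbio
from qiime2 import Metadata

# Toy balances: 10 samples x 2 balances named after the tree's internal nodes.
sample_ids = ['S%d' % i for i in range(10)]
balances = pd.DataFrame(np.random.randn(10, 2), columns=['y0', 'y1'],
                        index=sample_ids)
tree = skbio.TreeNode.read(['((a,b)y1,c)y0;'])
metadata = Metadata(pd.DataFrame({'ph': np.linspace(5.0, 8.0, 10)},
                                 index=pd.Index(sample_ids, name='id')))

os.makedirs('ols-summary', exist_ok=True)
ols_regression('ols-summary', balances, tree, metadata, formula='ph')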
Example #3
def paired_omics(microbes: biom.Table,
                 metabolites: biom.Table,
                 metadata: Metadata = None,
                 training_column: str = None,
                 num_testing_examples: int = 5,
                 min_feature_count: int = 10,
                 epochs: int = 100,
                 batch_size: int = 50,
                 latent_dim: int = 3,
                 input_prior: float = 1,
                 output_prior: float = 1,
                 learning_rate: float = 1e-3,
                 equalize_biplot: bool = False,
                 arm_the_gpu: bool = False,
                 summary_interval: int = 60) -> (
                     pd.DataFrame, OrdinationResults, qiime2.Metadata
                 ):

    if metadata is not None:
        metadata = metadata.to_dataframe()

    if arm_the_gpu:
        # pick out the first GPU
        device_name = '/device:GPU:0'
    else:
        device_name = '/cpu:0'

    # Note: there are a couple of biom -> pandas conversions taking
    # place here.  This is currently done on purpose, since we
    # haven't figured out how to handle sparse matrix multiplication
    # in the context of this algorithm.  That is a future consideration.
    res = split_tables(
        microbes, metabolites,
        metadata=metadata, training_column=training_column,
        num_test=num_testing_examples,
        min_samples=min_feature_count)

    (train_microbes_df, test_microbes_df,
     train_metabolites_df, test_metabolites_df) = res

    train_microbes_coo = coo_matrix(train_microbes_df.values)
    test_microbes_coo = coo_matrix(test_microbes_df.values)

    with tf.Graph().as_default(), tf.Session() as session:
        model = MMvec(
            latent_dim=latent_dim,
            u_scale=input_prior, v_scale=output_prior,
            batch_size=batch_size,
            device_name=device_name,
            learning_rate=learning_rate)
        model(session,
              train_microbes_coo, train_metabolites_df.values,
              test_microbes_coo, test_metabolites_df.values)

        loss, cv = model.fit(epoch=epochs, summary_interval=summary_interval)
        ranks = pd.DataFrame(model.ranks(), index=train_microbes_df.columns,
                             columns=train_metabolites_df.columns)
        if latent_dim > 0:
            u, s, v = svds(ranks - ranks.mean(axis=0), k=latent_dim)
        else:
            # fake it until you make it
            u, s, v = svds(ranks - ranks.mean(axis=0), k=1)

        ranks = ranks.T
        ranks.index.name = 'featureid'
        s = s[::-1]
        u = u[:, ::-1]
        v = v[::-1, :]
        if equalize_biplot:
            microbe_embed = u @ np.sqrt(np.diag(s))
            metabolite_embed = v.T @ np.sqrt(np.diag(s))
        else:
            microbe_embed = u @ np.diag(s)
            metabolite_embed = v.T

        pc_ids = ['PC%d' % i for i in range(microbe_embed.shape[1])]
        features = pd.DataFrame(
            microbe_embed, columns=pc_ids,
            index=train_microbes_df.columns)
        samples = pd.DataFrame(
            metabolite_embed, columns=pc_ids,
            index=train_metabolites_df.columns)
        short_method_name = 'mmvec biplot'
        long_method_name = 'Multiomics mmvec biplot'
        eigvals = pd.Series(s, index=pc_ids)
        proportion_explained = pd.Series(s**2 / np.sum(s**2), index=pc_ids)
        biplot = OrdinationResults(
            short_method_name, long_method_name, eigvals,
            samples=samples, features=features,
            proportion_explained=proportion_explained)

        its = np.arange(len(loss))
        convergence_stats = pd.DataFrame(
            {
                'loss': loss,
                'cross-validation': cv,
                'iteration': its
            }
        )

        convergence_stats.index.name = 'id'
        convergence_stats.index = convergence_stats.index.astype(str)

        # make sure the summary columns have plain numeric dtypes
        convergence_stats['loss'] = convergence_stats['loss'].astype(float)
        convergence_stats['cross-validation'] = (
            convergence_stats['cross-validation'].astype(float))
        convergence_stats['iteration'] = (
            convergence_stats['iteration'].astype(int))

        return ranks, biplot, qiime2.Metadata(convergence_stats)
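A sketch of calling this paired_omics variant on toy paired tables. The IDs, counts, and the reduced epochs/latent_dim settings are illustrative only, and the call assumes the surrounding mmvec/TensorFlow 1.x dependencies are available.

import numpy as np
import biom

# Toy paired tables over 30 shared samples: 12 microbes and 15 metabolites.
rng = np.random.RandomState(42)
samples = ['S%d' % i for i in range(30)]
microbes = biom.Table(rng.poisson(10, size=(12, 30)),
                      ['OTU%d' % i for i in range(12)], samples)
metabolites = biom.Table(rng.poisson(10, size=(15, 30)),
                         ['m%d' % i for i in range(15)], samples)

ranks, biplot, stats = paired_omics(microbes, metabolites,
                                    epochs=10, latent_dim=2,
                                    summary_interval=1)
print(ranks.shape)                  # metabolites x microbes (ranks are transposed)
print(stats.to_dataframe().tail())  # per-iteration loss and cross-validation error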
Example #4
def paired_omics(
        microbes: biom.Table,
        metabolites: biom.Table,
        metadata: Metadata = None,
        training_column: str = None,
        num_testing_examples: int = 5,
        min_feature_count: int = 10,
        epochs: int = 100,
        batch_size: int = 50,
        latent_dim: int = 3,
        input_prior: float = 1,
        output_prior: float = 1,
        learning_rate: float = 0.001,
        summary_interval: int = 60) -> (pd.DataFrame, OrdinationResults):

    if metadata is not None:
        metadata = metadata.to_dataframe()

    # Note: there are a couple of biom -> pandas conversions taking
    # place here.  This is currently done on purpose, since we
    # haven't figured out how to handle sparse matrix multiplication
    # in the context of this algorithm.  That is a future consideration.
    res = split_tables(microbes,
                       metabolites,
                       metadata=metadata,
                       training_column=training_column,
                       num_test=num_testing_examples,
                       min_samples=min_feature_count)

    (train_microbes_df, test_microbes_df, train_metabolites_df,
     test_metabolites_df) = res

    train_microbes_coo = coo_matrix(train_microbes_df.values)
    test_microbes_coo = coo_matrix(test_microbes_df.values)

    with tf.Graph().as_default(), tf.Session() as session:
        model = MMvec(latent_dim=latent_dim,
                      u_scale=input_prior,
                      v_scale=output_prior,
                      learning_rate=learning_rate)
        model(session, train_microbes_coo, train_metabolites_df.values,
              test_microbes_coo, test_metabolites_df.values)

        loss, cv = model.fit(epoch=epochs, summary_interval=summary_interval)

        U, V = model.U, model.V

        U_ = np.hstack((np.ones(
            (model.U.shape[0], 1)), model.Ubias.reshape(-1, 1), U))
        V_ = np.vstack(
            (model.Vbias.reshape(1, -1), np.ones((1, model.V.shape[1])), V))

        ranks = pd.DataFrame(np.hstack((np.zeros(
            (model.U.shape[0], 1)), U_ @ V_)),
                             index=train_microbes_df.columns,
                             columns=train_metabolites_df.columns)

        ranks = ranks - ranks.mean(axis=1).values.reshape(-1, 1)
        ranks = ranks - ranks.mean(axis=0)
        u, s, v = svds(ranks, k=latent_dim)
        s = s[::-1]
        u = u[:, ::-1]
        v = v[::-1, :]
        microbe_embed = u @ np.diag(s)
        metabolite_embed = v.T

        pc_ids = ['PC%d' % i for i in range(microbe_embed.shape[1])]
        features = pd.DataFrame(microbe_embed,
                                columns=pc_ids,
                                index=train_microbes_df.columns)
        samples = pd.DataFrame(metabolite_embed,
                               columns=pc_ids,
                               index=train_metabolites_df.columns)
        short_method_name = 'mmvec biplot'
        long_method_name = 'Multiomics mmvec biplot'
        eigvals = pd.Series(s, index=pc_ids)
        proportion_explained = pd.Series(s**2 / np.sum(s**2), index=pc_ids)
        biplot = OrdinationResults(short_method_name,
                                   long_method_name,
                                   eigvals,
                                   samples=samples,
                                   features=features,
                                   proportion_explained=proportion_explained)

        return ranks, biplot
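The same toy setup works for this older paired_omics variant; the differences are that only (ranks, biplot) are returned and the ranks keep microbes on the rows. All names and shapes below are again illustrative.

import numpy as np
import biom

rng = np.random.RandomState(42)
samples = ['S%d' % i for i in range(30)]
microbes = biom.Table(rng.poisson(10, size=(12, 30)),
                      ['OTU%d' % i for i in range(12)], samples)
metabolites = biom.Table(rng.poisson(10, size=(15, 30)),
                         ['m%d' % i for i in range(15)], samples)

ranks, biplot = paired_omics(microbes, metabolites,
                             epochs=10, latent_dim=2, summary_interval=1)
print(ranks.shape)  # (12, 15): microbes x metabolites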
Example #5
def _load_metadata(metadata: Metadata = None):
    if not metadata:
        raise ValueError('Metadata parameter not provided!')
    metadata = metadata.to_dataframe()
    return metadata
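A small sketch of the guard above: a real qiime2.Metadata object passes through as a DataFrame, while omitting the argument raises ValueError. The column name and sample IDs are placeholders.

import pandas as pd
from qiime2 import Metadata

md = Metadata(pd.DataFrame({'group': ['a', 'b']},
                           index=pd.Index(['S1', 'S2'], name='id')))
df = _load_metadata(md)   # plain pandas DataFrame
print(df)

try:
    _load_metadata(None)
except ValueError as e:
    print(e)              # 'Metadata parameter not provided!'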
Example #6
def multinomial(table: biom.Table,
                metadata: Metadata,
                formula: str,
                training_column: str = DEFAULTS["training-column"],
                num_random_test_examples: int = (
                    DEFAULTS["num-random-test-examples"]
                ),
                epochs: int = DEFAULTS["epochs"],
                batch_size: int = DEFAULTS["batch-size"],
                differential_prior: float = DEFAULTS["differential-prior"],
                learning_rate: float = DEFAULTS["learning-rate"],
                clipnorm: float = DEFAULTS["clipnorm"],
                min_sample_count: int = DEFAULTS["min-sample-count"],
                min_feature_count: int = DEFAULTS["min-feature-count"],
                summary_interval: int = DEFAULTS["summary-interval"],
                random_seed: int = DEFAULTS["random-seed"],
                ) -> (
                    pd.DataFrame, qiime2.Metadata, skbio.OrdinationResults
                ):

    # load metadata and tables
    metadata = metadata.to_dataframe()
    # match them
    table, metadata, design = match_and_filter(
        table, metadata,
        formula, min_sample_count, min_feature_count
    )

    # convert to dense representation
    dense_table = table.to_dataframe().to_dense().T

    # split up training and testing
    trainX, testX, trainY, testY = split_training(
        dense_table, metadata, design,
        training_column, num_random_test_examples,
        seed=random_seed,
    )

    model = MultRegression(learning_rate=learning_rate, clipnorm=clipnorm,
                           beta_mean=differential_prior,
                           batch_size=batch_size,
                           save_path=None)
    with tf.Graph().as_default(), tf.Session() as session:
        tf.set_random_seed(random_seed)
        model(session, trainX, trainY, testX, testY)

        loss, cv, its = model.fit(
            epochs=epochs,
            summary_interval=summary_interval,
            checkpoint_interval=None)

    md_ids = np.array(design.columns)
    obs_ids = table.ids(axis='observation')

    beta_ = np.hstack((np.zeros((model.p, 1)), model.B))
    beta_ = beta_ - beta_.mean(axis=1).reshape(-1, 1)

    differentials = pd.DataFrame(
        beta_.T, columns=md_ids, index=obs_ids,
    )
    differentials.index.name = 'featureid'

    convergence_stats = pd.DataFrame(
        {
            'loss': loss,
            'cross-validation': cv,
            'iteration': its
        }
    )

    convergence_stats.index.name = 'id'
    convergence_stats.index = convergence_stats.index.astype(str)

    # make sure the summary columns have plain numeric dtypes
    convergence_stats['loss'] = convergence_stats['loss'].astype(float)
    convergence_stats['cross-validation'] = (
        convergence_stats['cross-validation'].astype(float))
    convergence_stats['iteration'] = (
        convergence_stats['iteration'].astype(int))

    # regression biplot
    if differentials.shape[-1] > 1:
        u, s, v = np.linalg.svd(differentials)
        pc_ids = ['PC%d' % i for i in range(len(s))]
        samples = pd.DataFrame(u[:, :len(s)] @ np.diag(s),
                               columns=pc_ids, index=differentials.index)
        features = pd.DataFrame(v.T[:, :len(s)],
                                columns=pc_ids, index=differentials.columns)
        short_method_name = 'regression_biplot'
        long_method_name = 'Multinomial regression biplot'
        eigvals = pd.Series(s, index=pc_ids)
        proportion_explained = eigvals**2 / (eigvals**2).sum()
        biplot = OrdinationResults(
            short_method_name, long_method_name, eigvals,
            samples=samples, features=features,
            proportion_explained=proportion_explained)
    else:
        # this is to handle the edge case with only intercepts
        biplot = OrdinationResults('', '', pd.Series(), pd.DataFrame())

    return differentials, qiime2.Metadata(convergence_stats), biplot
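A sketch of the newer multinomial signature above, which also returns per-iteration convergence statistics (as qiime2.Metadata) and a regression biplot. The toy data mirrors the first sketch; the epochs, batch_size, and random_seed values are arbitrary.

import numpy as np
import pandas as pd
import biom
from qiime2 import Metadata

rng = np.random.RandomState(0)
sample_ids = ['S%d' % i for i in range(20)]
feature_ids = ['F%d' % i for i in range(8)]
table = biom.Table(rng.poisson(20, size=(8, 20)), feature_ids, sample_ids)
metadata = Metadata(pd.DataFrame({'treatment': rng.choice(['a', 'b'], size=20)},
                                 index=pd.Index(sample_ids, name='id')))

differentials, stats, biplot = multinomial(table, metadata,
                                           formula='treatment',
                                           epochs=5, batch_size=3,
                                           random_seed=0)
print(differentials.head())
print(stats.to_dataframe().tail())
print(biplot.eigvals)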
Example #7
def _2(ff: MetadataFormat) -> Metadata:
    path = str(ff) + '/metadata.tsv'
    return Metadata.load(path)
Example #8
def _1(data: Metadata) -> MetadataFormat:
    ff = MetadataFormat()
    path = str(ff) + '/metadata.tsv'
    data.save(path)
    return ff
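A round-trip sketch for the two transformers above: _1 serializes a qiime2.Metadata object into a fresh MetadataFormat, and _2 reads it back. It assumes MetadataFormat is a plugin-defined directory format (so str(ff) is a writable directory path) and that both transformers are in scope.

import pandas as pd
from qiime2 import Metadata

md = Metadata(pd.DataFrame({'group': ['a', 'b']},
                           index=pd.Index(['S1', 'S2'], name='id')))

ff = _1(md)               # writes metadata.tsv inside the format's directory
roundtripped = _2(ff)     # loads it back as qiime2.Metadata
print(roundtripped.to_dataframe())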