Example #1
    def __init__(self, *args, **kwargs):

        super(self.__class__, self).__init__(*args, **kwargs)
        dtype = tf.float32

        # All parameters that are returned for analysis
        self.param_names = ["alpha", "beta", "concentration", "prediction"]

        # Model definition
        def define_model(x, n_total, K):
            N, D = x.shape
            dtype = tf.float32

            alpha = ed.Normal(loc=tf.zeros([K], dtype=dtype),
                              scale=tf.ones([K], dtype=dtype),
                              name="alpha")
            beta = ed.Normal(loc=tf.zeros([D, K], dtype=dtype),
                             scale=tf.ones([D, K], dtype=dtype),
                             name="beta")

            concentration_ = tf.exp(alpha + tf.matmul(x, beta))

            # Likelihood
            predictions = ed.DirichletMultinomial(n_total,
                                                  concentration=concentration_,
                                                  name="predictions")
            return predictions

        # Log joint probability function of the model
        self.log_joint = ed.make_log_joint_fn(define_model)

        # Function to compute the unnormalized log posterior probability
        self.target_log_prob_fn = lambda alpha_, beta_: self.log_joint(
            x=self.x,
            n_total=self.n_total,
            K=self.K,
            predictions=self.y,
            alpha=alpha_,
            beta=beta_,
        )

        alpha_size = [self.K]
        beta_size = [self.D, self.K]

        self.params = [
            tf.random.uniform(minval=-3,
                              maxval=3,
                              name="alpha",
                              shape=alpha_size,
                              dtype=dtype),
            tf.random.uniform(minval=-2,
                              maxval=2,
                              name="beta",
                              shape=beta_size,
                              dtype=dtype),
        ]
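
The excerpt stops after the starting values; how self.target_log_prob_fn and self.params are consumed is not shown here. A minimal sketch of the sampling step, assuming a standard tfp.mcmc Hamiltonian Monte Carlo kernel with purely illustrative step-size settings (model stands in for an instance of the class above; none of this is taken from the original class):

import tensorflow_probability as tfp

# hypothetical: `model` is an instance of the class defined above
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=model.target_log_prob_fn,  # unnormalized log posterior
    step_size=0.01,                               # illustrative value
    num_leapfrog_steps=10)                        # illustrative value

states, kernel_results = tfp.mcmc.sample_chain(
    num_results=1000,
    num_burnin_steps=500,
    current_state=model.params,                   # starting values defined above
    kernel=hmc_kernel)

alpha_samples, beta_samples = states              # one tensor of posterior draws per parameter
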
Example #2
    def __init__(self, baseline_index, *args, **kwargs):
        """
        Constructor of model class

        Parameters
        ----------
        baseline_index -- string or int
            Index of reference cell type (column in count data matrix)
        args -- arguments passed to top-level class
        kwargs -- arguments passed to top-level class
        """
        super(self.__class__, self).__init__(*args, **kwargs)

        self.baseline_index = baseline_index
        dtype = tf.float32

        # All parameters that are returned for analysis
        self.param_names = [
            "alpha", "mu_b", "sigma_b", "b_offset", "ind_raw", "ind", "b_raw",
            "beta", "concentration", "prediction"
        ]

        def define_model(x, n_total, K):
            """
            Model definition in Edward2

            Parameters
            ----------
            x -- numpy array [NxD]
                covariate matrix
            n_total -- numpy array [N]
                number of cells per sample
            K -- int
                Number of cell types
            """
            dtype = tf.float32
            N, D = x.shape

            # normal prior on bias
            alpha = ed.Normal(loc=tf.zeros([K]),
                              scale=tf.ones([K]) * 5,
                              name="alpha")

            # Noncentered parametrization for raw slopes of all cell types except baseline type (before spike-and-slab)
            mu_b = ed.Normal(loc=tf.zeros(1, dtype=dtype),
                             scale=tf.ones(1, dtype=dtype),
                             name="mu_b")
            sigma_b = ed.HalfCauchy(tf.zeros(1, dtype=dtype),
                                    tf.ones(1, dtype=dtype),
                                    name="sigma_b")
            b_offset = ed.Normal(loc=tf.zeros([D, K - 1], dtype=dtype),
                                 scale=tf.ones([D, K - 1], dtype=dtype),
                                 name="b_offset")

            b_raw = mu_b + sigma_b * b_offset

            # Spike-and-slab priors
            sigma_ind_raw = ed.Normal(loc=tf.zeros(shape=[D, K - 1],
                                                   dtype=dtype),
                                      scale=tf.ones(shape=[D, K - 1],
                                                    dtype=dtype),
                                      name='sigma_ind_raw')
            ind_t = sigma_ind_raw * 50
            ind = tf.exp(ind_t) / (1 + tf.exp(ind_t))

            # Calculate betas
            beta = ind * b_raw

            # Include slope 0 for baseline cell type
            beta = tf.concat(axis=1,
                             values=[
                                 beta[:, :baseline_index],
                                 tf.fill(value=0., dims=[D, 1]),
                                 beta[:, baseline_index:]
                             ])

            # Concentration vector from intercepts, slopes
            concentration_ = tf.exp(alpha + tf.matmul(x, beta))

            # Cell count prediction via DirMult
            predictions = ed.DirichletMultinomial(n_total,
                                                  concentration=concentration_,
                                                  name="predictions")
            return predictions

        # Log joint probability function of the model
        self.log_joint = ed.make_log_joint_fn(define_model)

        # Function to compute the unnormalized log posterior probability
        self.target_log_prob_fn = lambda alpha_, mu_b_, sigma_b_, b_offset_, sigma_ind_raw_:\
            self.log_joint(x=self.x,
                           n_total=self.n_total,
                           K=self.K,
                           predictions=self.y,
                           alpha=alpha_,
                           mu_b=mu_b_,
                           sigma_b=sigma_b_,
                           b_offset=b_offset_,
                           sigma_ind_raw=sigma_ind_raw_,
                           )

        alpha_size = [self.K]
        beta_size = [self.D, self.K - 1]

        # MCMC starting values
        self.params = [
            tf.random.normal(alpha_size, 0, 1, name='init_alpha'),
            tf.zeros(1, name="init_mu_b", dtype=dtype),
            tf.ones(1, name="init_sigma_b", dtype=dtype),
            tf.random.normal(beta_size, 0, 1, name='init_b_offset'),
            tf.zeros(beta_size, name='init_sigma_ind_raw', dtype=dtype),
        ]
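
Two steps in define_model are easy to misread from the code alone: the scaled logistic transform that turns sigma_ind_raw into inclusion probabilities, and the concat that re-inserts a zero slope column for the reference cell type. A small standalone check, with shapes and values chosen purely for illustration (not taken from the model):

import tensorflow as tf

D, K, baseline_index = 2, 4, 1
sigma_ind_raw = tf.constant([[-0.1, 0.0, 0.1],
                             [0.5, -0.5, 0.0]])          # shape [D, K-1]

ind_t = sigma_ind_raw * 50
ind = tf.exp(ind_t) / (1 + tf.exp(ind_t))                # logistic sigmoid; values pushed towards 0 or 1

b_raw = tf.ones([D, K - 1])                              # stands in for mu_b + sigma_b * b_offset
beta = ind * b_raw                                       # spike-and-slab slopes

beta = tf.concat([beta[:, :baseline_index],
                  tf.zeros([D, 1]),                      # slope of the baseline cell type fixed to 0
                  beta[:, baseline_index:]], axis=1)
print(beta.shape)                                        # (2, 4): one zero column at baseline_index
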
Example #3
    # Calculate betas
    beta = ind * b_raw

    # Concentration vector from intercepts, slopes
    concentration_ = tf.exp(alpha + tf.matmul(x, beta))

    # Cell count prediction via DirMult
    predictions = ed.DirichletMultinomial(n_total,
                                          concentration=concentration_,
                                          name="predictions")
    return predictions


# Log joint probability function of the model
log_joint_ed = ed.make_log_joint_fn(edward_model)

# Function to compute log posterior probability
target_log_prob_fn_ed = lambda alpha_, mu_b_, sigma_b_, b_offset_, sigma_ind_raw_: \
    log_joint_ed(x=tf.cast(x, dtype),
                 n_total=tf.cast(n_total, dtype),
                 K=K,
                 predictions=tf.cast(y, dtype),
                 alpha=alpha_,
                 mu_b=mu_b_,
                 sigma_b=sigma_b_,
                 b_offset=b_offset_,
                 sigma_ind_raw=sigma_ind_raw_,
                 )

alpha_size = [K]
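
For orientation, ed.make_log_joint_fn turns edward_model into a function that, given concrete values for every named random variable (the keyword arguments above), returns the sum of their log-densities as a scalar. A hedged sketch of evaluating it once at a neutral starting point, assuming eager execution and that x, y, n_total, D, K and dtype are defined as in the snippet above:

alpha0 = tf.zeros([K], dtype)
mu_b0 = tf.zeros(1, dtype)
sigma_b0 = tf.ones(1, dtype)
b_offset0 = tf.zeros([D, K - 1], dtype)
sigma_ind_raw0 = tf.zeros([D, K - 1], dtype)

lp = target_log_prob_fn_ed(alpha0, mu_b0, sigma_b0, b_offset0, sigma_ind_raw0)
print(float(lp))  # unnormalized log posterior at the starting values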