Example #1
 def _test(self, x, p):
     xtf = tf.constant(x)
     val_true = stats.bernoulli.logpmf(x, p=p)
     with self.test_session():
         self.assertAllClose(bernoulli.logpmf(xtf, p=p).eval(), val_true)
         self.assertAllClose(
             bernoulli.logpmf(xtf, p=tf.constant(p)).eval(), val_true)
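Both assertions above check Edward's bernoulli.logpmf against scipy.stats. For reference, the quantity being compared is log p(x) = x*log(p) + (1 - x)*log(1 - p); a minimal NumPy sketch of that identity (self-contained, not taken from the snippets on this page):

import numpy as np
from scipy import stats

def bernoulli_logpmf(x, p):
    # log Bernoulli(x | p) = x*log(p) + (1 - x)*log(1 - p)
    x = np.asarray(x, dtype=float)
    return x * np.log(p) + (1.0 - x) * np.log(1.0 - p)

x = np.array([0, 1, 1])
np.testing.assert_allclose(bernoulli_logpmf(x, 0.6),
                           stats.bernoulli.logpmf(x, 0.6))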
Example #2
    def log_prob(self, xs, zs):
        """
        Calculates the unnormalized log joint density.

        Parameters
        ----------
        xs : tf.tensor
            n_data x (D + 1), where first column is outputs and other
            columns are inputs (features)
        zs : tf.tensor or np.ndarray
            n_minibatch x num_vars, where n_minibatch is the number of
            weight samples and num_vars is the number of weights

        Returns
        -------
        tf.tensor
            vector of length n_minibatch, where the i^th element is
            the log joint density of xs and zs[i, :]
        """
        y = xs[:, 0]
        x = xs[:, 1:]
        log_lik = []
        for z in tf.unpack(zs):
            p = self.mapping(x, z)
            log_lik += [bernoulli.logpmf(y, p)]

        log_lik = tf.concat(0, log_lik)
        log_prior = -self.prior_variance * tf.reduce_sum(zs*zs, 1)
        return log_lik + log_prior
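The docstring above pins down the expected input layouts. A minimal sketch of building such inputs (the model instance, its mapping, and prior_variance are assumptions, not shown in the snippet):

import numpy as np
import tensorflow as tf

n_data, D, n_minibatch = 50, 3, 10
# xs: n_data x (D + 1); column 0 holds the binary outputs
xs = tf.constant(np.hstack([np.random.randint(0, 2, (n_data, 1)),
                            np.random.randn(n_data, D)]).astype(np.float32))
# zs: n_minibatch x num_vars, one row per sampled weight vector
zs = tf.constant(np.random.randn(n_minibatch, D).astype(np.float32))
# log_probs = model.log_prob(xs, zs)  # hypothetical call; length n_minibatch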
Example #3
 def log_prob(self, xs, zs):
     K = self.kernel(xs)
     log_prior = multivariate_normal.logpdf(zs, cov=K)
     log_lik = tf.pack([
         tf.reduce_sum(
             bernoulli.logpmf(xs[:, 0],
                              self.inverse_link(tf.mul(xs[:, 0], z))))
         for z in tf.unpack(zs)
     ])
     return log_prior + log_lik
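self.kernel and self.inverse_link are defined elsewhere in the source project. A plausible minimal stand-in, assuming an RBF kernel over the feature columns and a sigmoid link (TF 0.x API to match the snippet; both definitions are assumptions, not Edward's):

import tensorflow as tf

def kernel(self, xs, lengthscale=1.0, variance=1.0):
    # RBF kernel over the feature columns xs[:, 1:]
    x = xs[:, 1:]
    sq_norms = tf.reduce_sum(tf.square(x), 1, keep_dims=True)
    sq_dists = (sq_norms - 2.0 * tf.matmul(x, x, transpose_b=True) +
                tf.transpose(sq_norms))
    return variance * tf.exp(-sq_dists / (2.0 * lengthscale ** 2))

def inverse_link(self, z):
    # Squash a latent function value into a Bernoulli probability
    return tf.sigmoid(z)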
Example #4
    def log_prob(self, xs, zs):
        X, Y = xs['X'], xs['Y']

        m_prior = {}
        m_pred = {}
        for g, m in self.model_config.items():
            x = _get_slice(X, m)

            # Select unobserved variables for this group and place them into a dict
            # keyed by the variable name with the group prefix removed
            z = {k.replace(g + ':', ''): v for k, v in zs.items() if k.startswith(g + ':')}

            # Get contribution to prior log probability from this group
            m_prior[g] = m.get_prior_log_proba(x, Y, z)

            # Get contribution to prediction equation from this group
            m_pred[g] = m.get_prediction_tf(x, z)

        # Compute total log probability sum from priors
        lp_prior = _sum(list(m_prior.values()))
        # lp_prior = tf_print(lp_prior, lambda x: x)

        # Compute log probability for sum of predictions on link scale
        y_logit = tf.reduce_sum(tf.pack(list(m_pred.values()), axis=1), 1)
        # y_logit = tf_print(y_logit, lambda x: [np.min(x), np.max(x), np.all(np.isfinite(x))])

        y_proba = self.inv_link_tf(y_logit)

        # Clip probability predictions to avoid log(0) in pmf calculation
        y_proba = tf.clip_by_value(y_proba, 1E-6, 1-1E-6)
        # y_proba = tf_print(y_proba, lambda x: [np.min(x), np.max(x), np.all(np.isfinite(x))])
        lp_data = tf.reduce_sum(bernoulli.logpmf(Y, p=y_proba))

        return lp_prior + lp_data
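_get_slice and _sum are private helpers from the surrounding project, not shown here. Hypothetical stand-ins consistent with how they are called above (the m.cols attribute is invented for illustration):

import tensorflow as tf

def _sum(tensors):
    # Hypothetical: reduce a list of scalar log-probability tensors to a sum
    return tf.add_n(tensors)

def _get_slice(X, m):
    # Hypothetical: select the feature columns this group's model uses;
    # `m.cols` (a list of column indices) is an assumed attribute
    return tf.transpose(tf.gather(tf.transpose(X), m.cols))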
Example #5
 def log_prob(self, xs, zs):
     K = self.kernel(xs)
     log_prior = multivariate_normal.logpdf(zs, cov=K)
     log_lik = tf.pack([
         tf.reduce_sum(
             bernoulli.logpmf(xs[:, 0],
                              self.inverse_link(tf.mul(xs[:, 0], z))))
         for z in tf.unpack(zs)
     ])
     return log_prior + log_lik
Example #6
 def log_prob(self, xs, zs):
     x, y = xs['x'], xs['y']
     w, b = zs['w'], zs['b']
     log_prior = tf.reduce_sum(norm.logpdf(w, 0.0, self.prior_std))
     log_prior += tf.reduce_sum(norm.logpdf(b, 0.0, self.prior_std))
     log_lik = tf.reduce_sum(
         bernoulli.logpmf(y, p=self.inv_link(ed.dot(x, w) + b)))
     return log_lik + log_prior
Example #7
 def log_prob(self, xs, zs):
   x, y = xs['x'], xs['y']
   w, b = zs['w'], zs['b']
   log_prior = tf.reduce_sum(norm.logpdf(w, 0.0, self.prior_std))
   log_prior += tf.reduce_sum(norm.logpdf(b, 0.0, self.prior_std))
   log_lik = tf.reduce_sum(bernoulli.logpmf(y,
                           p=self.inv_link(ed.dot(x, w) + b)))
   return log_lik + log_prior
Example #8
 def log_prob(self, xs, zs):
     """Return scalar, the log joint density log p(xs, zs)."""
     x, y = xs['x'], xs['y']
     log_prior = multivariate_normal.logpdf(zs['z'], tf.zeros(self.N),
                                            self.kernel(x))
     log_lik = tf.reduce_sum(
         bernoulli.logpmf(y, p=self.inverse_link(y * zs['z'])))
     return log_prior + log_lik
Example #9
 def log_prob(self, xs, zs):
   """Return scalar, the log joint density log p(xs, zs)."""
   x, y = xs['x'], xs['y']
   log_prior = multivariate_normal.logpdf(
       zs['z'], tf.zeros(self.N), self.kernel(x))
   log_lik = tf.reduce_sum(
       bernoulli.logpmf(y, p=self.inverse_link(y * zs['z'])))
   return log_prior + log_lik
Example #10
 def log_prob(self, xs, zs):
     """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     x, y = xs['x'], xs['y']
     log_prior = multivariate_normal.logpdf(zs, cov=self.kernel(x))
     log_lik = tf.pack([tf.reduce_sum(
         bernoulli.logpmf(y, self.inverse_link(tf.mul(y, z)))
         ) for z in tf.unpack(zs)])
     return log_prior + log_lik
Example #11
    def log_lik(self, x, z):
        """
        Bernoulli log-likelihood, summing over every image n and pixel i
        in image n.

        log p(x | z) = log Bernoulli(x | p = varphi(z))
         = sum_{n=1}^N sum_{i=1}^{28*28} log Bernoulli (x_{n,i} | p_{n,i})
        """
        return tf.reduce_sum(bernoulli.logpmf(x, p=self.mapping(z)))
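self.mapping must emit a per-pixel probability in (0, 1) for the logpmf above to be finite. One plausible minimal form is a single affine layer followed by a sigmoid (the W and b parameters are assumptions, not from the snippet):

import tensorflow as tf

def mapping(self, z):
    # Hypothetical decoder: latent codes -> 28*28 Bernoulli probabilities.
    # self.W ([latent_dim, 784]) and self.b ([784]) are assumed variables.
    return tf.sigmoid(tf.matmul(z, self.W) + self.b)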
Example #12
    def log_lik(self, xs, z):
        """
        Bernoulli log-likelihood, summing over every image n and pixel i
        in image n.

        log p(x | z) = log Bernoulli(x | p = neural_network(z))
         = sum_{n=1}^N sum_{i=1}^{28*28} log Bernoulli (x_{n,i} | p_{n,i})
        """
        return tf.reduce_sum(bernoulli.logpmf(xs['x'], p=self.neural_network(z)))
Example #13
    def log_lik(self, x, z):
        """
        Bernoulli log-likelihood, summing over every image n and pixel i
        in image n.

        log p(x | z) = log Bernoulli(x | p = varphi(z))
         = sum_{n=1}^N sum_{i=1}^{28*28} log Bernoulli (x_{n,i} | p_{n,i})
        """
        return tf.reduce_sum(bernoulli.logpmf(x, p=self.mapping(z)))
Example #14
 def log_prob(self, xs, zs):
     """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     x, y = xs['x'], xs['y']
     log_prior = multivariate_normal.logpdf(zs, cov=self.kernel(x))
     log_lik = tf.pack([
         tf.reduce_sum(bernoulli.logpmf(y, self.inverse_link(tf.mul(y, z))))
         for z in tf.unpack(zs)
     ])
     return log_prior + log_lik
Example #15
 def log_lik(self, xs, z):
     """
     Bernoulli log-likelihood, summing over every image n and pixel i
     in image n.
     log p(x | z) = log Bernoulli(x | p = neural_network(z))
      = sum_{n=1}^N sum_{i=1}^{28*28} log Bernoulli (x_{n,i} | p_{n,i})
     """
     return tf.reduce_sum(
         bernoulli.logpmf(xs['x'], p=self.neural_network(z)))
Example #16
    def log_lik(self, xs, zs):
        """Bernoulli log-likelihood, summing over every image n and pixel i
        in image n.

        log p(x | z) = log Bernoulli(x | logits = neural_network(z))
         = sum_{n=1}^N sum_{i=1}^{28*28} log Bernoulli (x_{n,i} | logits_{n,i})
        """
        return tf.reduce_sum(
            bernoulli.logpmf(xs['x'], logits=self.generative_network(zs['z'])))
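Unlike the p= examples elsewhere on this page, the logits parameterization needs no clipping: log Bernoulli(x | logits=l) = x*l - log(1 + e^l), which stays finite for any real l. A NumPy sketch of that identity (a reference computation, not Edward's internals):

import numpy as np
from scipy import stats

def logpmf_from_logits(x, logits):
    # x*l - log(1 + e^l), with logaddexp for numerical stability
    return x * logits - np.logaddexp(0.0, logits)

x = np.array([0.0, 1.0, 1.0])
logits = np.array([-30.0, 0.5, 30.0])  # extreme logits are fine here
p = 1.0 / (1.0 + np.exp(-logits))
np.testing.assert_allclose(logpmf_from_logits(x, logits),
                           stats.bernoulli.logpmf(x, p), atol=1e-6)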
Example #17
 def log_prob(self, xs, zs):
     """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     # Data must have labels in the first column and features in
     # subsequent columns.
     K = self.kernel(xs)
     log_prior = multivariate_normal.logpdf(zs, cov=K)
     log_lik = tf.pack([tf.reduce_sum(
         bernoulli.logpmf(xs[:, 0], self.inverse_link(tf.mul(xs[:, 0], z)))
         ) for z in tf.unpack(zs)])
     return log_prior + log_lik
Example #18
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x, y = xs['x'], xs['y']
        log_lik = []
        for z in tf.unpack(zs):
            p = self.mapping(x, z)
            log_lik += [bernoulli.logpmf(y, p)]

        log_lik = tf.pack(log_lik)
        log_prior = -tf.reduce_sum(zs*zs, 1) / self.prior_variance
        return log_lik + log_prior
Example #19
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x, y = xs['x'], xs['y']
        log_lik = []
        for z in tf.unpack(zs):
            p = self.mapping(x, z)
            log_lik += [bernoulli.logpmf(y, p)]

        log_lik = tf.pack(log_lik)
        log_prior = -tf.reduce_sum(zs * zs, 1) / self.prior_variance
        return log_lik + log_prior
Example #20
 def log_prob(self, xs, zs):
     """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     # Data must have labels in the first column and features in
     # subsequent columns.
     K = self.kernel(xs)
     log_prior = multivariate_normal.logpdf(zs, cov=K)
     log_lik = tf.pack([
         tf.reduce_sum(
             bernoulli.logpmf(xs[:, 0],
                              self.inverse_link(tf.mul(xs[:, 0], z))))
         for z in tf.unpack(zs)
     ])
     return log_prior + log_lik
Example #21
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        # Data must have labels in the first column and features in
        # subsequent columns.
        y = xs[:, 0]
        x = xs[:, 1:]
        log_lik = []
        for z in tf.unpack(zs):
            p = self.mapping(x, z)
            log_lik += [bernoulli.logpmf(y, p)]

        log_lik = tf.pack(log_lik)
        log_prior = -self.prior_variance * tf.reduce_sum(zs * zs, 1)
        return log_lik + log_prior
Example #22
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        # Data must have labels in the first column and features in
        # subsequent columns.
        y = xs[:, 0]
        x = xs[:, 1:]
        log_lik = []
        for z in tf.unpack(zs):
            p = self.mapping(x, z)
            log_lik += [bernoulli.logpmf(y, p)]

        log_lik = tf.pack(log_lik)
        log_prior = -self.prior_variance * tf.reduce_sum(zs*zs, 1)
        return log_lik + log_prior
Example #23
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x, y = xs['x'], xs['y']
        m, n = self.weight_dim[0], self.weight_dim[1]
        log_lik = []
        for z in tf.unpack(zs):
            W = tf.reshape(z[:m*n], [m, n])
            b = tf.reshape(z[m*n:], [1, n])
            # broadcasting to do (x*W) + b (e.g. 40x10 + 1x10)
            p = self.inv_link(tf.matmul(x, W) + b)
            p = tf.squeeze(p) # n_minibatch x 1 to n_minibatch
            log_lik += [bernoulli.logpmf(y, p)]

        log_lik = tf.pack(log_lik)
        log_prior = -tf.reduce_sum(zs*zs, 1) / self.prior_variance
        return log_lik + log_prior
Example #24
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x, y = xs['x'], xs['y']
        m, n = self.weight_dim[0], self.weight_dim[1]
        log_lik = []
        for z in tf.unpack(zs):
            W = tf.reshape(z[:m * n], [m, n])
            b = tf.reshape(z[m * n:], [1, n])
            # broadcasting to do (x*W) + b (e.g. 40x10 + 1x10)
            p = self.inv_link(tf.matmul(x, W) + b)
            p = tf.squeeze(p)  # n_minibatch x 1 to n_minibatch
            log_lik += [bernoulli.logpmf(y, p)]

        log_lik = tf.pack(log_lik)
        log_prior = -tf.reduce_sum(zs * zs, 1) / self.prior_variance
        return log_lik + log_prior
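Both variants above unpack each flat sample z into a weight matrix W and a bias row b. A small NumPy sketch of that packing convention (sizes are illustrative):

import numpy as np

m, n = 4, 10                            # weight_dim: inputs x outputs
z = np.random.randn(m * n + n)          # one flat sample, length m*n + n

W = z[:m * n].reshape(m, n)             # first m*n entries: weight matrix
b = z[m * n:].reshape(1, n)             # trailing n entries: bias row

x = np.random.randn(40, m)
logits = np.dot(x, W) + b               # broadcasting: 40x10 + 1x10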
Example #25
def _test(x, p):
    xtf = tf.constant(x)
    val_true = stats.bernoulli.logpmf(x, p)
    _assert_eq(bernoulli.logpmf(xtf, p), val_true)
    _assert_eq(bernoulli.logpmf(xtf, tf.constant(p)), val_true)
    _assert_eq(bernoulli.logpmf(xtf, tf.constant([p])), val_true)
Example #26
 def log_prob(self, xs, zs):
   log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
   log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
   return log_lik + log_prior
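A minimal sketch of feeding this Beta-Bernoulli model: xs['x'] is a vector of binary observations and zs['p'] a probability in (0, 1). Since Beta(1, 1) is the uniform density, the prior term is 0 and log_prob reduces to the Bernoulli log-likelihood of the data (the model instance itself is assumed):

import tensorflow as tf

xs = {'x': tf.constant([0.0, 1.0, 1.0, 1.0])}
zs = {'p': tf.constant(0.6)}
# lp = model.log_prob(xs, zs)  # hypothetical call: sum of Bernoulli log-pmfs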
Example #27
 def _test(self, x, p):
     xtf = tf.constant(x)
     val_true = stats.bernoulli.logpmf(x, p)
     with self.test_session():
         self.assertAllClose(bernoulli.logpmf(xtf, p).eval(), val_true)
         self.assertAllClose(bernoulli.logpmf(xtf, tf.constant(p)).eval(), val_true)
Example #28
def _test_logpdf(scalar, param):
    x = tf.constant(scalar)
    val_true = stats.bernoulli.logpmf(scalar, param)
    _assert_eq(bernoulli.logpmf(x, tf.constant(param)), val_true)
    _assert_eq(bernoulli.logpmf(x, tf.constant([param])), val_true)
Example #29
 def log_prob_idx(self, idx, xs):
     full_idx = (slice(0, None), ) + idx # slice over batch size
     return bernoulli.logpmf(xs[full_idx], self.p[idx])
Example #30
 def log_prob(self, xs, zs):
     log_prior = beta.logpdf(zs, a=1.0, b=1.0)
     log_lik = tf.pack([
         tf.reduce_sum(bernoulli.logpmf(xs['x'], z)) for z in tf.unpack(zs)
     ])
     return log_lik + log_prior
Example #31
 def log_prob(self, xs, zs):
     return bernoulli.logpmf(zs["p"], p=0.6)
Example #32
 def log_prob_idx(self, idx, xs):
     full_idx = (slice(0, None), ) + idx  # slice over sample size
     return bernoulli.logpmf(xs[full_idx], self.p[idx])
Example #33
 def log_prob(self, xs, zs):
   return bernoulli.logpmf(zs, p)
Example #34
    def log_prob_zi(self, i, z):
        """log q(z_i | lambda_i)"""
        if i >= self.num_vars:
            raise IndexError()

        return bernoulli.logpmf(z[:, i], self.p[i])
Example #35
 def log_prob(self, xs, zs):
     log_prior = beta.logpdf(zs[:, 0], a=1.0, b=1.0)
     log_lik = tf.pack([
         tf.reduce_sum(bernoulli.logpmf(xs, z))
         for z in tf.unpack(zs)])
     return log_lik + log_prior
Example #36
 def log_prob(self, xs, zs):
     return bernoulli.logpmf(zs['p'], p=0.6)
Example #37
 def log_prob(self, xs, zs):
     log_prior = beta.logpdf(zs, a=1.0, b=1.0)
     log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], z))
                        for z in tf.unpack(zs)])
     return log_lik + log_prior
Example #38
 def log_prob(self, xs, zs):
     # TODO: use a table lookup for everything instead of resorting to if-elses
     if get_dims(zs)[1] == 1:
         return bernoulli.logpmf(zs[:, 0], p)
     else:
         return tf.concat(0, [self.table_lookup(z) for z in tf.unpack(zs)])
Example #39
 def log_prob(self, xs, zs):
     return bernoulli.logpmf(zs, p)
Example #40
def _test(x, p):
    xtf = tf.constant(x)
    val_true = stats.bernoulli.logpmf(x, p)
    _assert_eq(bernoulli.logpmf(xtf, p), val_true)
    _assert_eq(bernoulli.logpmf(xtf, tf.constant(p)), val_true)
    _assert_eq(bernoulli.logpmf(xtf, tf.constant([p])), val_true)
Example #41
 def _test(self, x, p):
   val_true = stats.bernoulli.logpmf(x, p=p)
   with self.test_session():
     self.assertAllClose(bernoulli.logpmf(x, p=p).eval(), val_true)
Example #42
    def log_prob_zi(self, i, z):
        """log q(z_i | lambda_i)"""
        if i >= self.num_vars:
            raise IndexError()

        return bernoulli.logpmf(z[:, i], self.p[i])
Example #43
    def log_prob_i(self, i, xs):
        """log p(x_i | params)"""
        if i >= self.num_factors:
            raise IndexError()

        return bernoulli.logpmf(xs[:, i], self.p[i])
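Because this likelihood factorizes over coordinates, summing log_prob_i over i recovers the full log-likelihood, one value per sample in the batch. A hedged sketch, assuming a model instance with a num_factors attribute and a batch xs:

import tensorflow as tf

# model and xs are assumed; each log_prob_i term is a vector over the batch,
# so tf.add_n yields per-sample log-likelihoods
log_lik = tf.add_n([model.log_prob_i(i, xs)
                    for i in range(model.num_factors)])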
Example #44
    def log_prob_zi(self, i, zs):
        """log q(z_i | lambda)"""
        if i >= self.num_factors:
            raise IndexError()

        return bernoulli.logpmf(zs[:, i], self.p[i])
Example #45
    def log_prob_zi(self, i, zs):
        """log q(z_i | lambda)"""
        if i >= self.num_factors:
            raise IndexError()

        return bernoulli.logpmf(zs[:, i], self.p[i])
Example #46
 def log_prob(self, xs, zs):
     log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
     log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
     return log_lik + log_prior
Example #47
 def _test(self, x, p):
     val_true = stats.bernoulli.logpmf(x, p=p)
     with self.test_session():
         self.assertAllClose(bernoulli.logpmf(x, p=p).eval(), val_true)