def _test(x, mean=None, cov=1):
    """Verify that logpdf agrees with SciPy for tensor and array arguments."""
    x_tensor = tf.constant(x)
    mean_tensor = tf.convert_to_tensor(mean)
    cov_tensor = tf.convert_to_tensor(cov)
    expected = stats.multivariate_normal.logpdf(x, mean, cov)
    # Every combination of raw and tensor-valued parameters must agree.
    _assert_eq(multivariate_normal.logpdf(x_tensor, mean, cov), expected)
    _assert_eq(multivariate_normal.logpdf(x_tensor, mean_tensor, cov), expected)
    _assert_eq(multivariate_normal.logpdf(x_tensor, mean, cov_tensor), expected)
    _assert_eq(multivariate_normal.logpdf(x_tensor, mean_tensor, cov_tensor),
               expected)
def _test_logpdf(x, mean, cov):
    """Verify logpdf matches SciPy for every tensor/array argument pairing."""
    x_tensor = tf.constant(x)
    mean_variants = (mean, tf.convert_to_tensor(mean))
    cov_variants = (cov, tf.convert_to_tensor(cov))
    expected = stats.multivariate_normal.logpdf(x, mean, cov)
    # Check cov first raw then as a tensor, cycling mean within each.
    for c in cov_variants:
        for m in mean_variants:
            _assert_eq(multivariate_normal.logpdf(x_tensor, m, c), expected)
 def _test(self, x, mean=None, cov=1):
     """Compare TF logpdf against SciPy for every tensor/array combination."""
     x_tensor = tf.constant(x)
     mean_tensor = tf.convert_to_tensor(mean)
     cov_tensor = tf.convert_to_tensor(cov)
     expected = stats.multivariate_normal.logpdf(x, mean, cov)
     with self.test_session():
         for c in (cov, cov_tensor):
             for m in (mean, mean_tensor):
                 self.assertAllClose(
                     multivariate_normal.logpdf(x_tensor, m, c).eval(),
                     expected)
def _test_logpdf_standard_2d(x):
    """Check the standard-normal default parameterization in 2-D."""
    x_tensor = tf.constant(x)
    expected = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                np.diag(np.ones(2)))
    # Defaults, vector variances (NumPy and TF), and a diagonal covariance
    # matrix must all reduce to the same standard-normal density.
    _assert_eq(multivariate_normal.logpdf(x_tensor), expected)
    _assert_eq(
        multivariate_normal.logpdf(x_tensor, np.zeros([2]), np.ones([2])),
        expected)
    _assert_eq(
        multivariate_normal.logpdf(x_tensor, tf.zeros([2]), tf.ones([2])),
        expected)
    _assert_eq(
        multivariate_normal.logpdf(x_tensor, np.zeros([2]), np.diag(np.ones([2]))),
        expected)
def _test_logpdf_standard_2d(x):
    """Verify the default (standard-normal) parameterization in 2-D."""
    x_tensor = tf.constant(x)
    zero_mean = np.zeros([2])
    unit_var = np.ones([2])
    expected = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                np.diag(np.ones(2)))
    # No-argument call relies on the standard-normal defaults.
    _assert_eq(multivariate_normal.logpdf(x_tensor), expected)
    # Vector variances, as NumPy arrays and as TF tensors.
    _assert_eq(multivariate_normal.logpdf(x_tensor, zero_mean, unit_var),
               expected)
    _assert_eq(multivariate_normal.logpdf(x_tensor, tf.zeros([2]), tf.ones([2])),
               expected)
    # Full diagonal covariance matrix.
    _assert_eq(
        multivariate_normal.logpdf(x_tensor, zero_mean, np.diag(unit_var)),
        expected)
Example #6
0
 def _test(self, x, mu, sigma):
   """Compare TF logpdf with SciPy across tensor/array mu and sigma."""
   x_tensor = tf.constant(x)
   mu_tensor = tf.convert_to_tensor(mu)
   sigma_tensor = tf.convert_to_tensor(sigma)
   expected = stats.multivariate_normal.logpdf(x, mu, sigma)
   with self.test_session():
     for s in (sigma, sigma_tensor):
       for m in (mu, mu_tensor):
         self.assertAllClose(
             multivariate_normal.logpdf(x_tensor, m, s).eval(), expected)
 def _test(self, x, mean=None, cov=1):
     """Compare TF logpdf with SciPy across tensor/array mean and cov."""
     x_tensor = tf.constant(x)
     mean_tensor = tf.convert_to_tensor(mean)
     cov_tensor = tf.convert_to_tensor(cov)
     expected = stats.multivariate_normal.logpdf(x, mean, cov)
     with self.test_session():
         for c in (cov, cov_tensor):
             for m in (mean, mean_tensor):
                 self.assertAllClose(
                     multivariate_normal.logpdf(x_tensor, m, c).eval(),
                     expected)
Example #8
0
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        For each latent sample z: a Dirichlet prior on the mixture weights,
        normal priors on the component means, inverse-gamma priors on the
        component variances, plus the data log-likelihood terms.
        """
        # NOTE(review): N is computed but never used in this method.
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            pi, mus, sigmas = self.unpack_params(z)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                # Priors on both coordinates of component k's mean and
                # variance; the explicit k*D and k*D+1 indexing assumes
                # self.D == 2 -- TODO confirm.
                log_prior += norm.logpdf(mus[k * self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k * self.D + 1], 0,
                                         np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k * self.D], self.a,
                                             self.b)
                log_prior += invgamma.logpdf(sigmas[k * self.D + 1], self.a,
                                             self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    # NOTE(review): this sums log pi_k + log N(x; mu_k,
                    # sigma_k) over components rather than log-sum-exp'ing
                    # over them -- verify against the intended mixture
                    # likelihood.
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(
                        x, mus[(k * self.D):((k + 1) * self.D)],
                        sigmas[(k * self.D):((k + 1) * self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Example #9
0
 def log_prob(self, xs, zs):
     """Return the joint density, one entry per latent sample in zs."""
     # GP prior over the latent function values, with kernel Gram matrix.
     gram = self.kernel(xs)
     log_prior = multivariate_normal.logpdf(zs[:, :], cov=gram)
     # Bernoulli likelihood per latent sample; labels live in column 0.
     per_sample = []
     for z in tf.unpack(zs):
         probs = self.inverse_link(tf.mul(xs[:, 0], z))
         per_sample.append(tf.reduce_sum(bernoulli.logpmf(xs[:, 0], probs)))

     log_lik = tf.pack(per_sample)
     return log_prior + log_lik
Example #10
0
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        Unconstrained latent samples are first mapped to the constrained
        space (simplex weights via sigmoid + renormalization, positive
        variances via softplus), then scored under the mixture model.
        """
        # NOTE(review): N is computed but never used in this method.
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            # Do the unconstrained to constrained transformation for MAP here.
            pi, mus, sigmas = self.unpack_params(z)
            # Squash the first K-1 weights and set the last so they sum to 1.
            pi = tf.sigmoid(pi)
            pi = tf.concat(0, [pi[0:(self.K-1)],
                         tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K-1)]), 0)])
            # Softplus keeps the variance parameters positive.
            sigmas = tf.nn.softplus(sigmas)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                # Explicit k*D and k*D+1 indexing assumes self.D == 2 --
                # TODO confirm.
                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    # NOTE(review): sums log pi_k + component density over k
                    # rather than log-sum-exp'ing -- verify against the
                    # intended mixture likelihood.
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(x,
                        mus[(k*self.D):((k+1)*self.D)],
                        sigmas[(k*self.D):((k+1)*self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Example #11
0
 def log_prob(self, xs, zs):
     """Return the joint density as a vector over latent samples."""
     # Prior: zero-mean multivariate normal with kernel-derived covariance.
     cov_matrix = self.kernel(xs)
     prior_terms = multivariate_normal.logpdf(zs[:, :], cov=cov_matrix)
     # Likelihood: Bernoulli over the labels (column 0) for each sample z.
     lik_terms = [
         tf.reduce_sum(
             bernoulli.logpmf(xs[:, 0], self.inverse_link(tf.mul(xs[:, 0], z))))
         for z in tf.unpack(zs)
     ]
     return prior_terms + tf.pack(lik_terms)
Example #12
0
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        Per latent sample: Dirichlet prior on mixture weights, normal priors
        on means, inverse-gamma priors on variances, plus the data terms.
        """
        # NOTE(review): N is computed but never used in this method.
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            pi, mus, sigmas = self.unpack_params(z)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                # Explicit k*D and k*D+1 indexing assumes self.D == 2 --
                # TODO confirm.
                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    # NOTE(review): sums log pi_k + component density over k
                    # rather than log-sum-exp'ing -- verify against the
                    # intended mixture likelihood.
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(x,
                        mus[(k*self.D):((k+1)*self.D)],
                        sigmas[(k*self.D):((k+1)*self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Example #13
0
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        Mixture-of-Gaussians joint density: Dirichlet prior on the weights,
        normal priors on the means, inverse-gamma priors on the variances,
        and a log-sum-exp over components for the data likelihood.
        """
        x = xs['x']
        pi, mus, sigmas = zs
        # Priors; the reduce_sum over axis 1 folds the per-sample parameter
        # dimension, leaving one prior value per latent sample.
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each sample zs[s, :].
        log_lik = []
        N = get_dims(x)[0]
        n_samples = get_dims(pi)[0]
        for s in range(n_samples):
            # log-likelihood is
            # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
            # Create a K x N matrix, whose entry (k, n) is
            # log pi_k + log N(x_n; mu_k, sigma_k).
            matrix = []
            for k in range(self.K):
                matrix += [tf.ones(N)*tf.log(pi[s, k]) +
                           multivariate_normal.logpdf(x,
                               mus[s, (k*self.D):((k+1)*self.D)],
                               sigmas[s, (k*self.D):((k+1)*self.D)])]

            matrix = tf.pack(matrix)
            # log_sum_exp() along the rows is a vector, whose nth
            # element is the log-likelihood of data point x_n.
            vector = log_sum_exp(matrix, 0)
            # Sum over data points to get the full log-likelihood.
            log_lik_z = tf.reduce_sum(vector)
            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Example #14
0
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        Mixture-of-Gaussians joint density with a log-sum-exp over components
        for the data likelihood. Unlike the axis-1 variant elsewhere, the
        prior reduce_sums here collapse all axes to a scalar, which then
        broadcasts over the per-sample likelihood vector.
        """
        x = xs['x']
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each sample zs[s, :].
        log_lik = []
        N = get_dims(x)[0]
        n_samples = get_dims(pi)[0]
        for s in range(n_samples):
            # log-likelihood is
            # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
            # Create a K x N matrix, whose entry (k, n) is
            # log pi_k + log N(x_n; mu_k, sigma_k).
            matrix = []
            for k in range(self.K):
                matrix += [
                    tf.ones(N) * tf.log(pi[s, k]) + multivariate_normal.logpdf(
                        x, mus[s, (k * self.D):((k + 1) * self.D)],
                        sigmas[s, (k * self.D):((k + 1) * self.D)])
                ]

            matrix = tf.pack(matrix)
            # log_sum_exp() along the rows is a vector, whose nth
            # element is the log-likelihood of data point x_n.
            vector = log_sum_exp(matrix, 0)
            # Sum over data points to get the full log-likelihood.
            log_lik_z = tf.reduce_sum(vector)
            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Example #15
0
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        Unconstrained latent samples are first mapped to the constrained
        space (simplex weights via sigmoid + renormalization, positive
        variances via softplus), then scored under the mixture model.
        """
        # NOTE(review): N is computed but never used in this method.
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            # Do the unconstrained to constrained transformation for MAP here.
            pi, mus, sigmas = self.unpack_params(z)
            # Squash the first K-1 weights and set the last so they sum to 1.
            pi = tf.sigmoid(pi)
            pi = tf.concat(0, [
                pi[0:(self.K - 1)],
                tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K - 1)]), 0)
            ])
            # Softplus keeps the variance parameters positive.
            sigmas = tf.nn.softplus(sigmas)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                # Explicit k*D and k*D+1 indexing assumes self.D == 2 --
                # TODO confirm.
                log_prior += norm.logpdf(mus[k * self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k * self.D + 1], 0,
                                         np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k * self.D], self.a,
                                             self.b)
                log_prior += invgamma.logpdf(sigmas[k * self.D + 1], self.a,
                                             self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    # NOTE(review): sums log pi_k + component density over k
                    # rather than log-sum-exp'ing -- verify against the
                    # intended mixture likelihood.
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(
                        x, mus[(k * self.D):((k + 1) * self.D)],
                        sigmas[(k * self.D):((k + 1) * self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
def test_logpdf_cov_float_2d():
    """logpdf with a full 2x2 covariance matrix matches SciPy."""
    x = np.zeros(2)
    x_tensor = tf.constant([0.0, 0.0])
    expected = stats.multivariate_normal.logpdf(
        x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
    actual = multivariate_normal.logpdf(
        x_tensor, tf.zeros([2]), tf.constant([[2.0, 0.5], [0.5, 1.0]]))
    _assert_eq(actual, expected)
Example #17
0
 def log_prob(self, xs, zs):
   """Return scalar, the log joint density log p(xs, zs)."""
   x, y = xs['x'], xs['y']
   latent = zs['z']
   # GP prior: zero mean, covariance from the kernel over inputs x.
   prior = multivariate_normal.logpdf(latent, tf.zeros(self.N), self.kernel(x))
   # Bernoulli likelihood of the labels y given the latent function.
   likelihood = bernoulli.logpmf(y, p=self.inverse_link(y * latent))
   return prior + tf.reduce_sum(likelihood)
Example #18
0
 def log_prob(self, xs, zs):
     """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     x, y = xs['x'], xs['y']
     # GP prior over the latent function values.
     prior = multivariate_normal.logpdf(zs, cov=self.kernel(x))
     # One Bernoulli likelihood term per latent sample z.
     lik_terms = []
     for z in tf.unpack(zs):
         probs = self.inverse_link(tf.mul(y, z))
         lik_terms.append(tf.reduce_sum(bernoulli.logpmf(y, probs)))

     return prior + tf.pack(lik_terms)
Example #19
0
 def log_prob(self, xs, zs):
     """Return scalar, the log joint density log p(xs, zs)."""
     x, y = xs['x'], xs['y']
     latent = zs['z']
     # Zero-mean GP prior with kernel covariance over the inputs.
     prior = multivariate_normal.logpdf(latent, tf.zeros(self.N),
                                        self.kernel(x))
     # Sum the per-label Bernoulli log-probabilities.
     likelihood = tf.reduce_sum(
         bernoulli.logpmf(y, p=self.inverse_link(y * latent)))
     return prior + likelihood
def test_logpdf_cov_float_2d():
    """A dense 2x2 covariance matrix yields the same density as SciPy."""
    x = np.zeros(2)
    x_tensor = tf.constant([0.0, 0.0])
    expected = stats.multivariate_normal.logpdf(
        x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
    _assert_eq(
        multivariate_normal.logpdf(x_tensor, tf.zeros([2]),
                                   tf.constant([[2.0, 0.5], [0.5, 1.0]])),
        expected)
Example #21
0
 def log_prob(self, xs, zs):
     """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     x, y = xs['x'], xs['y']
     # GP prior over latent function values.
     prior = multivariate_normal.logpdf(zs, cov=self.kernel(x))
     # Bernoulli likelihood of labels y, one term per latent sample.
     lik_terms = [
         tf.reduce_sum(bernoulli.logpmf(y, self.inverse_link(tf.mul(y, z))))
         for z in tf.unpack(zs)
     ]
     return prior + tf.pack(lik_terms)
def test_logpdf_float_2d():
    """Batched 2-D float inputs agree with SciPy under several covariances."""
    x = np.array([[0.3, 0.7], [0.2, 0.8]])
    _test_logpdf(x, np.zeros([2]), np.ones([2]))
    _test_logpdf(x, np.zeros(2), np.diag(np.ones(2)))
    expected = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                np.diag(np.ones(2)))
    _assert_eq(multivariate_normal.logpdf(tf.constant(x)), expected)

    _test_logpdf(x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
def test_int_1d():
    """Integer list inputs are handled like 1-D float vectors."""
    x = [0, 0]
    _test(x, np.zeros([2]), np.ones([2]))
    _test(x, np.zeros(2), np.diag(np.ones(2)))
    expected = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                np.diag(np.ones(2)))
    _assert_eq(multivariate_normal.logpdf(tf.constant(x)), expected)

    _test(x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
Example #24
0
def test_logpdf_int_1d():
    """Integer list inputs behave like 1-D float vectors for logpdf."""
    x = [0, 0]
    _test_logpdf(x, np.zeros([2]), np.ones([2]))
    _test_logpdf(x, np.zeros(2), np.diag(np.ones(2)))
    identity_true = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                     np.diag(np.ones(2)))
    _assert_eq(multivariate_normal.logpdf(tf.constant(x)), identity_true)

    _test_logpdf(x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
def test_float_2d():
    """Batched 2-D float inputs agree with SciPy under several covariances."""
    x = np.array([[0.3, 0.7], [0.2, 0.8]])
    _test(x, np.zeros([2]), np.ones([2]))
    _test(x, np.zeros(2), np.diag(np.ones(2)))
    identity_true = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                     np.diag(np.ones(2)))
    _assert_eq(multivariate_normal.logpdf(tf.constant(x)), identity_true)

    _test(x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
    def test_float_2d(self):
        """Batched 2-D float inputs agree with SciPy."""
        x = np.array([[0.3, 0.7], [0.2, 0.8]])
        self._test(x, np.zeros([2]), np.ones([2]))
        self._test(x, np.zeros(2), np.diag(np.ones(2)))
        expected = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                    np.diag(np.ones(2)))
        with self.test_session():
            self.assertAllClose(
                multivariate_normal.logpdf(tf.constant(x)).eval(), expected)

        self._test(x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
Example #27
0
 def log_prob(self, xs, zs):
     """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     # Data must have labels in the first column and features in
     # subsequent columns.
     gram = self.kernel(xs)
     prior = multivariate_normal.logpdf(zs, cov=gram)
     per_sample = []
     for z in tf.unpack(zs):
         probs = self.inverse_link(tf.mul(xs[:, 0], z))
         per_sample.append(tf.reduce_sum(bernoulli.logpmf(xs[:, 0], probs)))

     return prior + tf.pack(per_sample)
    def test_float_2d(self):
        """Batched 2-D float inputs agree with SciPy under each covariance."""
        x = np.array([[0.3, 0.7], [0.2, 0.8]])
        # Vector variances and diagonal covariance matrix.
        self._test(x, np.zeros([2]), np.ones([2]))
        self._test(x, np.zeros(2), np.diag(np.ones(2)))
        identity_true = stats.multivariate_normal.logpdf(x, np.zeros(2),
                                                         np.diag(np.ones(2)))
        with self.test_session():
            actual = multivariate_normal.logpdf(tf.constant(x)).eval()
            self.assertAllClose(actual, identity_true)

        # Dense covariance matrix.
        self._test(x, np.zeros(2), np.array([[2.0, 0.5], [0.5, 1.0]]))
Example #29
0
 def log_prob(self, xs, zs):
     """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     # Data must have labels in the first column and features in
     # subsequent columns.
     gram = self.kernel(xs)
     prior = multivariate_normal.logpdf(zs, cov=gram)
     lik_terms = [
         tf.reduce_sum(
             bernoulli.logpmf(xs[:, 0],
                              self.inverse_link(tf.mul(xs[:, 0], z))))
         for z in tf.unpack(zs)
     ]
     return prior + tf.pack(lik_terms)
Example #30
0
    def predict(self, xs, zs):
        """Return matrix with log-likelihoods for each data point under each
        cluster, averaging over each set of latent variables z in zs."""
        x = xs['x']
        pi, mus, sigmas = zs
        # Collapse the sample dimension to point estimates.
        pi = tf.reduce_mean(pi, 0)
        mus = tf.reduce_mean(mus, 0)
        sigmas = tf.reduce_mean(sigmas, 0)

        # One row of per-point log-densities per cluster.
        rows = [
            multivariate_normal.logpdf(
                x, mus[(k * self.D):((k + 1) * self.D)],
                sigmas[(k * self.D):((k + 1) * self.D)])
            for k in range(self.K)
        ]
        return tf.pack(rows)
    def predict(self, xs, zs):
        """Return matrix with log-likelihoods for each data point under each
        cluster, averaging over each set of latent variables z in zs."""
        x = xs['x']
        pi, mus, sigmas = zs
        # Average over the latent-variable samples to get point estimates.
        pi = tf.reduce_mean(pi, 0)
        mus = tf.reduce_mean(mus, 0)
        sigmas = tf.reduce_mean(sigmas, 0)

        cluster_logliks = []
        for k in range(self.K):
            lo, hi = k * self.D, (k + 1) * self.D
            cluster_logliks.append(
                multivariate_normal.logpdf(x, mus[lo:hi], sigmas[lo:hi]))

        return tf.pack(cluster_logliks)
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        Scalar prior (summed over all parameters) broadcast against a
        per-sample likelihood vector.
        """
        N = get_dims(xs['x'])[0]
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each mini-batch zs[b,:]
        log_lik = []
        n_minibatch = get_dims(zs)[0]
        for s in range(n_minibatch):
            # N * sum(log pi) is the mixture-weight contribution for all
            # data points; NOTE(review): this adds log pi_k for every k to
            # every point rather than log-sum-exp'ing over components --
            # verify against the intended mixture likelihood.
            log_lik_z = N*tf.reduce_sum(tf.log(pi))
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs['x'],
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Example #33
0
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])].

        Scalar prior (summed over all parameters) broadcast against a
        per-sample likelihood vector.
        """
        N = get_dims(xs['x'])[0]
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each sample zs[b,:]
        log_lik = []
        n_samples = get_dims(zs)[0]
        for s in range(n_samples):
            # N * sum(log pi) adds every component's log-weight to every
            # data point; NOTE(review): no log-sum-exp over components here
            # -- verify against the intended mixture likelihood.
            log_lik_z = N*tf.reduce_sum(tf.log(pi))
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs['x'],
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Example #34
0
 def log_prob(self, xs, zs):
     """Log density of the latent variable under the model's normal."""
     latent = zs['z']
     return multivariate_normal.logpdf(latent, self.mu, self.sigma)
Example #35
0
 def log_prob(self, xs, zs):
     """Log density of the latent samples under the model's normal."""
     density = multivariate_normal.logpdf(zs, self.mu, self.Sigma)
     return density
Example #36
0
 def _test(self, x, mu, sigma):
   """Check TF logpdf against SciPy using keyword arguments."""
   expected = stats.multivariate_normal.logpdf(x, mu, sigma)
   with self.test_session():
     actual = multivariate_normal.logpdf(x, mu=mu, sigma=sigma).eval()
     self.assertAllClose(actual, expected)
Example #37
0
 def log_prob(self, xs, zs):
     """Log density of each latent sample under the model's normal."""
     densities = [multivariate_normal.logpdf(z, self.mu, self.Sigma)
                  for z in tf.unpack(zs)]
     return tf.pack(densities)
Example #38
0
 def log_prob(self, xs, zs):
     """Log density of each latent sample under the model's normal."""
     densities = []
     for z in tf.unpack(zs):
         densities.append(multivariate_normal.logpdf(z, self.mu, self.Sigma))

     return tf.pack(densities)