Example #1
  def _test(self, x, alpha):
    xtf = tf.constant(x)
    val_true = dirichlet_logpdf_vec(x, alpha)
    with self.test_session():
      self.assertAllClose(dirichlet.logpdf(xtf, alpha).eval(), val_true)
      self.assertAllClose(dirichlet.logpdf(xtf,
                                           tf.convert_to_tensor(alpha)).eval(),
                          val_true)
Example #2
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each sample zs[s, :].
        log_prob = []
        for z in tf.unpack(zs):
            pi, mus, sigmas = self.unpack_params(z)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(x,
                        mus[(k*self.D):((k+1)*self.D)],
                        sigmas[(k*self.D):((k+1)*self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Example #3
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x = xs['x']
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each sample zs[s, :].
        log_lik = []
        N = get_dims(x)[0]
        n_samples = get_dims(pi)[0]
        for s in range(n_samples):
            # log-likelihood is
            # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
            # Create a K x N matrix, whose entry (k, n) is
            # log pi_k + log N(x_n; mu_k, sigma_k).
            matrix = []
            for k in range(self.K):
                matrix += [
                    tf.ones(N) * tf.log(pi[s, k]) + multivariate_normal.logpdf(
                        x, mus[s, (k * self.D):((k + 1) * self.D)],
                        sigmas[s, (k * self.D):((k + 1) * self.D)])
                ]

            matrix = tf.pack(matrix)
            # log_sum_exp() along the rows is a vector, whose nth
            # element is the log-likelihood of data point x_n.
            vector = log_sum_exp(matrix, 0)
            # Sum over data points to get the full log-likelihood.
            log_lik_z = tf.reduce_sum(vector)
            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
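The log_sum_exp() helper these examples call is not shown in any of the snippets. As a point of reference, here is a minimal NumPy sketch of the standard max-shift trick such a helper typically implements (the name log_sum_exp_np is illustrative):

import numpy as np

def log_sum_exp_np(mat, axis=0):
    """Numerically stable log(sum(exp(mat))) along `axis`."""
    # Subtract the per-slice maximum before exponentiating so exp()
    # cannot overflow, then add it back outside the log.
    m = np.max(mat, axis=axis, keepdims=True)
    return np.squeeze(m, axis=axis) + np.log(np.sum(np.exp(mat - m), axis=axis))

Applied to the K x N matrix above with axis=0, this returns the length-N vector of per-data-point log-likelihoods.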
Example #4
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each sample zs[s, :].
        log_prob = []
        for z in tf.unpack(zs):
            # Do the unconstrained to constrained transformation for MAP here.
            pi, mus, sigmas = self.unpack_params(z)
            pi = tf.sigmoid(pi)
            pi = tf.concat(0, [
                pi[0:(self.K - 1)],
                tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K - 1)]), 0)
            ])
            sigmas = tf.nn.softplus(sigmas)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k * self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k * self.D + 1], 0,
                                         np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k * self.D], self.a,
                                             self.b)
                log_prior += invgamma.logpdf(sigmas[k * self.D + 1], self.a,
                                             self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(
                        x, mus[(k * self.D):((k + 1) * self.D)],
                        sigmas[(k * self.D):((k + 1) * self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Example #5
    def log_prob(self, xs, zs):
        """Return scalar, the log joint density log p(xs, zs)."""
        x = xs["x"]
        pi, mus, sigmas = zs["pi"], zs["mu"], zs["sigma"]
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # log-likelihood is
        # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
        # Create a K x N matrix, whose entry (k, n) is
        # log pi_k + log N(x_n; mu_k, sigma_k).
        N = get_dims(x)[0]
        matrix = []
        for k in range(self.K):
            matrix += [
                tf.ones(N) * tf.log(pi[k]) +
                multivariate_normal_diag.logpdf(
                    x, mus[(k * self.D):((k + 1) * self.D)],
                    sigmas[(k * self.D):((k + 1) * self.D)])
            ]

        matrix = tf.pack(matrix)
        # log_sum_exp() along the rows is a vector, whose nth
        # element is the log-likelihood of data point x_n.
        vector = log_sum_exp(matrix, 0)
        # Sum over data points to get the full log-likelihood.
        log_lik = tf.reduce_sum(vector)

        return log_prior + log_lik
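multivariate_normal_diag.logpdf above evaluates a Gaussian with diagonal covariance, which factorizes into a sum of univariate normal log-densities. A NumPy reference sketch, assuming sigma holds per-dimension standard deviations (not variances):

import numpy as np

def mvn_diag_logpdf_np(x, mu, sigma):
    # Diagonal-covariance Gaussian: the joint log-density is the sum
    # of D independent univariate normal log-densities.
    z = (x - mu) / sigma
    return np.sum(-0.5 * np.log(2.0 * np.pi) - np.log(sigma) - 0.5 * z ** 2,
                  axis=-1)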
Example #6
  def log_prob(self, xs, zs):
    """Return scalar, the log joint density log p(xs, zs)."""
    x = xs['x']
    pi, mus, sigmas = zs['pi'], zs['mu'], zs['sigma']
    log_prior = dirichlet.logpdf(pi, self.alpha)
    log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c))
    log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

    # log-likelihood is
    # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
    # Create a K x N matrix, whose entry (k, n) is
    # log pi_k + log N(x_n; mu_k, sigma_k).
    N = get_dims(x)[0]
    matrix = []
    for k in range(self.K):
      matrix += [tf.ones(N) * tf.log(pi[k]) +
                 multivariate_normal_diag.logpdf(x,
                 mus[(k * self.D):((k + 1) * self.D)],
                 sigmas[(k * self.D):((k + 1) * self.D)])]

    matrix = tf.pack(matrix)
    # log_sum_exp() along the rows is a vector, whose nth
    # element is the log-likelihood of data point x_n.
    vector = log_sum_exp(matrix, 0)
    # Sum over data points to get the full log-likelihood.
    log_lik = tf.reduce_sum(vector)

    return log_prior + log_lik
Example #7
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each sample zs[s, :].
        log_prob = []
        for z in tf.unpack(zs):
            pi, mus, sigmas = self.unpack_params(z)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k * self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k * self.D + 1], 0,
                                         np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k * self.D], self.a,
                                             self.b)
                log_prior += invgamma.logpdf(sigmas[k * self.D + 1], self.a,
                                             self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(
                        x, mus[(k * self.D):((k + 1) * self.D)],
                        sigmas[(k * self.D):((k + 1) * self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Example #8
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each sample zs[s, :].
        log_prob = []
        for z in tf.unpack(zs):
            # Do the unconstrained to constrained transformation for MAP here.
            pi, mus, sigmas = self.unpack_params(z)
            pi = tf.sigmoid(pi)
            pi = tf.concat(0, [pi[0:(self.K-1)],
                         tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K-1)]), 0)])
            sigmas = tf.nn.softplus(sigmas)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(x,
                        mus[(k*self.D):((k+1)*self.D)],
                        sigmas[(k*self.D):((k+1)*self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
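The unconstrained-to-constrained step above squashes pi through a sigmoid, keeps the first K - 1 entries, and assigns the leftover mass to the last. A standalone NumPy sketch of that transform (to_simplex is an illustrative name):

import numpy as np

def to_simplex(pi_free, K):
    # Keep the first K - 1 sigmoid outputs and give the remaining mass
    # to the last component.  This matches the MAP examples above, but
    # note it only yields a valid probability vector when those K - 1
    # entries sum to less than 1; a softmax is the more common choice.
    p = 1.0 / (1.0 + np.exp(-pi_free))
    head = p[:K - 1]
    return np.concatenate([head, [1.0 - np.sum(head)]])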
Example #9
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x = xs['x']
        pi, mus, sigmas = zs
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each sample zs[s, :].
        log_lik = []
        N = get_dims(x)[0]
        n_samples = get_dims(pi)[0]
        for s in range(n_samples):
            # log-likelihood is
            # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
            # Create a K x N matrix, whose entry (k, n) is
            # log pi_k + log N(x_n; mu_k, sigma_k).
            matrix = []
            for k in range(self.K):
                matrix += [tf.ones(N)*tf.log(pi[s, k]) +
                           multivariate_normal.logpdf(x,
                               mus[s, (k*self.D):((k+1)*self.D)],
                               sigmas[s, (k*self.D):((k+1)*self.D)])]

            matrix = tf.pack(matrix)
            # log_sum_exp() along the rows is a vector, whose nth
            # element is the log-likelihood of data point x_n.
            vector = log_sum_exp(matrix, 0)
            # Sum over data points to get the full log-likelihood.
            log_lik_z = tf.reduce_sum(vector)
            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
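This example vectorizes the prior over the sample dimension: reducing along axis 1 leaves one log-prior per row of zs, which then adds elementwise to the per-sample log-likelihood vector. A NumPy shape sketch (names are illustrative):

import numpy as np

n_samples, K, D = 4, 3, 2
mus = np.random.randn(n_samples, K * D)
per_element = -0.5 * mus ** 2            # stand-in for norm.logpdf(mus, 0, c)
log_prior = np.sum(per_element, axis=1)  # one scalar per sample
print(log_prior.shape)                   # (4,)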
Example #10
 def log_prob_idx(self, idx, xs):
     """
     ``log p(xs[:, idx, :] | params[idx, :])``
     where ``idx`` is of dimension ``shape[:-1]``
     """
     idx = idx + (slice(0, None), )  # slice over multivariate dimension
     full_idx = (slice(0, None), ) + idx  # slice over sample size
     return dirichlet.logpdf(xs[full_idx], self.alpha[idx])
Example #11
 def log_prob_idx(self, idx, xs):
     """
     ``log p(xs[:, idx, :] | params[idx, :])``
     where ``idx`` is of dimension ``shape[:-1]``
     """
     idx = idx + (slice(0, None), ) # slice over multivariate dimension
     full_idx = (slice(0, None), ) + idx # slice over batch size
     return dirichlet.logpdf(xs[full_idx], self.alpha[idx])
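The tuple arithmetic in these two examples builds a NumPy-style index: appending slice(0, None) keeps the whole trailing (multivariate) dimension, and prepending it keeps the whole leading (sample) dimension. A standalone demonstration with illustrative shapes:

import numpy as np

alpha = np.arange(12.0).reshape(3, 4)  # params with shape [3, 4]
xs = np.ones((5, 3, 4))                # samples with shape [5, 3, 4]
idx = (1,)
idx = idx + (slice(0, None),)          # alpha[idx] == alpha[1, :]
full_idx = (slice(0, None),) + idx     # xs[full_idx] == xs[:, 1, :]
print(alpha[idx].shape, xs[full_idx].shape)  # (4,) (5, 4)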
Example #12
    def log_prob_zi(self, i, zs):
        """log q(z_i | lambda)"""
        # Note this calculates the log density with respect to z_i,
        # which is the ith factor and not the ith latent variable.
        if i >= self.num_factors:
            raise IndexError()

        return dirichlet.logpdf(zs[:, (i*self.K):((i+1)*self.K)],
                                self.alpha[i, :])
Example #13
    def log_prob_zi(self, i, zs):
        """log q(z_i | lambda)"""
        # Note this calculates the log density with respect to z_i,
        # which is the ith factor and not the ith latent variable.
        if i >= self.num_factors:
            raise IndexError()

        return dirichlet.logpdf(zs[:, (i * self.K):((i + 1) * self.K)],
                                self.alpha[i, :])
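The slice zs[:, (i*K):((i+1)*K)] pulls factor i out of parameters packed side by side: each row of zs concatenates num_factors blocks of length K. In plain NumPy:

import numpy as np

K, num_factors = 3, 2
zs = np.arange(2.0 * num_factors * K).reshape(2, num_factors * K)
i = 1
print(zs[:, (i * K):((i + 1) * K)])  # the ith K-wide block of every row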
Example #14
    def log_prob(self, xs, zs):
        """Return scalar, the log joint density log p(xs, zs).

        Given n_minibatch data points and n_samples of the latent
        variables: summing over the data points makes sense, since the
        joint is the only place in the gradient estimate that touches
        the data points and it appears inside a log, so the per-point
        terms add. Summing over the latent variables does not make
        sense; they are supposed to be handled one sample at a time.
        """
        x = xs['x']
        pi, mus, sigmas = zs['pi'], zs['mu'], zs['sigma']

        # Shapes: x is [n_minibatch, D]; pi is [K]; mus and sigmas are [K * D].

        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # log-likelihood is
        # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
        # Create a K x N matrix, whose entry (k, n) is
        # log pi_k + log N(x_n; mu_k, sigma_k).
        # get_dims(x) is [n_minibatch, D], so index [0] picks out n_minibatch.
        n_minibatch = get_dims(x)[0]
        # The K x N matrix form lets log_sum_exp() subtract each column's
        # max before exponentiating, keeping the computation stable.

        matrix = []
        for k in range(self.K):

            matrix += [
                tf.ones(n_minibatch) * tf.log(pi[k]) +
                multivariate_normal_diag.logpdf(
                    x, mus[(k * self.D):((k + 1) * self.D)],
                    sigmas[(k * self.D):((k + 1) * self.D)])
            ]

        matrix = tf.pack(matrix)
        # log_sum_exp() along the rows is a vector, whose nth
        # element is the log-likelihood of data point x_n.
        vector = log_sum_exp(matrix, 0)
        # Sum over data points to get the full log-likelihood.
        log_lik = tf.reduce_sum(vector)

        return log_prior + log_lik
Example #15
    def log_prob_zi(self, i, z):
        """log q(z_i | lambda_i)"""
        # a hack for now
        if i >= self.num_vars:
            raise IndexError()

        if i == 0:
            # TODO take logpdf of just one of the probability vectors
            return dirichlet.logpdf(z[:, :], self.alpha[0, :])

        if i >= 1:
            return tf.constant(0.0, dtype=tf.float32)
Example #16
    def log_prob_zi(self, i, z):
        """log q(z_i | lambda_i)"""
        # a hack for now
        if i >= self.num_vars:
            raise IndexError()

        if i == 0:
            # TODO take logpdf of just one of the probability vectors
            return dirichlet.logpdf(z[:, :], self.alpha[0, :])

        if i >= 1:
            return tf.constant(0.0, dtype=tf.float32)
Example #17
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs['x'])[0]
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each sample zs[s, :].
        log_lik = []
        n_samples = get_dims(zs)[0]
        for s in range(n_samples):
            log_lik_z = N * tf.reduce_sum(tf.log(pi[s, :]))
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs['x'],
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Example #18
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs['x'])[0]
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each sample zs[s, :].
        log_lik = []
        n_samples = get_dims(zs)[0]
        for s in range(n_samples):
            log_lik_z = N * tf.reduce_sum(tf.log(pi[s, :]))
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs['x'],
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Example #19
  def _test(self, x, alpha):
      xtf = tf.constant(x)
      val_true = dirichlet_logpdf_vec(x, alpha)
      with self.test_session():
          self.assertAllClose(dirichlet.logpdf(xtf, alpha).eval(), val_true)
          self.assertAllClose(
              dirichlet.logpdf(xtf, tf.convert_to_tensor(alpha)).eval(),
              val_true)
Example #20
    def log_prob_zi(self, i, z):
        """log q(z_i | lambda_i)"""
        if i >= self.num_vars:
            raise IndexError()

        return dirichlet.logpdf(z[:, i], self.alpha[i, :])
Example #21
  def _test(self, x, alpha):
      val_true = dirichlet_logpdf_vec(x, alpha)
      with self.test_session():
          self.assertAllClose(
              dirichlet.logpdf(x, alpha=alpha).eval(), val_true)
Example #22
def _test_logpdf(x, alpha=np.array([0.5, 0.5])):
    xtf = tf.constant(x)
    val_true = dirichlet_logpdf_vec(x, alpha)
    _assert_eq(dirichlet.logpdf(xtf, alpha), val_true)
    _assert_eq(dirichlet.logpdf(xtf, tf.convert_to_tensor(alpha)), val_true)
Example #23
def _test_logpdf(x, alpha=np.array([0.5, 0.5])):
    xtf = tf.constant(x)
    val_true = dirichlet_logpdf_vec(x, alpha)
    _assert_eq(dirichlet.logpdf(xtf, alpha), val_true)
    _assert_eq(dirichlet.logpdf(xtf, tf.convert_to_tensor(alpha)), val_true)
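The tests above compare dirichlet.logpdf against a dirichlet_logpdf_vec reference that is not shown in these snippets. A plausible SciPy-based sketch of such a helper (an assumption, not the project's actual implementation):

import numpy as np
from scipy import stats

def dirichlet_logpdf_vec(x, alpha):
    # Evaluate the Dirichlet log-density with SciPy, looping over rows
    # when x is a batch of points rather than a single one.
    if x.ndim == 1:
        return stats.dirichlet.logpdf(x, alpha)
    return np.array([stats.dirichlet.logpdf(row, alpha) for row in x])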