Code example #1
def _test_logpdf_scalar(x):
    xtf = tf.constant(x)
    val_true = stats.norm.logpdf(x)
    _assert_eq(norm.logpdf(xtf), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.ones([1])), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.diag(tf.ones([1]))), val_true)
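These test snippets all lean on a shared helper, _assert_eq, that never appears in the excerpts. A minimal sketch of the assumed scaffolding (the import paths and the tolerance are assumptions, not taken from the source):

import numpy as np
import tensorflow as tf
from scipy import stats

from edward.stats import norm  # import path assumed

def _assert_eq(tf_expr, val_true, rtol=1e-5):
    # Evaluate the TensorFlow expression and compare it to SciPy's value.
    with tf.Session() as sess:
        np.testing.assert_allclose(sess.run(tf_expr), val_true, rtol=rtol)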
Code example #2
    def log_prob(self, xs, zs):
        """Return scalar, the log joint density log p(xs, zs)."""
        if self.prior == 'Lognormal':
            log_prior = tf.reduce_sum(lognorm.logpdf(zs['z'], self.prior_std))
        elif self.prior == 'Gaussian':
            log_prior = tf.reduce_sum(norm.logpdf(zs['z'], 0.0,
                                                  self.prior_std))
        else:
            raise NotImplementedError("prior not available.")

        s = tf.reshape(zs['z'][:self.n_rows * self.K], [self.n_rows, self.K])
        t = tf.reshape(zs['z'][self.n_cols * self.K:], [self.n_cols, self.K])

        xp = tf.matmul(s, t, transpose_b=True)
        if self.interaction == 'multiplicative':
            xp = tf.exp(xp)
        elif self.interaction != 'additive':
            raise NotImplementedError("interaction type unknown.")

        if self.like == 'Gaussian':
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp, 1.0))
        elif self.like == 'Poisson':
            if not (self.interaction == "additive"
                    or self.prior == "Lognormal"):
                raise NotImplementedError(
                    "Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior
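The flat latent vector zs['z'] stacks the row factors ahead of the column factors, and the slice boundaries above (:n_rows * K versus n_cols * K:) only partition it cleanly when n_rows == n_cols. A NumPy sketch of the assumed layout:

import numpy as np

n_rows = n_cols = 4
K = 2  # the slicing above assumes n_rows == n_cols
z = np.random.randn((n_rows + n_cols) * K)
s = z[:n_rows * K].reshape(n_rows, K)  # row factors
t = z[n_cols * K:].reshape(n_cols, K)  # column factors
xp = s.dot(t.T)  # 'additive' interaction; np.exp(xp) gives 'multiplicative'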
Code example #3
  def log_prob(self, xs, zs):
    """Return scalar, the log joint density log p(xs, zs)."""
    if self.prior == 'Lognormal':
      log_prior = tf.reduce_sum(lognorm.logpdf(zs['z'], self.prior_std))
    elif self.prior == 'Gaussian':
      log_prior = tf.reduce_sum(norm.logpdf(zs['z'], 0.0, self.prior_std))
    else:
      raise NotImplementedError("prior not available.")

    s = tf.reshape(zs['z'][:self.n_rows * self.K], [self.n_rows, self.K])
    t = tf.reshape(zs['z'][self.n_cols * self.K:], [self.n_cols, self.K])

    xp = tf.matmul(s, t, transpose_b=True)
    if self.interaction == 'multiplicative':
      xp = tf.exp(xp)
    elif self.interaction != 'additive':
      raise NotImplementedError("interaction type unknown.")

    if self.like == 'Gaussian':
      log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp, 1.0))
    elif self.like == 'Poisson':
      if not (self.interaction == "additive" or self.prior == "Lognormal"):
        raise NotImplementedError("Rate of Poisson has to be nonnegatve.")

      log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
    else:
      raise NotImplementedError("likelihood not available.")

    return log_lik + log_prior
Code example #4
File: normal_map.py Project: xsongx/edward
 def log_prob(self, xs, zs):
     log_prior = norm.logpdf(zs, self.mu, self.std)
     log_lik = tf.pack([
         tf.reduce_sum(norm.logpdf(xs['x'], z, self.std))
         for z in tf.unpack(zs)
     ])
     return log_lik + log_prior
Code example #5
def _test_logpdf_scalar(scalar):
    x = tf.constant(scalar)
    val_true = stats.norm.logpdf(scalar)
    _assert_eq(norm.logpdf(x), val_true)
    _assert_eq(norm.logpdf(x, tf.zeros([1]), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(x, tf.zeros([1]), tf.ones([1])), val_true)
    _assert_eq(norm.logpdf(x, tf.zeros([1]), tf.diag(tf.ones([1]))), val_true)
Code example #6
def test_logpdf_1d():
    x = tf.constant([0.0])
    val_true = stats.norm.logpdf([0.0])
    _assert_eq(norm.logpdf(x), val_true)
    _assert_eq(norm.logpdf(x, tf.constant(0.0), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(x, tf.constant([0.0]), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(x, tf.constant([0.0]), tf.constant([1.0])), val_true)
Code example #7
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            # Do the unconstrained to constrained transformation for MAP here.
            pi, mus, sigmas = self.unpack_params(z)
            pi = tf.sigmoid(pi)
            pi = tf.concat(0, [
                pi[0:(self.K - 1)],
                tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K - 1)]), 0)
            ])
            sigmas = tf.nn.softplus(sigmas)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k * self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k * self.D + 1], 0,
                                         np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k * self.D], self.a,
                                             self.b)
                log_prior += invgamma.logpdf(sigmas[k * self.D + 1], self.a,
                                             self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(
                        x, mus[(k * self.D):((k + 1) * self.D)],
                        sigmas[(k * self.D):((k + 1) * self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
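The unconstrained-to-constrained transform above squashes the first K - 1 entries through a sigmoid and gives the last mixing weight the remainder. A NumPy sketch of the same transform (note the remainder can go negative when the sigmoids sum past one, an inherent caveat of this parameterization):

import numpy as np

def constrain_pi(pi_free, K):
    p = 1.0 / (1.0 + np.exp(-pi_free[:K - 1]))  # sigmoid of the free parameters
    return np.append(p, 1.0 - p.sum())          # last weight takes the remainder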
Code example #8
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            pi, mus, sigmas = self.unpack_params(z)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k * self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k * self.D + 1], 0,
                                         np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k * self.D], self.a,
                                             self.b)
                log_prior += invgamma.logpdf(sigmas[k * self.D + 1], self.a,
                                             self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(
                        x, mus[(k * self.D):((k + 1) * self.D)],
                        sigmas[(k * self.D):((k + 1) * self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Code example #9
File: mixture_gaussian.py Project: Beronx86/edward
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            pi, mus, sigmas = self.unpack_params(z)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(x,
                        mus[(k*self.D):((k+1)*self.D)],
                        sigmas[(k*self.D):((k+1)*self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Code example #10
    def log_prob(self, xs, zs):
        """Return scalar, the log joint density log p(xs, zs)."""
        if self.prior == 'Lognormal':
            log_prior = tf.reduce_sum(lognorm.logpdf(zs['z'], self.prior_std))
        elif self.prior == 'Gaussian':
            log_prior = tf.reduce_sum(norm.logpdf(zs['z'], 0.0,
                                                  self.prior_std))
        else:
            raise NotImplementedError("prior not available.")

        z = tf.reshape(zs['z'], [self.N, self.K])
        if self.dist == 'euclidean':
            xp = tf.matmul(tf.ones([1, self.N]),
                           tf.reduce_sum(z * z, 1, keep_dims=True))
            xp = xp + tf.transpose(xp) - 2 * tf.matmul(z, z, transpose_b=True)
            xp = 1.0 / xp
        elif self.dist == 'cosine':
            xp = tf.matmul(z, z, transpose_b=True)

        if self.like == 'Gaussian':
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp, 1.0))
        elif self.like == 'Poisson':
            if not (self.dist == 'euclidean' or self.prior == "Lognormal"):
                raise NotImplementedError(
                    "Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior
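The 'euclidean' branch builds all pairwise squared distances from the identity ||z_i - z_j||^2 = ||z_i||^2 + ||z_j||^2 - 2 z_i . z_j; note the zero diagonal, which Code example #13 guards with a large diagonal offset before inverting. A NumPy check of the expansion:

import numpy as np

z = np.random.randn(5, 2)            # N = 5 points in K = 2 dimensions
sq = (z * z).sum(1, keepdims=True)   # column of squared norms, shape (N, 1)
d2 = sq + sq.T - 2 * z.dot(z.T)      # entry (i, j) is ||z_i - z_j||^2
assert np.allclose(d2, ((z[:, None, :] - z[None, :, :]) ** 2).sum(-1))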
Code example #11
 def log_prob(self, xs, zs):
   x, y = xs['x'], xs['y']
   w, b = zs['w'], zs['b']
   log_prior = tf.reduce_sum(norm.logpdf(w, 0.0, self.prior_std))
   log_prior += tf.reduce_sum(norm.logpdf(b, 0.0, self.prior_std))
   log_lik = tf.reduce_sum(norm.logpdf(y, ed.dot(x, w) + b, self.lik_std))
   return log_lik + log_prior
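For a sanity check against the TensorFlow graph, the same Bayesian linear regression log joint in plain NumPy/SciPy (shapes assumed: x is (N, D), w is (D,), b scalar):

import numpy as np
from scipy import stats

def log_joint(x, y, w, b, prior_std=1.0, lik_std=1.0):
    log_prior = stats.norm.logpdf(w, 0.0, prior_std).sum()
    log_prior += stats.norm.logpdf(b, 0.0, prior_std).sum()
    log_lik = stats.norm.logpdf(y, x.dot(w) + b, lik_std).sum()
    return log_lik + log_prior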
Code example #12
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs)[0]
        # Loop over each mini-batch zs[b,:]
        log_prob = []
        for z in tf.unpack(zs):
            # Do the unconstrained to constrained transformation for MAP here.
            pi, mus, sigmas = self.unpack_params(z)
            pi = tf.sigmoid(pi)
            pi = tf.concat(0, [pi[0:(self.K-1)],
                               tf.expand_dims(1.0 - tf.reduce_sum(pi[0:(self.K-1)]), 0)])
            sigmas = tf.nn.softplus(sigmas)
            log_prior = dirichlet.logpdf(pi, self.alpha)
            for k in xrange(self.K):
                log_prior += norm.logpdf(mus[k*self.D], 0, np.sqrt(self.c))
                log_prior += norm.logpdf(mus[k*self.D+1], 0, np.sqrt(self.c))
                log_prior += invgamma.logpdf(sigmas[k*self.D], self.a, self.b)
                log_prior += invgamma.logpdf(sigmas[k*self.D+1], self.a, self.b)

            log_lik = tf.constant(0.0, dtype=tf.float32)
            for x in tf.unpack(xs):
                for k in xrange(self.K):
                    log_lik += tf.log(pi[k])
                    log_lik += multivariate_normal.logpdf(x,
                        mus[(k*self.D):((k+1)*self.D)],
                        sigmas[(k*self.D):((k+1)*self.D)])

            log_prob += [log_prior + log_lik]

        return tf.pack(log_prob)
Code example #13
  def log_prob(self, xs, zs):
    """Return scalar, the log joint density log p(xs, zs)."""
    if self.prior == 'Lognormal':
      log_prior = tf.reduce_sum(lognorm.logpdf(zs['z'], self.prior_std))
    elif self.prior == 'Gaussian':
      log_prior = tf.reduce_sum(norm.logpdf(zs['z'], 0.0, self.prior_std))
    else:
      raise NotImplementedError("prior not available.")

    z = tf.reshape(zs['z'], [self.N, self.K])
    if self.dist == 'euclidean':
      xp = tf.tile(tf.reduce_sum(tf.pow(z, 2), 1, keep_dims=True), [1, self.N])
      xp = xp + tf.transpose(xp) - 2 * tf.matmul(z, z, transpose_b=True)
      xp = 1.0 / tf.sqrt(xp + tf.diag(tf.zeros(self.N) + 1e3))
    elif self.dist == 'cosine':
      xp = tf.matmul(z, z, transpose_b=True)

    if self.like == 'Gaussian':
      log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp, 1.0))
    elif self.like == 'Poisson':
      if not (self.dist == 'euclidean' or self.prior == "Lognormal"):
        raise NotImplementedError("Rate of Poisson has to be nonnegatve.")

      log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
    else:
      raise NotImplementedError("likelihood not available.")

    return log_lik + log_prior
Code example #14
 def log_prob(self, xs, zs):
   x, y = xs['x'], xs['y']
   w, b = zs['w'], zs['b']
   log_prior = tf.reduce_sum(norm.logpdf(w, 0.0, self.prior_std))
   log_prior += tf.reduce_sum(norm.logpdf(b, 0.0, self.prior_std))
   log_lik = tf.reduce_sum(norm.logpdf(y, ed.dot(x, w) + b, self.lik_std))
   return log_lik + log_prior
Code example #15
File: test_norm_logpdf.py Project: appcoreopc/edward
def _test(x, loc=0, scale=1):
    xtf = tf.constant(x)
    val_true = stats.norm.logpdf(x, loc, scale)
    _assert_eq(norm.logpdf(xtf, loc, scale), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant(loc), tf.constant(scale)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([loc]), tf.constant(scale)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant(loc), tf.constant([scale])), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([loc]), tf.constant([scale])), val_true)
Code example #16
 def log_prob(self, xs, zs):
     """Return scalar, the log joint density log p(xs, zs)."""
     x, y = xs['x'], xs['y']
     zs = zs['z']
     log_prior = tf.reduce_sum(norm.logpdf(zs, 0.0, self.prior_std))
     mu = self.neural_network(x, zs)
     log_lik = tf.reduce_sum(norm.logpdf(y, mu, self.lik_std))
     return log_lik + log_prior
Code example #17
File: test_norm_logpdf.py Project: xsongx/edward
 def _test(self, x, loc=0, scale=1):
     xtf = tf.constant(x)
     val_true = stats.norm.logpdf(x, loc, scale)
     with self.test_session():
         self.assertAllClose(norm.logpdf(xtf, loc, scale).eval(), val_true)
         self.assertAllClose(
             norm.logpdf(xtf, tf.constant(loc), tf.constant(scale)).eval(),
             val_true)
Code example #18
 def log_prob(self, xs, zs):
   """Return scalar, the log joint density log p(xs, zs)."""
   x, y = xs['x'], xs['y']
   w, b = zs['w'], zs['b']
   log_prior = tf.reduce_sum(norm.logpdf(w, 0.0, self.prior_std))
   log_prior += tf.reduce_sum(norm.logpdf(b, 0.0, self.prior_std))
   log_lik = tf.reduce_sum(norm.logpdf(y, ed.dot(x, w) + b, self.lik_std))
   return log_lik + log_prior
Code example #19
File: tf_bayesian_nn.py Project: blei-lab/edward
 def log_prob(self, xs, zs):
   """Return scalar, the log joint density log p(xs, zs)."""
   x, y = xs['x'], xs['y']
   zs = zs['z']
   log_prior = tf.reduce_sum(norm.logpdf(zs, 0.0, self.prior_std))
   mu = self.neural_network(x, zs)
   log_lik = tf.reduce_sum(norm.logpdf(y, mu, self.lik_std))
   return log_lik + log_prior
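None of the excerpts show neural_network itself; a minimal sketch consistent with how it is called (the architecture and the packing of the flat zs vector into weights are assumptions):

import tensorflow as tf

def neural_network(x, z, D=1, H=2):
    # Unpack a flat parameter vector z (length D*H + 2*H + 1) into the
    # weights of a one-hidden-layer network; x has shape [N, D].
    W0 = tf.reshape(z[:D * H], [D, H])
    b0 = z[D * H:D * H + H]
    W1 = tf.reshape(z[D * H + H:D * H + 2 * H], [H, 1])
    b1 = z[D * H + 2 * H:D * H + 2 * H + 1]
    h = tf.tanh(tf.matmul(x, W0) + b0)
    return tf.squeeze(tf.matmul(h, W1)) + b1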
Code example #20
 def log_prob(self, xs, zs):
     x, y = xs['x'], xs['y']
     w, b = zs['w'], zs['b']
     log_prior = tf.reduce_sum(norm.logpdf(w, 0.0, self.prior_std))
     log_prior += tf.reduce_sum(norm.logpdf(b, 0.0, self.prior_std))
     log_lik = tf.reduce_sum(
         bernoulli.logpmf(y, p=self.inv_link(ed.dot(x, w) + b)))
     return log_lik + log_prior
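inv_link here is presumably a logistic sigmoid; the Bernoulli log-pmf it feeds reduces to the familiar cross-entropy form. A NumPy sketch:

import numpy as np

def bernoulli_logpmf(y, p):
    # log p(y | p) for y in {0, 1}
    return y * np.log(p) + (1 - y) * np.log1p(-p)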
Code example #21
 def log_prob(self, xs, zs):
   x, y = xs['x'], xs['y']
   w, b = zs['w'], zs['b']
   log_prior = tf.reduce_sum(norm.logpdf(w, 0.0, self.prior_std))
   log_prior += tf.reduce_sum(norm.logpdf(b, 0.0, self.prior_std))
   log_lik = tf.reduce_sum(bernoulli.logpmf(y,
                           p=self.inv_link(ed.dot(x, w) + b)))
   return log_lik + log_prior
Code example #22
 def _test(self, x, mu, sigma):
   xtf = tf.constant(x)
   val_true = stats.norm.logpdf(x, mu, sigma)
   with self.test_session():
     self.assertAllClose(norm.logpdf(xtf, mu, sigma).eval(), val_true)
     self.assertAllClose(
         norm.logpdf(xtf, tf.constant(mu), tf.constant(sigma)).eval(),
         val_true)
Code example #23
def _test_logpdf_scalar(x):
    xtf = tf.constant(x)
    val_true = stats.norm.logpdf(x)
    _assert_eq(norm.logpdf(xtf), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.ones([1])), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.diag(tf.ones([1]))),
               val_true)
Code example #24
def test_logpdf_1by1mat():
    x = [[0.0]]
    xtf = tf.constant([[0.0]])
    val_true = stats.norm.logpdf(x)
    _assert_eq(norm.logpdf(xtf), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant(0.0), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([0.0]), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant(0.0), tf.constant([1.0])), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([0.0]), tf.constant([1.0])), val_true)
Code example #25
File: gaussian_map.py Project: bakersfieldag/edward
 def log_prob(self, xs, zs):
     # mu and Sigma are module-level constants in the original example.
     log_prior = tf.pack([norm.logpdf(z, mu, Sigma)
                          for z in tf.unpack(zs)])
     log_lik = tf.pack([
         tf.reduce_sum(norm.logpdf(xs, z, 0 * xs + Sigma))  # broadcast Sigma to xs's shape
         for z in tf.unpack(zs)])
     return log_lik + log_prior
Code example #26
def test_logpdf_1d():
    x = [0.0]
    xtf = tf.constant([0.0])
    val_true = stats.norm.logpdf(x)
    _assert_eq(norm.logpdf(xtf), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant(0.0), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([0.0]), tf.constant(1.0)),
               val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([0.0]), tf.constant([1.0])),
               val_true)
Code example #27
def _test(x, loc=0, scale=1):
    xtf = tf.constant(x)
    val_true = stats.norm.logpdf(x, loc, scale)
    _assert_eq(norm.logpdf(xtf, loc, scale), val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant(loc), tf.constant(scale)),
               val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([loc]), tf.constant(scale)),
               val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant(loc), tf.constant([scale])),
               val_true)
    _assert_eq(norm.logpdf(xtf, tf.constant([loc]), tf.constant([scale])),
               val_true)
Code example #28
 def log_lik(self, xs, zs):
     """Return a vector [log p(xs | zs[1,:]), ..., log p(xs | zs[S,:])]."""
     x, y = xs['x'], xs['y']
     mus = self.neural_network(x, zs)
     log_lik = tf.reduce_sum(
         norm.logpdf(y, loc=mus, scale=self.lik_variance), 1)
     return log_lik
Code example #29
    def log_prob(self, xs, zs):
        """Return scalar, the log joint density log p(xs, zs)."""
        x = xs["x"]
        pi, mus, sigmas = zs["pi"], zs["mu"], zs["sigma"]
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # log-likelihood is
        # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
        # Create a K x N matrix, whose entry (k, n) is
        # log pi_k + log N(x_n; mu_k, sigma_k).
        N = get_dims(x)[0]
        matrix = []
        for k in range(self.K):
            matrix += [
                tf.ones(N) * tf.log(pi[k])
                + multivariate_normal_diag.logpdf(
                    x, mus[(k * self.D) : ((k + 1) * self.D)], sigmas[(k * self.D) : ((k + 1) * self.D)]
                )
            ]

        matrix = tf.pack(matrix)
        # log_sum_exp() along the rows is a vector, whose nth
        # element is the log-likelihood of data point x_n.
        vector = log_sum_exp(matrix, 0)
        # Sum over data points to get the full log-likelihood.
        log_lik = tf.reduce_sum(vector)

        return log_prior + log_lik
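log_sum_exp here is the standard max-shifted reduction that keeps per-point likelihoods from underflowing; a NumPy sketch of the same computation along the K axis of the K x N matrix:

import numpy as np

def log_sum_exp(m, axis=0):
    # Stable log(sum(exp(m), axis)): subtract the max before exponentiating.
    mx = m.max(axis=axis)
    return mx + np.log(np.exp(m - np.expand_dims(mx, axis)).sum(axis=axis))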
Code example #30
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x = xs['x']
        pi, mus, sigmas = zs
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

        # Loop over each sample zs[s, :].
        log_lik = []
        N = get_dims(x)[0]
        n_samples = get_dims(pi)[0]
        for s in range(n_samples):
            # log-likelihood is
            # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
            # Create a K x N matrix, whose entry (k, n) is
            # log pi_k + log N(x_n; mu_k, sigma_k).
            matrix = []
            for k in range(self.K):
                matrix += [tf.ones(N)*tf.log(pi[s, k]) +
                           multivariate_normal.logpdf(x,
                               mus[s, (k*self.D):((k+1)*self.D)],
                               sigmas[s, (k*self.D):((k+1)*self.D)])]

            matrix = tf.pack(matrix)
            # log_sum_exp() along the rows is a vector, whose nth
            # element is the log-likelihood of data point x_n.
            vector = log_sum_exp(matrix, 0)
            # Sum over data points to get the full log-likelihood.
            log_lik_z = tf.reduce_sum(vector)
            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Code example #31
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        if self.prior == 'Lognormal':
            zs = tf.exp(zs)
        elif self.prior != 'Gaussian':
            raise NotImplementedError("prior not available.")

        log_prior = -self.prior_variance * tf.reduce_sum(zs*zs)

        s = tf.reshape(zs[:,:self.n_rows*self.K], [self.n_rows,self.K])
        t = tf.reshape(zs[:,self.n_cols*self.K:], [self.n_cols,self.K])

        xp = tf.matmul(s, t, transpose_b=True)
        if self.interaction == 'multiplicative':
            xp = tf.exp(xp)
        elif self.interaction != 'additive':
            raise NotImplementedError("interaction type unknown.")

        if self.like == 'Gaussian':
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp))
        elif self.like == 'Poisson':
            if not (self.interaction == "additive" or self.prior == "Lognormal"):
                raise NotImplementedError("Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior
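Note that -prior_variance * sum(z * z) is the Gaussian log-prior only up to an additive constant, and despite its name prior_variance plays the role of half a precision, 1 / (2 sigma^2). A quick NumPy check of the identity:

import numpy as np
from scipy import stats

z = np.random.randn(4)
sigma = 1.0
exact = stats.norm.logpdf(z, 0.0, sigma).sum()
unnormalized = -0.5 / sigma ** 2 * (z * z).sum()
# The two differ only by the log normalizing constant:
assert np.isclose(exact - unnormalized, -len(z) * np.log(sigma * np.sqrt(2 * np.pi)))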
Code example #32
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        if self.prior == 'Lognormal':
            zs = tf.exp(zs)
        elif self.prior != 'Gaussian':
            raise NotImplementedError("prior not available.")

        log_prior = -self.prior_variance * tf.reduce_sum(zs*zs)

        z = tf.reshape(zs, [self.N,self.K])
        if self.dist == 'euclidean':
            xp = tf.matmul(tf.ones([1,self.N]), tf.reduce_sum(z*z, 1, keep_dims=True))
            xp = xp + tf.transpose(xp) - 2*tf.matmul(z, z, transpose_b=True)
            xp = 1.0/xp
        elif self.dist == 'cosine':
            xp = tf.matmul(z, z, transpose_b=True)

        if self.like == 'Gaussian':
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp))
        elif self.like == 'Poisson':
            if not (self.dist == 'euclidean' or self.prior == "Lognormal"):
                raise NotImplementedError("Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior
Code example #33
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        x = xs['x']
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each sample zs[s, :].
        log_lik = []
        N = get_dims(x)[0]
        n_samples = get_dims(pi)[0]
        for s in range(n_samples):
            # log-likelihood is
            # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
            # Create a K x N matrix, whose entry (k, n) is
            # log pi_k + log N(x_n; mu_k, sigma_k).
            matrix = []
            for k in range(self.K):
                matrix += [
                    tf.ones(N) * tf.log(pi[s, k]) + multivariate_normal.logpdf(
                        x, mus[s, (k * self.D):((k + 1) * self.D)],
                        sigmas[s, (k * self.D):((k + 1) * self.D)])
                ]

            matrix = tf.pack(matrix)
            # log_sum_exp() along the rows is a vector, whose nth
            # element is the log-likelihood of data point x_n.
            vector = log_sum_exp(matrix, 0)
            # Sum over data points to get the full log-likelihood.
            log_lik_z = tf.reduce_sum(vector)
            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Code example #34
  def log_prob(self, xs, zs):
    """Return scalar, the log joint density log p(xs, zs)."""
    x = xs['x']
    pi, mus, sigmas = zs['pi'], zs['mu'], zs['sigma']
    log_prior = dirichlet.logpdf(pi, self.alpha)
    log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c))
    log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

    # log-likelihood is
    # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
    # Create a K x N matrix, whose entry (k, n) is
    # log pi_k + log N(x_n; mu_k, sigma_k).
    N = get_dims(x)[0]
    matrix = []
    for k in range(self.K):
      matrix += [tf.ones(N) * tf.log(pi[k]) +
                 multivariate_normal_diag.logpdf(x,
                 mus[(k * self.D):((k + 1) * self.D)],
                 sigmas[(k * self.D):((k + 1) * self.D)])]

    matrix = tf.pack(matrix)
    # log_sum_exp() along the rows is a vector, whose nth
    # element is the log-likelihood of data point x_n.
    vector = log_sum_exp(matrix, 0)
    # Sum over data points to get the full log-likelihood.
    log_lik = tf.reduce_sum(vector)

    return log_prior + log_lik
Code example #35
File: matrix_factorization.py Project: xsongx/edward
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        if self.prior == 'Lognormal':
            zs = tf.exp(zs)
        elif self.prior != 'Gaussian':
            raise NotImplementedError("prior not available.")

        log_prior = -self.prior_variance * tf.reduce_sum(zs * zs)

        s = tf.reshape(zs[:, :self.n_rows * self.K], [self.n_rows, self.K])
        t = tf.reshape(zs[:, self.n_cols * self.K:], [self.n_cols, self.K])

        xp = tf.matmul(s, t, transpose_b=True)
        if self.interaction == 'multiplicative':
            xp = tf.exp(xp)
        elif self.interaction != 'additive':
            raise NotImplementedError("interaction type unknown.")

        if self.like == 'Gaussian':
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp))
        elif self.like == 'Poisson':
            if not (self.interaction == "additive"
                    or self.prior == "Lognormal"):
                raise NotImplementedError(
                    "Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior
Code example #36
    def log_prob(self, xs, zs):
        """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        if self.prior == 'Lognormal':
            zs = tf.exp(zs)
        elif self.prior != 'Gaussian':
            raise NotImplementedError("prior not available.")

        log_prior = -self.prior_variance * tf.reduce_sum(zs * zs)

        z = tf.reshape(zs, [self.N, self.K])
        if self.dist == 'euclidean':
            xp = tf.matmul(tf.ones([1, self.N]),
                           tf.reduce_sum(z * z, 1, keep_dims=True))
            xp = xp + tf.transpose(xp) - 2 * tf.matmul(z, z, transpose_b=True)
            xp = 1.0 / xp
        elif self.dist == 'cosine':
            xp = tf.matmul(z, z, transpose_b=True)

        if self.like == 'Gaussian':
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp))
        elif self.like == 'Poisson':
            if not (self.dist == 'euclidean' or self.prior == "Lognormal"):
                raise NotImplementedError(
                    "Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior
Code example #37
File: variationals.py Project: 313-Ventures/edward
    def log_prob_zi(self, i, zs):
        """log q(z_i | lambda)"""
        if i >= self.num_factors:
            raise IndexError()

        mi = self.m[i]
        si = self.s[i]
        return norm.logpdf(zs[:, i], mi, si)
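Under the mean-field factorization these per-factor terms add up to the full log q(z | lambda); a NumPy sketch of the composition:

import numpy as np
from scipy import stats

m, s = np.zeros(3), np.ones(3)  # per-factor means and scales
zs = np.random.randn(10, 3)     # 10 samples of 3 factors
log_q = sum(stats.norm.logpdf(zs[:, i], m[i], s[i]) for i in range(3))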
Code example #38
File: distributions.py Project: crack521/edward
    def log_prob_i(self, i, xs):
        """log p(x_i | params)"""
        if i >= self.num_factors:
            raise IndexError()

        loci = self.loc[i]
        scalei = self.scale[i]
        return norm.logpdf(xs[:, i], loci, scalei)
Code example #39
File: variationals.py Project: fangzheng354/edward
    def log_prob_zi(self, i, z):
        """log q(z_i | lambda_i)"""
        if i >= self.num_vars:
            raise IndexError()

        mi = self.m[i]
        si = self.s[i]
        return tf.pack([norm.logpdf(zm[i], mi, si) for zm in tf.unpack(z)])
Code example #40
    def log_prob_zi(self, i, zs):
        """log q(z_i | lambda)"""
        if i >= self.num_factors:
            raise IndexError()

        mi = self.m[i]
        si = self.s[i]
        return norm.logpdf(zs[:, i], mi, si)
Code example #41
 def log_prob(self, xs, zs):
     """Return scalar, the log joint density log p(xs, zs)."""
     # Note there are no parameters we're being Bayesian about. The
     # parameters are baked into how we specify the neural networks.
     X, y = xs['X'], xs['y']
     self.neural_network(X)
     result = self.pi * tf.exp(norm.logpdf(y, self.mus, self.sigmas))
     result = tf.log(tf.reduce_sum(result, 1))
     return tf.reduce_sum(result)
Code example #42
 def log_prob(self, xs, zs):
     """log p((xs,ys), (z,theta)) = sum_{n=1}^N log p((xs[n,:],ys[n]), theta)"""
     # Note there are no parameters we're being Bayesian about. The
     # parameters are baked into how we specify the neural networks.
     X, y = xs['X'], xs['y']
     self.neural_network(X)
     result = self.pi * tf.exp(norm.logpdf(y, self.mus, self.sigmas))
     result = tf.log(tf.reduce_sum(result, 1))
     return tf.reduce_sum(result)
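Weighting exponentiated log-densities by pi and then taking a log can underflow for points far from every component; a numerically safer equivalent stays in log space with the same log_sum_exp reduction the mixture-of-Gaussians snippets use (a sketch; the import paths are assumptions):

import tensorflow as tf
from edward.stats import norm        # import paths assumed
from edward.util import log_sum_exp

def mdn_log_lik(y, pi, mus, sigmas):
    # log sum_k pi_k N(y; mu_k, sigma_k), computed stably per data point.
    comp = norm.logpdf(y, mus, sigmas) + tf.log(pi)  # [N, K] component terms
    return tf.reduce_sum(log_sum_exp(comp, 1))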
Code example #43
File: variationals.py Project: bakersfieldag/edward
    def log_prob_zi(self, i, z):
        """log q(z_i | lambda_i)"""
        if i >= self.num_vars:
            raise IndexError()

        mi = self.m[i]
        si = self.s[i]
        return tf.pack([norm.logpdf(zm[i], mi, si)
                        for zm in tf.unpack(z)])
Code example #44
    def log_prob(self, xs, zs):
        """Return scalar, the log joint density log p(xs, zs).

        Given n_minibatch data points and samples of the variables:
        summing over the data points is valid, since the joint density
        is the only data-dependent term in the gradient estimate and it
        sits inside a log, so the per-point terms add. Summing over the
        variable samples would not be valid; they are handled one at a
        time.
        """
        x = xs['x']
        pi, mus, sigmas = zs['pi'], zs['mu'], zs['sigma']

        # Shapes: x is [n_minibatch, D]; pi is [K]; mus and sigmas are [K * D].

        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # log-likelihood is
        # sum_{n=1}^N log sum_{k=1}^K exp( log pi_k + log N(x_n; mu_k, sigma_k) )
        # Create a K x N matrix, whose entry (k, n) is
        # log pi_k + log N(x_n; mu_k, sigma_k).
        n_minibatch = get_dims(x)[0]  # get_dims(x) is [n_minibatch, D]
        # The K x N matrix is built so that log_sum_exp can subtract the
        # per-column maximum before exponentiating.

        matrix = []
        for k in range(self.K):
            matrix += [
                tf.ones(n_minibatch) * tf.log(pi[k]) +
                multivariate_normal_diag.logpdf(
                    x, mus[(k * self.D):((k + 1) * self.D)],
                    sigmas[(k * self.D):((k + 1) * self.D)])
            ]

        matrix = tf.pack(matrix)
        # log_sum_exp() along the rows is a vector, whose nth
        # element is the log-likelihood of data point x_n.
        vector = log_sum_exp(matrix, 0)
        # Sum over data points to get the full log-likelihood.
        log_lik = tf.reduce_sum(vector)

        return log_prior + log_lik
Code example #45
 def log_prob(self, xs, zs=None):
     """log p((xs,ys), (z,theta)) = sum_{n=1}^N log p((xs[n,:],ys[n]), theta)"""
     # Note there are no parameters we're being Bayesian about. The
     # parameters are baked into how we specify the neural networks.
     X, y = xs['X'], xs['y']
     self.neural_network(X)
     result = tf.exp(norm.logpdf(y, self.mus, self.sigmas))
     result = tf.mul(result, self.pi)
     result = tf.reduce_sum(result, 1)
     result = tf.log(result)
     return tf.reduce_sum(result)
Code example #46
 def log_prob(self, xs, zs=None):
     """log p((xs,ys), (z,theta)) = sum_{n=1}^N log p((xs[n,:],ys[n]), theta)"""
     # Note there are no parameters we're being Bayesian about. The
     # parameters are baked into how we specify the neural networks.
     X, y = xs
     self.mapping(X)
     result = tf.exp(norm.logpdf(y, self.mus, self.sigmas))
     result = tf.mul(result, self.pi)
     result = tf.reduce_sum(result, 1, keep_dims=True)
     result = tf.log(result)
     return tf.reduce_sum(result)
Code example #47
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs['x'])[0]
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each sample zs[b,:]
        log_lik = []
        n_samples = get_dims(zs)[0]
        for s in range(n_samples):
            log_lik_z = N*tf.reduce_sum(tf.log(pi))
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs['x'],
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Code example #48
    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        N = get_dims(xs['x'])[0]
        pi, mus, sigmas = self.unpack_params(zs)
        log_prior = dirichlet.logpdf(pi, self.alpha)
        log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)))
        log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b))

        # Loop over each mini-batch zs[b,:]
        log_lik = []
        n_minibatch = get_dims(zs)[0]
        for s in range(n_minibatch):
            log_lik_z = N*tf.reduce_sum(tf.log(pi))
            for k in range(self.K):
                log_lik_z += tf.reduce_sum(multivariate_normal.logpdf(xs['x'],
                    mus[s, (k*self.D):((k+1)*self.D)],
                    sigmas[s, (k*self.D):((k+1)*self.D)]))

            log_lik += [log_lik_z]

        return log_prior + tf.pack(log_lik)
Code example #49
File: normal_map.py Project: TalkingData/edward
 def log_prob(self, xs, zs):
     log_prior = norm.logpdf(zs, self.mu, self.std)
     log_lik = tf.pack([tf.reduce_sum(norm.logpdf(xs['x'], z, self.std))
                        for z in tf.unpack(zs)])
     return log_lik + log_prior
Code example #50
File: gaussian_map.py Project: andybaoxv/edward
 def log_prob(self, xs, zs):
     log_prior = tf.pack([norm.logpdf(z, mu, Sigma)
                          for z in tf.unpack(zs)])
     log_lik = tf.pack([tf.reduce_sum(norm.logpdf(xs, z, Sigma))
                        for z in tf.unpack(zs)])
     return log_lik + log_prior
Code example #51
 def log_prob(self, xs, zs):
     return tf.pack([norm.logpdf(z, self.mu, self.std)
                     for z in tf.unpack(zs)])
Code example #52
 def _test(self, x, loc=0, scale=1):
     xtf = tf.constant(x)
     val_true = stats.norm.logpdf(x, loc, scale)
     with self.test_session():
         self.assertAllClose(norm.logpdf(xtf, loc, scale).eval(), val_true)
         self.assertAllClose(norm.logpdf(xtf, tf.constant(loc), tf.constant(scale)).eval(), val_true)
Code example #53
File: normal.py Project: 313-Ventures/edward
 def log_prob(self, xs, zs):
     return norm.logpdf(zs, self.mu, self.std)
Code example #54
 def log_prob(self, xs, zs):
     return norm.logpdf(zs['z'], 1.0, 1.0)
Code example #55
 def log_lik(self, xs, zs):
     """Return scalar, the log-likelihood p(xs | zs)."""
     x, y = xs['x'], xs['y']
     mu = self.neural_network(x, zs['z'])
     log_lik = tf.reduce_sum(norm.logpdf(y, mu, self.lik_std))
     return log_lik
Code example #56
 def log_prob(self, xs, zs):
     log_prior = norm.logpdf(zs['mu'], 1.0, 1.0)
     log_lik = tf.reduce_sum(norm.logpdf(xs['x'], zs['mu'], 1.0))
     return log_lik + log_prior
Code example #57
File: distributions.py Project: diengadji/edward
 def log_prob_idx(self, idx, xs):
     full_idx = (slice(0, None), ) + idx # slice over batch size
     return norm.logpdf(xs[full_idx], self.loc[idx], self.scale[idx])
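full_idx simply prepends a full slice over the leading sample dimension; in NumPy terms:

import numpy as np

xs = np.random.randn(3, 4, 5)    # leading axis is the batch of samples
idx = (1, 2)
full_idx = (slice(None),) + idx  # equivalent to xs[:, 1, 2]
assert np.allclose(xs[full_idx], xs[:, 1, 2])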
Code example #58
File: test_norm_logpdf.py Project: blei-lab/edward
 def _test(self, x, mu, sigma):
     val_true = stats.norm.logpdf(x, mu, sigma)
     with self.test_session():
         self.assertAllClose(norm.logpdf(x, mu=mu, sigma=sigma).eval(), val_true)
Code example #59
 def log_prob(self, xs, zs):
     return tf.pack([norm.logpdf(z, self.mu, self.std) for z in tf.unpack(zs)])
Code example #60
File: test_scale.py Project: blei-lab/edward
 def log_prob(self, xs, zs):
   log_prior = norm.logpdf(zs['mu'], 1.0, 1.0)
   log_lik = tf.reduce_sum(norm.logpdf(xs['x'], zs['mu'], 1.0))
   return log_lik + log_prior