Example #1
    def logp(self, X):
        # Wishart log-density; falls back to self.invalid (NaN-filled)
        # when the degrees of freedom do not satisfy v > p - 1.
        v = self.v
        p = self.p
        Z = self.Z
        inv_S = self.inv_S
        result = -Z + log(det(X)) * (v - p - 1) / 2. - trace(inv_S.dot(X)) / 2.
        return ifelse(gt(v, p - 1), result, self.invalid)
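For reference (not part of the source snippet), the expression above matches the Wishart log-density with scale matrix S and degrees of freedom v:

    \log p(X \mid v, S) = \frac{v - p - 1}{2} \log|X| - \frac{1}{2} \operatorname{tr}(S^{-1} X) - Z,
    \qquad Z = \frac{vp}{2} \log 2 + \frac{v}{2} \log|S| + \log \Gamma_p\!\left(\frac{v}{2}\right)

The ifelse guard enforces the constraint v > p - 1 under which the density is proper.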
Example #2
    def logp(self, X):
        # Inverse-Wishart variant: note the -(v + p + 1)/2 exponent on
        # log|X| and the trace(S X^{-1}) term, versus the Wishart form above.
        v = self.v
        p = self.p
        S = self.S
        Z = self.Z
        result = -Z + log(det(X)) * -(v + p + 1.) / 2. - trace(S.dot(matrix_inverse(X))) / 2.
        return ifelse(gt(v, p - 1), result, self.invalid)
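Again for reference only: this variant is the inverse-Wishart log-density, with the -(v + p + 1)/2 exponent on log|X| and a trace(S X^{-1}) term:

    \log p(X \mid v, S) = -\frac{v + p + 1}{2} \log|X| - \frac{1}{2} \operatorname{tr}(S X^{-1}) - Z,
    \qquad Z = \frac{vp}{2} \log 2 - \frac{v}{2} \log|S| + \log \Gamma_p\!\left(\frac{v}{2}\right)

Note that this Z, with the minus sign on the log|S| term, is exactly the normalizer precomputed in Example #9 below.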
Example #3
File: normal.py  Project: ibab/carl
    def __init__(self, mu, sigma, random_state=None):
        super(MultivariateNormal, self).__init__(mu=mu,
                                                 sigma=sigma,
                                                 random_state=random_state,
                                                 optimizer=None)
        # XXX: The SDP-ness of sigma should be checked upon changes

        # ndim
        self.ndim_ = self.mu.shape[0]
        self.make_(self.ndim_, "ndim_func_", args=[])

        # pdf
        L = linalg.cholesky(self.sigma)
        sigma_det = linalg.det(self.sigma)  # XXX: compute from L instead
        sigma_inv = linalg.matrix_inverse(self.sigma)  # XXX: idem

        self.pdf_ = (
            (1. / T.sqrt((2. * np.pi) ** self.ndim_ * T.abs_(sigma_det))) *
            T.exp(-0.5 * T.sum(T.mul(T.dot(self.X - self.mu,
                                           sigma_inv),
                                     self.X - self.mu),
                               axis=1))).ravel()
        self.make_(self.pdf_, "pdf")

        # -log pdf
        self.nnlf_ = -T.log(self.pdf_)  # XXX: for sure this can be better
        self.make_(self.nnlf_, "nnlf")

        # self.rvs_
        self.make_(T.dot(L, self.X.T).T + self.mu, "rvs_func_")
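For reference (not from the source), pdf_ above is the standard multivariate normal density

    p(x \mid \mu, \Sigma) = (2\pi)^{-d/2} \, |\Sigma|^{-1/2} \exp\!\left(-\tfrac{1}{2}(x - \mu)^{\mathsf T} \Sigma^{-1} (x - \mu)\right),

and rvs_func_ samples by the usual Cholesky transform: if z ~ N(0, I) and Sigma = L L^T, then L z + mu ~ N(mu, Sigma).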
Example #4
    def __init__(self, mu, sigma, random_state=None):
        super(MultivariateNormal, self).__init__(mu=mu, sigma=sigma)
        # XXX: The SDP-ness of sigma should be checked upon changes

        # ndim
        self.ndim_ = self.mu.shape[0]
        self.make_(self.ndim_, "ndim_func_", args=[])

        # pdf
        L = linalg.cholesky(self.sigma)
        sigma_det = linalg.det(self.sigma)  # XXX: compute from L instead
        sigma_inv = linalg.matrix_inverse(self.sigma)  # XXX: idem

        self.pdf_ = ((1. / T.sqrt(
            (2. * np.pi)**self.ndim_ * T.abs_(sigma_det))) *
                     T.exp(-0.5 * T.sum(T.mul(
                         T.dot(self.X - self.mu, sigma_inv), self.X - self.mu),
                                        axis=1))).ravel()
        self.make_(self.pdf_, "pdf")

        # -log pdf
        self.nll_ = -T.log(self.pdf_)  # XXX: for sure this can be better
        self.make_(self.nll_, "nll")

        # self.rvs_
        self.make_(T.dot(L, self.X.T).T + self.mu, "rvs_func_")
Example #5
    def logp(self, value):
        mu = self.mu
        tau = self.tau

        delta = value - mu
        k = tau.shape[0]

        return 1/2. * (-k * log(2*pi) + log(det(tau)) - dot(delta.T, dot(tau, delta)))
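For reference, this is the multivariate normal log-density parameterized by the precision matrix tau (the inverse covariance), with k the dimensionality:

    \log p(x \mid \mu, \tau) = \frac{1}{2}\left(-k \log(2\pi) + \log|\tau| - (x - \mu)^{\mathsf T} \tau \, (x - \mu)\right)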
Example #6
    def logp(X):
        IVI = det(V)
        return bound(
            ((n - p - 1) * log(IVI) - trace(matrix_inverse(V).dot(X)) -
             n * p * log(2) - n * log(IVI) - 2 * multigammaln(p, n / 2)) / 2,
            all(n > p - 1))
Example #7
    def logp(X):
        IVI = det(V)
        return bound(
            ((n - p - 1) * log(IVI) - trace(solve(V, X)) -
             n * p * log(2) - n * log(IVI) - 2 * multigammaln(p, n / 2)) / 2,
            n > p - 1)
Example #8
    def step(visible, filtered_hidden_mean_m1, filtered_hidden_cov_m1):
        A, B = transition, emission  # (h, h), (h, v)

        # Shortcuts for the filtered mean and covariance from the previous
        # time step.
        f_m1 = filtered_hidden_mean_m1  # (n, h)
        F_m1 = filtered_hidden_cov_m1  # (n, h, h)

        # Calculate mean of joint.
        hidden_mean = T.dot(f_m1, A) + hnm  # (n, h)

        visible_mean = T.dot(hidden_mean, B) + vnm  # (n, v)

        # Calculate covariance of joint.
        hidden_cov = stacked_dot(A.T, stacked_dot(F_m1, A))  # (n, h, h)

        hidden_cov += hnc

        visible_cov = stacked_dot(  # (n, v, v)
            B.T, stacked_dot(hidden_cov, B))
        visible_cov += vnc

        visible_hidden_cov = stacked_dot(hidden_cov, B)  # (n, h, v)

        visible_error = visible - visible_mean  # (n, v)

        inv_visible_cov, _ = theano.map(
            lambda x: matrix_inverse(x), visible_cov)    # (n, v, v)

        # I don't know a better name for this monster.
        visible_hidden_cov_T = visible_hidden_cov.dimshuffle(0, 2, 1)  # (n, v, h)
        D = stacked_dot(inv_visible_cov, visible_hidden_cov_T)

        f = (D * visible_error.dimshuffle(0, 1, 'x')).sum(axis=1)  # (n, h)
        f += hidden_mean

        F = hidden_cov
        F -= stacked_dot(visible_hidden_cov, D)

        log_l = (inv_visible_cov *
                 visible_error.dimshuffle(0, 1, 'x') *
                 visible_error.dimshuffle(0, 'x', 1)).sum(axis=(1, 2))  # (n,)
        log_l *= -.5

        dets, _ = theano.map(lambda x: det(x), visible_cov)

        log_l -= 0.5 * T.log(dets)
        log_l -= np.log(2 * np.pi)

        return f, F, log_l
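For orientation (my reading of the code, not stated in the source): step performs one Kalman-filter predict/update per batch element. In column-vector form (the code uses the transposed, row-vector convention):

    \text{predict:} \quad \mu_t = A^{\mathsf T} f_{t-1} + b_h, \qquad P_t = A^{\mathsf T} F_{t-1} A + Q
    \text{innovate:} \quad e_t = v_t - (B^{\mathsf T} \mu_t + b_v), \qquad S_t = B^{\mathsf T} P_t B + R
    \text{update:} \quad f_t = \mu_t + P_t B S_t^{-1} e_t, \qquad F_t = P_t - P_t B S_t^{-1} B^{\mathsf T} P_t

Here hnm/hnc and vnm/vnc play the roles of (b_h, Q) and (b_v, R), and log_l is the Gaussian log-likelihood of the innovation, -e_t^T S_t^{-1} e_t / 2 - log|S_t| / 2, up to the 2*pi constant.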
Example #9
    def __init__(self, v, S, *args, **kwargs):
        super(Wishart, self).__init__(*args, **kwargs)
        self.v = v
        self.S = S
        self.p = p = S.shape[0]
        self.inv_S = matrix_inverse(S)

        # TODO: We should pre-compute the following if the parameters are fixed.
        self.invalid = theano.tensor.fill(S, nan)  # Invalid result, if v < p
        self.Z = log(2.) * (v * p / 2.) + multigammaln(p, v / 2.) - log(det(S)) * v / 2.
        self.mean = ifelse(gt(v, p - 1), S / (v - p - 1), self.invalid)
Example #10
    def s_nll(self):
        """ Marginal negative log likelihood of model

        :note: See RW.pdf page 37, Eq. 2.30.
        """
        K, y, var_y, N = self.kyn()
        rK = psd(K + var_y * tensor.eye(N))
        nll = (0.5 * dots(y, matrix_inverse(rK), y) +
               0.5 * tensor.log(det(rK)) + N / 2.0 * tensor.log(2 * numpy.pi))
        if nll.dtype != self.dtype:
            raise TypeError('nll dtype', nll.dtype)
        return nll
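The referenced equation (Rasmussen & Williams, Gaussian Processes for Machine Learning, Eq. 2.30) is the negative log marginal likelihood

    -\log p(y \mid X) = \frac{1}{2} y^{\mathsf T} (K + \sigma_n^2 I)^{-1} y + \frac{1}{2} \log|K + \sigma_n^2 I| + \frac{N}{2} \log 2\pi,

with var_y in the role of the noise variance \sigma_n^2 and psd(...) marking rK as positive semi-definite.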
Example #11
File: lds.py  Project: ddofer/breze
    def step(visible, filtered_hidden_mean_m1, filtered_hidden_cov_m1):
        A, B = transition, emission                         # (h, h), (h, v)

        # Shortcuts for the filtered mean and covariance from the previous
        # time step.
        f_m1 = filtered_hidden_mean_m1                      # (n, h)
        F_m1 = filtered_hidden_cov_m1                       # (n, h, h)

        # Calculate mean of joint.
        hidden_mean = T.dot(f_m1, A) + hnm                  # (n, h)

        visible_mean = T.dot(hidden_mean, B) + vnm          # (n, v)

        # Calculate covariance of joint.
        hidden_cov = stacked_dot(
            A.T, stacked_dot(F_m1, A))                      # (n, h, h)

        hidden_cov += hnc

        visible_cov = stacked_dot(                          # (n, v, v)
            B.T, stacked_dot(hidden_cov, B))
        visible_cov += vnc

        visible_hidden_cov = stacked_dot(hidden_cov, B)     # (n, h, v)

        visible_error = visible - visible_mean              # (n, v)

        inv_visible_cov, _ = theano.map(
            lambda x: matrix_inverse(x), visible_cov)       # (n, v, v)

        # I don't know a better name for this monster.
        visible_hidden_cov_T = visible_hidden_cov.dimshuffle(0, 2, 1)   # (n, v, h)
        D = stacked_dot(inv_visible_cov, visible_hidden_cov_T)

        f = (D * visible_error.dimshuffle(0, 1, 'x')).sum(axis=1)  # (n, h)
        f += hidden_mean

        F = hidden_cov
        F -= stacked_dot(visible_hidden_cov, D)

        log_l = (inv_visible_cov *
                 visible_error.dimshuffle(0, 1, 'x') *
                 visible_error.dimshuffle(0, 'x', 1)).sum(axis=(1, 2))  # (n,)
        log_l *= -.5

        dets, _ = theano.map(lambda x: det(x), visible_cov)

        log_l -= 0.5 * T.log(dets)
        log_l -= np.log(2 * np.pi)

        return f, F, log_l
Example #12
File: theano_gp.py  Project: cyip/hyperopt
    def s_nll(self):
        """ Marginal negative log likelihood of model

        :note: See RW.pdf page 37, Eq. 2.30.
        """
        K, y, var_y, N = self.kyn()
        rK = psd(K + var_y * tensor.eye(N))
        nll = (0.5 * dots(y, matrix_inverse(rK), y)
                + 0.5 * tensor.log(det(rK))
                + N / 2.0 * tensor.log(2 * numpy.pi))
        if nll.dtype != self.dtype:
            raise TypeError('nll dtype', nll.dtype)
        return nll
Example #13
File: niw.py  Project: afcarl/trmix
def log_partf(b, s, C, v, logdet=None):
    D = b.size

    # multivariate log-gamma function
    g = (tt.sum(tt.gammaln((v + 1. - tt.arange(1, D + 1)) / 2.)) +
         D * (D - 1) / 4. * np.log(np.pi))

    # log-partition function
    if logdet is None:
        return (-v / 2. * tt.log(tl.det(C - tt.dot(b, b.T) / (4 * s))) +
                v * np.log(2.) + g - D / 2. * tt.log(s))
    else:
        return -v / 2. * logdet + v * np.log(2.) + g - D / 2. * tt.log(s)
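For reference, the helper g computed above is the multivariate log-gamma function

    \log \Gamma_D\!\left(\frac{v}{2}\right) = \frac{D(D-1)}{4} \log \pi + \sum_{i=1}^{D} \log \Gamma\!\left(\frac{v + 1 - i}{2}\right),

which matches the tt.gammaln sum over tt.arange(1, D + 1).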
Example #14
    def logp(self, X):
        n = self.n
        p = self.p
        V = self.V

        IVI = det(V)

        return bound(
            ((n - p - 1) * log(IVI) - trace(matrix_inverse(V).dot(X)) -
             n * p * log(2) - n * log(IVI) - 2 * multigammaln(p, n / 2)) / 2,
            n > (p - 1))
Example #15
def s_nll(K, y, var_y, prior_var):
    """
    Marginal negative log likelihood of model

    K - gram matrix (matrix-like)
    y - the training targets (vector-like)
    var_y - the variance of uncertainty about y (vector-like)
    prior_var - scale applied to the prior gram matrix K (scalar-like)

    :note: See RW.pdf page 37, Eq. 2.30.

    """

    n = y.shape[0]
    rK = psd(prior_var * K + var_y * TT.eye(n))

    fit = .5 * dots(y, matrix_inverse(rK), y)
    complexity = 0.5 * TT.log(det(rK))
    normalization = n / 2.0 * TT.log(2 * np.pi)
    nll = fit + complexity + normalization
    return nll
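A minimal usage sketch (illustrative, not from the source project), assuming s_nll above and the helpers it calls (psd, dots, matrix_inverse, det) are importable in the same module:

import numpy as np
import theano
import theano.tensor as TT

# Symbolic inputs matching the signature of s_nll above.
K = TT.dmatrix('K')
y = TT.dvector('y')
var_y = TT.dscalar('var_y')
prior_var = TT.dscalar('prior_var')

# Compile the symbolic NLL into a callable and evaluate it on a toy
# problem (identity gram matrix, zero targets).
nll = theano.function([K, y, var_y, prior_var],
                      s_nll(K, y, var_y, prior_var))
print(nll(np.eye(3), np.zeros(3), 0.1, 1.0))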
Example #16
def normal(X, m, C):
    """
    Evaluates the density of a normal distribution.

    @type  X: C{TensorVariable}
    @param X: matrix storing data points column-wise

    @type  m: C{ndarray}/C{TensorVariable}
    @param m: column vector representing the mean of the Gaussian

    @type  C: C{ndarray}/C{TensorVariable}
    @param C: covariance matrix

    @rtype: C{TensorVariable}
    @return: density of a Gaussian distribution evaluated at C{X}
    """

    Z = X - m

    return tt.exp(-tt.sum(Z * tt.dot(tl.matrix_inverse(C), Z), 0) / 2. -
                  tt.log(tl.det(C)) / 2. - m.size / 2. * np.log(2. * np.pi))
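A minimal usage sketch (illustrative), assuming normal() above and its tt/tl/np imports are in scope:

import numpy as np
import theano
import theano.tensor as tt

X = tt.dmatrix('X')   # data points stored column-wise, as documented
m = np.zeros((2, 1))  # 2x1 mean column vector
C = np.eye(2)         # 2x2 covariance matrix

# Compile the symbolic density and evaluate it at the origin; for a
# standard 2d Gaussian the value is 1 / (2*pi), about 0.159.
pdf = theano.function([X], normal(X, m, C))
print(pdf(np.zeros((2, 1))))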
Example #17
File: normal.py  Project: betatim/carl
    def __init__(self, mu, sigma):
        """Constructor.

        Parameters
        ----------
        * `mu` [1d array]:
            The means.

        * `sigma` [2d array]:
            The covariance matrix.
        """
        super(MultivariateNormal, self).__init__(mu=mu, sigma=sigma)
        # XXX: The SDP-ness of sigma should be checked upon changes

        # ndim
        self.ndim_ = self.mu.shape[0]
        self._make(self.ndim_, "ndim_func_", args=[])

        # pdf
        L = linalg.cholesky(self.sigma)
        sigma_det = linalg.det(self.sigma)  # XXX: compute from L instead
        sigma_inv = linalg.matrix_inverse(self.sigma)  # XXX: idem

        self.pdf_ = (
            (1. / T.sqrt((2. * np.pi) ** self.ndim_ * T.abs_(sigma_det))) *
            T.exp(-0.5 * T.sum(T.mul(T.dot(self.X - self.mu,
                                           sigma_inv),
                                     self.X - self.mu),
                               axis=1))).ravel()
        self._make(self.pdf_, "pdf")

        # -log pdf
        self.nll_ = -T.log(self.pdf_)  # XXX: for sure this can be better
        self._make(self.nll_, "nll")

        # self.rvs_
        self._make(T.dot(L, self.X.T).T + self.mu, "rvs_func_")
Example #18
    def logp(value):
        delta = value - mu
        k = tau.shape[0]

        return 1/2. * (-k * log(2*pi) + log(det(tau)) - dot(delta.T, dot(tau, delta)))
Example #19
    def logp(value):
        delta = value - mu
        return 1 / 2. * (log(det(Tau)) - dot(delta.T, dot(Tau, delta)))
Example #20
    def logp(value):
        delta = value - mu
        k = tau.shape[0]

        return 1/2. * (-k * log(2*pi) + log(det(tau)) - dot(delta.T, dot(tau, delta)))
Example #21
    def logp(X):
        IVI = det(V)
        return bound(
            ((n - p - 1) * log(IVI) - trace(solve(V, X)) - n * p * log(2) -
             n * log(IVI) - 2 * multigammaln(p, n / 2)) / 2, n > p - 1)
Example #22
def log_detIM(M=Th.dmatrix('M'), **result):
    return Th.log(det(Th.identity_like(M) - (M + M.T) / 2))
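A minimal usage sketch (illustrative), assuming Th is theano.tensor and det comes from theano.tensor.nlinalg, as the snippet implies. Since the default argument is already symbolic, the function can also be compiled against a fresh input:

import numpy as np
import theano
import theano.tensor as Th

# log det(I - (M + M^T)/2) evaluated at M = 0 is log det(I) = 0.
M = Th.dmatrix('M')
f = theano.function([M], log_detIM(M))
print(f(np.zeros((3, 3))))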
Example #23
    def logp(value):
        delta = value - mu
        return 1 / 2. * (log(det(Tau)) - dot(delta.T, dot(Tau, delta)))