Example #1
 def logprob(z):
     """z is NxD."""
     z_minus_mean = z - mean
     # Row-wise Mahalanobis distance (z - mean)' pinv (z - mean).
     mahalanobis_dist = np.einsum('ij,jk,ik->i', z_minus_mean, pinv,
                                  z_minus_mean)
     # Multivariate-t log density: logaddexp(0, log(d/dof)) == log1p(d/dof).
     return const - 0.5 * (dof + D) * np.logaddexp(
         0.0, np.log(mahalanobis_dist / dof))
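A quick sanity check of the snippet above, assuming it is meant to be a multivariate Student-t log density; the module-level names mean, pinv, dof, D and const are stand-ins defined here for illustration:

import numpy as np
from scipy.special import gammaln
from scipy.stats import multivariate_t

D, dof = 2, 5.0
mean = np.zeros(D)
cov = np.eye(D)
pinv = np.linalg.inv(cov)  # precision matrix
# Log normalizing constant of the multivariate t.
const = (gammaln(0.5 * (dof + D)) - gammaln(0.5 * dof)
         - 0.5 * D * np.log(dof * np.pi)
         - 0.5 * np.linalg.slogdet(cov)[1])

z = np.random.randn(4, D)
assert np.allclose(logprob(z), multivariate_t(mean, cov, df=dof).logpdf(z))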
Example #2
def bernoulli_log_density(targets, unnormalized_logprobs):
    # unnormalized_logprobs are in R
    # Targets must be 0 or 1
    t2 = targets * 2 - 1
    # Now t2 is -1 or 1, which makes the following form nice
    label_probabilities = -np.logaddexp(0, -unnormalized_logprobs * t2)
    return np.sum(label_probabilities, axis=-1)  # Sum across pixels.
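A minimal usage sketch: for 0/1 targets the stable form above matches a naive Bernoulli log-likelihood computed through the sigmoid (fine at moderate logits):

import numpy as np
from scipy.special import expit

logits = np.array([[2.0, -1.0, 0.5]])
targets = np.array([[1, 0, 1]])
p = expit(logits)
naive = np.sum(targets * np.log(p) + (1 - targets) * np.log(1 - p), axis=-1)
assert np.allclose(bernoulli_log_density(targets, logits), naive)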
Example #3
 def log_density(x, t):
     x_, y_ = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(y_, 0, 1.35)
     mu_density = norm.logpdf(x_, -1.5, np.exp(y_))
     sigma_density2 = norm.logpdf(y_, 0.1, 1.35)
     mu_density2 = norm.logpdf(x_, 1.5, np.exp(y_))
     return np.logaddexp(sigma_density + mu_density, sigma_density2 + mu_density2)
Example #4
 def log_density(x):
     x_, y_ = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(y_, 0, 1.35)
     mu_density = norm.logpdf(x_, -0.5, np.exp(y_))
     sigma_density2 = norm.logpdf(y_, 0.1, 1.35)
     mu_density2 = norm.logpdf(x_, 0.5, np.exp(y_))
     return np.logaddexp(sigma_density + mu_density, sigma_density2 + mu_density2)
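Examples #3 and #4 use the same pattern: np.logaddexp(log_p1, log_p2) is log(p1 + p2), an equal-weight two-component mixture evaluated entirely in log space. A small evaluation sketch for the variant directly above (which takes only x), assuming norm is scipy.stats.norm (the sources likely use autograd's wrapper):

import numpy as np
from scipy.stats import norm

xs, ys = np.meshgrid(np.linspace(-3, 3, 5), np.linspace(-3, 3, 5))
pts = np.column_stack([xs.ravel(), ys.ravel()])
logp = log_density(pts)  # shape (25,)
assert np.all(np.isfinite(logp))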
Example #5
    def _cumulative_hazard(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        alpha_ = np.exp(np.dot(Xs[0], alpha_params))

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        beta_ = np.exp(np.dot(Xs[1], beta_params))
        return np.logaddexp(beta_ * (np.log(np.clip(T, 1e-25, np.inf)) - np.log(alpha_)), 0)
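This is the log-logistic cumulative hazard H(T) = log(1 + (T / alpha)^beta); writing it as logaddexp keeps it finite when beta_ * log(T / alpha_) is large. A quick identity check with scalar stand-in parameters:

import numpy as np

T = np.array([0.5, 1.0, 2.0, 10.0])
alpha_, beta_ = 1.5, 2.0
stable = np.logaddexp(beta_ * (np.log(T) - np.log(alpha_)), 0)
assert np.allclose(stable, np.log1p((T / alpha_) ** beta_))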
Example #6
    def _loglikelihood(params, x, tx, T):
        """Log likelihood for optimizer."""
        alpha, beta, gamma, delta = params

        betaln_ab = betaln(alpha, beta)
        betaln_gd = betaln(gamma, delta)

        A = betaln(alpha + x, beta + T - x) - betaln_ab + betaln(
            gamma, delta + T) - betaln_gd

        B = 1e-15 * np.ones_like(T)
        recency_T = T - tx - 1

        for j in np.arange(recency_T.max() + 1):
            ix = recency_T >= j
            B1 = betaln(alpha + x, beta + tx - x + j)
            B2 = betaln(gamma + 1, delta + tx + j)
            B = B + ix * (exp(B1 - betaln_ab)) * (exp(B2 - betaln_gd))
            # v0.11.3
            # B = B + ix * betaf(alpha + x, beta + tx - x + j) * betaf(gamma + 1, delta + tx + j)

        log_B = log(B)
        # v0.11.3
        # B = log(B) - betaln_gd - betaln_ab
        result = logaddexp(A, log_B)
        return result
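The loop above accumulates B through differences of betaln values rather than calling beta() directly (as the commented v0.11.3 lines did); the direct form underflows for large arguments. A small demonstration:

import numpy as np
from scipy.special import beta as betaf, betaln

print(betaf(1000.0, 1000.0))   # 0.0 -- underflows in float64
print(betaln(1000.0, 1000.0))  # approx -1388.5, still finite
print(np.exp(betaln(1000.0, 1000.0) - betaln(1000.0, 990.0)))  # well-scaled ratio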
Example #7
def logsigmoid(x): return x - np.logaddexp(0, x)




# def init_random_params(scale, layer_sizes, subspace_weights, rs=npr.RandomState(0)):
#     """Build a list of (weights, biases) tuples,
#        one for each layer in the net."""
#
#     params = []
#     subspace_params = [subspace_weights, []]
#     subspace_dim = len(subspace_weights)
#
#     for m, n in zip(layer_sizes[:-1], layer_sizes[1:]):
#
#         # Create random projections P
#         Pw, Pb = npr.randn(subspace_dim, m * n), npr.randn(subspace_dim, n)
#         norms_w, norms_b = np.linalg.norm(Pw, axis=1), np.linalg.norm(Pb, axis=1)
#         Pw, Pb = Pw / norms_w.reshape([-1,1]), Pb / norms_b.reshape([-1,1])
#
#         # Initial params
#         init_params = (scale * rs.randn(m * n),  # weight matrix
#                         scale * rs.randn(n))    # bias vector
#
#         # Initial params + subspace
#         layer_weights = init_params[0] + np.dot(subspace_weights, Pw)
#         layer_biases = init_params[1] + np.dot(subspace_weights, Pb)
#         layer_weights = layer_weights.reshape([m,n])
#
#         params.append((layer_weights, layer_biases))
#         subspace_params[1].append((Pw, Pb))
#
#     return params

def init_random_params(scale, layer_sizes, rs=npr.RandomState(0)):
    """Build a list of (weights, biases) tuples, one for each layer in the net."""
    return [(scale * rs.randn(m, n),   # weight matrix
             scale * rs.randn(n))      # bias vector
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
Example #8
 def neg_logp(x):
     if alpha == 1:
         log_prob = pow1 * proposal.logprob(x) + pow2 * target.logprob(x)
     else:
         log_prob = (2 / (1 - alpha)) * (np.logaddexp(
             np.log(pow1) + ((1 - alpha) / 2) * proposal.logprob(x),
             np.log(pow2) + ((1 - alpha) / 2) * target.logprob(x)))
     return -log_prob
Example #9
    def _cumulative_hazard(self, params, T, Xs):
        alpha_params = params["alpha_"]
        alpha_ = np.exp(np.dot(Xs["alpha_"], alpha_params))

        beta_params = params["beta_"]
        beta_ = np.exp(np.dot(Xs["beta_"], beta_params))
        return np.logaddexp(
            beta_ * (np.log(np.clip(T, 1e-25, np.inf)) - np.log(alpha_)), 0)
Example #10
 def log_density(x, t):
     mu, log_sigma = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(log_sigma, 0, 1.35)
     mu_density = norm.logpdf(mu, -0.5, np.exp(log_sigma))
     sigma_density2 = norm.logpdf(log_sigma, 0.1, 1.35)
     mu_density2 = norm.logpdf(mu, 0.5, np.exp(log_sigma))
     return np.logaddexp(sigma_density + mu_density,
                         sigma_density2 + mu_density2)
Example #11
 def log_density(x, t):
     mu, log_sigma = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(log_sigma, 0, 1.35)
     mu_density = norm.logpdf(mu, -0.5, np.exp(log_sigma))
     sigma_density2 = norm.logpdf(log_sigma, 0.1, 1.35)
     mu_density2 = norm.logpdf(mu, 0.5, np.exp(log_sigma))
     return np.logaddexp(sigma_density + mu_density,
                         sigma_density2 + mu_density2)
Example #12
def log_softmax(p, axis=1):
  '''
  Currently assumes only 2 values in p.
  '''
  # Normalize each row by the logaddexp of its own two entries; the
  # original normalized every row by row 0's entries (p[0, 0], p[0, 1]).
  return p - np.logaddexp(p[:, 0], p[:, 1])[:, None]
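Usage check: after exponentiating, each row should sum to one:

import numpy as np

p = np.array([[1.0, 2.0],
              [-3.0, 0.5]])
assert np.allclose(np.exp(log_softmax(p)).sum(axis=1), 1.0)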
Example #13
    def _log_1m_sf(self, params, T, Xs):
        alpha_params = params["alpha_"]
        log_alpha_ = np.dot(Xs["alpha_"], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params["beta_"]
        log_beta_ = np.dot(Xs["beta_"], beta_params)
        beta_ = np.exp(log_beta_)
        return -np.logaddexp(-beta_ * (np.log(T) - np.log(alpha_)), 0)
Example #14
    def log_prob_and_gradient(self, w):

        # Gaussian prior term (precision q, mean m)
        c1 = 0.5 * np.sum(self.q * (w - self.m)**2)

        # logistic-loss term: sum_i log(1 + exp(-y_i * x_i . w))
        c2 = np.sum(np.logaddexp(0, -self.Y * np.dot(self.X, w)))

        return c1 + c2
Example #15
    def _log_1m_sf(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        log_alpha_ = np.dot(Xs[0], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        log_beta_ = np.dot(Xs[1], beta_params)
        beta_ = np.exp(log_beta_)
        return -np.logaddexp(-beta_ * (np.log(T) - np.log(alpha_)), 0)
Example #16
def neglog_prior(wparams, pi, nlogs1, nlogs2):
    # Scale mixture of two zero-mean Gaussians: each logaddexp term below is
    # log(pi * N(w; 0, s1^2) + (1 - pi) * N(w; 0, s2^2)), where
    # nlogs1 = -log(s1) and nlogs2 = -log(s2). np.log(wA) / np.log(wb)
    # assume positive weights; signed weights would need np.log(np.abs(.)).
    logpi = np.log(pi)
    log1mpi = np.log(1 - pi)
    log2pi = np.log(2 * np.pi)

    nll = 0.
    for wA, wb in wparams:
        nll += np.sum(
            np.logaddexp(
                logpi - .5 * np.exp(2 * (np.log(wA) + nlogs1)) + nlogs1,
                log1mpi - .5 * np.exp(2 * (np.log(wA) + nlogs2)) + nlogs2,
            ) - .5 * log2pi)

        nll += np.sum(
            np.logaddexp(
                logpi - .5 * np.exp(2 * (np.log(wb) + nlogs1)) + nlogs1,
                log1mpi - .5 * np.exp(2 * (np.log(wb) + nlogs2)) + nlogs2,
            ) - .5 * log2pi)
    return nll
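A single-weight check of the mixture interpretation noted above, against the explicit two-Gaussian density, using a positive weight since the snippet takes np.log of the raw values:

import numpy as np
from scipy.stats import norm

pi, s1, s2 = 0.5, 1.0, 0.1
w = np.array([0.3])
val = neglog_prior([(w, w)], pi, -np.log(s1), -np.log(s2))
mix = np.log(pi * norm.pdf(w, 0, s1) + (1 - pi) * norm.pdf(w, 0, s2))
assert np.allclose(val, 2 * mix.sum())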
Example #17
 def log_density(x):
     '''
     x: [n_samples, D]
     return: [n_samples]
     '''
     x_, y_ = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(y_, 0, 1.35)
     mu_density = norm.logpdf(x_, -1.5, np.exp(y_))
     sigma_density2 = norm.logpdf(y_, 0.1, 1.35)
     mu_density2 = norm.logpdf(x_, 1.5, np.exp(y_))
     return np.logaddexp(sigma_density + mu_density, sigma_density2 + mu_density2)
Example #18
 def log_density(x):
     '''
     x: [n_samples, D]
     return: [n_samples]
     '''
     x_, y_ = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(y_, 0, 1.35)
     mu_density = norm.logpdf(x_, -2.2, np.exp(y_))
     sigma_density2 = norm.logpdf(y_, 0.1, 1.35)
     mu_density2 = norm.logpdf(x_, 2.2, np.exp(y_))
     return np.logaddexp(sigma_density + mu_density, sigma_density2 + mu_density2)
Example #19
    def _log_hazard(self, params, T, Xs):
        alpha_params = params["alpha_"]
        log_alpha_ = np.dot(Xs["alpha_"], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params["beta_"]
        log_beta_ = np.dot(Xs["beta_"], beta_params)
        beta_ = np.exp(log_beta_)

        return (log_beta_ - log_alpha_ + np.expm1(log_beta_) *
                (np.log(T) - log_alpha_) -
                np.logaddexp(beta_ * (np.log(T) - np.log(alpha_)), 0))
Example #20
    def _negative_log_likelihood(log_params, freq, rec, T, weights, penalizer_coef):
        warnings.simplefilter(action="ignore", category=FutureWarning)

        params = np.exp(log_params)
        r, alpha, a, b = params

        A_1 = gammaln(r + freq) - gammaln(r) + r * log(alpha)
        A_2 = gammaln(a + b) + gammaln(b + freq + 1) - gammaln(b) - gammaln(a + b + freq + 1)
        A_3 = -(r + freq) * log(alpha + T)
        A_4 = log(a) - log(b + freq) + (r + freq) * (log(alpha + T) - log(alpha + rec))

        penalizer_term = penalizer_coef * sum(params ** 2)
        return -(weights * (A_1 + A_2 + A_3 + logaddexp(A_4, 0))).sum() / weights.sum() + penalizer_term
Example #21
    def _log_hazard(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        log_alpha_ = np.dot(Xs[0], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        log_beta_ = np.dot(Xs[1], beta_params)
        beta_ = np.exp(log_beta_)

        return (
            log_beta_
            - log_alpha_
            + np.expm1(log_beta_) * (np.log(T) - log_alpha_)
            - np.logaddexp(beta_ * (np.log(T) - np.log(alpha_)), 0)
        )
Example #22
def log_prob_and_gradient(w, sigma_squared, X, Y):

    T = -Y * np.dot(X, w)

    # compute the log-posterior of the data
    c1 = np.sum(np.dot(w, w)) / (2 * sigma_squared)
    c2 = np.sum(np.logaddexp(0, T))

    neg_log_posterior = c1 + c2

    # Optionally also return the hand-coded gradient of the negative
    # log posterior (opt_mode and manual are module-level flags in the source).
    if opt_mode == manual:
        g1 = w / sigma_squared
        g2 = np.dot((expit(T) * Y).T, X)
        neg_gradient = g1 - g2
        return neg_log_posterior, neg_gradient

    return neg_log_posterior
Example #23
    def _loglikelihood(params, x, tx, T):
        """Log likelihood for optimizer."""
        warnings.simplefilter(action="ignore", category=FutureWarning)

        alpha, beta, gamma, delta = params

        betaln_ab = betaln(alpha, beta)
        betaln_gd = betaln(gamma, delta)

        A = betaln(alpha + x, beta + T - x) - betaln_ab + betaln(gamma, delta + T) - betaln_gd

        B = 1e-15 * np.ones_like(T)
        recency_T = T - tx - 1

        for j in np.arange(recency_T.max() + 1):
            ix = recency_T >= j
            B = B + ix * betaf(alpha + x, beta + tx - x + j) * betaf(gamma + 1, delta + tx + j)

        B = log(B) - betaln_gd - betaln_ab
        return logaddexp(A, B)
Example #24
 def log_sqrt_pair_integral(self, new_param, old_params):
     old_params = np.atleast_2d(old_params)
     mu_new = new_param[:self.d]
     mus_old = old_params[:, :self.d]
     if self.diag:
         lsig_new = new_param[self.d:]
         lsigs_old = old_params[:, self.d:]
         lSig2 = np.log(0.5) + np.logaddexp(lsig_new, lsigs_old)
         return -0.125 * np.sum(
             np.exp(-lSig2) * (mu_new - mus_old)**2, axis=1) - 0.5 * np.sum(
                 lSig2, axis=1) + 0.25 * np.sum(lsig_new) + 0.25 * np.sum(
                     lsigs_old, axis=1)
     else:
         L_new = new_param[self.d:].reshape((self.d, self.d))
         Sig_new = np.dot(L_new, L_new.T)
         N = old_params.shape[0]
         Ls_old = old_params[:, self.d:].reshape((N, self.d, self.d))
         Sigs_old = np.array([np.dot(L, L.T) for L in Ls_old])
         Sig2 = 0.5 * (Sig_new + Sigs_old)
         return -0.125 * ((mu_new - mus_old) * np.linalg.solve(
             Sig2, mu_new - mus_old)).sum(axis=1) - 0.5 * np.linalg.slogdet(
                 Sig2)[1] + 0.25 * np.linalg.slogdet(
                     Sig_new)[1] + 0.25 * np.linalg.slogdet(Sigs_old)[1]
Example #25
def logprob_two_moons(z):
    z1 = z[:, 0]
    z2 = z[:, 1]
    return (- 0.5 * ((np.sqrt(z1**2 + z2**2) - 2 ) / 0.4)**2\
            + np.logaddexp(-0.5 * ((z1 - 2) / 0.6)**2, -0.5 * ((z1 + 2) / 0.6)**2))
Example #26
def test_energy_two_moons(z):
    z1 = z[0]
    z2 = z[1]
    return 0.5 * ((np.sqrt(z1**2 + z2**2) - 2 ) / 0.4)**2\
            - np.logaddexp(-0.5 * ((z1 - 2) / 0.6)**2, -0.5 * ((z1 + 2) / 0.6)**2)
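Examples #25 and #26 describe the same "two moons" target; the energy is just the negated log-probability, the first batched over rows and the second taking a single point. A quick consistency check:

import numpy as np

z = np.array([[1.0, -0.5]])
assert np.allclose(logprob_two_moons(z), -test_energy_two_moons(z[0]))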
Example #27
def logsigmoid(x):
    return x - np.logaddexp(0, x)
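A stability sketch: the naive np.log(1 / (1 + np.exp(-x))) overflows at extreme inputs, while the logaddexp form stays exact:

import numpy as np

x = np.array([-1000.0, 0.0, 1000.0])
print(logsigmoid(x))  # [-1000.0, -0.693..., 0.0]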
Example #28
def sigmoid(x):
    # Scalar-only (the `if` is ambiguous for arrays). Both branches equal
    # exp(log-sigmoid(x)); np.logaddexp keeps either form overflow-safe.
    return np.exp(-np.logaddexp(0, -x)) if x >= 0 else np.exp(x - np.logaddexp(x, 0))
Example #29
 def _log_1m_sf(self, params, times):
     alpha_, beta_ = params
     return -np.logaddexp(-beta_ * (np.log(times) - np.log(alpha_)), 0)
Example #30
def softplus(z):
    """Smooth relu."""
    # Avoid numerical overflow, see:
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.logaddexp.html
    return np.logaddexp(0.0, z)
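A quick comparison with the naive form, showing the overflow the comment above refers to:

import numpy as np

z = np.array([-1000.0, 0.0, 1000.0])
print(softplus(z))  # [0.0, 0.693..., 1000.0]
with np.errstate(over='ignore'):
    print(np.log(1.0 + np.exp(z)))  # [0.0, 0.693..., inf] -- overflows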
Example #31
def logsigmoid(x): return x - np.logaddexp(0, x)

def init_random_params(scale, layer_sizes, rs=npr.RandomState(0)):
    """Build a list of (weights, biases) tuples, one for each layer in the net."""
    return [(scale * rs.randn(m, n),   # weight matrix
             scale * rs.randn(n))      # bias vector
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
Example #32
def bernoulli_log_density(targets, unnormalized_logprobs):
    # unnormalized_logprobs are in R
    # Targets must be -1 or 1
    label_probabilities = -np.logaddexp(0, -unnormalized_logprobs * targets)
    return np.sum(label_probabilities, axis=-1)  # Sum across pixels.
Example #33
def LogSigmoid(x: np.ndarray) -> np.ndarray:
    """
    Natural log of the sigmoid function; the result is always <= 0.
    """
    # Note: clipping the result into [1e-6, 1e6], as the original did, would
    # map every value to 1e-6, since log-sigmoid is non-positive.
    return x - np.logaddexp(0, x)
Example #34
def softplus(z):
    """Smooth relu."""
    # Avoid numerical overflow, see:
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.logaddexp.html
    return np.logaddexp(0.0, z)
Example #35
def bernoulli_log_density(targets, unnormalized_logprobs):
    # unnormalized_logprobs are in R
    # Targets must be -1 or 1
    label_probabilities = -np.logaddexp(0, -unnormalized_logprobs*targets)
    return np.sum(label_probabilities, axis=-1)   # Sum across pixels.
Example #36
def softplus(x):
    # Despite the name, this computes the logistic sigmoid:
    # exp(x - log(1 + exp(x))) = exp(x) / (1 + exp(x)).
    return np.exp(x - np.logaddexp(0, x))
Example #37
 def _cumulative_hazard(self, params, times):
     alpha_, beta_ = params
     return np.logaddexp(
         beta_ * (np.log(np.clip(times, 1e-25, np.inf)) - np.log(alpha_)),
         0)