Example #1
from tensorflow.keras import backend as K

def negative_log_likelihood(y_true, y_pred):
    """
        Negative log-likelihood or negative log-probability loss/metric.
        Reference: Evaluating Predictive Uncertainty Challenge, Quiñonero-Candela et al., 2006.
        Sums log(y_pred) for the true class and log(1.0 - y_pred) for the
        other classes, then averages across samples.
    """
    # Clip probabilities away from 0 and 1 so the logs stay finite.
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())

    return -K.mean(K.sum(y_true * K.log(y_pred) +
                         (1.0 - y_true) * K.log(1.0 - y_pred),
                         axis=-1),
                   axis=-1)
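A quick numeric check (illustrative; assumes NumPy and the TensorFlow Keras backend): for confident, mostly correct predictions the value should be a small positive number.

import numpy as np

y_true = np.array([[1.0, 0.0], [0.0, 1.0]], dtype="float32")  # one-hot labels
y_pred = np.array([[0.9, 0.1], [0.2, 0.8]], dtype="float32")  # predicted probabilities
print(float(negative_log_likelihood(y_true, y_pred)))  # ~0.33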
Example #2
    def nll(y_true, y_pred):
        # `variance_tensor` and `epsilon` are closure variables captured from
        # the enclosing loss-factory function.
        #if variance_logits:
        #    variance_tensor = K.exp(variance_tensor)

        # Gaussian negative log-likelihood, up to an additive constant:
        # 0.5 * (log(sigma^2) + (y - mu)^2 / sigma^2).
        return 0.5 * K.mean(
            K.log(variance_tensor + epsilon) + K.square(y_true - y_pred) /
            (variance_tensor + epsilon))
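Since `nll` is an inner function, here is a minimal sketch of a plausible enclosing factory; the factory name, its signature, and the default epsilon are assumptions, not taken from the source:

from tensorflow.keras import backend as K

def make_gaussian_nll_loss(variance_tensor, epsilon=1e-8):  # hypothetical factory
    # Captures the model's predicted variance so `nll` keeps the standard
    # two-argument Keras loss signature.
    def nll(y_true, y_pred):
        return 0.5 * K.mean(
            K.log(variance_tensor + epsilon) +
            K.square(y_true - y_pred) / (variance_tensor + epsilon))
    return nll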
Example #3
from tensorflow.keras import backend as K

def entropy(y_true, y_pred):
    """
        Standard entropy over class probabilities.
        Computes -sum(y_pred * log(y_pred + epsilon)) over classes, then
        averages over samples. y_true is unused; it is kept only to match
        the Keras metric signature.
    """
    return K.mean(-K.sum(y_pred * K.log(y_pred + K.epsilon()), axis=-1),
                  axis=-1)
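Another illustrative check: the entropy of a uniform distribution over n classes should be close to log(n).

import numpy as np

probs = np.full((1, 4), 0.25, dtype="float32")  # uniform over 4 classes
print(float(entropy(probs, probs)))  # ~log(4) = 1.386; y_true is ignored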
Example #4
    def beta_nll(y_true, y_pred):
        # `variance_tensor`, `beta` and `epsilon` are closure variables from
        # the enclosing loss-factory function.
        #if variance_logits:
        #    variance_tensor = K.exp(variance_tensor)

        # beta-NLL: weight each sample's Gaussian NLL by sigma^(2*beta), with
        # the gradient through the weight stopped so it acts only as a
        # per-sample scaling factor.
        beta_sigma_sq = K.stop_gradient(K.pow(variance_tensor, beta))
        return 0.5 * K.mean(
            beta_sigma_sq *
            (K.log(variance_tensor + epsilon) + K.square(y_true - y_pred) /
             (variance_tensor + epsilon)))
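As above, a hedged sketch of a plausible enclosing factory (name, signature, and defaults are assumptions); beta = 0 recovers the plain Gaussian NLL, while beta = 1 makes the mean's gradients behave like plain MSE:

from tensorflow.keras import backend as K

def make_beta_nll_loss(variance_tensor, beta=0.5, epsilon=1e-8):  # hypothetical factory
    def beta_nll(y_true, y_pred):
        beta_sigma_sq = K.stop_gradient(K.pow(variance_tensor, beta))
        return 0.5 * K.mean(
            beta_sigma_sq *
            (K.log(variance_tensor + epsilon) +
             K.square(y_true - y_pred) / (variance_tensor + epsilon)))
    return beta_nll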
Example #5
    def nll(y_true, y_pred):
        # `spread_tensor` (the Laplace scale b) and `epsilon` are closure
        # variables from the enclosing loss-factory function.
        # Laplace negative log-likelihood: log(2b) + |y - mu| / b.
        return K.mean(
            K.log(2.0 * spread_tensor + epsilon) + K.abs(y_true - y_pred) /
            (spread_tensor + epsilon))
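The same factory pattern presumably applies here; a hedged sketch (names and defaults assumed). Because it penalizes absolute rather than squared error, the Laplace NLL is less sensitive to outliers than the Gaussian NLL:

from tensorflow.keras import backend as K

def make_laplace_nll_loss(spread_tensor, epsilon=1e-8):  # hypothetical factory
    def nll(y_true, y_pred):
        return K.mean(
            K.log(2.0 * spread_tensor + epsilon) +
            K.abs(y_true - y_pred) / (spread_tensor + epsilon))
    return nll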
Example #6
    def log_prior_prob(self, w):
        # Log-density of a scale-mixture prior over weights: two zero-mean
        # Gaussians with mixing weights prior_pi_1 / prior_pi_2 and standard
        # deviations prior_sigma_1 / prior_sigma_2 (attributes of the class).
        return K.log(self.prior_pi_1 *
                     gaussian.probability(w, 0.0, self.prior_sigma_1) +
                     self.prior_pi_2 *
                     gaussian.probability(w, 0.0, self.prior_sigma_2))
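A minimal sketch of the `gaussian.probability` helper this method relies on, consistent with the `log_probability` functions below; the module layout and exact implementation are assumptions:

import math
from tensorflow.keras import backend as K

def probability(x, mu, sigma):  # hypothetical helper, e.g. in a `gaussian` module
    # Gaussian density N(x | mu, sigma^2).
    return K.exp(-0.5 * K.square((x - mu) / sigma)) / (sigma * math.sqrt(2.0 * math.pi))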
Example #7
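`NegHalfLog2PI` is used below but not defined in the snippet; the standard definition of this Gaussian log-density constant is:

import math

# Constant term of the Gaussian log-density: -0.5 * log(2 * pi).
NegHalfLog2PI = -0.5 * math.log(2.0 * math.pi)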
def log_probability(x, mu, sigma):
    # Log-density of a Gaussian N(mu, sigma^2) evaluated at x.
    return NegHalfLog2PI - K.log(sigma) - 0.5 * K.square((x - mu) / sigma)
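A quick sanity check (illustrative): at x = mu the quadratic term vanishes, so a standard normal gives exactly NegHalfLog2PI.

print(float(log_probability(0.0, 0.0, 1.0)))  # ~ -0.9189 = -0.5 * log(2 * pi)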
Example #8
    def log_probability(self, x):
        # Same Gaussian log-density as above, using the distribution's own
        # mean and std attributes.
        return NegHalfLog2PI - K.log(self.std) - 0.5 * K.square(
            (x - self.mean) / self.std)