Example #1
def entropy(y_true, y_pred):
    """
        Standard entropy over class probabilities.
        Computes -sum(y_pred * log(y_pred + epsilon)) over the class axis for each
        sample, then averages over samples. y_true is ignored; it is present only
        to match the Keras metric signature.
    """
    return K.mean(-K.sum(y_pred * K.log(y_pred + K.epsilon()), axis=-1),
                  axis=-1)
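
A minimal usage sketch, assuming the tf.keras backend (array values are made up for illustration):

import numpy as np
from tensorflow.keras import backend as K

# One confident row and one uniform row of class probabilities.
y_pred = K.constant(np.array([[0.9, 0.05, 0.05],
                              [1/3, 1/3, 1/3]], dtype="float32"))

# y_true is ignored by entropy(), so any placeholder works.
print(K.eval(entropy(None, y_pred)))  # ~ (0.39 + 1.10) / 2 ≈ 0.75 nats
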
Example #2
    def nll(y_true, y_pred):
        # Gaussian negative log-likelihood (up to the additive constant 0.5 * log(2 * pi)).
        # variance_tensor and epsilon are captured from the enclosing scope.
        #if variance_logits:
        #    variance_tensor = K.exp(variance_tensor)

        return 0.5 * K.mean(
            K.log(variance_tensor + epsilon) + K.square(y_true - y_pred) /
            (variance_tensor + epsilon))
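
The function above is a closure: variance_tensor and epsilon come from its enclosing scope. A minimal sketch of how such a closure might be built and evaluated, using a hypothetical factory name make_gaussian_nll and the tf.keras backend:

import numpy as np
from tensorflow.keras import backend as K

def make_gaussian_nll(variance_tensor, epsilon=1e-8):
    # Hypothetical factory: captures the predicted variance and returns a
    # Keras-style loss computing the Gaussian NLL up to an additive constant.
    def nll(y_true, y_pred):
        return 0.5 * K.mean(
            K.log(variance_tensor + epsilon) +
            K.square(y_true - y_pred) / (variance_tensor + epsilon))
    return nll

variance = K.constant([[0.25], [1.0]])          # illustrative predicted variances
loss_fn = make_gaussian_nll(variance)
print(K.eval(loss_fn(K.constant([[1.0], [2.0]]),
                     K.constant([[0.8], [2.5]]))))
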
Example #3
    def call(self, inputs):
        """
        Draws num_samples Gaussian logit samples per input, applies a
        temperature-scaled softmax to each sample, and returns the mean
        class probabilities.
        """
        assert len(inputs) == 2, \
            "This layer requires exactly two inputs (mean and variance logits)"

        logit_mean, logit_var = inputs
        logit_std = self.preprocess_variance_input(logit_var)
        logit_shape = (K.shape(logit_mean)[0], self.num_samples,
                       K.shape(logit_mean)[-1])

        # Broadcast mean and std to shape (batch, num_samples, num_classes)
        logit_mean = K.expand_dims(logit_mean, axis=1)
        logit_mean = K.repeat_elements(logit_mean, self.num_samples, axis=1)

        logit_std = K.expand_dims(logit_std, axis=1)
        logit_std = K.repeat_elements(logit_std, self.num_samples, axis=1)

        logit_samples = K.random_normal(logit_shape,
                                        mean=logit_mean,
                                        stddev=logit_std)

        # Apply max normalization for numerical stability
        logit_samples = logit_samples - K.max(
            logit_samples, axis=-1, keepdims=True)

        # Apply temperature scaling to logits
        logit_samples = logit_samples / self.temperature

        prob_samples = K.softmax(logit_samples, axis=-1)
        probs = K.mean(prob_samples, axis=1)

        # Renormalize: due to approximation error, the probabilities can otherwise sum to e.g. 1.01 or 0.99
        probs = probs / K.sum(probs, axis=-1, keepdims=True)

        return probs
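
For intuition, a NumPy-only sketch of the same Monte Carlo softmax idea; the function name and defaults below are illustrative, not the layer's actual API:

import numpy as np

def mc_softmax_probs(logit_mean, logit_std, num_samples=50, temperature=1.0, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    # Draw (batch, num_samples, classes) Gaussian logit samples.
    samples = rng.normal(loc=logit_mean[:, None, :],
                         scale=logit_std[:, None, :],
                         size=(logit_mean.shape[0], num_samples, logit_mean.shape[-1]))
    samples = samples - samples.max(axis=-1, keepdims=True)   # shift for stability
    samples = samples / temperature                           # temperature scaling
    probs = np.exp(samples)
    probs = probs / probs.sum(axis=-1, keepdims=True)         # softmax per sample
    probs = probs.mean(axis=1)                                # average over samples
    return probs / probs.sum(axis=-1, keepdims=True)          # renormalize

print(mc_softmax_probs(np.array([[2.0, 0.0, -1.0]]), np.array([[0.5, 0.5, 0.5]])))
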
Example #4
    def beta_nll(y_true, y_pred):
        # Beta-weighted Gaussian NLL: each term is scaled by the gradient-stopped
        # weight variance**beta (beta = 0 recovers the standard NLL above).
        #if variance_logits:
        #    variance_tensor = K.exp(variance_tensor)

        beta_sigma_sq = K.stop_gradient(K.pow(variance_tensor, beta))
        return 0.5 * K.mean(
            beta_sigma_sq *
            (K.log(variance_tensor + epsilon) + K.square(y_true - y_pred) /
             (variance_tensor + epsilon)))
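
The only change from the plain Gaussian NLL above is the per-term weight stop_gradient(variance ** beta). A small NumPy illustration of that weight, with made-up variances:

import numpy as np

variance = np.array([0.1, 1.0, 10.0])       # illustrative per-sample predicted variances
for beta in (0.0, 0.5, 1.0):
    print(beta, variance ** beta)           # the gradient-stopped weight on each NLL term
# beta = 0 gives all ones (standard NLL); beta = 1 scales each term by its own
# variance, counteracting the 1/variance factor in the squared-error term.
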
Example #5
def negative_log_likelihood(y_true, y_pred):
    """
        Negative log-likelihood or negative log-probability loss/metric.
        Reference: Evaluating Predictive Uncertainty Challenge, Quiñonero-Candela et al, 2006.
        For each sample it sums log(y_pred) for the true class and log(1.0 - y_pred) for the other classes, then averages over samples and negates.
    """
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())

    return -K.mean(K.sum(y_true * K.log(y_pred) +
                         (1.0 - y_true) * K.log(1.0 - y_pred),
                         axis=-1),
                   axis=-1)
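
A minimal usage sketch with one-hot targets, assuming the tf.keras backend:

import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.array([[1.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0]], dtype="float32"))
y_pred = K.constant(np.array([[0.8, 0.1, 0.1],
                              [0.2, 0.6, 0.2]], dtype="float32"))

# Lower is better; confident correct predictions push the value toward 0.
print(K.eval(negative_log_likelihood(y_true, y_pred)))  # ≈ 0.70
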
Example #6
def brier_score(y_true, y_pred):
    """
        Brier score: mean squared error between targets and predicted probabilities, averaged over both classes and samples.
    """
    return K.mean(K.square(y_true - y_pred))
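
A quick numeric check, again assuming the tf.keras backend and made-up values:

import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.array([[1.0, 0.0], [0.0, 1.0]], dtype="float32"))
y_pred = K.constant(np.array([[0.9, 0.1], [0.4, 0.6]], dtype="float32"))
print(K.eval(brier_score(y_true, y_pred)))  # (2 * 0.1**2 + 2 * 0.4**2) / 4 = 0.085
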
Example #7
    def pinball(y_true, y_pred):
        # Pinball (quantile) loss for quantile tau, captured from the enclosing scope.
        err = y_true - y_pred
        return K.mean(K.maximum(tau * err, (tau - 1.0) * err), axis=-1)
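
tau comes from the enclosing scope; a minimal sketch with a hypothetical factory make_pinball (tf.keras backend assumed):

from tensorflow.keras import backend as K

def make_pinball(tau):
    # Hypothetical factory: returns the pinball (quantile) loss for quantile tau.
    def pinball(y_true, y_pred):
        err = y_true - y_pred
        return K.mean(K.maximum(tau * err, (tau - 1.0) * err), axis=-1)
    return pinball

loss_90 = make_pinball(0.9)
# For tau = 0.9, under-prediction (err > 0) costs 9x more than over-prediction.
print(K.eval(loss_90(K.constant([[1.0]]), K.constant([[0.0]]))),  # -> 0.9
      K.eval(loss_90(K.constant([[0.0]]), K.constant([[1.0]]))))  # -> 0.1
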
Example #8
    def nll(y_true, y_pred):
        # Laplace negative log-likelihood; spread_tensor (the scale) and epsilon
        # are captured from the enclosing scope.
        return K.mean(
            K.log(2.0 * spread_tensor + epsilon) + K.abs(y_true - y_pred) /
            (spread_tensor + epsilon))
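
The expression inside K.mean is the negative log-density of a Laplace distribution with location y_pred and scale spread_tensor (ignoring the epsilon terms); a scalar sanity check with scipy, using made-up values:

import numpy as np
from scipy.stats import laplace

y_true, y_pred, spread = 1.3, 0.9, 0.5
manual = np.log(2.0 * spread) + abs(y_true - y_pred) / spread
print(np.isclose(manual, -laplace.logpdf(y_true, loc=y_pred, scale=spread)))  # True
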
Example #9
    def kl_loss(self, w, mu, sigma):
        # KL/complexity term: mean log-probability of the sampled weights w under
        # the variational posterior q(w | mu, sigma), minus the scaled log prior
        # probability of w, weighted by kl_weight.
        return self.kl_weight * K.mean(
            gaussian.log_probability(w, mu, sigma) -
            self.prior * self.log_prior_prob(w))
Example #10
    def kl_loss(self, parameter, distribution):
        # Simplified regularization term: mean log-probability of the parameter
        # under the given distribution, scaled by kl_weight.
        return self.kl_weight * K.mean(distribution.log_probability(parameter))
Example #11
    def rbf(self, z):
        # Gaussian RBF activation: squared distance to the centroids, averaged
        # over axis 1, scaled by the length scale, then exponentiated.
        z = z - self.centroids
        z = K.mean(K.square(z), axis=1) / (2.0 * self.length_scale**2)
        z = K.exp(-z)

        return z
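
A NumPy sketch of the same computation for a single centroid, assuming z and the centroid broadcast to the same shape (the layer itself stores centroids and length_scale as attributes):

import numpy as np

def rbf(z, centroid, length_scale=1.0):
    # Gaussian RBF on the feature-averaged squared distance to the centroid.
    d = np.mean(np.square(z - centroid), axis=1) / (2.0 * length_scale ** 2)
    return np.exp(-d)

z = np.array([[0.0, 0.0], [1.0, 1.0]])
print(rbf(z, centroid=np.array([1.0, 1.0])))  # [exp(-0.5), 1.0]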