Ejemplo n.º 1
0
    def nll(y_true, y_pred):
        """Gaussian negative log-likelihood (up to additive constants).

        Uses the closed-over ``variance_tensor`` as the predicted variance
        and ``epsilon`` as a floor that guards against ``log(0)`` and
        division by zero.
        """
        stabilized_var = variance_tensor + epsilon
        squared_error = K.square(y_true - y_pred)
        return 0.5 * K.mean(K.log(stabilized_var) +
                            squared_error / stabilized_var)
Ejemplo n.º 2
0
    def beta_nll(y_true, y_pred):
        """Beta-NLL: Gaussian NLL re-weighted per-point by sigma^(2*beta).

        The weight ``variance_tensor ** beta`` is wrapped in
        ``stop_gradient`` so it rescales the loss without contributing
        gradients of its own; ``epsilon`` stabilizes the log and division.
        """
        stabilized_var = variance_tensor + epsilon
        detached_weight = K.stop_gradient(K.pow(variance_tensor, beta))
        nll_term = K.log(stabilized_var) + \
            K.square(y_true - y_pred) / stabilized_var
        return 0.5 * K.mean(detached_weight * nll_term)
Ejemplo n.º 3
0
def add_gradient_penalty(model, lambda_coeff=0.5, penalty_type="two-sided"):
    """Attach a gradient penalty to ``model`` as an auxiliary loss.

    Penalizes the squared gradient of the summed model output w.r.t. the
    model input, pushing its magnitude toward 1 (two-sided) or below 1
    (one-sided). The penalty is active only in the training phase.

    Args:
        model: Keras model whose input/output tensors define the gradient.
        lambda_coeff: Scalar weight applied to the penalty term.
        penalty_type: "two-sided" for (g - 1)^2 or "one-sided" for
            max(0, g - 1).

    Raises:
        ValueError: If ``penalty_type`` is not one of the two valid values.
    """
    # NOTE(review): K.gradients returns a list of tensors; K.square is
    # applied to it directly here — presumably the model has a single
    # input, so this collapses to one tensor. TODO confirm.
    term = K.gradients(K.sum(model.output, axis=1), model.input)
    term = K.square(term)

    if penalty_type == "two-sided":
        penalty = (term - 1)**2
    elif penalty_type == "one-sided":
        # BUGFIX: the original called K.max(0, term - 1), but K.max is the
        # reduction (signature K.max(x, axis=...)) — it would treat 0 as the
        # tensor and `term - 1` as the axis and fail at graph-build time.
        # The elementwise hinge needs K.maximum.
        penalty = K.maximum(0.0, term - 1)
    else:
        raise ValueError(
            "Invalid penalty type {}, valid values are [one-sided, two-sided]".
            format(penalty_type))

    penalty = lambda_coeff * penalty
    # Zero out the penalty at inference time; it only regularizes training.
    penalty = K.in_train_phase(penalty, K.zeros(shape=(1, )))

    model.add_loss(penalty)
Ejemplo n.º 4
0
def brier_score(y_true, y_pred):
    """Brier score: the mean squared error between predicted
    probabilities and the true labels."""
    squared_diff = K.square(y_true - y_pred)
    return K.mean(squared_diff)
Ejemplo n.º 5
0
    def rbf(self, z):
        """Radial basis function kernel value of ``z`` against the
        stored centroids, using the instance's length scale.

        Returns exp(-mean_squared_distance / (2 * length_scale^2)),
        reduced over axis 1.
        """
        centered = z - self.centroids
        scaled_distance = K.mean(K.square(centered),
                                 axis=1) / (2.0 * self.length_scale**2)
        return K.exp(-scaled_distance)
Ejemplo n.º 6
0
def log_probability(x, mu, sigma):
    """Log-density of a Gaussian N(mu, sigma^2) evaluated at ``x``.

    ``NegHalfLog2PI`` is the closed-over constant -0.5 * log(2*pi).
    """
    standardized = (x - mu) / sigma
    return NegHalfLog2PI - K.log(sigma) - 0.5 * K.square(standardized)
Ejemplo n.º 7
0
 def log_probability(self, x):
     """Log-density of N(self.mean, self.std^2) evaluated at ``x``.

     ``NegHalfLog2PI`` is the module-level constant -0.5 * log(2*pi).
     """
     standardized = (x - self.mean) / self.std
     return NegHalfLog2PI - K.log(self.std) - 0.5 * K.square(standardized)
Ejemplo n.º 8
0
def probability(x, mu, sigma):
    """Density of a Gaussian N(mu, sigma^2) evaluated at ``x``.

    ``InvSqrt2PI`` is the closed-over constant 1 / sqrt(2*pi).
    """
    squared_z = K.square((x - mu) / sigma)
    # Keep the original multiplication order for bit-identical results.
    return InvSqrt2PI * (1.0 / sigma) * K.exp(-0.5 * squared_z)