Code example #1
import tensorflow as tf
from tensorflow.keras import backend as K


def pearson_correlation_metric_fn(
    y_true: tf.Tensor,
    y_pred: tf.Tensor,
) -> tf.Tensor:
    """
    Pearson correlation metric function.
    https://github.com/WenYanger/Keras_Metrics

    Args:
        y_true (tf.Tensor): ground-truth values
        y_pred (tf.Tensor): predicted values

    Returns:
        tf.Tensor: Pearson correlation coefficient
    """

    x = y_true
    y = y_pred
    # Center both tensors around their per-column means.
    mx = K.mean(x, axis=0)
    my = K.mean(y, axis=0)
    xm, ym = x - mx, y - my
    # Pearson r = covariance / (std_x * std_y); the small epsilon keeps the
    # denominator non-zero.
    r_num = K.sum(xm * ym)
    x_square_sum = K.sum(xm * xm)
    y_square_sum = K.sum(ym * ym)
    r_den = K.sqrt(x_square_sum * y_square_sum) + 1e-12
    r = r_num / r_den
    return K.mean(r)
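As a usage note (not from the source), a custom metric like this is simply passed to compile; the model below is a hypothetical placeholder.

from tensorflow import keras

# Hypothetical regression model; only the metrics argument matters here.
model = keras.Sequential([keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer="adam",
              loss="mse",
              metrics=[pearson_correlation_metric_fn])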
Code example #2
File: untitled1.py Project: dedbox/TOAD-GAN
def vae_loss(self, x, z_decoded):
    # Class-method excerpt. Assumes `from tensorflow import keras` and
    # `from tensorflow.keras import backend as K`, and that the encoder's
    # latent mean / log-variance tensors z_mu and z_sigma are in scope.
    x = K.flatten(x)
    z_decoded = K.flatten(z_decoded)

    # Reconstruction loss (the decoder uses a sigmoid activation,
    # so binary_crossentropy is appropriate)
    recon_loss = keras.metrics.binary_crossentropy(x, z_decoded)

    # KL divergence between the approximate posterior and a unit Gaussian
    kl_loss = -5e-4 * K.mean(1 + z_sigma - K.square(z_mu) - K.exp(z_sigma), axis=-1)
    return K.mean(recon_loss + kl_loss)
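For orientation, a minimal hedged sketch of where the free variables come from (all layer sizes and names below are assumptions, not from the source): z_mu and z_sigma, read here as the latent mean and log-variance, would be defined while building the encoder, before the loss is used.

from tensorflow import keras
from tensorflow.keras import backend as K

latent_dim = 2
inputs = keras.Input(shape=(784,))
h = keras.layers.Dense(64, activation="relu")(inputs)
z_mu = keras.layers.Dense(latent_dim, name="z_mu")(h)
z_sigma = keras.layers.Dense(latent_dim, name="z_log_var")(h)

def sample_z(args):
    # Reparameterization trick: z = mu + exp(log_var / 2) * eps
    mu, log_var = args
    eps = K.random_normal(shape=(K.shape(mu)[0], latent_dim))
    return mu + K.exp(log_var / 2) * eps

z = keras.layers.Lambda(sample_z)([z_mu, z_sigma])
decoded = keras.layers.Dense(784, activation="sigmoid")(z)
vae = keras.Model(inputs, decoded)

With these tensors in scope, the method above can then be attached to the model, for example via vae.compile(optimizer="adam", loss=...) with the bound method as the loss.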
Code example #3
    # Closure: old_prediction, advantage and get_log_probability_density are
    # assumed to come from the enclosing scope; np is numpy, K is the Keras backend.
    def loss(y_true, y_pred):
        PPO_LOSS_CLIPPING = 0.2
        PPO_ENTROPY_LOSS = 5 * 1e-3  # Does not converge without entropy penalty

        log_pdf_new = get_log_probability_density(y_pred, y_true)
        log_pdf_old = get_log_probability_density(old_prediction, y_true)

        # Probability ratio between the new and old policies.
        ratio = K.exp(log_pdf_new - log_pdf_old)
        surrogate1 = ratio * advantage
        clip_ratio = K.clip(ratio,
                            min_value=(1 - PPO_LOSS_CLIPPING),
                            max_value=(1 + PPO_LOSS_CLIPPING))
        surrogate2 = clip_ratio * advantage

        # Clipped surrogate objective (PPO), negated because Keras minimizes.
        loss_actor = -K.mean(K.minimum(surrogate1, surrogate2))

        # y_pred is assumed to concatenate [mu, sigma]; columns 2: hold sigma.
        sigma = y_pred[:, 2:]
        variance = K.square(sigma)

        # Entropy bonus of the Gaussian policy encourages exploration.
        loss_entropy = PPO_ENTROPY_LOSS * K.mean(
            -(K.log(2 * np.pi * variance) + 1) / 2)

        return loss_actor + loss_entropy
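The loss above closes over advantage, old_prediction, and get_log_probability_density from its enclosing scope. A minimal hedged sketch of the usual factory pattern (the input shapes and names below are assumptions, not from the source), where both tensors enter the model as extra inputs so each sample carries its own values:

from tensorflow import keras

def make_ppo_loss(advantage, old_prediction):
    # Binds the free variables of `loss` (shown above); the body of the
    # inner function is elided here and would be the code from example #3.
    def loss(y_true, y_pred):
        ...
    return loss

advantage_in = keras.Input(shape=(1,), name="advantage")
old_pred_in = keras.Input(shape=(4,), name="old_prediction")
# actor = keras.Model([state_in, advantage_in, old_pred_in], policy_out)
# actor.compile(optimizer="adam",
#               loss=make_ppo_loss(advantage_in, old_pred_in))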
Code example #4
def contrastive_loss(y_true, y_pred):
    # m_pos and m_neg are margin hyperparameters from the enclosing scope.
    return K.mean(y_true * K.square(K.maximum(y_pred - m_pos, 0)) +
                  (1 - y_true) * K.square(K.maximum(m_neg - y_pred, 0)))
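The margins are free variables here. A hedged sketch (values and the model name are assumptions, not from the source), interpreting y_pred as a pairwise distance:

# Hypothetical margins: similar pairs (y_true = 1) are pulled below m_pos,
# dissimilar pairs (y_true = 0) are pushed above m_neg.
m_pos = 0.1
m_neg = 1.0

# siamese_model.compile(optimizer="adam", loss=contrastive_loss)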
Code example #5
def mean_pred(y_true, y_pred):
    # Despite the name, this is binary cross-entropy (with a 1e-7 epsilon
    # for numerical stability) scaled by a factor of 10.
    return -K.mean(y_true * K.log(y_pred + 1.e-7) +
                   (1 - y_true) * K.log(1 - y_pred + 1.e-7)) * 10
Code example #6
def triplet_loss(_, y_diff):
    # m is the margin from the enclosing scope; y_diff is the precomputed
    # positive-minus-negative distance, so this is the standard hinge term.
    return K.mean(K.maximum(y_diff + m, 0))
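y_diff is expected to be produced by the network itself; the first argument is ignored. A hedged sketch (shapes, names, and the margin value are assumptions, not from the source) of how such a difference of distances is commonly built:

from tensorflow import keras
from tensorflow.keras import backend as K

# Hypothetical embedding tensors of an anchor/positive/negative triplet.
anchor_emb = keras.Input(shape=(128,))
positive_emb = keras.Input(shape=(128,))
negative_emb = keras.Input(shape=(128,))

def pair_distance(tensors):
    # Squared Euclidean distance between two batches of embeddings.
    a, b = tensors
    return K.sum(K.square(a - b), axis=-1, keepdims=True)

d_pos = keras.layers.Lambda(pair_distance)([anchor_emb, positive_emb])
d_neg = keras.layers.Lambda(pair_distance)([anchor_emb, negative_emb])
y_diff = keras.layers.Subtract()([d_pos, d_neg])  # positive minus negative

m = 0.2  # hypothetical margin the loss closes over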
Code example #7
def root_mean_squared_error(y_true, y_pred):
    # RMSE: square root of the mean squared error.
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
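A usage sketch (the model and layer sizes are assumptions, not from the source): the function works both as the training loss and as an extra metric.

from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam",
              loss=root_mean_squared_error,
              metrics=[root_mean_squared_error])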
Code example #8
def __cosine(x, y):
    # L2-normalize both inputs, then average the element-wise products:
    # the cosine similarity scaled by 1/dim, negated so it can act as a loss.
    x = K.l2_normalize(x, axis=-1)
    y = K.l2_normalize(y, axis=-1)
    c = K.mean(x * y, axis=-1)
    return -c
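A hedged sketch (input shapes and names are assumptions, not from the source) of using this helper as a similarity head between two embedding tensors:

from tensorflow import keras

emb_a = keras.Input(shape=(64,))
emb_b = keras.Input(shape=(64,))

# Negative cosine similarity between the two embeddings, scaled by 1/64
# because of the K.mean over the feature axis.
neg_cos = keras.layers.Lambda(lambda t: __cosine(t[0], t[1]))([emb_a, emb_b])
similarity_model = keras.Model([emb_a, emb_b], neg_cos)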