def get_kernel(X, param, kernel):
    assert kernel in codes.properties(
        codes.Kernels), 'Unsupported kernel mode'
    if kernel == codes.Kernels.StudentT:
        return StudentT(X, param)
    elif kernel == codes.Kernels.RBF:
        return RBF(X, param)
    elif kernel == codes.Kernels.Cauchy:
        return Cauchy(X, param)
def get_kernel(X, batch_size, param, epsilon, kernel):
    assert kernel in codes.properties(
        codes.Kernels), 'Unsupported kernel mode'
    if kernel == codes.Kernels.StudentT:
        return StudentT(X, batch_size, df=param, epsilon=epsilon)
    elif kernel == codes.Kernels.RBF:
        return RBF(X, batch_size, df=param, epsilon=epsilon)
    elif kernel == codes.Kernels.Cauchy:
        return Cauchy(X, batch_size, df=param, epsilon=epsilon)
def __init__(self):
    # Copy the attributes listed by properties(default_config) onto this instance.
    ddict = {key: item for key, item in default_config.__dict__.items()
             if key in properties(default_config)}
    for k, v in ddict.items():
        setattr(self, k, v)
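To make the copying pattern above concrete, here is a small self-contained sketch; default_config and properties are stand-ins for the project's real objects, and the stub properties() simply filters out private names, which is assumed to be roughly what the real helper does.

import types

# Stand-in for the project's default_config module/object.
default_config = types.SimpleNamespace(batch_size=64, epsilon=1e-8, _private=0)

def properties(obj):
    # Keep only public attribute names (assumed behaviour of the real helper).
    return [name for name in vars(obj) if not name.startswith('_')]

class Config:
    def __init__(self):
        for key in properties(default_config):
            setattr(self, key, getattr(default_config, key))

cfg = Config()
print(cfg.batch_size, cfg.epsilon)   # -> 64 1e-08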
Example #4
def get_reconst_loss(x, x_recons, loss_func, epsilon=config.epsilon):
    """
    Returns the reconstruction loss between x and x_recons.
    Two modes:
     OLS:
        mean pairwise squared error between x and x_recons
        (via tf.losses.mean_pairwise_squared_error)
     MLE:
        maximum likelihood estimator, i.e. the expected log-likelihood term
        of the lower bound, computed with a Bernoulli log-likelihood.
    """
    assert loss_func in codes.properties(codes.Losses), \
        'Unsupported reconstruction loss loss_func'
    if loss_func == codes.Losses.MLE:
        # Negative Bernoulli log-likelihood; epsilon guards against log(0).
        return -tf.reduce_sum(x * tf.log(x_recons + epsilon) +
                              (1 - x) * tf.log(1 - x_recons + epsilon), 1)
    else:
        return tf.losses.mean_pairwise_squared_error(x, x_recons)
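A quick numeric check of the MLE branch, assuming the TensorFlow 1.x API used throughout these snippets (tf.log, tf.Session); the 4-pixel batch is made up for illustration.

import tensorflow as tf

x = tf.constant([[0., 1., 1., 0.]])
x_recons = tf.constant([[0.1, 0.9, 0.8, 0.2]])
epsilon = 1e-8

# Same Bernoulli negative log-likelihood as the MLE branch above.
nll = -tf.reduce_sum(x * tf.log(x_recons + epsilon) +
                     (1 - x) * tf.log(1 - x_recons + epsilon), 1)

with tf.Session() as sess:
    print(sess.run(nll))   # ~[0.657] = -(log 0.9 + log 0.9 + log 0.8 + log 0.8)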
Example #5
def get_divergence(meanQ, log_varQ, meanP, log_varP, div_loss):
    assert div_loss in codes.properties(codes.Losses), \
        'Unsupported divergence loss div_loss'
    if div_loss == codes.Losses.KLD:
        return get_KL_div(meanQ, log_varQ, meanP, log_varP)

    elif div_loss == codes.Losses.RKLD:
        return -get_KL_div(meanP, log_varP, meanQ, log_varQ)

    elif div_loss == codes.Losses.JS:
        return get_KL_div(meanQ, log_varQ, meanP, log_varP) * 0.5 + \
               get_KL_div(meanP, log_varP, meanQ, log_varQ) * 0.5

    elif div_loss == codes.Losses.CHI2:
        return -0.5 * tf.reduce_sum(
            tf.exp(log_varP) + log_varQ
            - (tf.square(meanQ - meanP) / tf.log(log_varP) - 1)**2
            - tf.exp(log_varQ - log_varP)**2, 1)

    elif div_loss == codes.Losses.Helling:
        return -0.5 * tf.reduce_sum(
            tf.exp(log_varP) + log_varQ
            - (tf.square(tf.square(meanQ - meanP) / tf.log(log_varP)) - 1)**2
            - tf.exp(log_varQ - log_varP)**2, 1)
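get_KL_div itself is not shown in this listing. For reference, below is a sketch of the standard closed-form KL divergence between two diagonal Gaussians, written with the same (meanQ, log_varQ, meanP, log_varP) argument order that get_divergence uses; whether the project's helper matches this exactly is an assumption.

def kl_div_diag_gaussians(meanQ, log_varQ, meanP, log_varP):
    # KL(Q || P) for diagonal Gaussians Q = N(meanQ, exp(log_varQ)) and
    # P = N(meanP, exp(log_varP)), summed over the latent dimensions.
    return 0.5 * tf.reduce_sum(
        log_varP - log_varQ
        + (tf.exp(log_varQ) + tf.square(meanQ - meanP)) / tf.exp(log_varP)
        - 1.0, 1)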
Example #6
def get_distributions_div_cost(Px, Qx, loss_func, epsilon=config.epsilon):

    assert loss_func in codes.properties(codes.Losses), \
        'Unsupported divergence loss loss_func'

    if loss_func == codes.Losses.KLD:
        return kl_divergence(Px, Qx)

    elif loss_func == codes.Losses.RKLD:
        return -kl_divergence(Qx, Px)

    elif loss_func == codes.Losses.JS:
        return kl_divergence(Px, Qx) * 0.5 + \
               kl_divergence(Qx, Px) * 0.5

    elif loss_func == codes.Losses.CHI2:
        Pxc = tf.maximum(Px, epsilon)
        Qxc = tf.maximum(Qx, epsilon)
        return tf.reduce_sum(Qx * (Pxc / Qxc - 1.)**2)

    elif loss_func == codes.Losses.Helling:
        Pxc = tf.maximum(Px, epsilon)
        Qxc = tf.maximum(Qx, epsilon)
        return tf.reduce_sum(Qx * (tf.sqrt(Pxc / Qxc) - 1.)**2)
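A usage sketch for the distribution-based variant, assuming Px and Qx are TensorFlow distribution objects and that the kl_divergence referenced above is tf.distributions.kl_divergence from TensorFlow 1.x; the Normal parameters are illustrative only.

import tensorflow as tf

Px = tf.distributions.Normal(loc=0.0, scale=1.0)
Qx = tf.distributions.Normal(loc=1.0, scale=2.0)

# Symmetrised cost built from two KL terms, as in the JS branch above.
js_cost = get_distributions_div_cost(Px, Qx, codes.Losses.JS)

with tf.Session() as sess:
    print(sess.run(js_cost))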