def kl_div(input, target, reduction='none', log_target=False):
    """Pointwise Kullback-Leibler divergence between `target` and `input`.

    `input` is expected to be log-probabilities. If `log_target` is True,
    `target` is also in log-space; otherwise it holds plain probabilities.
    `reduction` is one of 'none' (default), 'sum', or 'mean'.
    """
    if log_target:
        pointwise = ops.exp(target) * (target - input)
    else:
        # target * log(target) is undefined (NaN/-inf) where target == 0;
        # mask those positions to zero so they contribute nothing.
        raw = target * (ops.log(target) - input)
        pointwise = ops.select(target > 0, raw, ops.zeros_like(input))
    if reduction == 'mean':
        return pointwise.mean()
    if reduction == 'sum':
        return pointwise.sum()
    return pointwise
def binary_cross_entropy_with_logits(input, target, weight=None, reduction='mean', pos_weight=None):
    """Binary cross entropy on raw logits, computed in a numerically stable form.

    Args:
        input: raw (unnormalized) logits.
        target: labels, same shape as `input`.
        weight: optional elementwise rescaling factor applied to the loss.
        reduction: 'mean' (default), 'sum', or anything else for no reduction.
        pos_weight: optional weight for the positive class.
    """
    max_val = ops.maximum(-input, 0)
    # log(1 + exp(-input)) evaluated without overflow:
    # split off max(-input, 0) so both exponents are <= 0.
    log_term = ops.log(ops.exp(-max_val) + ops.exp(-input - max_val))
    if pos_weight is not None:
        coeff = ((pos_weight - 1) * target) + 1
        loss = (1 - target) * input + coeff * (log_term + max_val)
    else:
        loss = (1 - target) * input + max_val + log_term
    if weight is not None:
        loss = loss * weight
    if reduction == "mean":
        return ops.reduce_mean(loss)
    if reduction == "sum":
        return ops.reduce_sum(loss)
    return loss
def get_distribution(self, x_d, dp_mean, dp_std):
    """Log-probability of `x_d` under `self.normal` with mean `dp_mean`.

    `dp_std` is exponentiated before use, so it is presumably a
    log-standard-deviation — exp() guarantees a positive scale.
    """
    scale = ops.exp(dp_std)
    return self.normal.log_prob(x_d, dp_mean, scale)
def logsigmoid(input):
    """Numerically stable log(sigmoid(input)).

    The naive form log(1 / (1 + exp(-input))) overflows for large negative
    inputs: exp(-input) becomes inf, so the result collapses to -inf instead
    of approximately `input`. Uses the identity
        log(sigmoid(x)) = min(x, 0) - log(1 + exp(-|x|)),
    where exp(-|x|) <= 1 and therefore can never overflow.
    """
    # -|x| expressed as min(x, -x) to stay within the ops primitives
    # already used elsewhere in this module.
    neg_abs = ops.minimum(input, -input)
    return ops.minimum(input, 0) - ops.log(1 + ops.exp(neg_abs))
def celu(input, alpha=1.0):
    """Continuously differentiable ELU:
    celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).
    """
    positive_part = ops.maximum(0, input)
    negative_part = ops.minimum(0, alpha * (ops.exp(input / alpha) - 1))
    return positive_part + negative_part
def selu(input):
    """Scaled Exponential Linear Unit:
    selu(x) = scale * (max(0, x) + min(0, alpha * (exp(x) - 1)))
    with the fixed scale/alpha constants of Klambauer et al. (2017).
    """
    scale = 1.0507009873554804934193349852946
    alpha = 1.6732632423543772848170429916717
    elu_part = ops.maximum(0, input) + ops.minimum(0, alpha * (ops.exp(input) - 1))
    return scale * elu_part