Example #1
import math

import paddle
from paddle import Tensor


def mu_law_encode(x: Tensor, mu: int = 256, quantized: bool = True) -> Tensor:
    """Mu-law encoding.
    Compute the mu-law encoding of the input signal.
    When quantized is True, the result is converted to an
    integer in the range [0, mu-1]. Otherwise, the resulting signal
    is in the range [-1, 1].

    Parameters:
        x(Tensor): the input tensor of arbitrary shape to be encoded.
        mu(int): the maximum value (depth) of the encoded signal. The signal
            will be clipped to the range [0, mu-1].
        quantized(bool): indicates whether the signal will be quantized to integers.

    Examples:
        .. code-block:: python

            import paddle
            import paddleaudio.functional as F
            F.mu_law_encode(paddle.randn((2, 8)))
            >> Tensor(shape=[2, 8], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
                  [[0, 5, 30, 255, 255, 255, 12, 13],
                   [0, 241, 8, 243, 7, 35, 84, 228]])

    Reference:
        https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
    """
    mu = mu - 1
    y = paddle.sign(x) * paddle.log1p(mu * paddle.abs(x)) / math.log1p(mu)
    if quantized:
        y = (y + 1) / 2 * mu + 0.5  # convert to [0 , mu-1]
        y = paddle.clip(y, min=0, max=mu).astype('int32')
    return y
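A minimal check of the snippet above (the names mu_law_encode, paddle, and numpy are assumed to be in scope): with quantized=False the output should match the textbook mu-law formula sign(x) * ln(1 + mu*|x|) / ln(1 + mu), and with quantized=True it becomes an int32 code in [0, mu-1].

import math
import numpy as np
import paddle

x = paddle.to_tensor([-1.0, -0.5, 0.0, 0.25, 1.0])
y = mu_law_encode(x, mu=256, quantized=False)

# The function internally uses mu - 1 = 255 as the compression constant.
mu = 255
expected = np.sign(x.numpy()) * np.log1p(mu * np.abs(x.numpy())) / math.log1p(mu)
print(np.allclose(y.numpy(), expected))  # True

# Quantized output: int32 codes clipped to [0, 255].
print(mu_law_encode(x, mu=256, quantized=True))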
Example #2
 def _probs_to_logits(self, probs, is_binary=False):
     r"""
     Converts probabilities into logits. In the binary case, probs denotes the
     probability of occurrence of the event indexed by `1`. In the
     multi-dimensional case, the values along the last axis denote the
     probabilities of occurrence of each of the events.
     """
     return (paddle.log(probs) - paddle.log1p(-probs)) \
         if is_binary else paddle.log(probs)
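A hedged, standalone restatement of the method above (the free-function name probs_to_logits is illustrative, not a Paddle API): the binary branch is the logit function log(p) - log(1 - p), with log1p(-p) as the numerically stable form of log(1 - p), and applying the sigmoid recovers the original probabilities.

import paddle
import paddle.nn.functional as F

def probs_to_logits(probs, is_binary=False):
    # Binary: logit(p) = log(p) - log(1 - p); multi-class: plain log.
    return (paddle.log(probs) - paddle.log1p(-probs)) if is_binary else paddle.log(probs)

p = paddle.to_tensor([0.1, 0.5, 0.9])
logits = probs_to_logits(p, is_binary=True)
print(F.sigmoid(logits))  # ~[0.1, 0.5, 0.9]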
Example #3
    def _binomial_logpmf(self, count, value):
        logits = self._probs_to_logits(self.probs, is_binary=True)

        factor_n = paddle.lgamma(count + 1)
        factor_k = paddle.lgamma(value + 1)
        factor_nmk = paddle.lgamma(count - value + 1)

        norm = (count * _clip_by_zero(logits) +
                count * paddle.log1p(paddle.exp(-paddle.abs(logits))) -
                factor_n)

        return value * logits - factor_k - factor_nmk - norm
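The method above evaluates the binomial log-pmf log C(n, k) + k*log(p) + (n - k)*log(1 - p) in logit space; assuming _clip_by_zero(x) is max(x, 0), the pattern max(logits, 0) + log1p(exp(-|logits|)) is the stable way to compute log(1 + exp(logits)) = -log(1 - p). The sketch below computes the same quantity in the direct, less stable form for comparison; the helper name is illustrative.

import paddle

def binomial_logpmf_direct(n, k, p):
    # log C(n, k) + k*log(p) + (n - k)*log(1 - p)
    log_comb = paddle.lgamma(n + 1) - paddle.lgamma(k + 1) - paddle.lgamma(n - k + 1)
    return log_comb + k * paddle.log(p) + (n - k) * paddle.log1p(-p)

n = paddle.to_tensor(10.0)
k = paddle.to_tensor(3.0)
p = paddle.to_tensor(0.2)
print(binomial_logpmf_direct(n, k, p))  # log P(X = 3) for X ~ Binomial(10, 0.2)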
Example #4
    def __init__(self, range_max, n_sample):
        with paddle.no_grad():
            self.range_max = range_max
            log_indices = paddle.log(
                paddle.arange(1., range_max + 2., 1., dtype=global_dtype))
            self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]

            self.log_q = paddle.cast(paddle.log(
                paddle.exp(-(
                    paddle.log1p(-paddle.cast(self.dist, dtype=global_dtype)) *
                    2 * n_sample)) - 1),
                                     dtype=global_dtype)

        self.n_sample = n_sample
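The proposal distribution built above is log-uniform (Zipf-like): P(class = i) = (log(i + 2) - log(i + 1)) / log(range_max + 1) for i = 0 .. range_max - 1, and log_q is a precomputed correction term for sampled softmax. A small numpy reproduction of the probabilities (global_dtype in the snippet is assumed to be something like 'float32'):

import numpy as np

range_max = 10
log_indices = np.log(np.arange(1.0, range_max + 2.0))
dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
print(dist)        # highest probability for the smallest class indices
print(dist.sum())  # ~1.0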
Example #5
    def gwd_loss(self,
                 pred,
                 target,
                 fun='log',
                 tau=1.0,
                 alpha=1.0,
                 normalize=False):

        xy_p, R_p, S_p = self.xywhr2xyrs(pred)
        xy_t, R_t, S_t = self.xywhr2xyrs(target)

        xy_distance = (xy_p - xy_t).square().sum(axis=-1)

        Sigma_p = R_p.matmul(S_p.square()).matmul(R_p.transpose([0, 2, 1]))
        Sigma_t = R_t.matmul(S_t.square()).matmul(R_t.transpose([0, 2, 1]))

        whr_distance = paddle.diagonal(
            S_p, axis1=-2, axis2=-1).square().sum(axis=-1)

        whr_distance = whr_distance + paddle.diagonal(
            S_t, axis1=-2, axis2=-1).square().sum(axis=-1)
        _t = Sigma_p.matmul(Sigma_t)

        _t_tr = paddle.diagonal(_t, axis1=-2, axis2=-1).sum(axis=-1)
        _t_det_sqrt = paddle.diagonal(S_p, axis1=-2, axis2=-1).prod(axis=-1)
        _t_det_sqrt = _t_det_sqrt * paddle.diagonal(
            S_t, axis1=-2, axis2=-1).prod(axis=-1)
        whr_distance = whr_distance + (-2) * (
            (_t_tr + 2 * _t_det_sqrt).clip(0).sqrt())

        distance = (xy_distance + alpha * alpha * whr_distance).clip(0)

        if normalize:
            wh_p = pred[..., 2:4].clip(min=1e-7, max=1e7)
            wh_t = target[..., 2:4].clip(min=1e-7, max=1e7)
            scale = ((wh_p.log() + wh_t.log()).sum(axis=-1) / 4).exp()
            distance = distance / scale

        if fun == 'log':
            distance = paddle.log1p(distance)

        if tau >= 1.0:
            return 1 - 1 / (tau + distance)

        return distance
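Only the final mapping is sketched here (illustrative values, not the full class): with fun='log' and tau >= 1, a non-negative Wasserstein-style distance d is turned into 1 - 1 / (tau + log1p(d)), which starts at 0 for d = 0, grows monotonically, and stays bounded below 1 for large d.

import paddle

d = paddle.to_tensor([0.0, 1.0, 10.0, 1000.0])
tau = 1.0
loss = 1 - 1 / (tau + paddle.log1p(d))
print(loss)  # increasing with d, bounded above by 1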
Example #6
 def __init__(self,
              num_classes,
              n_sample,
              unique=True,
              remove_accidental_hits=True,
              subtract_log_q=True,
              num_true=1,
              batch_size=None):
     super(Mind_SampledSoftmaxLoss_Layer, self).__init__()
     self.range_max = num_classes
     self.n_sample = n_sample
     self.unique = unique
     self.remove_accidental_hits = remove_accidental_hits
     self.subtract_log_q = subtract_log_q
     self.num_true = num_true
     self.prob = np.array([0.0] * self.range_max)
     self.batch_size = batch_size
     for i in range(1, self.range_max):
         self.prob[i] = (np.log(i+2) - np.log(i+1)) / \
             np.log(self.range_max + 1)
     self.new_prob = paddle.assign(self.prob.astype("float32"))
     self.log_q = paddle.log(-(paddle.exp(
         (-paddle.log1p(self.new_prob) * 2 * n_sample)) - 1.0))
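The precomputed array serves as a proposal distribution for drawing negative classes in sampled softmax. An illustrative numpy draw (note that the loop above leaves prob[0] at 0.0, so class 0 is never proposed, and the probabilities are renormalized here because they do not sum exactly to 1):

import numpy as np

range_max, n_sample = 1000, 20
prob = np.zeros(range_max)
for i in range(1, range_max):
    prob[i] = (np.log(i + 2) - np.log(i + 1)) / np.log(range_max + 1)
prob = prob / prob.sum()
neg_ids = np.random.choice(range_max, size=n_sample, p=prob)
print(neg_ids)  # sampled negative class ids, biased toward small indices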
Example #7
def _sigmoid_cross_entropy_with_logits(logits, labels):
  # to be compatible with tensorflow, we don't use ignore_idx
  loss = paddle.clip(logits, min=0) - logits * labels.astype(logits.dtype)
  loss += paddle.log1p(paddle.exp(-paddle.abs(logits)))
  return loss
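A quick check of the snippet above against Paddle's built-in loss (assuming _sigmoid_cross_entropy_with_logits and paddle are in scope): max(x, 0) - x*z + log1p(exp(-|x|)) is the numerically stable form of the element-wise sigmoid cross entropy, so it should agree with binary_cross_entropy_with_logits using reduction='none'.

import paddle
import paddle.nn.functional as F

logits = paddle.to_tensor([-3.0, -0.5, 0.0, 2.0])
labels = paddle.to_tensor([0.0, 1.0, 1.0, 0.0])

manual = _sigmoid_cross_entropy_with_logits(logits, labels)
builtin = F.binary_cross_entropy_with_logits(logits, labels, reduction='none')
print(paddle.allclose(manual, builtin))  # True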
Example #8
def logit_transform(image, lam=1e-6):
    image = lam + (1 - 2 * lam) * image
    return paddle.log(image) - paddle.log1p(-image)
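logit_transform maps pixel values from [0, 1] onto the real line, first squeezing them into [lam, 1 - lam] to avoid infinities. A round-trip sketch, assuming the function above is in scope; the inverse is a sigmoid followed by undoing the lam squeeze:

import paddle
import paddle.nn.functional as F

lam = 1e-6
image = paddle.uniform([2, 3], min=0.0, max=1.0)
z = logit_transform(image, lam)
recovered = (F.sigmoid(z) - lam) / (1 - 2 * lam)
print((image - recovered).abs().max())  # ~0 (float32 round-trip error)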
Example #9
 def forward(self, inputs):
     """
     Forward pass: apply log1p element-wise to the inputs.
     """
     x = paddle.log1p(inputs)
     return x