Code Example #1
    def __call__(self, preds, targets):
        heatmaps_gt, mask = targets
        heatmaps_pred = preds[0]
        scalemaps_pred = preds[1]

        heatmaps_scaled_gt = paddle.where(
            heatmaps_gt > 0, 0.5 * heatmaps_gt *
            (1 + (1 +
                  (scalemaps_pred - 1.) * paddle.log(heatmaps_gt + 1e-10))**2),
            heatmaps_gt)

        regularizer_loss = paddle.mean(
            paddle.pow((scalemaps_pred - 1.) *
                       (heatmaps_gt > 0).astype('float32'), 2))
        omega = 0.01
        # thres = 2**(-1/omega), threshold for positive weight
        hm_weight = heatmaps_scaled_gt**(omega) * paddle.abs(
            1 - heatmaps_pred) + paddle.abs(heatmaps_pred) * (
                1 - heatmaps_scaled_gt**(omega))

        loss = (((heatmaps_pred - heatmaps_scaled_gt)**2) *
                mask.cast('float32').unsqueeze(1)) * hm_weight
        loss = loss.mean()
        loss = self.loss_factor * (loss + 1.0 * regularizer_loss)
        return loss
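For reference, the scale-aware reweighting at the heart of this loss can be exercised on its own. A minimal sketch, assuming made-up [N, K, H, W] heatmap shapes (nothing here is taken from the surrounding class):

import paddle

heatmaps_gt = paddle.rand([2, 17, 64, 48])       # hypothetical N, K, H, W
scalemaps_pred = paddle.rand([2, 17, 64, 48])
heatmaps_scaled_gt = paddle.where(
    heatmaps_gt > 0,
    0.5 * heatmaps_gt *
    (1 + (1 + (scalemaps_pred - 1.) * paddle.log(heatmaps_gt + 1e-10))**2),
    heatmaps_gt)
print(heatmaps_scaled_gt.shape)                  # same shape as the inputs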
Code Example #2
File: distill.py Project: xiegegege/PaddleDetection
 def obj_weighted_reg(self, sx, sy, sw, sh, tx, ty, tw, th, tobj):
     loss_x = ops.sigmoid_cross_entropy_with_logits(sx, F.sigmoid(tx))
     loss_y = ops.sigmoid_cross_entropy_with_logits(sy, F.sigmoid(ty))
     loss_w = paddle.abs(sw - tw)
     loss_h = paddle.abs(sh - th)
     loss = paddle.add_n([loss_x, loss_y, loss_w, loss_h])
     weighted_loss = paddle.mean(loss * F.sigmoid(tobj))
     return weighted_loss
Code Example #3
File: loss_opr.py Project: lu1kaifeng/CrowdDet
def smooth_l1_loss(pred, target, beta: float):
    if beta < 1e-5:
        loss = torch.abs(pred - target)
    else:
        abs_x = torch.abs(pred - target)
        in_mask = abs_x < beta
        loss = torch.where(in_mask, 0.5 * abs_x ** 2 / beta, abs_x - 0.5 * beta)
    return loss.sum(axis=1)
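A quick usage sketch for the function above, assuming 2-D (N, D) inputs as the sum over axis=1 implies:

import torch

pred = torch.randn(4, 8)
target = torch.randn(4, 8)
loss = smooth_l1_loss(pred, target, beta=1.0)  # one summed loss per row
print(loss.shape)                              # torch.Size([4])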
Code Example #4
File: jtnn_vae.py Project: xueeinstein/PaddleHelix
 def encode_latent(self, jtenc_holder, mpn_holder):
     """Encode latent space"""
     tree_vecs, _ = self.jtnn(*jtenc_holder)
     mol_vecs = self.mpn(**mpn_holder)
     tree_mean = self.T_mean(tree_vecs)
     mol_mean = self.G_mean(mol_vecs)
     tree_var = -paddle.abs(self.T_var(tree_vecs))
     mol_var = -paddle.abs(self.G_var(mol_vecs))
     return paddle.concat([tree_mean, mol_mean], axis=1), paddle.concat([tree_var, mol_var], axis=1)
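The negated paddle.abs applied to T_var and G_var appears to clamp the predicted log-variance to be non-positive, i.e. variance at most 1. A minimal illustration with made-up values:

import paddle

raw = paddle.to_tensor([-0.3, 0.7, 2.1])
log_var = -paddle.abs(raw)            # always <= 0, so exp(log_var) <= 1
print(paddle.exp(log_var).numpy())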
Code Example #5
def compute_g_loss(nets,
                   w_hpf,
                   lambda_sty,
                   lambda_ds,
                   lambda_cyc,
                   x_real,
                   y_org,
                   y_trg,
                   z_trgs=None,
                   x_refs=None,
                   masks=None):
    assert (z_trgs is None) != (x_refs is None)
    if z_trgs is not None:
        z_trg, z_trg2 = z_trgs
    if x_refs is not None:
        x_ref, x_ref2 = x_refs

    # adversarial loss
    if z_trgs is not None:
        s_trg = nets['mapping_network'](z_trg, y_trg)
    else:
        s_trg = nets['style_encoder'](x_ref, y_trg)

    x_fake = nets['generator'](x_real, s_trg, masks=masks)
    out = nets['discriminator'](x_fake, y_trg)
    loss_adv = adv_loss(out, 1)

    # style reconstruction loss
    s_pred = nets['style_encoder'](x_fake, y_trg)
    loss_sty = paddle.mean(paddle.abs(s_pred - s_trg))

    # diversity sensitive loss
    if z_trgs is not None:
        s_trg2 = nets['mapping_network'](z_trg2, y_trg)
    else:
        s_trg2 = nets['style_encoder'](x_ref2, y_trg)
    x_fake2 = nets['generator'](x_real, s_trg2, masks=masks)
    loss_ds = paddle.mean(paddle.abs(x_fake - x_fake2))

    # cycle-consistency loss
    masks = nets['fan'].get_heatmap(x_fake) if w_hpf > 0 else None
    s_org = nets['style_encoder'](x_real, y_org)
    x_rec = nets['generator'](x_fake, s_org, masks=masks)
    loss_cyc = paddle.mean(paddle.abs(x_rec - x_real))

    loss = loss_adv + lambda_sty * loss_sty \
        - lambda_ds * loss_ds + lambda_cyc * loss_cyc
    return loss, {
        'adv': loss_adv.numpy(),
        'sty': loss_sty.numpy(),
        'ds': loss_ds.numpy(),
        'cyc': loss_cyc.numpy()
    }
Code Example #6
 def _update_masks(self):
     for name, sub_layer in self.model.named_sublayers():
         if not self._should_prune_layer(sub_layer):
             continue
         for param in sub_layer.parameters(include_sublayers=False):
             if param.name in self.skip_params:
                 continue
             mask = self.masks.get(param.name)
             if self.local_sparsity:
                 bool_tmp = (paddle.abs(param) >=
                             self.thresholds[param.name])
             else:
                 bool_tmp = (paddle.abs(param) >= self.threshold)
             paddle.assign(bool_tmp, output=mask)
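The thresholding that produces each mask is easy to check in isolation. A minimal sketch with a made-up parameter and a hypothetical threshold:

import paddle

param = paddle.to_tensor([0.05, -0.4, 0.001, 0.9])
threshold = 0.1                        # hypothetical sparsity threshold
mask = paddle.abs(param) >= threshold  # keep only large-magnitude weights
print(mask.numpy())                    # [False  True False  True]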
Code Example #7
File: masking.py Project: gkbxs/Parakeet
def feature_mask(input, axis, dtype="bool"):
    """Compute mask from input features.
    
    For input features represented as batched feature vectors, those vectors
    whose entries are all zeros are considered padding vectors.

    Parameters
    ----------
    input : Tensor [dtype: float]
        The input tensor which represents features.
        
    axis : int
        The index of the feature dimension in ``input``. Other dimensions are
        considered ``spatial`` dimensions.
        
    dtype : str, optional
        Data type of the generated mask, by default "bool"

    Returns
    -------
    Tensor
        The generated mask with ``spatial`` shape as mentioned above.
        
        It has one less dimension than ``input`` does.
    """
    feature_sum = paddle.sum(paddle.abs(input), axis)
    return paddle.cast(feature_sum != 0, dtype)
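A usage sketch for feature_mask, assuming a (batch, time, feature) layout with some hypothetical padded frames:

import paddle

x = paddle.rand([4, 10, 80])     # batch, time, feature
x[0, 5:] = 0.                    # pretend the last frames of item 0 are padding
mask = feature_mask(x, axis=-1)
print(mask.shape)                # [4, 10]: one flag per (batch, time) position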
Code Example #8
def supervised_chi_loss(ret, batch, value, config):
    """Computes loss for direct chi angle supervision.

    Jumper et al. (2021) Suppl. Alg. 27 "torsionAngleLoss"

    Args:
        ret: Dictionary to write outputs into, needs to contain 'loss'.
        batch: Batch, needs to contain 'seq_mask', 'chi_mask', 'chi_angles'.
        value: Dictionary containing structure module output, needs to contain
            value['sidechains']['angles_sin_cos'] for angles and
            value['sidechains']['unnormalized_angles_sin_cos'] for unnormalized
            angles.
        config: Configuration of loss, should contain 'chi_weight' and
            'angle_norm_weight', 'angle_norm_weight' scales angle norm term,
            'chi_weight' scales torsion term.
    """
    eps = 1e-6
    
    sequence_mask = batch['seq_mask']
    num_res = sequence_mask.shape[1]
    batch_size = sequence_mask.shape[0]
    chi_mask = batch['chi_mask']
    pred_angles = paddle.reshape(value['sidechains']['angles_sin_cos'], [batch_size, -1, num_res, 7, 2])
    pred_angles = pred_angles[:, :, :, 3:]

    residue_type_one_hot = paddle.nn.functional.one_hot(batch['aatype_index'], 
                            num_classes=residue_constants.restype_num + 1)
    chi_pi_periodic = paddle.einsum('nijk, nkl->nijl', residue_type_one_hot[:, None, ...], 
                            paddle.to_tensor(residue_constants.chi_pi_periodic)[None])

    sin_cos_true_chi = batch['chi_angles_sin_cos'][:, None, ...]

    # This is -1 if chi is pi-periodic and +1 if it's 2pi-periodic
    shifted_mask = (1 - 2 * chi_pi_periodic)[..., None]
    sin_cos_true_chi_shifted = shifted_mask * sin_cos_true_chi

    sq_chi_error = paddle.sum(squared_difference(sin_cos_true_chi, pred_angles), axis=-1)
    sq_chi_error_shifted = paddle.sum(squared_difference(sin_cos_true_chi_shifted, pred_angles), axis=-1)
    sq_chi_error = paddle.minimum(sq_chi_error, sq_chi_error_shifted)

    sq_chi_loss_tmp = []
    for i in range(batch_size):
        sq_chi_loss_i = utils.mask_mean(mask=paddle.unsqueeze(chi_mask[i], axis=0), value=sq_chi_error[i])
        sq_chi_loss_tmp.append(sq_chi_loss_i)
    sq_chi_loss = paddle.to_tensor(sq_chi_loss_tmp, stop_gradient=False)
    sq_chi_loss = paddle.squeeze(sq_chi_loss, axis=-1)
    ret['chi_loss'] = sq_chi_loss
    ret['loss'] += config.chi_weight * sq_chi_loss

    unnormed_angles = paddle.reshape(value['sidechains']['unnormalized_angles_sin_cos'], [batch_size, -1, num_res, 7, 2])
    angle_norm = paddle.sqrt(paddle.sum(paddle.square(unnormed_angles), axis=-1) + eps)
    norm_error = paddle.abs(angle_norm - 1.)
    angle_norm_loss_tmp = []
    for i in range(batch_size):
        angle_norm_loss_i = utils.mask_mean(mask=paddle.unsqueeze(sequence_mask[i], axis=[0,2]), value=norm_error[i])
        angle_norm_loss_tmp.append(angle_norm_loss_i)
    angle_norm_loss = paddle.to_tensor(angle_norm_loss_tmp, stop_gradient=False)
    angle_norm_loss = paddle.squeeze(angle_norm_loss, axis=-1)
    ret['angle_norm_loss'] = angle_norm_loss
    ret['loss'] += config.angle_norm_weight * angle_norm_loss
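The paddle.minimum over the plain and shifted errors is what lets pi-periodic chi angles score correctly. A minimal sketch of that trick with a made-up angle and an assumed squared_difference helper (not shown in the snippet above):

import paddle

def squared_difference(x, y):
    # assumed helper matching the name used in the loss above
    return paddle.square(x - y)

sin_cos_true = paddle.to_tensor([[0.6, -0.8]])   # made-up sin/cos pair
pred = paddle.to_tensor([[-0.6, 0.8]])           # the same angle shifted by pi
err = paddle.sum(squared_difference(sin_cos_true, pred), axis=-1)
err_shifted = paddle.sum(squared_difference(-sin_cos_true, pred), axis=-1)
print(paddle.minimum(err, err_shifted).numpy())  # ~0: the pi shift is forgiven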
Code Example #9
 def set_static_masks(self):
     for name, sub_layer in self.model.named_sublayers():
         if not self._should_prune_layer(sub_layer):
             continue
         for param in sub_layer.parameters(include_sublayers=False):
             mask = self.masks.get(param.name)
             bool_tmp = (paddle.abs(param) != 0.0)
             paddle.assign(bool_tmp, output=mask)
Code Example #10
File: losses.py Project: haoyuying/PaddleSeg
    def forward(self, pred, label, sample_weight=None):
        one_hot = label > 0.5
        sample_weight = label != self._ignore_label

        if not self._from_logits:
            pred = F.sigmoid(pred)
        alpha = paddle.where(one_hot, self._alpha * sample_weight,
                             (1 - self._alpha) * sample_weight)
        pt = paddle.where(one_hot, 1.0 - paddle.abs(label - pred),
                          paddle.ones_like(pred))

        beta = (1 - pt)**self._gamma

        loss = -alpha * beta * paddle.log(
            paddle.minimum(pt + self._eps, paddle.ones(1, dtype='float32')))
        loss = self._weight * (loss * sample_weight)

        if self._size_average:
            tsum = paddle.sum(label == 1,
                              axis=misc.get_dims_with_exclusion(
                                  len(label.shape), self._batch_axis))
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape),
                                  self._batch_axis)) / (tsum + self._eps)
        else:
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape), self._batch_axis))
        return self._scale * loss
Code Example #11
    def forward(self, predicts, labels):
        l_score, l_geo, l_mask = labels[1:]
        f_score = predicts['f_score']
        f_geo = predicts['f_geo']

        dice_loss = self.dice_loss(f_score, l_score, l_mask)

        # smooth L1 loss
        channels = 8
        l_geo_split = paddle.split(
            l_geo, num_or_sections=channels + 1, axis=1)
        f_geo_split = paddle.split(f_geo, num_or_sections=channels, axis=1)
        smooth_l1 = 0
        for i in range(0, channels):
            geo_diff = l_geo_split[i] - f_geo_split[i]
            abs_geo_diff = paddle.abs(geo_diff)
            smooth_l1_sign = paddle.less_than(abs_geo_diff, l_score)
            smooth_l1_sign = paddle.cast(smooth_l1_sign, dtype='float32')
            in_loss = abs_geo_diff * abs_geo_diff * smooth_l1_sign + \
                (abs_geo_diff - 0.5) * (1.0 - smooth_l1_sign)
            out_loss = l_geo_split[-1] / channels * in_loss * l_score
            smooth_l1 += out_loss
        smooth_l1_loss = paddle.mean(smooth_l1 * l_score)

        dice_loss = dice_loss * 0.01
        total_loss = dice_loss + smooth_l1_loss
        losses = {"loss":total_loss, \
                  "dice_loss":dice_loss,\
                  "smooth_l1_loss":smooth_l1_loss}
        return losses
Code Example #12
def exponential(M: int,
                center=None,
                tau=1.,
                sym: bool = True,
                dtype: str = 'float64') -> Tensor:
    """Compute an exponential (or Poisson) window.
    Parameters:
        M(int): window size.
        center(float): the center of the window; must be None if sym is True.
            Defaults to (M - 1) / 2.
        tau(float): the window-specific decay parameter.
        sym(bool): whether to return a symmetric window.
            The default value is True.
        dtype(str): the datatype of the returned tensor.
    Returns:
        Tensor: the window tensor
    Notes:
        This function is consistent with scipy.signal.windows.exponential().
    """
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if _len_guards(M):
        return paddle.ones((M, ), dtype=dtype)
    M, needs_trunc = _extend(M, sym)

    if center is None:
        center = (M - 1) / 2

    n = paddle.arange(0, M, dtype=dtype)
    w = paddle.exp(-paddle.abs(n - center) / tau)

    return _truncate(w, needs_trunc)
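Stripped of the helper functions, the window itself is a two-liner. A minimal sketch with assumed M and tau values:

import paddle

M, tau = 8, 3.
center = (M - 1) / 2
n = paddle.arange(0, M, dtype='float64')
w = paddle.exp(-paddle.abs(n - center) / tau)  # decays away from the center
print(w.numpy())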
Code Example #13
    def sample_data(self, layer, tensors):
        assert isinstance(tensors, tuple)

        if self.abs_max_vals == []:
            abs_max_vals = [abs_max_value(t) for t in tensors]
            self.abs_max_vals = abs_max_vals

            for idx, tensor in enumerate(tensors):
                if abs_max_vals[idx] == 0.0:
                    self.hists.append(None)
                else:
                    hist, _ = np.histogram(paddle.abs(tensor).numpy(),
                                           range=(0., abs_max_vals[idx]),
                                           bins=self.bins)
                    hist = hist.astype(np.float32)
                    self.hists.append(hist)
        else:
            assert len(self.abs_max_vals) == len(tensors)
            assert len(self.hists) == len(tensors)

            for idx, tensor in enumerate(tensors):
                new_abs_max, new_hist = combine_abs_max_and_hist(
                    tensor, self.abs_max_vals[idx], self.hists[idx], self.bins,
                    self.upsample_bins)
                self.abs_max_vals[idx] = new_abs_max
                self.hists[idx] = new_hist
Code Example #14
 def test_all_positive(self):
     for dtype in self._dtypes:
         x = 1 + 10 * np.random.random([13, 3, 3]).astype(dtype)
         for place in self._places:
             with dg.guard(place):
                 y = paddle.abs(paddle.to_tensor(x))
                 self.assertTrue(np.allclose(np.abs(x), y.numpy()))
Code Example #15
    def get_loss(self, pred_scores, pred_deltas, anchors, inputs):
        """
        pred_scores (list[Tensor]): Multi-level scores prediction
        pred_deltas (list[Tensor]): Multi-level deltas prediction
        anchors (list[Tensor]): Multi-level anchors
        inputs (dict): ground truth info, including im, gt_bbox, gt_score
        """
        anchors = [paddle.reshape(a, shape=(-1, 4)) for a in anchors]
        anchors = paddle.concat(anchors)

        scores = [
            paddle.reshape(paddle.transpose(v, perm=[0, 2, 3, 1]),
                           shape=(v.shape[0], -1, 1)) for v in pred_scores
        ]
        scores = paddle.concat(scores, axis=1)

        deltas = [
            paddle.reshape(paddle.transpose(v, perm=[0, 2, 3, 1]),
                           shape=(v.shape[0], -1, 4)) for v in pred_deltas
        ]
        deltas = paddle.concat(deltas, axis=1)

        score_tgt, bbox_tgt, loc_tgt, norm = self.rpn_target_assign(
            inputs, anchors)

        scores = paddle.reshape(x=scores, shape=(-1, ))
        deltas = paddle.reshape(x=deltas, shape=(-1, 4))

        score_tgt = paddle.concat(score_tgt)
        score_tgt.stop_gradient = True

        pos_mask = score_tgt == 1
        pos_ind = paddle.nonzero(pos_mask)

        valid_mask = score_tgt >= 0
        valid_ind = paddle.nonzero(valid_mask)

        # cls loss
        if valid_ind.shape[0] == 0:
            loss_rpn_cls = paddle.zeros([1], dtype='float32')
        else:
            score_pred = paddle.gather(scores, valid_ind)
            score_label = paddle.gather(score_tgt, valid_ind).cast('float32')
            score_label.stop_gradient = True
            loss_rpn_cls = F.binary_cross_entropy_with_logits(
                logit=score_pred, label=score_label, reduction="sum")

        # reg loss
        if pos_ind.shape[0] == 0:
            loss_rpn_reg = paddle.zeros([1], dtype='float32')
        else:
            loc_pred = paddle.gather(deltas, pos_ind)
            loc_tgt = paddle.concat(loc_tgt)
            loc_tgt = paddle.gather(loc_tgt, pos_ind)
            loc_tgt.stop_gradient = True
            loss_rpn_reg = paddle.abs(loc_pred - loc_tgt).sum()
        return {
            'loss_rpn_cls': loss_rpn_cls / norm,
            'loss_rpn_reg': loss_rpn_reg / norm
        }
Code Example #16
    def forward(self, true_binary, rule_masks, raw_logits):
        """
        tbd
        """
        if cmd_args.loss_type == 'binary':
            exp_pred = paddle.exp(raw_logits) * rule_masks

            norm = paddle.sum(exp_pred, axis=2, keepdim=True)
            prob = paddle.divide(exp_pred, norm)

            return F.binary_cross_entropy(
                prob, true_binary) * cmd_args.max_decode_steps

        if cmd_args.loss_type == 'perplexity':
            my_perp_loss = MyPerpLoss()
            return my_perp_loss(true_binary, rule_masks, raw_logits)

        if cmd_args.loss_type == 'vanilla':
            exp_pred = paddle.exp(raw_logits) * rule_masks + 1e-30
            norm = paddle.sum(exp_pred, 2, keepdim=True)
            prob = paddle.divide(exp_pred, norm)

            ll = paddle.abs(paddle.sum(true_binary * prob, 2))
            mask = 1 - rule_masks[:, :, -1]
            logll = mask * paddle.log(ll)

            loss = -paddle.sum(logll) / true_binary.shape[1]

            return loss
        print('unknown loss type %s' % cmd_args.loss_type)
        raise NotImplementedError
Code Example #17
  def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors] tensor
        representing the value of the loss function.
    """
    # print("++++++++++++++++++++++++++++++++++++START CAL_L1_LOSS++++++++++++++++++++++++++++++++++++++++++++++++")
    diff = prediction_tensor - target_tensor
    if self._code_weights is not None:
      code_weights = self._code_weights.astype(prediction_tensor.dtype)
      diff = code_weights.reshape((1, 1, -1)) * diff
    abs_diff = paddle.abs(diff)
    abs_diff_lt_1 = paddle.less_equal(abs_diff, paddle.to_tensor(1 / (self._sigma**2))).astype(abs_diff.dtype)
    loss = abs_diff_lt_1 * 0.5 * paddle.pow(abs_diff * self._sigma, 2) \
      + (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
    if self._codewise:
      anchorwise_smooth_l1norm = loss
      if weights is not None:
        anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
    else:
      anchorwise_smooth_l1norm = paddle.sum(loss, 2)#  * weights
      if weights is not None:
        anchorwise_smooth_l1norm *= weights
    # print("++++++++++++++++++++++++++++++++++++OVER CAL_L1_LOSS++++++++++++++++++++++++++++++++++++++++++++++++")
    return anchorwise_smooth_l1norm
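The sigma-parameterized smooth L1 above switches from quadratic to linear at |diff| = 1/sigma**2. A minimal sketch of that branch logic with a made-up sigma:

import paddle

sigma = 3.0                                  # hypothetical value
abs_diff = paddle.to_tensor([0.05, 1 / sigma**2, 2.0])
lt = (abs_diff <= 1 / sigma**2).astype(abs_diff.dtype)
loss = lt * 0.5 * (abs_diff * sigma)**2 + (1 - lt) * (abs_diff - 0.5 / sigma**2)
print(loss.numpy())   # quadratic below the breakpoint, linear above it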
Code Example #18
    def abs_max_run(self, reader, exe, step=None, loss_name=None):
        fetch_list = []
        with paddle.static.program_guard(self.program):
            for act_name in self.real_names:
                act = self.program.global_block().var(act_name)
                act = paddle.max(paddle.abs(act), name=act_name + "_reduced")
                fetch_list.append(act_name + "_reduced.tmp_0")

        if not hasattr(self.program, '_program'):
            # Compile the native program to speed up
            program = paddle.static.CompiledProgram(
                self.program).with_data_parallel(loss_name=loss_name)
        else:
            # already compiled; use it as-is so `program` is always defined
            program = self.program
        for idx, data in enumerate(reader):
            vars_np = exe.run(program=program,
                              feed=data,
                              fetch_list=fetch_list)
            vars_np = [np.max(var) for var in vars_np]
            mapped_vars_np = dict(zip(self.real_names, vars_np))
            values = self.update(mapped_vars_np)

            if idx % 10 == 0:
                _logger.info("Collecting..., Step: {}".format(idx))

            if step is not None and idx + 1 >= step:
                break
        return values
Code Example #19
def mu_law_encode(x: Tensor, mu: int = 256, quantized: bool = True) -> Tensor:
    """Mu-law encoding.
    Compute the mu-law decoding given an input code.
    When quantized is True, the result will be converted to
    integer in range [0,mu-1]. Otherwise, the resulting signal
    is in range [-1,1]

    Parameters:
        x(Tensor): the input tensor of arbitrary shape to be encoded.
        mu(int): the maximum value (depth) of encoded signal. The signal will be
        clip to be in range [0,mu-1].
        quantized(bool): indicates whether the signal will be quantized to integers.

    Examples:
        .. code-block:: python

        import paddle
        import paddleaudio.functional as F
        F.mu_law_encode(paddle.randn((2, 8)))
        >> Tensor(shape=[2, 8], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
                [[0, 5, 30, 255, 255, 255, 12, 13],
                [0, 241, 8, 243, 7, 35, 84, 228]])

    Reference:
        https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
    """
    mu = mu - 1
    y = paddle.sign(x) * paddle.log1p(mu * paddle.abs(x)) / math.log1p(mu)
    if quantized:
        y = (y + 1) / 2 * mu + 0.5  # convert to [0 , mu-1]
        y = paddle.clip(y, min=0, max=mu).astype('int32')
    return y
Code Example #20
File: det_basic_loss.py Project: omar16100/PaddleOCR
 def forward(self, pred, gt, mask):
     """
     Mask L1 Loss
     """
     loss = (paddle.abs(pred - gt) * mask).sum() / (mask.sum() + self.eps)
     loss = paddle.mean(loss)
     return loss
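Inlined with a hypothetical eps value, the masked L1 above reduces to a single expression:

import paddle

eps = 1e-6                                   # stand-in for self.eps
pred = paddle.rand([1, 1, 32, 32])
gt = paddle.rand([1, 1, 32, 32])
mask = (paddle.rand([1, 1, 32, 32]) > 0.5).astype('float32')
loss = (paddle.abs(pred - gt) * mask).sum() / (mask.sum() + eps)
print(float(loss))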
Code Example #21
    def validation_step(self, batch: int, batch_idx: int) -> dict:
        '''
        One step for validation, which should be called as forward computation.

        Args:
            batch(list[paddle.Tensor]): The one batch data, which contains images and labels.
            batch_idx(int): The index of batch.

        Returns:
            results(dict) : The model outputs, such as metrics.
        '''
        if Version(paddle.__version__) >= '2.1' or Version(
                paddle.__version__) == '0.0.0':
            img = self.preprocess(batch)
        else:
            img = self.preprocess(batch[0])

        out_class, out_reg = self(img['A'], img['hint_B'], img['mask_B'])

        # loss
        loss_ce = F.cross_entropy(out_class,
                                  img['real_B_enc'][:, :1, :, :],
                                  axis=1)
        loss_ce = paddle.mean(loss_ce)
        loss_G_L1_reg = paddle.sum(paddle.abs(img['B'] - out_reg),
                                   axis=1,
                                   keepdim=True)
        loss_G_L1_reg = paddle.mean(loss_G_L1_reg)
        loss = loss_ce + loss_G_L1_reg
        return {'loss': loss}
Code Example #22
File: modeling.py Project: joey12300/PaddleNLP
def relative_position_bucket(relative_position,
                             bidirectional=True,
                             num_buckets=32,
                             max_distance=128):
    ret = 0
    if bidirectional:
        num_buckets //= 2
        ret += (relative_position > 0).astype(paddle.int64) * num_buckets
        n = paddle.abs(relative_position)
    else:
        n = paddle.maximum(-relative_position, paddle.zeros_like(relative_position))
    # now n is in the range [0, inf)
    # half of the buckets are for exact increments in positions
    max_exact = num_buckets // 2
    is_small = n < max_exact

    # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
    val_if_large = max_exact + (paddle.log(
        n.astype(paddle.float32) / max_exact) / math.log(max_distance /
                                                         max_exact) *
                                (num_buckets - max_exact)).astype(paddle.int64)

    val_if_large = paddle.minimum(
        val_if_large, paddle.full_like(val_if_large, num_buckets - 1))

    ret += paddle.where(is_small, n, val_if_large)
    return ret
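A usage sketch for relative_position_bucket with made-up offsets and small bucket settings (the function's module is assumed to import math and paddle):

import paddle

rel = paddle.arange(-6, 7).unsqueeze(0)   # hypothetical relative offsets
buckets = relative_position_bucket(
    rel, bidirectional=True, num_buckets=8, max_distance=16)
print(buckets.numpy())  # nearby offsets get exact buckets, far ones log-spaced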
Code Example #23
def combine_abs_max_and_hist(tensor, origin_max, origin_hist, bins,
                             upsample_bins):
    """
    """

    new_max = abs_max_value(tensor)

    if new_max == 0.0:
        return origin_max, origin_hist
    elif origin_max == 0.0:
        new_hist, _ = np.histogram(paddle.abs(tensor).numpy(),
                                   range=(0, new_max),
                                   bins=bins)
        new_hist = new_hist.astype(np.float32)
        return new_max, new_hist
    elif new_max <= origin_max:
        new_hist, _ = np.histogram(paddle.abs(tensor).numpy(),
                                   range=(0, origin_max),
                                   bins=bins)
        new_hist = new_hist.astype(np.float32)
        new_hist += origin_hist
        return origin_max, new_hist
    else:
        # bin_width = origin_max / (bins * upsample_bins)
        #           = new_max / (bins * downsample_bins)
        bin_width = origin_max / (bins * upsample_bins)
        downsample_bins = int(math.ceil(new_max / (bins * bin_width)))
        new_max = bins * bin_width * downsample_bins

        upsampled_hist = np.repeat(origin_hist, upsample_bins)
        expanded_hist = np.zeros((bins * downsample_bins), dtype=np.float32)
        expanded_hist[0:bins * upsample_bins] = upsampled_hist
        cumsumed_hist = np.cumsum(expanded_hist,
                                  dtype=np.float64)[downsample_bins -
                                                    1::downsample_bins]
        shift_cumsumed_hist = np.zeros((bins), dtype=np.float64)
        shift_cumsumed_hist[1:] = cumsumed_hist[0:-1]
        sampled_hist = (cumsumed_hist - shift_cumsumed_hist) / upsample_bins
        sampled_hist = sampled_hist.astype(np.float32)

        new_hist, _ = np.histogram(paddle.abs(tensor).numpy(),
                                   range=(0, new_max),
                                   bins=bins)
        new_hist = new_hist.astype(np.float32)
        new_hist += sampled_hist

        return new_max, new_hist
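The commented bin_width identity pins down the resampling arithmetic. A quick numeric check with made-up values:

import math

bins, upsample_bins = 4, 2
origin_max, new_max = 8.0, 12.0
bin_width = origin_max / (bins * upsample_bins)                  # 1.0
downsample_bins = int(math.ceil(new_max / (bins * bin_width)))   # 3
print(bins * bin_width * downsample_bins)                        # adjusted max: 12.0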
Code Example #24
    def get_loss(self, scores, deltas, targets, rois, bbox_weight):
        """
        scores (Tensor): scores from bbox head outputs
        deltas (Tensor): deltas from bbox head outputs
        targets (list[List[Tensor]]): bbox targets containing tgt_labels, tgt_bboxes and tgt_gt_inds
        rois (List[Tensor]): RoIs generated in each batch
        bbox_weight: weights used by bbox2delta when encoding the regression
            targets
        """
        # TODO: better pass args
        tgt_labels, tgt_bboxes, tgt_gt_inds = targets
        tgt_labels = paddle.concat(
            tgt_labels) if len(tgt_labels) > 1 else tgt_labels[0]
        tgt_labels = tgt_labels.cast('int64')
        tgt_labels.stop_gradient = True
        loss_bbox_cls = F.cross_entropy(input=scores,
                                        label=tgt_labels,
                                        reduction='mean')
        # bbox reg

        cls_agnostic_bbox_reg = deltas.shape[1] == 4

        fg_inds = paddle.nonzero(
            paddle.logical_and(tgt_labels >= 0,
                               tgt_labels < self.num_classes)).flatten()

        cls_name = 'loss_bbox_cls'
        reg_name = 'loss_bbox_reg'
        loss_bbox = {}

        if cls_agnostic_bbox_reg:
            reg_delta = paddle.gather(deltas, fg_inds)
        else:
            fg_gt_classes = paddle.gather(tgt_labels, fg_inds)

            reg_row_inds = paddle.arange(fg_gt_classes.shape[0]).unsqueeze(1)
            reg_row_inds = paddle.tile(reg_row_inds, [1, 4]).reshape([-1, 1])

            reg_col_inds = 4 * fg_gt_classes.unsqueeze(1) + paddle.arange(4)

            reg_col_inds = reg_col_inds.reshape([-1, 1])
            reg_inds = paddle.concat([reg_row_inds, reg_col_inds], axis=1)

            reg_delta = paddle.gather(deltas, fg_inds)
            reg_delta = paddle.gather_nd(reg_delta, reg_inds).reshape([-1, 4])
        rois = paddle.concat(rois) if len(rois) > 1 else rois[0]
        tgt_bboxes = paddle.concat(
            tgt_bboxes) if len(tgt_bboxes) > 1 else tgt_bboxes[0]

        reg_target = bbox2delta(rois, tgt_bboxes, bbox_weight)
        reg_target = paddle.gather(reg_target, fg_inds)
        reg_target.stop_gradient = True

        loss_bbox_reg = paddle.abs(reg_delta -
                                   reg_target).sum() / tgt_labels.shape[0]

        loss_bbox[cls_name] = loss_bbox_cls
        loss_bbox[reg_name] = loss_bbox_reg

        return loss_bbox
Code Example #25
def cubic(x):
    absx = paddle.abs(x)
    absx2 = absx**2
    absx3 = absx**3
    temp1 = paddle.cast((absx <= 1), absx.dtype)
    temp2 = paddle.cast((absx > 1), absx.dtype) * paddle.cast(
        (absx <= 2), absx.dtype)
    return (1.5 * absx3 - 2.5 * absx2 +
            1) * temp1 + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * temp2
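A quick check of the bicubic kernel above at a few made-up offsets:

import paddle

x = paddle.to_tensor([-2.5, -1.0, 0.0, 0.5, 1.5, 3.0])
print(cubic(x).numpy())  # 1 at x=0, 0 at |x| in {1, 2}, 0 beyond |x| > 2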
Code Example #26
File: losses.py Project: haoyuying/PaddleSeg
    def forward(self, pred, label):
        one_hot = label > 0.5
        sample_weight = label != self._ignore_label

        sample_weight = sample_weight.astype('float32')

        if not self._from_logits:
            pred = F.sigmoid(pred)
        alpha = paddle.where(one_hot, self._alpha * sample_weight,
                             (1 - self._alpha) * sample_weight)
        pt = paddle.where(sample_weight.astype('bool'),
                          1.0 - paddle.abs(label - pred),
                          paddle.ones_like(pred))
        beta = (1 - pt)**self._gamma
        sw_sum = paddle.sum(sample_weight, axis=(-2, -1), keepdim=True)
        beta_sum = paddle.sum(beta, axis=(-2, -1), keepdim=True)
        mult = sw_sum / (beta_sum + self._eps)

        if self._detach_delimeter:
            mult = mult.detach()
        beta = beta * mult
        with paddle.no_grad():
            ignore_area = paddle.sum(
                (label == self._ignore_label).astype('float32'),
                axis=tuple(range(1, len(label.shape)))).numpy()
            sample_mult = paddle.mean(mult,
                                      axis=tuple(range(1, len(
                                          mult.shape)))).numpy()
            if np.any(ignore_area == 0):
                self._k_sum = 0.9 * self._k_sum + 0.1 * sample_mult[
                    ignore_area == 0].mean()
                beta_pmax = paddle.max(paddle.flatten(beta, 1), axis=1)
                beta_pmax = float(paddle.mean(beta_pmax))
                self._m_max = 0.8 * self._m_max + 0.2 * beta_pmax

        loss_mask = pt + self._eps < 1
        loss_mask = loss_mask.astype('float32')
        pt_mask = (pt + self._eps) * loss_mask + (1 - loss_mask) * paddle.ones(
            pt.shape)
        loss = -alpha * beta * paddle.log(pt_mask)
        loss = self._weight * (loss * sample_weight)

        if self._size_average:
            bsum = paddle.sum(sample_weight,
                              axis=misc.get_dims_with_exclusion(
                                  len(sample_weight.shape), self._batch_axis))
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape),
                                  self._batch_axis)) / (bsum + self._eps)
        else:
            loss = paddle.sum(loss,
                              axis=misc.get_dims_with_exclusion(
                                  len(loss.shape), self._batch_axis))

        return loss
Code Example #27
File: test_eigh_op.py Project: sandyhouse/Paddle
 def test_eigh_grad(self):
     paddle.disable_static()
     x = paddle.to_tensor(self.complex_symm, stop_gradient=False)
     w, v = paddle.linalg.eigh(x)
     (w.sum() + paddle.abs(v).sum()).backward()
     np.testing.assert_allclose(
         abs(x.grad.numpy()),
         abs(x.grad.numpy().conj().transpose(self.trans_dims)),
         rtol=self.rtol,
         atol=self.atol)
Code Example #28
File: line_search.py Project: sandyhouse/Paddle
        def body_zoom(j, done_zoom, a_lo, phi_lo, derphi_lo, derf_lo, a_hi,
                      phi_hi, derphi_hi):
            aj = cubic_interpolation_(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
                                      derphi_hi)  # 21
            min_change = 0.1 * paddle.abs(a_hi - a_lo)
            pred = paddle.minimum(paddle.abs(aj - a_lo),
                                  paddle.abs(aj - a_hi)) < min_change
            aj = paddle.static.nn.cond(pred, lambda: 0.5 * (a_lo + a_hi),
                                       lambda: aj)

            phi_j, derf_j, derphi_j = phi_and_derphi(aj)

            def true_fn():
                # use assign to modify the variable in-place
                paddle.assign(aj, a_hi)
                paddle.assign(phi_j, phi_hi)
                paddle.assign(derphi_j, derphi_hi)

            def false_fn(a_lo, done_zoom):
                pred3 = (paddle.abs(derphi_j) <= -c2 * derphi_0)
                paddle.assign(pred3, done_zoom)

                def true_fn():
                    paddle.assign(a_lo, a_hi)
                    paddle.assign(phi_lo, phi_hi)
                    paddle.assign(derphi_lo, derphi_hi)

                pred4 = ~done_zoom & (derphi_j * (a_hi - a_lo) >= 0)
                paddle.static.nn.cond(pred4, true_fn, None)

                paddle.assign(aj, a_lo)
                paddle.assign(phi_j, phi_lo)
                paddle.assign(derphi_j, derphi_lo)
                paddle.assign(derf_j, derf_lo)

            pred2 = (phi_j > phi_0 + c1 * aj * derphi_0) | (phi_j >= phi_lo)
            paddle.static.nn.cond(pred2, true_fn,
                                  lambda: false_fn(a_lo, done_zoom))
            j = paddle.static.nn.cond(done_zoom, lambda: j, lambda: j + 1)
            return [
                j, done_zoom, a_lo, phi_lo, derphi_lo, derf_lo, a_hi, phi_hi,
                derphi_hi
            ]
Code Example #29
 def test_full_matrices(self):
     mat_shape = (2, 3)
     mat = np.random.random(mat_shape).astype("float64")
     x = paddle.to_tensor(mat)
     u, s, vh = paddle.linalg.svd(x, full_matrices=True)
     assert (u.shape == [2, 2])
     assert (vh.shape == [3, 3])
     x_recover = u.matmul(paddle.diag(s)).matmul(vh[0:2])
     if ((paddle.abs(x_recover - x) > 1e-4).any()):
         raise RuntimeError("mat can't be recovered\n")
Code Example #30
 def forward(self, pred, target, reduction='none'):
     """forward function, based on fvcore.
     Args:
         pred (Tensor): prediction tensor
         target (Tensor): target tensor, pred.shape must be the same as target.shape
         reduction (str): the way to reduce loss, one of (none, sum, mean)
     """
     assert reduction in ('none', 'sum', 'mean')
     target = target.detach()
     if self.beta < 1e-5:
         loss = paddle.abs(pred - target)
     else:
         n = paddle.abs(pred - target)
         cond = n < self.beta
         loss = paddle.where(cond, 0.5 * n ** 2 / self.beta, n - 0.5 * self.beta)
     if reduction == 'mean':
         loss = loss.mean() if loss.size > 0 else 0.0 * loss.sum()
     elif reduction == 'sum':
         loss = loss.sum()
     return loss * self.loss_weight