Example #1
    def __call__(self, pred, target):
        """
        Calculate the loss
        Args:
            pred (Tensor): heatmap prediction
            target (Tensor): target for positive samples
        Returns:
            ct_focal_loss (Tensor): the focal loss used in CornerNet & CenterNet.
                Note that the values in target lie in [0, 1], since a Gaussian is
                used to soften the penalty, and values in [0, 1) are treated as
                negative examples.
        """
        fg_map = paddle.cast(target == 1, 'float32')
        fg_map.stop_gradient = True
        bg_map = paddle.cast(target < 1, 'float32')
        bg_map.stop_gradient = True

        neg_weights = paddle.pow(1 - target, 4) * bg_map
        pos_loss = 0 - paddle.log(pred) * paddle.pow(1 - pred,
                                                     self.gamma) * fg_map
        neg_loss = 0 - paddle.log(1 - pred) * paddle.pow(
            pred, self.gamma) * neg_weights
        pos_loss = paddle.sum(pos_loss)
        neg_loss = paddle.sum(neg_loss)

        fg_num = paddle.sum(fg_map)
        ct_focal_loss = (pos_loss + neg_loss) / (
            fg_num + paddle.cast(fg_num == 0, 'float32'))
        return ct_focal_loss * self.loss_weight
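
A minimal usage sketch for this loss; the wrapper name CTFocalLoss and its
gamma/loss_weight arguments are assumptions for illustration, not taken from
the source:

    import paddle
    import paddle.nn.functional as F

    loss_fn = CTFocalLoss(gamma=2.0, loss_weight=1.0)  # hypothetical wrapper class
    # predictions must lie strictly in (0, 1) so the logs stay finite
    pred = paddle.clip(F.sigmoid(paddle.randn([2, 80, 128, 128])), 1e-4, 1 - 1e-4)
    target = paddle.zeros([2, 80, 128, 128])
    target[:, 0, 64, 64] = 1.0  # one positive center; a Gaussian would fill its neighborhood
    loss = loss_fn(pred, target)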
Example #2
    def test_api(self):
        import paddle
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(name="x",
                              shape=[11, 17],
                              append_batch_size=False,
                              dtype="float32")
        res = fluid.layers.data(name="res",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_3 = paddle.pow(x, factor_1, out=res)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, out_3, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_3, res)
        assert np.array_equal(res_6, np.power(input, 3))
Example #3
def _neg_loss(pred, gt):
    '''Modified focal loss. Exactly the same as CornerNet.
    Runs faster and costs a little bit more memory.
    Arguments:
        pred (batch x c x h x w)
        gt (batch x c x h x w)
    '''
    # pos_inds = gt.eq(1).float()
    # neg_inds = gt.lt(1).float()
    pos_inds = gt.equal(paddle.ones(gt.shape, dtype=gt.dtype)).cast('float32')
    neg_inds = gt.less_than(paddle.ones(gt.shape,
                                        dtype=gt.dtype)).cast('float32')
    # neg_weights = torch.pow(1 - gt, 4)
    neg_weights = paddle.pow(1 - gt, 4)

    loss = 0

    # pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
    # neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
    pos_loss = paddle.log(pred) * paddle.pow(1 - pred, 2) * pos_inds
    neg_loss = paddle.log(1 - pred) * paddle.pow(pred,
                                                 2) * neg_weights * neg_inds

    # num_pos  = pos_inds.float().sum()
    num_pos = pos_inds.cast('float32').sum()
    pos_loss = pos_loss.sum()
    neg_loss = neg_loss.sum()

    if num_pos == 0:
        loss = loss - neg_loss
    else:
        loss = loss - (pos_loss + neg_loss) / num_pos
    return loss
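
A quick sanity check of _neg_loss on toy tensors (a sketch; pred is clipped
into (0, 1) so the logarithms stay finite):

    import paddle

    pred = paddle.clip(paddle.rand([2, 3, 8, 8]), 1e-4, 1 - 1e-4)
    gt = paddle.zeros([2, 3, 8, 8])
    gt[:, :, 4, 4] = 1.0  # one positive center per channel map
    print(_neg_loss(pred, gt))  # scalar focal loss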
Example #4
    def forward(self, prediction, target):
        """forward

        Args:
            prediction (paddle.Tensor): model prediction
            target (paddle.Tensor): ground truth

        Returns:
            paddle.Tensor: focal loss
        """
        positive_index = (target == 1).astype("float32")
        negative_index = (target < 1).astype("float32")

        negative_weights = paddle.pow(1 - target, self.beta)
        loss = 0.

        positive_loss = paddle.log(prediction) \
                        * paddle.pow(1 - prediction, self.alpha) * positive_index
        negative_loss = paddle.log(1 - prediction) \
                        * paddle.pow(prediction, self.alpha) * negative_weights * negative_index

        num_positive = positive_index.sum()
        positive_loss = positive_loss.sum()
        negative_loss = negative_loss.sum()

        if num_positive == 0:
            loss -= negative_loss
        else:
            loss -= (positive_loss + negative_loss) / num_positive

        return loss
Example #5
    def __init__(self, height=64, width=64, with_r=False, with_boundary=False):
        super(AddCoordsTh, self).__init__()
        self.with_r = with_r
        self.with_boundary = with_boundary

        with paddle.no_grad():
            x_coords = paddle.arange(height).unsqueeze(1).expand(
                (height, width)).astype('float32')
            y_coords = paddle.arange(width).unsqueeze(0).expand(
                (height, width)).astype('float32')
            x_coords = (x_coords / (height - 1)) * 2 - 1
            y_coords = (y_coords / (width - 1)) * 2 - 1
            coords = paddle.stack([x_coords, y_coords],
                                  axis=0)  # (2, height, width)

            if self.with_r:
                rr = paddle.sqrt(
                    paddle.pow(x_coords, 2) +
                    paddle.pow(y_coords, 2))  # (height, width)
                rr = (rr / paddle.max(rr)).unsqueeze(0)
                coords = paddle.concat([coords, rr], axis=0)

            self.coords = coords.unsqueeze(0)  # (1, 2 or 3, height, width)
            self.x_coords = x_coords
            self.y_coords = y_coords
Example #6
 def forward(self, distance, label):
     label = -1 * (2 * label - 1)
     # print(label, distance)
     pos_num = paddle.sum((label == 1).astype('float32')) + 0.0001
     neg_num = paddle.sum((label == -1).astype('float32')) + 0.0001
     loss_1 = paddle.sum((1 + label) / 2 * paddle.pow(distance, 2)) / pos_num
     loss_2 = paddle.sum((1 - label) / 2 * paddle.pow(paddle.clip(self.margin - distance, min=0.0), 2)) / neg_num
     loss = loss_1 + loss_2
     return loss
Example #7
 def forward(self, output1, output2, label):
     dist = nn.PairwiseDistance(keepdim=True)
     euclidean_distance = dist(output1, output2)
     loss_contrastive = paddle.mean(
         (1 - label) * paddle.pow(euclidean_distance, 2) +
         label * paddle.pow(
             paddle.clip(self.margin - euclidean_distance, min=0.0), 2))
     return loss_contrastive
Example #8
 def test_single_api(sort_sum_gradient):
     fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
     x = paddle.to_tensor(5., stop_gradient=False)
     for i in range(10):
         y = paddle.pow(x, 4.0)
         y.backward()
         self.assertEqual(x.grad.numpy(), (i + 1) * 500)
     x.clear_gradient()
     self.assertEqual(x.grad.numpy(), 0.)
     for i in range(10):
         y = paddle.pow(x, 4.0)
         y.backward()
         self.assertEqual(x.grad.numpy(), (i + 1) * 500)
     x.clear_grad()
     self.assertEqual(x.grad.numpy(), 0.)
Example #9
def degree_norm(graph, mode="indegree"):
    """Calculate the degree normalization of a graph

    Args:
        graph: the graph object from (:code:`Graph`)

        mode: which degree to be normalized ("indegree" or "outdegree")

    Returns:
        A tensor with shape (num_nodes, 1).

    """

    assert mode in [
        'indegree', 'outdegree'
    ], "The degree_norm mode should be in ['indegree', 'outdegree']. But received mode=%s" % mode

    if mode == "indegree":
        degree = graph.indegree()
    elif mode == "outdegree":
        degree = graph.outdegree()

    norm = paddle.cast(degree, dtype=paddle.get_default_dtype())
    norm = paddle.clip(norm, min=1.0)
    norm = paddle.pow(norm, -0.5)
    norm = paddle.reshape(norm, [-1, 1])
    return norm
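
A hedged usage sketch: build a tiny graph and normalize by in-degree. The
construction below assumes pgl's usual pgl.Graph(edges=..., num_nodes=...)
form and its .tensor() conversion:

    import pgl
    import paddle

    g = pgl.Graph(edges=[(0, 1), (1, 2), (2, 0), (2, 1)], num_nodes=3).tensor()
    norm = degree_norm(g, mode="indegree")  # shape [3, 1], each entry degree**-0.5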
Example #10
def reshape_tensor(name: str, x, out_shape, use_tensor_in_list):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
        if use_tensor_in_list:
            out_shape[0] = paddle.assign(
                np.array((out_shape[0], )).astype('int32'))
            out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape)
        else:
            out_shape = np.array(out_shape).astype('int32')
            node_shape = paddle.assign(out_shape)
            out = paddle.fluid.layers.reshape(x=node_x, shape=node_shape)

        out = paddle.pow(out, 1)
        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(feed={'x': x}, fetch_list=[out])

        saveModel(name,
                  exe,
                  feedkeys=['x'],
                  fetchlist=[out],
                  inputs=[x],
                  outputs=[outs[0]],
                  target_dir=sys.argv[1])

    return outs[0]
Example #11
    def test_adam_exception(self):
        paddle.enable_static()
        a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
        b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
        label = paddle.static.data(name="label", shape=[32, 1], dtype='int64')

        sum = paddle.add(a, b)
        z = paddle.pow(sum, 2.0)

        fc_1 = fluid.layers.fc(input=z, size=128)
        prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')

        cost = fluid.layers.cross_entropy(input=prediction, label=label)
        loss = fluid.layers.reduce_mean(cost)
        adam = fluid.optimizer.Adam(use_global_beta_pow=True)
        adam.minimize(loss)
        self.assertRaises(Exception, adam._get_global_accumulator, 'tmp')
        adam._add_global_accumulator('tmp',
                                     type=core.VarDesc.VarType.LOD_TENSOR)
        adam._get_global_accumulator('tmp')
        self.assertRaises(Exception,
                          adam._add_global_accumulator,
                          adam._beta1_pow_acc_str,
                          type=core.VarDesc.VarType.LOD_TENSOR)
        paddle.disable_static()
Example #12
    def __call__(self, preds, targets):
        heatmaps_gt, mask = targets
        heatmaps_pred = preds[0]
        scalemaps_pred = preds[1]

        heatmaps_scaled_gt = paddle.where(
            heatmaps_gt > 0, 0.5 * heatmaps_gt *
            (1 + (1 +
                  (scalemaps_pred - 1.) * paddle.log(heatmaps_gt + 1e-10))**2),
            heatmaps_gt)

        regularizer_loss = paddle.mean(
            paddle.pow((scalemaps_pred - 1.) * (heatmaps_gt > 0).astype(float),
                       2))
        omega = 0.01
        # thres = 2**(-1/omega), threshold for positive weight
        hm_weight = heatmaps_scaled_gt**(omega) * paddle.abs(
            1 - heatmaps_pred) + paddle.abs(heatmaps_pred) * (
                1 - heatmaps_scaled_gt**(omega))

        loss = (((heatmaps_pred - heatmaps_scaled_gt)**2) *
                mask.cast('float').unsqueeze(1)) * hm_weight
        loss = loss.mean()
        loss = self.loss_factor * (loss + 1.0 * regularizer_loss)
        return loss
Example #13
def evaluate(model, loader, criterion):
    """
    Evaluate the model on the test dataset and return average loss and pcc.
    """
    model.eval()
    total_loss = []
    total_pcc = []

    for idx, batch_data in enumerate(loader):
        graphs, mut, gexpr, met, label = batch_data
        g = pgl.Graph.batch(graphs).tensor()
        mut = paddle.to_tensor(mut)
        gexpr = paddle.to_tensor(gexpr)
        met = paddle.to_tensor(met)
        label = paddle.to_tensor(label)

        pred = model([g, mut, gexpr, met])
        eval_loss = paddle.pow(criterion(pred[:, 0], label)[0], 0.5)
        eval_pcc = pearsonr(pred[:, 0].numpy(), label.numpy())[0]
        total_loss.append(eval_loss.numpy())
        total_pcc.append(eval_pcc)

    total_loss = np.mean(total_loss)
    total_pcc = np.mean(total_pcc)
    model.train()

    return {"loss": total_loss, "pcc": total_pcc}
Example #14
def gelu_new(x):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (1.0 + paddle.tanh(
        math.sqrt(2.0 / math.pi) * (x + 0.044715 * paddle.pow(x, 3.0))))
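
As a sanity check (a sketch, assuming Paddle's built-in F.gelu with
approximate=True uses the same tanh formulation):

    import paddle
    import paddle.nn.functional as F

    x = paddle.linspace(-3.0, 3.0, 7)
    print(paddle.allclose(gelu_new(x), F.gelu(x, approximate=True)))  # expect True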
Example #15
File: model.py Project: Yelrose/PGL
 def degree_norm(self, g):
     degree = g.indegree() + 1  # self loop
     norm = paddle.cast(degree, dtype=paddle.get_default_dtype())
     norm = paddle.clip(norm, min=1.0)
     norm = paddle.pow(norm, -0.5)
     norm = paddle.reshape(norm, [-1, 1])
     return norm
Example #16
  def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors] tensor
        representing the value of the loss function.
    """
    # print("++++++++++++++++++++++++++++++++++++START CAL_L1_LOSS++++++++++++++++++++++++++++++++++++++++++++++++")
    diff = prediction_tensor - target_tensor
    if self._code_weights is not None:
      code_weights = self._code_weights.astype(prediction_tensor.dtype)
      diff = code_weights.reshape((1, 1, -1)) * diff
    abs_diff = paddle.abs(diff)
    abs_diff_lt_1 = paddle.less_equal(abs_diff, paddle.to_tensor(1 / (self._sigma**2))).astype(abs_diff.dtype)
    loss = abs_diff_lt_1 * 0.5 * paddle.pow(abs_diff * self._sigma, 2) \
      + (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
    if self._codewise:
      anchorwise_smooth_l1norm = loss
      if weights is not None:
        anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
    else:
      anchorwise_smooth_l1norm = paddle.sum(loss, 2)#  * weights
      if weights is not None:
        anchorwise_smooth_l1norm *= weights
    # print("++++++++++++++++++++++++++++++++++++OVER CAL_L1_LOSS++++++++++++++++++++++++++++++++++++++++++++++++")
    return anchorwise_smooth_l1norm
Example #17
    def forward(self, input, target):
        """
        Args:
            inputs: feature matrix with shape (batch_size, feat_dim)
            target: ground truth labels with shape (batch_size,)
        """
        inputs = input["features"]

        if self.normalize_feature:
            inputs = 1. * inputs / (paddle.expand_as(
                paddle.norm(inputs, p=2, axis=-1, keepdim=True), inputs) +
                                    1e-12)

        bs = inputs.shape[0]

        # compute distance
        dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs])
        dist = dist + dist.t()
        dist = paddle.addmm(input=dist,
                            x=inputs,
                            y=inputs.t(),
                            alpha=-2.0,
                            beta=1.0)
        dist = paddle.clip(dist, min=1e-12).sqrt()

        # hard negative mining
        is_pos = paddle.expand(target, (bs, bs)).equal(
            paddle.expand(target, (bs, bs)).t())
        is_neg = paddle.expand(target, (bs, bs)).not_equal(
            paddle.expand(target, (bs, bs)).t())

        # `dist_ap` means distance(anchor, positive)
        ## both `dist_ap` and `relative_p_inds` with shape [N, 1]
        '''
        dist_ap, relative_p_inds = paddle.max(
            paddle.reshape(dist[is_pos], (bs, -1)), axis=1, keepdim=True)
        # `dist_an` means distance(anchor, negative)
        # both `dist_an` and `relative_n_inds` with shape [N, 1]
        dist_an, relative_n_inds = paddle.min(
            paddle.reshape(dist[is_neg], (bs, -1)), axis=1, keepdim=True)
        '''
        dist_ap = paddle.max(paddle.reshape(paddle.masked_select(dist, is_pos),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # `dist_an` means distance(anchor, negative)
        # both `dist_an` and `relative_n_inds` with shape [N, 1]
        dist_an = paddle.min(paddle.reshape(paddle.masked_select(dist, is_neg),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # shape [N]
        dist_ap = paddle.squeeze(dist_ap, axis=1)
        dist_an = paddle.squeeze(dist_an, axis=1)

        # Compute ranking hinge loss
        y = paddle.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return {"TripletLossV2": loss}
Example #18
    def forward(self, inputs):
        # deal with features of different lengths:
        # 1. pad them to the same length and build one tensor
        # 2. build a mask tensor of the same shape, filled with 1s
        # 3. compute the output with the mask tensor, so the padding
        #    does not affect the result
        assert (len(inputs) == self.feature_num
                ), "Input tensor does not contain {} features".format(
                    self.feature_num)
        att_outs = []
        for i in range(len(inputs)):
            ###1. fc
            m = getattr(self, "fc_feature{}".format(i))
            output_fc = m(inputs[i][0])
            output_fc = paddle.tanh(output_fc)

            ###2. bi_lstm
            m = getattr(self, "bi_lstm{}".format(i))
            lstm_out, _ = m(inputs=output_fc, sequence_length=inputs[i][1])

            lstm_dropout = self.dropout(lstm_out)

            ###3. att_fc
            m = getattr(self, "att_fc{}".format(i))
            lstm_weight = m(lstm_dropout)

            ###4. softmax replacement starts here, since it involves a sum over time steps
            lstm_exp = paddle.exp(lstm_weight)
            lstm_mask = paddle.mean(inputs[i][2], axis=2)
            lstm_exp_with_mask = paddle.multiply(x=lstm_exp,
                                                 y=lstm_mask,
                                                 axis=0)
            lstm_sum_with_mask = paddle.sum(lstm_exp_with_mask, axis=1)
            exponent = -1
            lstm_denominator = paddle.pow(lstm_sum_with_mask, exponent)
            lstm_softmax = paddle.multiply(x=lstm_exp,
                                           y=lstm_denominator,
                                           axis=0)
            lstm_weight = lstm_softmax
            ###softmax replacement ends here

            lstm_scale = paddle.multiply(x=lstm_dropout, y=lstm_weight, axis=0)

            ###5. sequence_pool replacement starts here, since it involves a sum over time steps
            lstm_scale_with_mask = paddle.multiply(x=lstm_scale,
                                                   y=lstm_mask,
                                                   axis=0)
            fea_lens = inputs[i][1]
            fea_len = int(fea_lens[0])
            lstm_pool = paddle.sum(lstm_scale_with_mask, axis=1)
            ###sequence_pool replacement ends here
            att_outs.append(lstm_pool)
        att_out = paddle.concat(att_outs, axis=1)
        fc_out1 = self.fc_out1(att_out)
        fc_out1_act = self.relu(fc_out1)
        fc_out2 = self.fc_out2(fc_out1_act)
        fc_out2_act = paddle.tanh(fc_out2)
        fc_logit = self.fc_logit(fc_out2_act)
        output = self.sigmoid(fc_logit)
        return fc_logit, output
Example #19
 def label_aware_attention(self, keys, query):
     """label_aware_attention
     """
     weight = paddle.sum(keys * query, axis=-1, keepdim=True)
     weight = paddle.pow(weight, self.pow_p)  # [x,k_max,1]
     weight = F.softmax(weight, axis=1)
     output = paddle.sum(keys * weight, axis=1)
     return output, weight
Example #20
def bbox_iou(box1, box2, giou=False, diou=False, ciou=False, eps=1e-9):
    """calculate the iou of box1 and box2

    Args:
        box1 (list): [x1, y1, x2, y2], all have the shape [b, na, h, w, 1]
        box2 (list): [x1, y1, x2, y2], all have the shape [b, na, h, w, 1]
        giou (bool): whether use giou or not, default False
        diou (bool): whether use diou or not, default False
        ciou (bool): whether use ciou or not, default False
        eps (float): epsilon to avoid divide by zero

    Returns:
        iou (Tensor): iou of box1 and box2, with the shape [b, na, h, w, 1]
    """
    px1, py1, px2, py2 = box1
    gx1, gy1, gx2, gy2 = box2
    x1 = paddle.maximum(px1, gx1)
    y1 = paddle.maximum(py1, gy1)
    x2 = paddle.minimum(px2, gx2)
    y2 = paddle.minimum(py2, gy2)

    overlap = (x2 - x1) * (y2 - y1)
    overlap = overlap.clip(0)

    area1 = (px2 - px1) * (py2 - py1)
    area1 = area1.clip(0)

    area2 = (gx2 - gx1) * (gy2 - gy1)
    area2 = area2.clip(0)

    union = area1 + area2 - overlap + eps
    iou = overlap / union

    if giou or ciou or diou:
        # convex w, h
        cw = paddle.maximum(px2, gx2) - paddle.minimum(px1, gx1)
        ch = paddle.maximum(py2, gy2) - paddle.minimum(py1, gy1)
        if giou:
            c_area = cw * ch + eps
            return iou - (c_area - union) / c_area
        else:
            # convex diagonal squared
            c2 = cw**2 + ch**2 + eps
            # center distance
            rho2 = ((px1 + px2 - gx1 - gx2)**2 +
                    (py1 + py2 - gy1 - gy2)**2) / 4
            if diou:
                return iou - rho2 / c2
            else:
                w1, h1 = px2 - px1, py2 - py1 + eps
                w2, h2 = gx2 - gx1, gy2 - gy1 + eps
                delta = paddle.atan(w1 / h1) - paddle.atan(w2 / h2)
                v = (4 / math.pi**2) * paddle.pow(delta, 2)
                alpha = v / (1 + eps - iou + v)
                alpha.stop_gradient = True
                return iou - (rho2 / c2 + v * alpha)
    else:
        return iou
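A small worked example (a sketch): two boxes in corner format with unit
overlap, so IoU = 1 / (4 + 4 - 1) ≈ 0.1429:

    import paddle

    shape = [1, 3, 4, 4, 1]
    box1 = [paddle.full(shape, v) for v in (0.0, 0.0, 2.0, 2.0)]  # x1, y1, x2, y2
    box2 = [paddle.full(shape, v) for v in (1.0, 1.0, 3.0, 3.0)]
    iou = bbox_iou(box1, box2)              # ~0.1429 everywhere
    ciou = bbox_iou(box1, box2, ciou=True)  # adds center-distance and aspect penalties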
Example #21
def calc_dist_matrix(x, y):
    """Calculate Euclidean distance matrix with paddle.Tensor"""
    n = x.shape[0]
    m = y.shape[0]
    d = x.shape[1]
    x = x.unsqueeze(1)
    x = paddle.expand(x, [n, m, d])
    y = y.unsqueeze(0)
    y = paddle.expand(y, [n, m, d])
    dist_matrix = paddle.sqrt(paddle.pow(x - y, 2).sum(2))
    return dist_matrix
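For example (a sketch):

    import paddle

    x = paddle.rand([4, 16])
    y = paddle.rand([6, 16])
    dist = calc_dist_matrix(x, y)  # shape [4, 6]; dist[i, j] = ||x[i] - y[j]||_2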
Example #22
    def apply_single(self, pred, tagmap):
        if tagmap.numpy()[:, :, 3].sum() == 0:
            return (paddle.zeros([1]), paddle.zeros([1]))
        nonzero = paddle.nonzero(tagmap[:, :, 3] > 0)
        if nonzero.shape[0] == 0:
            return (paddle.zeros([1]), paddle.zeros([1]))
        p_inds = paddle.unique(nonzero[:, 0])
        num_person = p_inds.shape[0]
        if num_person == 0:
            return (paddle.zeros([1]), paddle.zeros([1]))

        pull = 0
        tagpull_num = 0
        embs_all = []
        person_unvalid = 0
        for person_idx in p_inds.numpy():
            valid_single = tagmap[person_idx.item()]
            validkpts = paddle.nonzero(valid_single[:, 3] > 0)
            valid_single = paddle.index_select(valid_single, validkpts)
            emb = paddle.gather_nd(pred, valid_single[:, :3])
            if emb.shape[0] == 1:
                person_unvalid += 1
            mean = paddle.mean(emb, axis=0)
            embs_all.append(mean)
            pull += paddle.mean(paddle.pow(emb - mean, 2), axis=0)
            tagpull_num += emb.shape[0]
        pull /= max(num_person - person_unvalid, 1)
        if num_person < 2:
            return pull, paddle.zeros([1])

        embs_all = paddle.stack(embs_all)
        A = embs_all.expand([num_person, num_person])
        B = A.transpose([1, 0])
        diff = A - B

        diff = paddle.pow(diff, 2)
        push = paddle.exp(-diff)
        push = paddle.sum(push) - num_person

        push /= 2 * num_person * (num_person - 1)
        return pull, push
Example #23
    def _test(self, run_npu=True):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        a_np = np.random.random(size=(32, 32)).astype('float32')
        b_np = np.random.random(size=(32, 32)).astype('float32')
        label_np = np.random.randint(2, size=(32, 1)).astype('int64')

        with paddle.static.program_guard(main_prog, startup_prog):
            a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
            b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
            label = paddle.static.data(name="label",
                                       shape=[32, 1],
                                       dtype='int64')

            sum = paddle.add(a, b)
            z = paddle.pow(sum, 2.0)

            fc_1 = fluid.layers.fc(input=z, size=128)
            prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')

            cost = fluid.layers.cross_entropy(input=prediction, label=label)
            loss = fluid.layers.reduce_mean(cost)
            adam = paddle.optimizer.AdamW(learning_rate=0.01,
                                          weight_decay=0.02)
            adam.minimize(loss)

        if run_npu:
            place = paddle.NPUPlace(0)
        else:
            place = paddle.CPUPlace()

        exe = paddle.static.Executor(place)
        exe.run(startup_prog)

        print("Start run on {}".format(place))
        for epoch in range(100):

            pred_res, loss_res = exe.run(main_prog,
                                         feed={
                                             "a": a_np,
                                             "b": b_np,
                                             "label": label_np
                                         },
                                         fetch_list=[prediction, loss])
            if epoch % 10 == 0:
                print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
                    epoch, pred_res[0], loss_res))

        return pred_res, loss_res
Example #24
    def test_tensor_gradient(self):
        paddle.__version__ = '2.1.0'

        x = paddle.to_tensor(5., stop_gradient=False)
        y = paddle.pow(x, 4.0)
        y.backward()

        with warnings.catch_warnings(record=True) as w:
            grad = x.gradient()
            assert (
                'API "paddle.fluid.dygraph.varbase_patch_methods.gradient" is '
                'deprecated since 2.1.0') in str(w[-1].message)
Example #25
def _run_power(mode, x, y):
    # dynamic mode
    if mode == DYNAMIC:
        paddle.disable_static()
        # y is scalar
        if isinstance(y, (int, float)):
            x_ = paddle.to_tensor(x)
            y_ = y
            res = paddle.pow(x_, y_)
            return res.numpy()
        # y is tensor
        else:
            x_ = paddle.to_tensor(x)
            y_ = paddle.to_tensor(y)
            res = paddle.pow(x_, y_)
            return res.numpy()
    # static mode
    elif mode == STATIC:
        paddle.enable_static()
        # y is scalar
        if isinstance(y, (int, float)):
            with program_guard(Program(), Program()):
                x_ = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
                y_ = y
                res = paddle.pow(x_, y_)
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                outs = exe.run(feed={'x': x}, fetch_list=[res])
                return outs[0]
        # y is tensor
        else:
            with program_guard(Program(), Program()):
                x_ = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
                y_ = paddle.static.data(name="y", shape=y.shape, dtype=y.dtype)
                res = paddle.pow(x_, y_)
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                outs = exe.run(feed={'x': x, 'y': y}, fetch_list=[res])
                return outs[0]
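
A hedged usage sketch; DYNAMIC and STATIC are the module-level mode constants
this helper expects, and Program/program_guard come from paddle.static:

    import numpy as np

    x = np.random.uniform(1, 2, [2, 3]).astype('float32')
    print(_run_power(DYNAMIC, x, 2.0))                                    # scalar exponent
    print(_run_power(DYNAMIC, x, np.full([2, 3], 2.0, dtype='float32')))  # Tensor exponent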
Example #26
def _not_faster_neg_loss(pred, gt):
    # pos_inds = gt.eq(1).float()
    # neg_inds = gt.lt(1).float()
    pos_inds = gt.equal(paddle.ones(gt.shape, dtype=gt.dtype)).cast('float32')
    neg_inds = gt.less_than(paddle.ones(gt.shape,
                                        dtype=gt.dtype)).cast('float32')
    # num_pos  = pos_inds.float().sum()
    num_pos = pos_inds.cast('float32').sum()
    # neg_weights = torch.pow(1 - gt, 4)
    neg_weights = paddle.pow(1 - gt, 4)

    loss = 0
    trans_pred = pred * neg_inds + (1 - pred) * pos_inds
    weight = neg_weights * neg_inds + pos_inds
    # all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight
    all_loss = paddle.log(1 - trans_pred) * paddle.pow(trans_pred, 2) * weight
    all_loss = all_loss.sum()

    if num_pos > 0:
        all_loss /= num_pos
    loss -= all_loss
    return loss
Example #27
 def forward(self, input, label):
     cosine = F.linear(F.normalize(input), F.normalize(self.weight))
     sine = paddle.sqrt(
         paddle.clip(1.0 - paddle.pow(cosine, 2), min=0, max=1))
     phi = cosine * self.cos_m - sine * self.sin_m
     if self.easy_margin:
         phi = paddle.where(cosine > 0, phi, cosine)
     else:
         phi = paddle.where(cosine > self.th, phi, cosine - self.mm)
     one_hot = paddle.nn.functional.one_hot(label, self.class_dim)
     output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
     output *= self.s
     return output
Example #28
def get_sinusoid_encoding(n_position, feat_dim, wave_length=10000):
    # [n_position]
    positions = paddle.arange(0, n_position).astype('float32')
    # [feat_dim]
    dim_range = paddle.arange(0, feat_dim)
    # paddle.pow expects a Tensor base, so wrap the scalar wave_length;
    # the exponent 2*(i//2)/feat_dim is computed in float32
    exponent = (2 * (dim_range // 2)).astype('float32') / feat_dim
    dim_range = paddle.pow(paddle.to_tensor(float(wave_length)), exponent)
    # [n_position, feat_dim]
    angles = paddle.unsqueeze(positions, axis=1) / paddle.unsqueeze(dim_range,
                                                                    axis=0)
    angles = paddle.cast(angles, "float32")
    angles[:, 0::2] = paddle.sin(angles[:, 0::2])
    angles[:, 1::2] = paddle.cos(angles[:, 1::2])
    return angles
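A quick usage sketch:

    pe = get_sinusoid_encoding(n_position=50, feat_dim=64)  # shape [50, 64]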
Example #29
def _postprocess_output(ioup, output, an_num, num_classes, iou_aware_factor):
    """
    post process output objectness score
    """
    tensors = []
    stride = output.shape[1] // an_num
    for m in range(an_num):
        tensors.append(output[:, stride * m:stride * m + 4, :, :])
        obj = output[:, stride * m + 4:stride * m + 5, :, :]
        obj = F.sigmoid(obj)

        ip = ioup[:, m:m + 1, :, :]

        new_obj = paddle.pow(obj, (1 - iou_aware_factor)) * paddle.pow(ip, iou_aware_factor)
        new_obj = _de_sigmoid(new_obj)   # the confidence slot is not sigmoid()-activated

        tensors.append(new_obj)

        tensors.append(output[:, stride * m + 5:stride * m + 5 + num_classes, :, :])

    output = paddle.concat(tensors, axis=1)

    return output
Example #30
    def __init__(self, position: int = 60, d_model: int = 30):
        super().__init__()

        pos_enc = paddle.zeros(shape=[position, d_model], dtype='float32')
        pos = paddle.arange(start=0, end=position,
                            dtype='float32').unsqueeze(1)
        dim = paddle.arange(start=0, end=d_model, step=2, dtype='float32')
        div_den = paddle.pow(
            paddle.to_tensor(np.array([10000]), dtype='float32'),
            -(dim / d_model))
        pos_enc[:, 0::2] = paddle.sin(pos * div_den)
        pos_enc[:, 1::2] = paddle.cos(pos * div_den)
        pos_enc.stop_gradient = True
        self.register_buffer('pos_enc', pos_enc)
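
Assuming the snippet above is the __init__ of a paddle.nn.Layer named, say,
PositionalEncoding (the class name is not shown in the source), usage would
look like:

    pe_layer = PositionalEncoding(position=60, d_model=30)  # hypothetical class name
    print(pe_layer.pos_enc.shape)  # [60, 30]: sin on even dims, cos on odd dims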