Example #1
    def sqrt_newton_schulz_autograd(self, A, numIters):
        A_shape = A.shape
        batchSize = A_shape[0]
        dim = A_shape[1]

        normA = A * A
        normA = paddle.sum(normA, axis=1)
        normA = paddle.sum(normA, axis=1)
        normA = paddle.sqrt(normA)
        normA1 = normA.reshape([batchSize, 1, 1])
        Y = paddle.divide(A, paddle.expand_as(normA1, A))
        I = paddle.eye(dim, dim).reshape([1, dim, dim])
        l0 = []
        for i in range(batchSize):
            l0.append(I)
        I = paddle.concat(l0, axis=0)
        I.stop_gradient = False
        Z = paddle.eye(dim, dim).reshape([1, dim, dim])
        l1 = []
        for i in range(batchSize):
            l1.append(Z)
        Z = paddle.concat(l1, axis=0)
        Z.stop_gradient = False

        for i in range(numIters):
            T = 0.5 * (3.0 * I - Z.bmm(Y))
            Y = Y.bmm(T)
            Z = T.bmm(Z)
        sA = Y * paddle.sqrt(normA1).reshape([batchSize, 1, 1])
        sA = paddle.expand_as(sA, A)
        return sA
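The normalization step above leans on paddle.expand_as to broadcast the per-sample norm, shaped [batchSize, 1, 1], up to A's full shape. A minimal standalone sketch of that broadcast, assuming paddle >= 2.0:

import paddle

A = paddle.rand([4, 3, 3])
# Frobenius norm per sample, kept as [4, 1, 1] so it can be broadcast.
normA1 = paddle.sqrt((A * A).sum(axis=[1, 2])).reshape([4, 1, 1])
Y = paddle.divide(A, paddle.expand_as(normA1, A))
print(Y.shape)  # [4, 3, 3]; each Y[i] now has unit Frobenius norm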
Example #2
 def _compute_locations(self, fpn_feats):
     """
     Args:
         fpn_feats (list): List of Variables for FPN feature maps
     Return:
         Anchor points for each feature map pixel
     """
     locations = []
     for lvl, feature in enumerate(fpn_feats):
         shape_fm = paddle.shape(feature)
         shape_fm.stop_gradient = True
         h = shape_fm[2]
         w = shape_fm[3]
         fpn_stride = self.fpn_stride[lvl]
         shift_x = fluid.layers.range(0,
                                      w * fpn_stride,
                                      fpn_stride,
                                      dtype='float32')
         shift_y = fluid.layers.range(0,
                                      h * fpn_stride,
                                      fpn_stride,
                                      dtype='float32')
         shift_x = paddle.unsqueeze(shift_x, axis=0)
         shift_y = paddle.unsqueeze(shift_y, axis=1)
         shift_x = paddle.expand_as(shift_x, feature[0, 0, :, :])
         shift_y = paddle.expand_as(shift_y, feature[0, 0, :, :])
         shift_x.stop_gradient = True
         shift_y.stop_gradient = True
         shift_x = paddle.reshape(shift_x, shape=[-1])
         shift_y = paddle.reshape(shift_y, shape=[-1])
         location = paddle.stack([shift_x, shift_y],
                                 axis=-1) + fpn_stride // 2
         location.stop_gradient = True
         locations.append(location)
     return locations
Example #3
    def get_loss(self, heatmap, size, offset, weights, inputs):
        heatmap_target = inputs['heatmap']
        size_target = inputs['size']
        offset_target = inputs['offset']
        index = inputs['index']
        mask = inputs['index_mask']
        heatmap = paddle.clip(F.sigmoid(heatmap), 1e-4, 1 - 1e-4)
        heatmap_loss = self.focal_loss(heatmap, heatmap_target)

        size = paddle.transpose(size, perm=[0, 2, 3, 1])
        size_n, size_h, size_w, size_c = size.shape
        size = paddle.reshape(size, shape=[size_n, -1, size_c])
        index = paddle.unsqueeze(index, 2)
        batch_inds = list()
        for i in range(size_n):
            batch_ind = paddle.full(shape=[1, index.shape[1], 1],
                                    fill_value=i,
                                    dtype='int64')
            batch_inds.append(batch_ind)
        batch_inds = paddle.concat(batch_inds, axis=0)
        index = paddle.concat(x=[batch_inds, index], axis=2)
        pos_size = paddle.gather_nd(size, index=index)
        mask = paddle.unsqueeze(mask, axis=2)
        size_mask = paddle.expand_as(mask, pos_size)
        size_mask = paddle.cast(size_mask, dtype=pos_size.dtype)
        pos_num = size_mask.sum()
        size_mask.stop_gradient = True
        size_target.stop_gradient = True
        size_loss = F.l1_loss(pos_size * size_mask,
                              size_target * size_mask,
                              reduction='sum')
        size_loss = size_loss / (pos_num + 1e-4)

        offset = paddle.transpose(offset, perm=[0, 2, 3, 1])
        offset_n, offset_h, offset_w, offset_c = offset.shape
        offset = paddle.reshape(offset, shape=[offset_n, -1, offset_c])
        pos_offset = paddle.gather_nd(offset, index=index)
        offset_mask = paddle.expand_as(mask, pos_offset)
        offset_mask = paddle.cast(offset_mask, dtype=pos_offset.dtype)
        pos_num = offset_mask.sum()
        offset_mask.stop_gradient = True
        offset_target.stop_gradient = True
        offset_loss = F.l1_loss(pos_offset * offset_mask,
                                offset_target * offset_mask,
                                reduction='sum')
        offset_loss = offset_loss / (pos_num + 1e-4)

        det_loss = weights['heatmap'] * heatmap_loss + weights[
            'size'] * size_loss + weights['offset'] * offset_loss

        return {
            'det_loss': det_loss,
            'heatmap_loss': heatmap_loss,
            'size_loss': size_loss,
            'offset_loss': offset_loss
        }
Example #4
    def forward(self, x):
        squeeze = self.squeeze(x)
        squeeze = paddle.reshape(squeeze, (squeeze.shape[0], -1))
        excitation = self.excitation(squeeze)
        excitation = paddle.reshape(excitation, (x.shape[0], x.shape[1], 1, 1))

        return x * paddle.expand_as(excitation, x)
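The expand_as call in this squeeze-and-excitation block only makes the broadcast explicit; multiplying by the [N, C, 1, 1] excitation directly gives the same result. A small equivalence sketch, assuming paddle >= 2.0:

import paddle

x = paddle.rand([2, 8, 4, 4])
excitation = paddle.rand([2, 8, 1, 1])
a = x * paddle.expand_as(excitation, x)  # explicit broadcast
b = x * excitation                       # implicit broadcast
print(bool(paddle.allclose(a, b)))       # True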
Example #5
def index_add_(parent, axis, idx, child):
    expend_dim = parent.shape[0]
    idx_one_hot = F.one_hot(idx.cast("int64"), expend_dim)
    child = paddle.expand_as(child.cast("float32").unsqueeze(-1), idx_one_hot)
    output = parent + (
        idx_one_hot.cast("float32").multiply(child)).sum(axis).squeeze()
    return output
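A small usage sketch of the index_add_ helper above, assuming paddle >= 2.0: it scatter-adds child into a 1-D parent at positions idx, with repeated indices accumulating.

import paddle
import paddle.nn.functional as F  # needed by index_add_ for F.one_hot

parent = paddle.zeros([5])
idx = paddle.to_tensor([0, 2, 2])
child = paddle.to_tensor([1.0, 2.0, 3.0])
out = index_add_(parent, 0, idx, child)
print(out.numpy())  # [1. 0. 5. 0. 0.] -- 2.0 and 3.0 both land on index 2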
Example #6
 def _compute_locatioins_by_level(self, fpn_stride, feature):
     shape_fm = feature.shape
     h, w = shape_fm[2], shape_fm[3]
     shift_x = paddle.arange(0, w * fpn_stride, fpn_stride)
     shift_y = paddle.arange(0, h * fpn_stride, fpn_stride)
     shift_x = paddle.unsqueeze(shift_x, axis=0)
     shift_y = paddle.unsqueeze(shift_y, axis=1)
     shift_x = paddle.expand_as(shift_x, feature[0, 0, :, :])
     shift_y = paddle.expand_as(shift_y, feature[0, 0, :, :])
     shift_x.stop_gradient = True
     shift_y.stop_gradient = True
     shift_x = paddle.reshape(shift_x, shape=[-1])
     shift_y = paddle.reshape(shift_y, shape=[-1])
     location = paddle.stack([shift_x, shift_y], axis=-1) + fpn_stride / 2
     location.stop_gradient = True
     return location
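For intuition, the grid built above places one anchor point at the center of every feature-map cell. An equivalent standalone sketch using paddle.meshgrid instead of the expand_as trick, assuming paddle >= 2.0:

import paddle

fpn_stride, h, w = 16, 8, 8
shift_x = paddle.arange(0, w * fpn_stride, fpn_stride).astype('float32')
shift_y = paddle.arange(0, h * fpn_stride, fpn_stride).astype('float32')
yy, xx = paddle.meshgrid(shift_y, shift_x)  # each of shape [h, w]
location = paddle.stack([xx.flatten(), yy.flatten()], axis=-1) + fpn_stride / 2
print(location.shape)  # [64, 2] -- (x, y) centers at 8, 24, 40, ...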
Example #7
    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                avg_pool = F.avg_pool2d(x, (x.shape[2], x.shape[3]),
                                        stride=(x.shape[2], x.shape[3]))
                channel_att_raw = self.mlp(avg_pool)
            elif pool_type == 'max':
                max_pool = F.max_pool2d(x, (x.shape[2], x.shape[3]),
                                        stride=(x.shape[2], x.shape[3]))
                channel_att_raw = self.mlp(max_pool)
            elif pool_type == 'lp':
                lp_pool = F.lp_pool2d(x,
                                      2, (x.shape[2], x.shape[3]),
                                      stride=(x.shape[2], x.shape[3]))
                channel_att_raw = self.mlp(lp_pool)
            elif pool_type == 'lse':
                # LSE pool only
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp(lse_pool)

            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw

        scale = F.sigmoid(channel_att_sum)
        scale = paddle.unsqueeze(scale, 2)
        scale = paddle.unsqueeze(scale, 3)
        scale = paddle.expand_as(scale, x)
        return x * scale
Example #8
    def forward(self, input, target):
        """
        Args:
            inputs: feature matrix with shape (batch_size, feat_dim)
            target: ground truth labels with shape (num_classes)
        """
        inputs = input["features"]

        if self.normalize_feature:
            inputs = 1. * inputs / (paddle.expand_as(
                paddle.norm(inputs, p=2, axis=-1, keepdim=True), inputs) +
                                    1e-12)

        bs = inputs.shape[0]

        # compute distance
        dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs])
        dist = dist + dist.t()
        dist = paddle.addmm(input=dist,
                            x=inputs,
                            y=inputs.t(),
                            alpha=-2.0,
                            beta=1.0)
        dist = paddle.clip(dist, min=1e-12).sqrt()

        # hard negative mining
        is_pos = paddle.expand(target, (bs, bs)).equal(
            paddle.expand(target, (bs, bs)).t())
        is_neg = paddle.expand(target, (bs, bs)).not_equal(
            paddle.expand(target, (bs, bs)).t())

        # `dist_ap` means distance(anchor, positive)
        ## both `dist_ap` and `relative_p_inds` with shape [N, 1]
        '''
        dist_ap, relative_p_inds = paddle.max(
            paddle.reshape(dist[is_pos], (bs, -1)), axis=1, keepdim=True)
        # `dist_an` means distance(anchor, negative)
        # both `dist_an` and `relative_n_inds` with shape [N, 1]
        dist_an, relative_n_inds = paddle.min(
            paddle.reshape(dist[is_neg], (bs, -1)), axis=1, keepdim=True)
        '''
        dist_ap = paddle.max(paddle.reshape(paddle.masked_select(dist, is_pos),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # `dist_an` means distance(anchor, negative)
        # both `dist_an` and `relative_n_inds` with shape [N, 1]
        dist_an = paddle.min(paddle.reshape(paddle.masked_select(dist, is_neg),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # shape [N]
        dist_ap = paddle.squeeze(dist_ap, axis=1)
        dist_an = paddle.squeeze(dist_an, axis=1)

        # Compute ranking hinge loss
        y = paddle.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return {"TripletLossV2": loss}
Example #9
 def forward(self, in_tensor):
     avg_pool = F.avg_pool2d(in_tensor,
                             in_tensor.shape[2],
                             stride=in_tensor.shape[2])
     tmp = paddle.unsqueeze(self.gate_c(avg_pool), 2)
     tmp = paddle.unsqueeze(tmp, 3)
     result = paddle.expand_as(tmp, in_tensor)
     return result
Example #10
    def masked_fill(self, mask, value):
        # Write `value` where mask == 1 and keep `self` where mask == 0.
        mask_float = mask.astype("float32")
        result = self * (1 - mask_float) + mask_float * value
        return result

        # Unreachable alternative kept from the original for reference; note
        # that paddle.where(mask, self, ...) keeps `self` where the mask is
        # True, i.e. the opposite selection from the fill performed above.
        # mask = paddle.expand_as(mask, self)
        # new_values = paddle.where(mask, self, paddle.ones(self.shape) * value)
        # return new_values
Example #11
    def get_mc_loss(self, feat, inputs):
        # feat.shape = [bs, ch_emb, h, w]
        assert 'cls_id_map' in inputs and 'cls_tr_ids' in inputs
        index = inputs['index']
        mask = inputs['index_mask']
        cls_id_map = inputs['cls_id_map']  # [bs, h, w]
        cls_tr_ids = inputs['cls_tr_ids']  # [bs, num_classes, h, w]

        feat = paddle.transpose(feat, perm=[0, 2, 3, 1])
        feat_n, feat_h, feat_w, feat_c = feat.shape
        feat = paddle.reshape(feat, shape=[feat_n, -1, feat_c])

        index = paddle.unsqueeze(index, 2)
        batch_inds = list()
        for i in range(feat_n):
            batch_ind = paddle.full(
                shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
            batch_inds.append(batch_ind)
        batch_inds = paddle.concat(batch_inds, axis=0)
        index = paddle.concat(x=[batch_inds, index], axis=2)
        feat = paddle.gather_nd(feat, index=index)

        mask = paddle.unsqueeze(mask, axis=2)
        mask = paddle.expand_as(mask, feat)
        mask.stop_gradient = True
        feat = paddle.masked_select(feat, mask > 0)
        feat = paddle.reshape(feat, shape=[-1, feat_c])

        reid_losses = 0
        for cls_id, id_num in self.num_identities_dict.items():
            # target
            cur_cls_tr_ids = paddle.reshape(
                cls_tr_ids[:, cls_id, :, :], shape=[feat_n, -1])  # [bs, h*w]
            cls_id_target = paddle.gather_nd(cur_cls_tr_ids, index=index)
            mask = inputs['index_mask']
            cls_id_target = paddle.masked_select(cls_id_target, mask > 0)
            cls_id_target.stop_gradient = True

            # feat
            cls_id_feat = self.emb_scale_dict[str(cls_id)] * F.normalize(feat)
            cls_id_pred = self.classifiers[str(cls_id)](cls_id_feat)

            loss = self.reid_loss(cls_id_pred, cls_id_target)
            valid = (cls_id_target != self.reid_loss.ignore_index)
            valid.stop_gradient = True
            count = paddle.sum((paddle.cast(valid, dtype=np.int32)))
            count.stop_gradient = True
            if count > 0:
                loss = loss / count
            reid_losses += loss

        return reid_losses
Example #12
    def get_loss(self, mask_logits, mask_label, mask_target, mask_weight):
        mask_label = F.one_hot(mask_label, self.num_classes).unsqueeze([2, 3])
        mask_label = paddle.expand_as(mask_label, mask_logits)
        mask_label.stop_gradient = True
        mask_pred = paddle.gather_nd(mask_logits, paddle.nonzero(mask_label))
        shape = mask_logits.shape
        mask_pred = paddle.reshape(mask_pred, [shape[0], shape[2], shape[3]])

        mask_target = mask_target.cast('float32')
        mask_weight = mask_weight.unsqueeze([1, 2])
        loss_mask = F.binary_cross_entropy_with_logits(
            mask_pred, mask_target, weight=mask_weight, reduction="mean")
        return loss_mask
Example #13
    def test_amp_guard_upsupported_fp16_op(self):
        data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
        with fluid.dygraph.guard():
            conv2d = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None)
            data = fluid.dygraph.to_variable(data)
            with fluid.dygraph.amp_guard(True):
                out_amp_fp16 = conv2d(data)
                out_amp_fp32 = paddle.expand_as(
                    out_amp_fp16,
                    out_amp_fp16)  # expand_as_v2 has no fp16 kernel

            with fluid.dygraph.amp_guard(True, level='O2'):
                out_purefp16_fp16 = conv2d(data)
                out_purefp16_fp32 = paddle.expand_as(
                    out_purefp16_fp16,
                    out_purefp16_fp16)  # expand_as_v2 has no fp16 kernel

        self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32)
        self.assertTrue(out_amp_fp16.dtype == fluid.core.VarDesc.VarType.FP16)
        self.assertTrue(out_amp_fp32.dtype == fluid.core.VarDesc.VarType.FP32)
        self.assertTrue(
            out_purefp16_fp16.dtype == fluid.core.VarDesc.VarType.FP16)
        self.assertTrue(
            out_purefp16_fp32.dtype == fluid.core.VarDesc.VarType.FP32)
Example #14
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.shape[0]  # 256

    _, pred = paddle.topk(output, maxk, 1, True, True)  # 256, 5
    pred = paddle.t(pred)  # 5,256
    correct = paddle.equal(pred,
                           paddle.expand_as(target.reshape([1, -1]),
                                            pred)).astype('float32')  # 5, 256

    res = []
    for k in topk:
        correct_k = paddle.flatten(correct[:k], start_axis=0,
                                   stop_axis=-1).sum(0)
        res.append(correct_k * (100.0 / batch_size))
    return res
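A quick sanity check of accuracy above, assuming paddle >= 2.0: four samples over three classes, one misclassified, so top-1 precision is 75%.

import paddle

output = paddle.to_tensor([[2.0, 1.0, 0.1],
                           [0.2, 3.0, 0.1],
                           [0.3, 0.2, 2.5],
                           [1.5, 0.2, 0.3]])
target = paddle.to_tensor([0, 1, 2, 1])  # last sample is predicted as class 0
res = accuracy(output, target, topk=(1,))
print(float(res[0]))  # 75.0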
Example #15
    def get_target_tensor(self, prediction, target_is_real):
        """Create label tensors with the same size as the input.

        Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
            target_is_real (bool) - - if the ground truth label is for real images or fake images

        Returns:
            A label tensor filled with ground truth label, and with the size of the input
        """

        if target_is_real:
            target_tensor = self.real_label
        else:
            target_tensor = self.fake_label
        # return target_tensor.expand_as(prediction)
        # print(target_tensor.shape, prediction.shape)
        target_tensor = paddle.cast(target_tensor, prediction.dtype)
        return paddle.expand_as(target_tensor, prediction)
Example #16
    def forward(self, x):
        # x: input features with shape [b, c, h, w]
        b, c, h, w = x.shape

        # feature descriptor on the global spatial information
        y = self.avg_pool(x)

        # Two different branches of ECA module
        y = paddle.squeeze(y, axis=-1)
        y = paddle.transpose(y, perm=[0, 2, 1])
        y = self.conv(y)
        y = paddle.transpose(y, perm=[0, 2, 1])
        paddle.unsqueeze_(y, axis=-1)
        
        # Multi-scale information fusion
        y = self.sigmoid(y)
        y = paddle.expand_as(y, x)

        return x * y
Example #17
    def get_loss(self, feat, inputs):
        index = inputs['index']
        mask = inputs['index_mask']
        target = inputs['reid']
        target = paddle.masked_select(target, mask > 0)
        target = paddle.unsqueeze(target, 1)

        feat = paddle.transpose(feat, perm=[0, 2, 3, 1])
        feat_n, feat_h, feat_w, feat_c = feat.shape
        feat = paddle.reshape(feat, shape=[feat_n, -1, feat_c])
        index = paddle.unsqueeze(index, 2)
        batch_inds = list()
        for i in range(feat_n):
            batch_ind = paddle.full(
                shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
            batch_inds.append(batch_ind)
        batch_inds = paddle.concat(batch_inds, axis=0)
        index = paddle.concat(x=[batch_inds, index], axis=2)
        feat = paddle.gather_nd(feat, index=index)

        mask = paddle.unsqueeze(mask, axis=2)
        mask = paddle.expand_as(mask, feat)
        mask.stop_gradient = True
        feat = paddle.masked_select(feat, mask > 0)
        feat = paddle.reshape(feat, shape=[-1, feat_c])
        feat = F.normalize(feat)
        feat = self.emb_scale * feat
        logit = self.classifier(feat)
        target.stop_gradient = True
        loss = self.reid_loss(logit, target)
        valid = (target != self.reid_loss.ignore_index)
        valid.stop_gradient = True
        count = paddle.sum((paddle.cast(valid, dtype=np.int32)))
        count.stop_gradient = True
        if count > 0:
            loss = loss / count

        return loss
Example #18
    def test_api(self):
        input1 = np.random.random([12, 14]).astype("float32")
        input2 = np.random.random([2, 12, 14]).astype("float32")
        x = fluid.layers.data(name='x',
                              shape=[12, 14],
                              append_batch_size=False,
                              dtype="float32")

        y = fluid.layers.data(name='target_tensor',
                              shape=[2, 12, 14],
                              append_batch_size=False,
                              dtype="float32")

        out_1 = paddle.expand_as(x, y=y)

        exe = fluid.Executor(place=fluid.NPUPlace(0))
        res_1 = exe.run(fluid.default_main_program(),
                        feed={
                            "x": input1,
                            "target_tensor": input2
                        },
                        fetch_list=[out_1])
        assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
Example #19
 def forward(self, inputs1, inputs2):
     """
     forward
     """
     x = paddle.expand_as(inputs1, inputs2)
     return x + inputs2
Example #20
    def test_tensor_patch_method(self):
        paddle.disable_static()
        x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
        y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
        z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype)

        x = paddle.to_tensor(x_np)
        y = paddle.to_tensor(y_np)
        z = paddle.to_tensor(z_np)

        a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])
        b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])

        # 1. Unary operation for Tensor
        self.assertEqual(x.dim(), 2)
        self.assertEqual(x.ndimension(), 2)
        self.assertEqual(x.ndim, 2)
        self.assertEqual(x.size, 6)
        self.assertEqual(x.numel(), 6)
        self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
        self.assertTrue(
            np.array_equal(x.tanh().numpy(),
                           paddle.tanh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.atan().numpy(),
                           paddle.atan(x).numpy()))
        self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
        m = x.abs()
        self.assertTrue(
            np.array_equal(m.sqrt().numpy(),
                           paddle.sqrt(m).numpy()))
        self.assertTrue(
            np.array_equal(m.rsqrt().numpy(),
                           paddle.rsqrt(m).numpy()))
        self.assertTrue(
            np.array_equal(x.ceil().numpy(),
                           paddle.ceil(x).numpy()))
        self.assertTrue(
            np.array_equal(x.floor().numpy(),
                           paddle.floor(x).numpy()))
        self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy()))
        self.assertTrue(
            np.array_equal(x.acos().numpy(),
                           paddle.acos(x).numpy()))
        self.assertTrue(
            np.array_equal(x.asin().numpy(),
                           paddle.asin(x).numpy()))
        self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy()))
        self.assertTrue(
            np.array_equal(x.sinh().numpy(),
                           paddle.sinh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.cosh().numpy(),
                           paddle.cosh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.round().numpy(),
                           paddle.round(x).numpy()))
        self.assertTrue(
            np.array_equal(x.reciprocal().numpy(),
                           paddle.reciprocal(x).numpy()))
        self.assertTrue(
            np.array_equal(x.square().numpy(),
                           paddle.square(x).numpy()))
        self.assertTrue(
            np.array_equal(x.rank().numpy(),
                           paddle.rank(x).numpy()))
        self.assertTrue(
            np.array_equal(x[0].t().numpy(),
                           paddle.t(x[0]).numpy()))
        self.assertTrue(
            np.array_equal(x.asinh().numpy(),
                           paddle.asinh(x).numpy()))
        ### acosh(x) = nan, need to change input
        t_np = np.random.uniform(1, 2, [2, 3]).astype(self.dtype)
        t = paddle.to_tensor(t_np)
        self.assertTrue(
            np.array_equal(t.acosh().numpy(),
                           paddle.acosh(t).numpy()))
        self.assertTrue(
            np.array_equal(x.atanh().numpy(),
                           paddle.atanh(x).numpy()))
        d = paddle.to_tensor([[1.2285208, 1.3491015, 1.4899898],
                              [1.30058, 1.0688717, 1.4928783],
                              [1.0958099, 1.3724753, 1.8926544]])
        d = d.matmul(d.t())
        # ROCM not support cholesky
        if not fluid.core.is_compiled_with_rocm():
            self.assertTrue(
                np.array_equal(d.cholesky().numpy(),
                               paddle.cholesky(d).numpy()))

        self.assertTrue(
            np.array_equal(x.is_empty().numpy(),
                           paddle.is_empty(x).numpy()))
        self.assertTrue(
            np.array_equal(x.isfinite().numpy(),
                           paddle.isfinite(x).numpy()))
        self.assertTrue(
            np.array_equal(
                x.cast('int32').numpy(),
                paddle.cast(x, 'int32').numpy()))
        self.assertTrue(
            np.array_equal(
                x.expand([3, 2, 3]).numpy(),
                paddle.expand(x, [3, 2, 3]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.tile([2, 2]).numpy(),
                paddle.tile(x, [2, 2]).numpy()))
        self.assertTrue(
            np.array_equal(x.flatten().numpy(),
                           paddle.flatten(x).numpy()))
        index = paddle.to_tensor([0, 1])
        self.assertTrue(
            np.array_equal(
                x.gather(index).numpy(),
                paddle.gather(x, index).numpy()))
        index = paddle.to_tensor([[0, 1], [1, 2]])
        self.assertTrue(
            np.array_equal(
                x.gather_nd(index).numpy(),
                paddle.gather_nd(x, index).numpy()))
        self.assertTrue(
            np.array_equal(
                x.reverse([0, 1]).numpy(),
                paddle.reverse(x, [0, 1]).numpy()))
        self.assertTrue(
            np.array_equal(
                a.reshape([3, 2]).numpy(),
                paddle.reshape(a, [3, 2]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.slice([0, 1], [0, 0], [1, 2]).numpy(),
                paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.split(2)[0].numpy(),
                paddle.split(x, 2)[0].numpy()))
        m = paddle.to_tensor(
            np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype))
        self.assertTrue(
            np.array_equal(
                m.squeeze([]).numpy(),
                paddle.squeeze(m, []).numpy()))
        self.assertTrue(
            np.array_equal(
                m.squeeze([1, 2]).numpy(),
                paddle.squeeze(m, [1, 2]).numpy()))
        m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32')
        self.assertTrue(
            np.array_equal(m.unique()[0].numpy(),
                           paddle.unique(m)[0].numpy()))
        self.assertTrue(
            np.array_equal(
                m.unique(return_counts=True)[1],
                paddle.unique(m, return_counts=True)[1]))
        self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0])))
        self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0)))
        self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1)))
        self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1)))
        m = paddle.to_tensor(1)
        self.assertTrue(np.array_equal(m.increment(), paddle.increment(m)))
        m = x.abs()
        self.assertTrue(np.array_equal(m.log(), paddle.log(m)))
        self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2)))
        self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))

        # 2. Binary operation
        self.assertTrue(
            np.array_equal(x.divide(y).numpy(),
                           paddle.divide(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.matmul(y, True, False).numpy(),
                paddle.matmul(x, y, True, False).numpy()))
        self.assertTrue(
            np.array_equal(
                x.norm(p='fro', axis=[0, 1]).numpy(),
                paddle.norm(x, p='fro', axis=[0, 1]).numpy()))
        self.assertTrue(
            np.array_equal(x.dist(y).numpy(),
                           paddle.dist(x, y).numpy()))
        self.assertTrue(
            np.array_equal(x.cross(y).numpy(),
                           paddle.cross(x, y).numpy()))
        m = x.expand([2, 2, 3])
        n = y.expand([2, 2, 3]).transpose([0, 2, 1])
        self.assertTrue(
            np.array_equal(m.bmm(n).numpy(),
                           paddle.bmm(m, n).numpy()))
        self.assertTrue(
            np.array_equal(
                x.histogram(5, -1, 1).numpy(),
                paddle.histogram(x, 5, -1, 1).numpy()))
        self.assertTrue(
            np.array_equal(x.equal(y).numpy(),
                           paddle.equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.greater_equal(y).numpy(),
                paddle.greater_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.greater_than(y).numpy(),
                paddle.greater_than(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.less_equal(y).numpy(),
                paddle.less_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.less_than(y).numpy(),
                paddle.less_than(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.not_equal(y).numpy(),
                paddle.not_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.equal_all(y).numpy(),
                paddle.equal_all(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.allclose(y).numpy(),
                paddle.allclose(x, y).numpy()))
        m = x.expand([2, 2, 3])
        self.assertTrue(
            np.array_equal(
                x.expand_as(m).numpy(),
                paddle.expand_as(x, m).numpy()))
        index = paddle.to_tensor([2, 1, 0])
        self.assertTrue(
            np.array_equal(
                a.scatter(index, b).numpy(),
                paddle.scatter(a, index, b).numpy()))

        # 3. Bool tensor operation
        x = paddle.to_tensor([[True, False], [True, False]])
        y = paddle.to_tensor([[False, False], [False, True]])
        self.assertTrue(
            np.array_equal(
                x.logical_and(y).numpy(),
                paddle.logical_and(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_not(y).numpy(),
                paddle.logical_not(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_or(y).numpy(),
                paddle.logical_or(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_xor(y).numpy(),
                paddle.logical_xor(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_and(y).numpy(),
                paddle.logical_and(x, y).numpy()))
        a = paddle.to_tensor([[1, 2], [3, 4]])
        b = paddle.to_tensor([[4, 3], [2, 1]])
        self.assertTrue(
            np.array_equal(
                x.where(a, b).numpy(),
                paddle.where(x, a, b).numpy()))

        x_np = np.random.randn(3, 6, 9, 7)
        x = paddle.to_tensor(x_np)
        x_T = x.T
        self.assertEqual(x_T.shape, [7, 9, 6, 3])
        self.assertTrue(np.array_equal(x_T.numpy(), x_np.T))

        self.assertTrue(inspect.ismethod(a.dot))
        self.assertTrue(inspect.ismethod(a.logsumexp))
        self.assertTrue(inspect.ismethod(a.multiplex))
        self.assertTrue(inspect.ismethod(a.prod))
        self.assertTrue(inspect.ismethod(a.scale))
        self.assertTrue(inspect.ismethod(a.stanh))
        self.assertTrue(inspect.ismethod(a.add_n))
        self.assertTrue(inspect.ismethod(a.max))
        self.assertTrue(inspect.ismethod(a.maximum))
        self.assertTrue(inspect.ismethod(a.min))
        self.assertTrue(inspect.ismethod(a.minimum))
        self.assertTrue(inspect.ismethod(a.floor_divide))
        self.assertTrue(inspect.ismethod(a.remainder))
        self.assertTrue(inspect.ismethod(a.floor_mod))
        self.assertTrue(inspect.ismethod(a.multiply))
        self.assertTrue(inspect.ismethod(a.logsumexp))
        self.assertTrue(inspect.ismethod(a.inverse))
        self.assertTrue(inspect.ismethod(a.log1p))
        self.assertTrue(inspect.ismethod(a.erf))
        self.assertTrue(inspect.ismethod(a.addmm))
        self.assertTrue(inspect.ismethod(a.clip))
        self.assertTrue(inspect.ismethod(a.trace))
        self.assertTrue(inspect.ismethod(a.kron))
        self.assertTrue(inspect.ismethod(a.isinf))
        self.assertTrue(inspect.ismethod(a.isnan))
        self.assertTrue(inspect.ismethod(a.concat))
        self.assertTrue(inspect.ismethod(a.broadcast_to))
        self.assertTrue(inspect.ismethod(a.scatter_nd_add))
        self.assertTrue(inspect.ismethod(a.scatter_nd))
        self.assertTrue(inspect.ismethod(a.shard_index))
        self.assertTrue(inspect.ismethod(a.chunk))
        self.assertTrue(inspect.ismethod(a.stack))
        self.assertTrue(inspect.ismethod(a.strided_slice))
        self.assertTrue(inspect.ismethod(a.unsqueeze))
        self.assertTrue(inspect.ismethod(a.unstack))
        self.assertTrue(inspect.ismethod(a.argmax))
        self.assertTrue(inspect.ismethod(a.argmin))
        self.assertTrue(inspect.ismethod(a.argsort))
        self.assertTrue(inspect.ismethod(a.masked_select))
        self.assertTrue(inspect.ismethod(a.topk))
        self.assertTrue(inspect.ismethod(a.index_select))
        self.assertTrue(inspect.ismethod(a.nonzero))
        self.assertTrue(inspect.ismethod(a.sort))
        self.assertTrue(inspect.ismethod(a.index_sample))
        self.assertTrue(inspect.ismethod(a.mean))
        self.assertTrue(inspect.ismethod(a.std))
        self.assertTrue(inspect.ismethod(a.numel))
Example #21
    def masked_fill_(self, mask, value):
        # In-place variant: broadcast the mask to self's shape, build the
        # blended tensor, then copy it back into self. As written,
        # paddle.where keeps `self` where the mask is True and writes
        # `value` where it is False.
        mask = paddle.expand_as(mask, self)
        new_values = paddle.where(mask, self, paddle.ones(self.shape) * value)
        paddorch.copy(new_values, self)

        return self
Example #22
    def test_tensor_patch_method(self):
        paddle.disable_static()
        x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
        y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
        z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype)

        x = paddle.to_tensor(x_np)
        y = paddle.to_tensor(y_np)
        z = paddle.to_tensor(z_np)

        a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])
        b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])

        # 1. Unary operation for Tensor
        self.assertEqual(x.dim(), 2)
        self.assertEqual(x.ndimension(), 2)
        self.assertEqual(x.ndim, 2)
        self.assertEqual(x.size(), [2, 3])
        self.assertTrue(
            np.array_equal(x.sigmoid().numpy(),
                           fluid.layers.sigmoid(x).numpy()))
        self.assertTrue(
            np.array_equal(x.logsigmoid().numpy(),
                           fluid.layers.logsigmoid(x).numpy()))
        self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
        self.assertTrue(
            np.array_equal(x.tanh().numpy(),
                           paddle.tanh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.atan().numpy(),
                           paddle.atan(x).numpy()))
        self.assertTrue(
            np.array_equal(x.tanh_shrink().numpy(),
                           fluid.layers.tanh_shrink(x).numpy()))
        self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
        m = x.abs()
        self.assertTrue(
            np.array_equal(m.sqrt().numpy(),
                           paddle.sqrt(m).numpy()))
        self.assertTrue(
            np.array_equal(m.rsqrt().numpy(),
                           paddle.rsqrt(m).numpy()))
        self.assertTrue(
            np.array_equal(x.ceil().numpy(),
                           paddle.ceil(x).numpy()))
        self.assertTrue(
            np.array_equal(x.floor().numpy(),
                           paddle.floor(x).numpy()))
        self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy()))
        self.assertTrue(
            np.array_equal(x.acos().numpy(),
                           paddle.acos(x).numpy()))
        self.assertTrue(
            np.array_equal(x.asin().numpy(),
                           paddle.asin(x).numpy()))
        self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy()))
        self.assertTrue(
            np.array_equal(x.sinh().numpy(),
                           paddle.sinh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.cosh().numpy(),
                           paddle.cosh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.round().numpy(),
                           paddle.round(x).numpy()))
        self.assertTrue(
            np.array_equal(x.reciprocal().numpy(),
                           paddle.reciprocal(x).numpy()))
        self.assertTrue(
            np.array_equal(x.square().numpy(),
                           paddle.square(x).numpy()))
        self.assertTrue(
            np.array_equal(x.softplus().numpy(),
                           fluid.layers.softplus(x).numpy()))
        self.assertTrue(
            np.array_equal(x.softsign().numpy(),
                           fluid.layers.softsign(x).numpy()))
        self.assertTrue(
            np.array_equal(x.rank().numpy(),
                           paddle.rank(x).numpy()))
        self.assertTrue(
            np.array_equal(x[0].t().numpy(),
                           paddle.t(x[0]).numpy()))
        m = paddle.to_tensor(np.random.uniform(1, 2, [3, 3]), 'float32')
        m = m.matmul(m.t())
        self.assertTrue(
            np.array_equal(m.cholesky().numpy(),
                           paddle.cholesky(m).numpy()))

        self.assertTrue(
            np.array_equal(x.is_empty().numpy(),
                           paddle.is_empty(x).numpy()))
        self.assertTrue(
            np.array_equal(x.isfinite().numpy(),
                           paddle.isfinite(x).numpy()))
        self.assertTrue(
            np.array_equal(
                x.cast('int32').numpy(),
                paddle.cast(x, 'int32').numpy()))
        self.assertTrue(
            np.array_equal(
                x.expand([3, 2, 3]).numpy(),
                paddle.expand(x, [3, 2, 3]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.tile([2, 2]).numpy(),
                paddle.tile(x, [2, 2]).numpy()))
        self.assertTrue(
            np.array_equal(x.flatten().numpy(),
                           paddle.flatten(x).numpy()))
        index = paddle.to_tensor([0, 1])
        self.assertTrue(
            np.array_equal(
                x.gather(index).numpy(),
                paddle.gather(x, index).numpy()))
        index = paddle.to_tensor([[0, 1], [1, 2]])
        self.assertTrue(
            np.array_equal(
                x.gather_nd(index).numpy(),
                paddle.gather_nd(x, index).numpy()))
        self.assertTrue(
            np.array_equal(
                x.reverse([0, 1]).numpy(),
                paddle.reverse(x, [0, 1]).numpy()))
        self.assertTrue(
            np.array_equal(
                a.reshape([3, 2]).numpy(),
                paddle.reshape(a, [3, 2]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.slice([0, 1], [0, 0], [1, 2]).numpy(),
                paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.split(2)[0].numpy(),
                paddle.split(x, 2)[0].numpy()))
        m = paddle.to_tensor(
            np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype))
        self.assertTrue(
            np.array_equal(
                m.squeeze([]).numpy(),
                paddle.squeeze(m, []).numpy()))
        self.assertTrue(
            np.array_equal(
                m.squeeze([1, 2]).numpy(),
                paddle.squeeze(m, [1, 2]).numpy()))
        m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32')
        self.assertTrue(
            np.array_equal(m.unique()[0].numpy(),
                           paddle.unique(m)[0].numpy()))
        self.assertTrue(
            np.array_equal(m.unique_with_counts()[2],
                           paddle.unique_with_counts(m)[2]))
        self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0])))
        self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0)))
        self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1)))
        self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1)))
        m = paddle.to_tensor(1)
        self.assertTrue(np.array_equal(m.increment(), paddle.increment(m)))
        m = x.abs()
        self.assertTrue(np.array_equal(m.log(), paddle.log(m)))
        self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2)))
        self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))

        # 2. Binary operation
        self.assertTrue(
            np.array_equal(
                x.matmul(y, True, False).numpy(),
                paddle.matmul(x, y, True, False).numpy()))
        self.assertTrue(
            np.array_equal(
                x.norm(p='fro', axis=[0, 1]).numpy(),
                paddle.norm(x, p='fro', axis=[0, 1]).numpy()))
        self.assertTrue(
            np.array_equal(x.dist(y).numpy(),
                           paddle.dist(x, y).numpy()))
        self.assertTrue(
            np.array_equal(x.cross(y).numpy(),
                           paddle.cross(x, y).numpy()))
        m = x.expand([2, 2, 3])
        n = y.expand([2, 2, 3]).transpose([0, 2, 1])
        self.assertTrue(
            np.array_equal(m.bmm(n).numpy(),
                           paddle.bmm(m, n).numpy()))
        self.assertTrue(
            np.array_equal(
                x.histogram(5, -1, 1).numpy(),
                paddle.histogram(x, 5, -1, 1).numpy()))
        self.assertTrue(
            np.array_equal(x.equal(y).numpy(),
                           paddle.equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.greater_equal(y).numpy(),
                paddle.greater_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.greater_than(y).numpy(),
                paddle.greater_than(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.less_equal(y).numpy(),
                paddle.less_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.less_than(y).numpy(),
                paddle.less_than(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.not_equal(y).numpy(),
                paddle.not_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.equal_all(y).numpy(),
                paddle.equal_all(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.allclose(y).numpy(),
                paddle.allclose(x, y).numpy()))
        m = x.expand([2, 2, 3])
        self.assertTrue(
            np.array_equal(
                x.expand_as(m).numpy(),
                paddle.expand_as(x, m).numpy()))
        index = paddle.to_tensor([2, 1, 0])
        self.assertTrue(
            np.array_equal(
                a.scatter(index, b).numpy(),
                paddle.scatter(a, index, b).numpy()))

        # 3. Bool tensor operation
        x = paddle.to_tensor([[True, False], [True, False]])
        y = paddle.to_tensor([[False, False], [False, True]])
        self.assertTrue(
            np.array_equal(x.reduce_all().numpy(),
                           paddle.reduce_all(x).numpy()))
        self.assertTrue(
            np.array_equal(x.reduce_any().numpy(),
                           paddle.reduce_any(x).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_and(y).numpy(),
                paddle.logical_and(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_not(y).numpy(),
                paddle.logical_not(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_or(y).numpy(),
                paddle.logical_or(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_xor(y).numpy(),
                paddle.logical_xor(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_and(y).numpy(),
                paddle.logical_and(x, y).numpy()))
Example #23
 def forward(self, x, y):
     z = paddle.expand_as(x, y)
     z += y
     return z
Example #24
    def get_loss(self, heatmap, size, iou, offset, weights, inputs):
        heatmap_target = inputs['heatmap']
        size_target = inputs['size']
        offset_target = inputs['offset']
        index = inputs['index']
        mask = inputs['index_mask']
        heatmap = paddle.clip(F.sigmoid(heatmap), 1e-4, 1 - 1e-4)
        heatmap_loss = self.focal_loss(heatmap, heatmap_target)

        size = paddle.transpose(size, perm=[0, 2, 3, 1])
        size_n, size_h, size_w, size_c = size.shape
        size = paddle.reshape(size, shape=[size_n, -1, size_c])
        index = paddle.unsqueeze(index, 2)
        batch_inds = list()
        for i in range(size_n):
            batch_ind = paddle.full(shape=[1, index.shape[1], 1],
                                    fill_value=i,
                                    dtype='int64')
            batch_inds.append(batch_ind)
        batch_inds = paddle.concat(batch_inds, axis=0)
        index = paddle.concat(x=[batch_inds, index], axis=2)
        pos_size = paddle.gather_nd(size, index=index)
        mask = paddle.unsqueeze(mask, axis=2)
        size_mask = paddle.expand_as(mask, pos_size)
        size_mask = paddle.cast(size_mask, dtype=pos_size.dtype)
        pos_num = size_mask.sum()
        size_mask.stop_gradient = True
        size_target.stop_gradient = True
        size_loss = F.l1_loss(pos_size * size_mask,
                              size_target * size_mask,
                              reduction='sum')
        size_loss = size_loss / (pos_num + 1e-4)

        ### giou loss
        iou = paddle.transpose(iou, perm=[0, 2, 3, 1])
        iou_n, iou_h, iou_w, iou_c = iou.shape
        iou = paddle.reshape(iou, shape=[iou_n, -1, iou_c])
        pos_iou = paddle.gather_nd(iou, index=index)
        iou_mask = paddle.expand_as(mask, pos_iou)
        iou_mask = paddle.cast(iou_mask, dtype=pos_iou.dtype)
        pos_num = iou_mask.sum()
        iou_mask.stop_gradient = True
        gt_bbox_xys = inputs['bbox_xys']
        gt_bbox_xys.stop_gradient = True
        centers_x = (gt_bbox_xys[:, :, 0:1] + gt_bbox_xys[:, :, 2:3]) / 2.0
        centers_y = (gt_bbox_xys[:, :, 1:2] + gt_bbox_xys[:, :, 3:4]) / 2.0
        x1 = centers_x - pos_size[:, :, 0:1]
        y1 = centers_y - pos_size[:, :, 1:2]
        x2 = centers_x + pos_size[:, :, 2:3]
        y2 = centers_y + pos_size[:, :, 3:4]
        pred_boxes = paddle.concat([x1, y1, x2, y2], axis=-1)

        iou_loss = self.iou_loss(pred_boxes * iou_mask,
                                 gt_bbox_xys * iou_mask,
                                 iou_weight=iou_mask,
                                 loc_reweight=None)
        iou_loss = iou_loss / (pos_num + 1e-4)

        offset = paddle.transpose(offset, perm=[0, 2, 3, 1])
        offset_n, offset_h, offset_w, offset_c = offset.shape
        offset = paddle.reshape(offset, shape=[offset_n, -1, offset_c])
        pos_offset = paddle.gather_nd(offset, index=index)
        offset_mask = paddle.expand_as(mask, pos_offset)
        offset_mask = paddle.cast(offset_mask, dtype=pos_offset.dtype)
        pos_num = offset_mask.sum()
        offset_mask.stop_gradient = True
        offset_target.stop_gradient = True
        offset_loss = F.l1_loss(pos_offset * offset_mask,
                                offset_target * offset_mask,
                                reduction='sum')
        offset_loss = offset_loss / (pos_num + 1e-4)

        det_loss = weights['heatmap'] * heatmap_loss + weights[
            'size'] * size_loss + weights['offset'] * offset_loss + weights[
                'iou'] * iou_loss

        return {
            'det_loss': det_loss,
            'heatmap_loss': heatmap_loss,
            'size_loss': size_loss,
            'iou_loss': iou_loss,
            'offset_loss': offset_loss
        }
Example #25
def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
    if input.shape != other.shape:
        other = paddle.expand_as(other, input)

    return convertTensor(paddle.allclose(input, other, rtol, atol, equal_nan))
Example #26
 def forward(self, x):
     N, C, _, _ = x.shape
     y = self.avg_pool(x).reshape((N, C))
     y = self.fc(y).reshape((N, C, 1, 1))
     return x * paddle.expand_as(y, x)
Example #27
 def fill_diagonal(self, value, wrap=False):
     # Out-of-place fill of the main diagonal with `value`: broadcast the
     # current diagonal to a full matrix, keep only its diagonal entries
     # via the identity mask, subtract them, then add `value` there.
     eye = paddle.eye(self.shape[0], self.shape[1])
     diag_v = paddle.diag(self)
     diag_m = eye * paddle.expand_as(diag_v, self)
     return self - diag_m + eye * value
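A quick check of the fill_diagonal sketch above, called as a plain function for illustration:

import paddle

m = paddle.ones([3, 3]) * 5.0
out = fill_diagonal(m, 0.0)
print(out.numpy())  # 5s everywhere except 0s on the main diagonal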
Example #28
 def forward(self, in_tensor):
     return paddle.expand_as(self.gate_s(in_tensor), in_tensor)
Example #29
    def beam_search(self, x, beam_width, eos, embed):
        def _inflate(tensor, times, dim):
            repeat_dims = [1] * tensor.dim()
            repeat_dims[dim] = times
            output = paddle.tile(tensor, repeat_dims)
            return output

        # https://github.com/IBM/pytorch-seq2seq/blob/fede87655ddce6c94b38886089e05321dc9802af/seq2seq/models/TopKDecoder.py
        batch_size, l, d = x.shape
        x = paddle.tile(paddle.transpose(x.unsqueeze(1), perm=[1, 0, 2, 3]),
                        [beam_width, 1, 1, 1])
        inflated_encoder_feats = paddle.reshape(
            paddle.transpose(x, perm=[1, 0, 2, 3]), [-1, l, d])

        # Initialize the decoder
        state = self.decoder.get_initial_state(embed, tile_times=beam_width)

        pos_index = paddle.reshape(paddle.arange(batch_size) * beam_width,
                                   shape=[-1, 1])

        # Initialize the scores
        sequence_scores = paddle.full(shape=[batch_size * beam_width, 1],
                                      fill_value=-float('Inf'))
        index = [i * beam_width for i in range(0, batch_size)]
        sequence_scores[index] = 0.0

        # Initialize the input vector
        y_prev = paddle.full(shape=[batch_size * beam_width],
                             fill_value=self.num_classes)

        # Store decisions for backtracking
        stored_scores = list()
        stored_predecessors = list()
        stored_emitted_symbols = list()

        for i in range(self.max_len_labels):
            output, state = self.decoder(inflated_encoder_feats, state, y_prev)
            state = paddle.unsqueeze(state, axis=0)
            log_softmax_output = paddle.nn.functional.log_softmax(output,
                                                                  axis=1)

            sequence_scores = _inflate(sequence_scores, self.num_classes, 1)
            sequence_scores += log_softmax_output
            scores, candidates = paddle.topk(paddle.reshape(
                sequence_scores, [batch_size, -1]),
                                             beam_width,
                                             axis=1)

            # Reshape input = (bk, 1) and sequence_scores = (bk, 1)
            y_prev = paddle.reshape(candidates % self.num_classes,
                                    shape=[batch_size * beam_width])
            sequence_scores = paddle.reshape(
                scores, shape=[batch_size * beam_width, 1])

            # Update fields for next timestep
            pos_index = paddle.expand_as(pos_index, candidates)
            predecessors = paddle.cast(candidates / self.num_classes +
                                       pos_index,
                                       dtype='int64')
            predecessors = paddle.reshape(predecessors,
                                          shape=[batch_size * beam_width, 1])
            state = paddle.index_select(state,
                                        index=predecessors.squeeze(),
                                        axis=1)

            # Update sequence scores and erase scores for <eos> symbol so that they aren't expanded
            stored_scores.append(sequence_scores.clone())
            y_prev = paddle.reshape(y_prev, shape=[-1, 1])
            eos_prev = paddle.full_like(y_prev, fill_value=eos)
            mask = eos_prev == y_prev
            mask = paddle.nonzero(mask)
            if mask.dim() > 0:
                sequence_scores = sequence_scores.numpy()
                mask = mask.numpy()
                sequence_scores[mask] = -float('inf')
                sequence_scores = paddle.to_tensor(sequence_scores)

            # Cache results for backtracking
            stored_predecessors.append(predecessors)
            y_prev = paddle.squeeze(y_prev)
            stored_emitted_symbols.append(y_prev)

        # Do backtracking to return the optimal values
        #====== backtrack ======#
        # Initialize return variables given different types
        p = list()
        l = [[self.max_len_labels] * beam_width for _ in range(batch_size)
             ]  # Placeholder for lengths of top-k sequences

        # the last step output of the beams are not sorted
        # thus they are sorted here
        sorted_score, sorted_idx = paddle.topk(
            paddle.reshape(stored_scores[-1], shape=[batch_size, beam_width]),
            beam_width)

        # initialize the sequence scores with the sorted last step beam scores
        s = sorted_score.clone()

        batch_eos_found = [0] * batch_size  # the number of EOS found
        # in the backward loop below for each batch
        t = self.max_len_labels - 1
        # initialize the back pointer with the sorted order of the last step beams.
        # add pos_index for indexing variable with b*k as the first dimension.
        t_predecessors = paddle.reshape(sorted_idx +
                                        pos_index.expand_as(sorted_idx),
                                        shape=[batch_size * beam_width])
        while t >= 0:
            # Re-order the variables with the back pointer
            current_symbol = paddle.index_select(stored_emitted_symbols[t],
                                                 index=t_predecessors,
                                                 axis=0)
            t_predecessors = paddle.index_select(
                stored_predecessors[t].squeeze(), index=t_predecessors, axis=0)
            eos_indices = stored_emitted_symbols[t] == eos
            eos_indices = paddle.nonzero(eos_indices)

            if eos_indices.dim() > 0:
                for i in range(eos_indices.shape[0] - 1, -1, -1):
                    # Indices of the EOS symbol for both variables
                    # with b*k as the first dimension, and b, k for
                    # the first two dimensions
                    idx = eos_indices[i]
                    b_idx = int(idx[0] / beam_width)
                    # The indices of the replacing position
                    # according to the replacement strategy noted above
                    res_k_idx = beam_width - (batch_eos_found[b_idx] %
                                              beam_width) - 1
                    batch_eos_found[b_idx] += 1
                    res_idx = b_idx * beam_width + res_k_idx

                    # Replace the old information in return variables
                    # with the new ended sequence information
                    t_predecessors[res_idx] = stored_predecessors[t][idx[0]]
                    current_symbol[res_idx] = stored_emitted_symbols[t][idx[0]]
                    s[b_idx, res_k_idx] = stored_scores[t][idx[0], 0]
                    l[b_idx][res_k_idx] = t + 1

            # record the back tracked results
            p.append(current_symbol)
            t -= 1

        # Sort and re-order again as the added ended sequences may change
        # the order (very unlikely)
        s, re_sorted_idx = s.topk(beam_width)
        for b_idx in range(batch_size):
            l[b_idx] = [
                l[b_idx][k_idx.item()] for k_idx in re_sorted_idx[b_idx, :]
            ]

        re_sorted_idx = paddle.reshape(
            re_sorted_idx + pos_index.expand_as(re_sorted_idx),
            [batch_size * beam_width])

        # Reverse the sequences and re-order at the same time
        # It is reversed because the backtracking happens in reverse time order
        p = [
            paddle.reshape(paddle.index_select(step, re_sorted_idx, 0),
                           shape=[batch_size, beam_width, -1])
            for step in reversed(p)
        ]
        p = paddle.concat(p, -1)[:, 0, :]
        return p, paddle.ones_like(p)
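The _inflate helper at the top of beam_search simply repeats a tensor along one dimension via paddle.tile; a minimal check, assuming paddle >= 2.0:

import paddle

t = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
out = paddle.tile(t, [3, 1])  # what _inflate(t, times=3, dim=0) computes
print(out.shape)  # [6, 2]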
Example #30
 def forward(self, inputs, inputs_):
     """
     forward
     """
     x = paddle.expand_as(inputs, inputs_)
     return x