Example #1
 def _cal_2d_pos_emb(self, hidden_states, bbox):
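     # pairwise relative x/y offsets between boxes are bucketed, one-hot encoded, and
     # projected by rel_pos_x_bias / rel_pos_y_bias into bias maps of shape
     # [batch, num_heads, seq_len, seq_len] (num_heads assumed from the bias layer's output size)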
     position_coord_x = bbox[:, :, 0]
     position_coord_y = bbox[:, :, 3]
     rel_pos_x_2d_mat = position_coord_x.unsqueeze(
         -2) - position_coord_x.unsqueeze(-1)
     rel_pos_y_2d_mat = position_coord_y.unsqueeze(
         -2) - position_coord_y.unsqueeze(-1)
     rel_pos_x = relative_position_bucket(
         rel_pos_x_2d_mat,
         num_buckets=self.rel_2d_pos_bins,
         max_distance=self.max_rel_2d_pos, )
     rel_pos_y = relative_position_bucket(
         rel_pos_y_2d_mat,
         num_buckets=self.rel_2d_pos_bins,
         max_distance=self.max_rel_2d_pos, )
     rel_pos_x = F.one_hot(
         rel_pos_x,
         num_classes=self.rel_2d_pos_onehot_size).astype(hidden_states.dtype)
     rel_pos_y = F.one_hot(
         rel_pos_y,
         num_classes=self.rel_2d_pos_onehot_size).astype(hidden_states.dtype)
     rel_pos_x = self.rel_pos_x_bias(rel_pos_x).transpose([0, 3, 1, 2])
     rel_pos_y = self.rel_pos_y_bias(rel_pos_y).transpose([0, 3, 1, 2])
     rel_2d_pos = rel_pos_x + rel_pos_y
     return rel_2d_pos
Example #2
def calculate_area(pred, label, num_classes, ignore_index=255):
    """
    Calculate intersection, prediction and label area

    Args:
        pred (Tensor): The prediction by model.
        label (Tensor): The ground truth of image.
        num_classes (int): The unique number of target classes.
        ignore_index (int): Specifies a target value that is ignored. Default: 255.

    Returns:
        Tensor: The intersection area of prediction and ground truth on all classes.
        Tensor: The prediction area on all classes.
        Tensor: The ground truth area on all classes.
    """
    if len(pred.shape) == 4:
        pred = paddle.squeeze(pred, axis=1)
    if len(label.shape) == 4:
        label = paddle.squeeze(label, axis=1)
    if not pred.shape == label.shape:
        raise ValueError('Shape of `pred` and `label` should be equal, '
                         'but they are {} and {}.'.format(
                             pred.shape, label.shape))

    # Delete ignore_index
    mask = label != ignore_index
    pred = pred + 1
    label = label + 1
    pred = pred * mask
    label = label * mask
    pred = F.one_hot(pred, num_classes + 1)
    label = F.one_hot(label, num_classes + 1)
    pred = pred[:, :, :, 1:]
    label = label[:, :, :, 1:]

    pred_area = []
    label_area = []
    intersect_area = []

    for i in range(num_classes):
        pred_i = pred[:, :, :, i]
        label_i = label[:, :, :, i]
        pred_area_i = paddle.sum(pred_i)
        label_area_i = paddle.sum(label_i)
        intersect_area_i = paddle.sum(pred_i * label_i)
        pred_area.append(pred_area_i)
        label_area.append(label_area_i)
        intersect_area.append(intersect_area_i)
    pred_area = paddle.concat(pred_area)
    label_area = paddle.concat(label_area)
    intersect_area = paddle.concat(intersect_area)
    return intersect_area, pred_area, label_area
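A minimal usage sketch (toy tensors with made-up values; assumes the Paddle version this snippet targets). The three returned areas are typically combined into a per-class IoU:

import paddle

pred = paddle.to_tensor([[[0, 1, 1, 2],
                          [0, 1, 2, 2],
                          [0, 0, 1, 2],
                          [0, 1, 1, 2]]], dtype='int64')
label = paddle.to_tensor([[[0, 1, 1, 2],
                           [0, 1, 1, 2],
                           [0, 0, 2, 2],
                           [0, 1, 1, 2]]], dtype='int64')
intersect_area, pred_area, label_area = calculate_area(pred, label, num_classes=3)
union = pred_area + label_area - intersect_area
iou = intersect_area / union        # per-class IoU
miou = paddle.mean(iou)             # mean IoU over classes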
Example #3
    def forward_sigmoid(self, logits_4D, labels_4D, do_rmi=False):
        """
        Using the sigmoid operation.
        Args:
            logits_4D : [N, C, H, W], dtype=float32
            labels_4D : [N, H, W], dtype=long
            do_rmi    : bool
        """
        label_mask_3D = labels_4D != self.ignore_index
        valid_onehot_labels_4D = paddle.cast(
            F.one_hot(paddle.cast(labels_4D, dtype='int64') *
                      paddle.cast(label_mask_3D, dtype='int64'),
                      num_classes=self.num_classes),
            dtype='float32')
        # label_mask_flat = paddle.cast(
        #     paddle.reshape(label_mask_3D, [-1]), dtype='float32')

        valid_onehot_labels_4D = valid_onehot_labels_4D * paddle.unsqueeze(
            label_mask_3D, axis=3)
        valid_onehot_labels_4D.stop_gradient = True
        probs_4D = F.sigmoid(logits_4D) * paddle.unsqueeze(label_mask_3D,
                                                           axis=1) + _CLIP_MIN

        valid_onehot_labels_4D = paddle.transpose(valid_onehot_labels_4D,
                                                  [0, 3, 1, 2])
        valid_onehot_labels_4D.stop_gradient = True
        rmi_loss = self.rmi_lower_bound(valid_onehot_labels_4D, probs_4D)

        return rmi_loss
Example #4
 def _gather_topk_pyramid(self, gt2anchor_distances, num_anchors_list,
                          pad_gt_mask):
     pad_gt_mask = pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool)
     gt2anchor_distances_list = paddle.split(gt2anchor_distances,
                                             num_anchors_list,
                                             axis=-1)
     num_anchors_index = np.cumsum(num_anchors_list).tolist()
     num_anchors_index = [
         0,
     ] + num_anchors_index[:-1]
     is_in_topk_list = []
     topk_idxs_list = []
     for distances, anchors_index in zip(gt2anchor_distances_list,
                                         num_anchors_index):
         num_anchors = distances.shape[-1]
         topk_metrics, topk_idxs = paddle.topk(distances,
                                               self.topk,
                                               axis=-1,
                                               largest=False)
         topk_idxs_list.append(topk_idxs + anchors_index)
         topk_idxs = paddle.where(pad_gt_mask, topk_idxs,
                                  paddle.zeros_like(topk_idxs))
         is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)
         is_in_topk = paddle.where(is_in_topk > 1,
                                   paddle.zeros_like(is_in_topk),
                                   is_in_topk)
         is_in_topk_list.append(is_in_topk.astype(
             gt2anchor_distances.dtype))
     is_in_topk_list = paddle.concat(is_in_topk_list, axis=-1)
     topk_idxs_list = paddle.concat(topk_idxs_list, axis=-1)
     return is_in_topk_list, topk_idxs_list
Example #5
def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9):
    r"""
    Args:
        metrics (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors
        topk (int): The number of top elements to look for along the axis.
        largest (bool): If True, select the largest elements (sort in descending
            order); otherwise select the smallest (ascending order). Default: True
        topk_mask (Tensor, bool|None): shape[B, n, topk], ignore bbox mask,
            Default: None
        eps (float): Default: 1e-9
    Returns:
        is_in_topk (Tensor, float32): shape[B, n, L], value=1. means selected
    """
    num_anchors = metrics.shape[-1]
    topk_metrics, topk_idxs = paddle.topk(metrics,
                                          topk,
                                          axis=-1,
                                          largest=largest)
    if topk_mask is None:
        topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > eps).tile(
            [1, 1, topk])
    topk_idxs = paddle.where(topk_mask, topk_idxs,
                             paddle.zeros_like(topk_idxs))
    is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)
    is_in_topk = paddle.where(is_in_topk > 1, paddle.zeros_like(is_in_topk),
                              is_in_topk)
    return is_in_topk.astype(metrics.dtype)
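A small sketch with hypothetical metrics (B=1, n=2 ground truths, L=5 anchors) showing the selection mask this returns:

import paddle

metrics = paddle.to_tensor([[[0.1, 0.9, 0.3, 0.8, 0.2],
                             [0.7, 0.2, 0.6, 0.1, 0.4]]])
is_in_topk = gather_topk_anchors(metrics, topk=2)
# -> [[[0., 1., 0., 1., 0.],
#      [1., 0., 1., 0., 0.]]]   each GT row marks its top-2 anchors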
Example #6
    def forward(self, input, label):
        feat_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, feat_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        logits = paddle.matmul(input, weight)
        if not self.training or label is None:
            return logits

        alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.)
        alpha_n = paddle.clip(logits.detach() + self.margin, min=0.)
        delta_p = 1 - self.margin
        delta_n = self.margin

        m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1])

        logits_p = alpha_p * (logits - delta_p)
        logits_n = alpha_n * (logits - delta_n)
        pre_logits = logits_p * m_hot + logits_n * (1 - m_hot)
        pre_logits = self.scale * pre_logits

        return pre_logits
Example #7
File: model.py  Project: wbj0110/models
    def forward(self, inputs):
        token_ids = inputs['token_ids']
        type_ids = inputs['type_ids']
        pos_ids = inputs['pos_ids']
        generation_mask = inputs['generation_mask']
        latent_id = inputs['latent_id']
        data_id = inputs['data_id']

        # [-1, 1, latent_type_size]
        latent_id = F.one_hot(latent_id, self.latent_type_size)
        # [-1, 1, hidden_size]
        latent_emb = paddle.matmul(
            latent_id, self.latent_weight, transpose_y=True)

        caches = self.plato2_encoder.gen_caches(token_ids)

        # [-1, seq_len + 1, hidden_size]
        enc_out, new_caches = self.plato2_encoder(
            caches, token_ids, type_ids, pos_ids, generation_mask, latent_emb)

        pred_ids = self.decode(inputs, new_caches)

        nsp_inputs = self.gen_nsp_input(token_ids, pred_ids)
        # [-1, 2]
        probs = self.nsp_predictor(nsp_inputs)

        return self.get_results(data_id, token_ids, pred_ids, probs)
Example #8
    def forward(self, input):
        dtype = input.dtype
        flatten = input.reshape([-1, self.dim])
        dist = (flatten.pow(2).sum(1, keepdim=True) -
                2 * flatten.transpose([0, 1]).matmul(self.embed) +
                self.embed.pow(2).sum(0, keepdim=True))
        embed_ind = (-dist).argmax(1)
        embed_onehot = F.one_hot(embed_ind, self.n_embed).astype(dtype)
        embed_ind = embed_ind.reshape(input.shape[:-1])
        quantize = F.embedding(embed_ind,
                               self.embed.transpose([1, 0]),
                               padding_idx=-1)

        if self.training:
            embed_onehot_sum = embed_onehot.sum(0)
            embed_sum = flatten.transpose([1, 0]).matmul(embed_onehot)

            if dist_fn.get_world_size() > 1:
                dist_fn.all_reduce(embed_onehot_sum)
                dist_fn.all_reduce(embed_sum)

            ema_inplace(self.cluster_size, embed_onehot_sum, self.decay)
            ema_inplace(self.embed_avg, embed_sum, self.decay)
            cluster_size = laplace_smoothing(
                self.cluster_size, self.n_embed,
                self.eps) * self.cluster_size.sum()
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
            self.embed[:] = embed_normalized

        loss = F.mse_loss(quantize.detach(), input) * self.commitment
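        # straight-through estimator: the forward pass keeps the quantized value,
        # while gradients flow back to `input` unchanged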
        quantize = input + (quantize - input).detach()
        return quantize, embed_ind, loss
Example #9
 def test_bad_x():
     label = fluid.layers.data(
         name="label",
         shape=[4],
         append_batch_size=False,
         dtype="float32")
     one_hot_label = functional.one_hot(x=label, num_classes=4)
Example #10
    def forward(self, input, label):
        # lambda = max(lambda_min,base*(1+gamma*iteration)^(-power))
        self.iter += 1
        self.lamb = max(
            self.LambdaMin,
            self.base * (1 + self.gamma * self.iter)**(-1 * self.power))

        # --------------------------- cos(theta) & phi(theta) ---------------------------
        self.linear.weight.Tensor = F.normalize(self.linear.weight)
        x = F.normalize(input)
        cos_theta = self.linear(x)
        cos_theta = cos_theta.clip(min=-1, max=1)
        cos_m_theta = self.mlambda[self.m](cos_theta)
        theta = cos_theta.acos()
        k = paddle.floor(self.m * theta / 3.14159265)
        phi_theta = paddle.to_tensor(((-1.0)**k) * cos_m_theta - 2 * k)
        NormOfFeature = paddle.norm(input, p=2, axis=1)

        # --------------------------- convert label to one-hot ---------------------------
        one_hot = F.one_hot(label, num_classes=phi_theta.shape[1])
        one_hot = paddle.reshape(one_hot,
                                 (phi_theta.shape[0], phi_theta.shape[1]))
        # --------------------------- Calculate output ---------------------------
        output = (one_hot * (phi_theta - cos_theta) /
                  (1 + self.lamb)) + cos_theta
        output *= NormOfFeature.reshape((-1, 1))

        return output
Example #11
def index_add_(parent, axis, idx, child):
    expend_dim = parent.shape[0]
    idx_one_hot = F.one_hot(idx.cast("int64"), expend_dim)
    child = paddle.expand_as(child.cast("float32").unsqueeze(-1), idx_one_hot)
    output = parent + (
        idx_one_hot.cast("float32").multiply(child)).sum(axis).squeeze()
    return output
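A minimal sketch with hypothetical values; note that duplicate indices accumulate:

import paddle

parent = paddle.zeros([5])
idx = paddle.to_tensor([1, 3, 3])
child = paddle.to_tensor([1.0, 2.0, 0.5])
out = index_add_(parent, axis=0, idx=idx, child=child)   # -> [0., 1., 0., 2.5, 0.]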
Example #12
def beam_search_step(state, logits, eos_id, beam_width, is_first_step,
                     length_penalty):
    """logits.shape == [B*W, V]"""
    _, vocab_size = logits.shape

    bsz, beam_width = state.log_probs.shape
    onehot_eos = P.cast(F.one_hot(P.ones([1], 'int64') * eos_id, vocab_size),
                        'int64')  #[1, V]

    probs = P.log(F.softmax(logits))  #[B*W, V]
    probs = mask_prob(probs, onehot_eos, state.finished)  #[B*W, V]
    allprobs = P.reshape(state.log_probs, [-1, 1]) + probs  #[B*W, V]

    not_finished = 1 - P.reshape(state.finished, [-1, 1])  #[B*W,1]
    not_eos = 1 - onehot_eos
    length_to_add = not_finished * not_eos  #[B*W,V]
    alllen = P.reshape(state.lengths, [-1, 1]) + length_to_add

    allprobs = P.reshape(allprobs, [-1, beam_width * vocab_size])
    alllen = P.reshape(alllen, [-1, beam_width * vocab_size])
    allscore = hyp_score(allprobs, alllen, length_penalty)
    if is_first_step:
        allscore = P.reshape(
            allscore,
            [bsz, beam_width, -1])[:, 0, :]  # first step only considers beam 0
    scores, idx = P.topk(allscore, k=beam_width)  #[B, W]
    next_beam_id = idx // vocab_size  #[B, W]
    next_word_id = idx % vocab_size

    gather_idx = P.concat(
        [P.nonzero(idx != -1)[:, :1],
         P.reshape(idx, [-1, 1])], 1)
    next_probs = P.reshape(P.gather_nd(allprobs, gather_idx), idx.shape)
    next_len = P.reshape(P.gather_nd(alllen, gather_idx), idx.shape)

    gather_idx = P.concat([
        P.nonzero(next_beam_id != -1)[:, :1],
        P.reshape(next_beam_id, [-1, 1])
    ], 1)
    next_finished = P.reshape(
        P.gather_nd(state.finished, gather_idx), state.finished.shape
    )  #[gather new beam state according to new beam id]
    #log.debug(gather_idx.numpy())
    #log.debug(state.finished.numpy())
    #log.debug(next_finished.numpy())

    next_finished += P.cast(next_word_id == eos_id, 'int64')
    next_finished = P.cast(next_finished > 0, 'int64')

    #log.debug(next_word_id.numpy())
    #log.debug(next_beam_id.numpy())
    next_state = BeamSearchState(log_probs=next_probs,
                                 lengths=next_len,
                                 finished=next_finished)
    output = BeamSearchOutput(scores=scores,
                              predicted_ids=next_word_id,
                              beam_parent_ids=next_beam_id)

    return output, next_state
Example #13
    def forward(self, x, class_id=None):
        if self.n_class > 0:
            class_id = (class_id % self.n_class).detach()
            class_id = F.one_hot(class_id, self.n_class).astype('float32')
            class_id = class_id.reshape([x.shape[0], -1])
            x = paddle.concat([x, class_id], 1)

        return super(ConditionalDeepConvGenerator, self).forward(x)
Example #14
 def test_api_with_dygraph(self):
     num_classes = 10
     label = np.array([
         np.random.randint(0, num_classes - 1) for i in range(6)
     ]).reshape([6, 1])
     with fluid.dygraph.guard():
         one_hot_label = functional.one_hot(
             x=fluid.dygraph.to_variable(label), num_classes=num_classes)
Example #15
 def _labelsmoothing(self, target, class_num):
     if target.shape[-1] != class_num:
         one_hot_target = F.one_hot(target, class_num)
     else:
         one_hot_target = target
     soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon)
     soft_target = paddle.reshape(soft_target, shape=[-1, class_num])
     return soft_target
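The same one-hot plus label-smoothing step outside the class, as a minimal sketch (the epsilon value is illustrative):

import paddle
import paddle.nn.functional as F

target = paddle.to_tensor([1, 0, 3])
one_hot_target = F.one_hot(target, num_classes=4)
soft_target = F.label_smooth(one_hot_target, epsilon=0.1)   # (1 - 0.1) * one_hot + 0.1 / 4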
Example #16
    def forward(self, x, class_id):
        if self.n_class > 0:
            class_id = (class_id % self.n_class).detach()
            class_id = F.one_hot(class_id, self.n_class).astype('float32')
            class_id = class_id.reshape([x.shape[0], -1, 1, 1])
            class_id = class_id.tile([1, 1, *x.shape[2:]])
            x = paddle.concat([x, class_id], 1)

        return super(NLayerDiscriminatorWithClassification, self).forward(x)
Example #17
    def forward(self, outputs, targets, length=None):
        targets = F.one_hot(targets, outputs.shape[1])
        try:
            predictions = self.loss_fn(outputs, targets)
        except TypeError:
            predictions = self.loss_fn(outputs)

        predictions = F.log_softmax(predictions, axis=1)
        loss = self.criterion(predictions, targets) / targets.sum()
        return loss
Example #18
File: base.py  Project: ylwb/PaddleVideo
    def mixup_loss(self, scores, labels_a, labels_b, lam):
        if self.ls_eps != 0:
            labels_a = F.one_hot(labels_a, self.num_classes)
            labels_a = F.label_smooth(labels_a, epsilon=self.ls_eps)
            labels_b = F.one_hot(labels_b, self.num_classes)
            labels_b = F.label_smooth(labels_b, epsilon=self.ls_eps)
            # reshape [bs, 1, num_classes] to [bs, num_classes]
            labels_a = paddle.reshape(labels_a, shape=[-1, self.num_classes])
            labels_b = paddle.reshape(labels_b, shape=[-1, self.num_classes])

        losses = dict()
        loss_a = self.loss_func(scores, labels_a, soft_label=True)
        loss_b = self.loss_func(scores, labels_b, soft_label=True)
        avg_loss_a = paddle.mean(
            loss_a)  #FIXME: call mean here or in last step?
        avg_loss_b = paddle.mean(loss_b)
        avg_loss = lam * avg_loss_a + (1 - lam) * avg_loss_b
        losses['loss'] = avg_loss

        return losses
Example #19
    def forward(self, logit, label):
        """
        Forward computation.

        Args:
            logit (Tensor): Logit tensor, the data type is float32, float64. Shape is
                (N, C), where C is number of classes, and if shape is more than 2D, this
                is (N, C, D1, D2,..., Dk), k >= 1.
            label (Tensor): Label tensor, the data type is int64. Shape is (N, C), where each
                value is 0 or 1, and if shape is more than 2D, this is
                (N, C, D1, D2,..., Dk), k >= 1.
        """
        if len(label.shape) != len(logit.shape):
            label = paddle.unsqueeze(label, 1)
        mask = (label != self.ignore_index)
        mask = paddle.cast(mask, 'float32')
        # label.shape should equal to the logit.shape
        if label.shape[1] != logit.shape[1]:
            label = label.squeeze(1)
            label = F.one_hot(label, logit.shape[1])
            label = label.transpose((0, 3, 1, 2))
        if isinstance(self.weight, str):
            pos_index = (label == 1)
            neg_index = (label == 0)
            pos_num = paddle.sum(pos_index.astype('float32'))
            neg_num = paddle.sum(neg_index.astype('float32'))
            sum_num = pos_num + neg_num
            weight_pos = 2 * neg_num / (sum_num + self.EPS)
            weight_neg = 2 * pos_num / (sum_num + self.EPS)
            weight = weight_pos * label + weight_neg * (1 - label)
        else:
            weight = self.weight
        if isinstance(self.pos_weight, str):
            pos_index = (label == 1)
            neg_index = (label == 0)
            pos_num = paddle.sum(pos_index.astype('float32'))
            neg_num = paddle.sum(neg_index.astype('float32'))
            sum_num = pos_num + neg_num
            pos_weight = 2 * neg_num / (sum_num + self.EPS)
        else:
            pos_weight = self.pos_weight
        label = label.astype('float32')
        loss = paddle.nn.functional.binary_cross_entropy_with_logits(
            logit,
            label,
            weight=weight,
            reduction='none',
            pos_weight=pos_weight)
        loss = loss * mask
        loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS)
        label.stop_gradient = True
        mask.stop_gradient = True

        return loss
Example #20
def compute_max_iou_gt(ious):
    r"""
    For each GT, find the anchor with the largest IOU.
    Args:
        ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors
    Returns:
        is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected
    """
    num_anchors = ious.shape[-1]
    max_iou_index = ious.argmax(axis=-1)
    is_max_iou = F.one_hot(max_iou_index, num_anchors)
    return is_max_iou.astype(ious.dtype)
Example #21
def compute_max_iou_anchor(ious):
    r"""
    For each anchor, find the GT with the largest IOU.
    Args:
        ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors
    Returns:
        is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected
    """
    num_max_boxes = ious.shape[-2]
    max_iou_index = ious.argmax(axis=-2)
    is_max_iou = F.one_hot(max_iou_index, num_max_boxes).transpose([0, 2, 1])
    return is_max_iou.astype(ious.dtype)
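A small sketch with hypothetical IoUs (B=1, n=2 ground truths, L=3 anchors):

import paddle

ious = paddle.to_tensor([[[0.1, 0.7, 0.3],
                          [0.6, 0.2, 0.9]]])
is_max_iou = compute_max_iou_anchor(ious)
# -> [[[0., 1., 0.],
#      [1., 0., 1.]]]   anchor 1 matches GT 0 best; anchors 0 and 2 match GT 1 best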
Example #22
    def get_loss(self, mask_logits, mask_label, mask_target, mask_weight):
        mask_label = F.one_hot(mask_label, self.num_classes).unsqueeze([2, 3])
        mask_label = paddle.expand_as(mask_label, mask_logits)
        mask_label.stop_gradient = True
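        # the one-hot class mask (expanded to the logits shape) plus nonzero/gather_nd
        # picks each RoI's mask prediction for its ground-truth class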
        mask_pred = paddle.gather_nd(mask_logits, paddle.nonzero(mask_label))
        shape = mask_logits.shape
        mask_pred = paddle.reshape(mask_pred, [shape[0], shape[2], shape[3]])

        mask_target = mask_target.cast('float32')
        mask_weight = mask_weight.unsqueeze([1, 2])
        loss_mask = F.binary_cross_entropy_with_logits(
            mask_pred, mask_target, weight=mask_weight, reduction="mean")
        return loss_mask
Example #23
File: modeling.py  Project: wbj0110/models
 def sample_from_softmax(self, logits, use_softmax_sample=True):
     if use_softmax_sample:
         #uniform_noise = paddle.uniform(logits.shape, dtype="float32", min=0, max=1)
         uniform_noise = paddle.rand(logits.shape, dtype="float32")
         gumbel_noise = -paddle.log(-paddle.log(uniform_noise + 1e-9) +
                                    1e-9)
     else:
         gumbel_noise = paddle.zeros_like(logits)
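     # Gumbel-max trick: argmax of (logits + gumbel_noise) draws a sample from softmax(logits)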
     # softmax_sample equal to sampled_tokids.unsqueeze(-1)
     softmax_sample = paddle.argmax(F.softmax(logits + gumbel_noise),
                                    axis=-1)
     # one hot
     return F.one_hot(softmax_sample, logits.shape[-1])
Example #24
    def _run(self, num_classes):
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        one_hot_label = functional.one_hot(x=label, num_classes=num_classes)

        place = fluid.CPUPlace()
        label_data = np.array([np.random.randint(0, 10 - 1)
                               for i in range(6)]).reshape([6, 1])

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        ret = exe.run(feed={'label': label_data, },
                      fetch_list=[one_hot_label],
                      return_numpy=False)
Example #25
    def _post_process_loss(self, logit, label, semantic_weights, loss):
        """
        Consider mask and top_k to calculate the final loss.

        Args:
            logit (Tensor): Logit tensor, the data type is float32, float64. Shape is
                (N, C), where C is number of classes, and if shape is more than 2D, this
                is (N, C, D1, D2,..., Dk), k >= 1.
            label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
                value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
                (N, D1, D2,..., Dk), k >= 1.
            semantic_weights (Tensor, optional): Weights about loss for each pixels,
                shape is the same as label.
            loss (Tensor): Loss tensor which is the output of cross_entropy. If soft_label
                is False in cross_entropy, the shape of loss should be the same as the label.
                If soft_label is True in cross_entropy, the shape of loss should be
                (N, D1, D2,..., Dk, 1).
        Returns:
            (Tensor): The average loss.
        """
        mask = label != self.ignore_index
        mask = paddle.cast(mask, 'float32')
        label.stop_gradient = True
        mask.stop_gradient = True

        if loss.ndim > mask.ndim:
            loss = paddle.squeeze(loss, axis=-1)
        loss = loss * mask
        if semantic_weights is not None:
            loss = loss * semantic_weights

        if self.weight is not None:
            _one_hot = F.one_hot(label, logit.shape[-1])
            coef = paddle.sum(_one_hot * self.weight, axis=-1)
        else:
            coef = paddle.ones_like(label)

        if self.top_k_percent_pixels == 1.0:
            avg_loss = paddle.mean(loss) / (paddle.mean(mask * coef) +
                                            self.EPS)
        else:
            loss = loss.reshape((-1, ))
            top_k_pixels = int(self.top_k_percent_pixels * loss.numel())
            loss, indices = paddle.topk(loss, top_k_pixels)
            coef = coef.reshape((-1, ))
            coef = paddle.gather(coef, indices)
            coef.stop_gradient = True
            coef = coef.astype('float32')
            avg_loss = loss.mean() / (paddle.mean(coef) + self.EPS)

        return avg_loss
Example #26
def compute_class_connectiveity(pred_conn, label_conn, pred_num_conn,
                                label_num_conn, pred, real_label_num,
                                real_pred_num, zero):

    pred_conn = paddle.to_tensor(pred_conn)
    label_conn = paddle.to_tensor(label_conn)
    pred_conn = F.one_hot(pred_conn, pred_num_conn)
    label_conn = F.one_hot(label_conn, label_num_conn)

    ious = paddle.zeros((real_label_num, real_pred_num))
    pair_conn_sum = paddle.to_tensor([0.], stop_gradient=False)

    for i in range(1, label_num_conn):
        label_i = label_conn[:, :, i]

        pair_conn = paddle.to_tensor([0.], stop_gradient=False)
        pair_conn_num = 0

        for j in range(1, pred_num_conn):
            pred_j_mask = pred_conn[:, :, j]
            pred_j = pred_j_mask * pred

            iou = compute_iou(pred_j, label_i, zero)
            ious[i - 1, j - 1] = iou
            if iou != 0:
                pair_conn += iou
                pair_conn_num += 1

        if pair_conn_num != 0:
            pair_conn_sum += pair_conn / pair_conn_num
    lone_pred_num = 0

    pred_sum = paddle.sum(ious, axis=0)
    for m in range(0, real_pred_num):
        if pred_sum[m] == 0:
            lone_pred_num += 1
    img_connectivity = pair_conn_sum / (real_label_num + lone_pred_num)
    return img_connectivity
Example #27
    def forward(self, input, targets):
        relations, texts, x = input
        node_nums, char_nums = [], []
        for text in texts:
            node_nums.append(text.shape[0])
            char_nums.append(paddle.sum((text > -1).astype(int), axis=-1))

        max_num = max([char_num.max() for char_num in char_nums])
        all_nodes = paddle.concat([
            paddle.concat(
                [text,
                 paddle.zeros((text.shape[0], max_num - text.shape[1]))], -1)
            for text in texts
        ])
        temp = paddle.clip(all_nodes, min=0).astype(int)
        embed_nodes = self.node_embed(temp)
        rnn_nodes, _ = self.rnn(embed_nodes)

        b, h, w = rnn_nodes.shape
        nodes = paddle.zeros([b, w])
        all_nums = paddle.concat(char_nums)
        valid = paddle.nonzero((all_nums > 0).astype(int))
        temp_all_nums = (paddle.gather(all_nums, valid) -
                         1).unsqueeze(-1).unsqueeze(-1)
        temp_all_nums = paddle.expand(temp_all_nums, [
            temp_all_nums.shape[0], temp_all_nums.shape[1], rnn_nodes.shape[-1]
        ])
        temp_all_nodes = paddle.gather(rnn_nodes, valid)
        N, C, A = temp_all_nodes.shape
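        # one-hot over the index of the last valid character selects, for each node,
        # the RNN output at that time step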
        one_hot = F.one_hot(temp_all_nums[:, 0, :],
                            num_classes=C).transpose([0, 2, 1])
        one_hot = paddle.multiply(temp_all_nodes,
                                  one_hot.astype("float32")).sum(axis=1,
                                                                 keepdim=True)
        t = one_hot.expand([N, 1, A]).squeeze(1)
        nodes = paddle.scatter(nodes, valid.squeeze(1), t)

        if x is not None:
            nodes = self.fusion([x, nodes])

        all_edges = paddle.concat(
            [rel.reshape([-1, rel.shape[-1]]) for rel in relations])
        embed_edges = self.edge_embed(all_edges.astype('float32'))
        embed_edges = F.normalize(embed_edges)

        for gnn_layer in self.gnn_layers:
            nodes, cat_nodes = gnn_layer(nodes, embed_edges, node_nums)

        node_cls, edge_cls = self.node_cls(nodes), self.edge_cls(cat_nodes)
        return node_cls, edge_cls
Example #28
    def __call__(self, ins_pred_list, ins_label_list, cate_preds, cate_labels,
                 num_ins):
        """
        Get loss of network of SOLOv2.
        Args:
            ins_pred_list (list): Variable list of instance branch output.
            ins_label_list (list): List of instance labels per batch.
            cate_preds (list): Concatenated Variable list of category branch output.
            cate_labels (list): Concatenated list of category labels per batch.
            num_ins (int): Number of positive samples in a mini-batch.
        Returns:
            loss_ins (Variable): The instance loss Variable of SOLOv2 network.
            loss_cate (Variable): The category loss Variable of SOLOv2 network.
        """

        #1. Use dice_loss to calculate the instance loss
        loss_ins = []
        total_weights = paddle.zeros(shape=[1], dtype='float32')
        for input, target in zip(ins_pred_list, ins_label_list):
            if input is None:
                continue
            target = paddle.cast(target, 'float32')
            target = paddle.reshape(
                target,
                shape=[-1,
                       paddle.shape(input)[-2],
                       paddle.shape(input)[-1]])
            weights = paddle.cast(
                paddle.sum(target, axis=[1, 2]) > 0, 'float32')
            input = F.sigmoid(input)
            dice_out = paddle.multiply(self._dice_loss(input, target), weights)
            total_weights += paddle.sum(weights)
            loss_ins.append(dice_out)
        loss_ins = paddle.sum(paddle.concat(loss_ins)) / total_weights
        loss_ins = loss_ins * self.ins_loss_weight

        #2. Use sigmoid_focal_loss to calculate the category loss
        # expand onehot labels
        num_classes = cate_preds.shape[-1]
        cate_labels_bin = F.one_hot(cate_labels, num_classes=num_classes + 1)
        cate_labels_bin = cate_labels_bin[:, 1:]

        loss_cate = F.sigmoid_focal_loss(cate_preds,
                                         label=cate_labels_bin,
                                         normalizer=num_ins + 1.,
                                         gamma=self.focal_loss_gamma,
                                         alpha=self.focal_loss_alpha)

        return loss_ins, loss_cate
Example #29
File: base.py  Project: ylwb/PaddleVideo
    def loss(self, scores, labels, reduce_sum=False, **kwargs):
        """Calculate the loss accroding to the model output ```scores```,
           and the target ```labels```.

        Args:
            scores (paddle.Tensor): The output of the model.
            labels (paddle.Tensor): The target output of the model.

        Returns:
            losses (dict): A dict containing field 'loss'(mandatory) and 'top1_acc', 'top5_acc'(optional).

        """
        if len(labels) == 1:
            labels = labels[0]
        elif len(labels) == 3:
            labels_a, labels_b, lam = labels
            return self.mixup_loss(scores, labels_a, labels_b, lam)
        else:
            raise NotImplementedError

        if self.ls_eps != 0.:
            labels = F.one_hot(labels, self.num_classes)
            labels = F.label_smooth(labels, epsilon=self.ls_eps)
            # reshape [bs, 1, num_classes] to [bs, num_classes]
            #NOTE: maybe squeeze is helpful for understanding.
            labels = paddle.reshape(labels, shape=[-1, self.num_classes])
        #labels.stop_gradient = True  #XXX(shipping): check necessary
        losses = dict()
        #NOTE(shipping): F.crossentropy include logsoftmax and nllloss !
        #NOTE(shipping): check the performance of F.crossentropy
        loss = self.loss_func(scores, labels, **kwargs)
        avg_loss = paddle.mean(loss)
        top1 = paddle.metric.accuracy(input=scores, label=labels, k=1)
        top5 = paddle.metric.accuracy(input=scores, label=labels, k=5)

        _, world_size = get_dist_info()

        #NOTE(shipping): deal with multi cards validate
        if world_size > 1 and reduce_sum:
            top1 = paddle.distributed.all_reduce(
                top1, op=paddle.distributed.ReduceOp.SUM) / world_size
            top5 = paddle.distributed.all_reduce(
                top5, op=paddle.distributed.ReduceOp.SUM) / world_size

        losses['top1'] = top1
        losses['top5'] = top5
        losses['loss'] = avg_loss

        return losses
Example #30
    def forward(self, input, label):
        # --------------------------- cos(theta) & phi(theta) ---------------------------
        self.linear.weight.Tensor = F.normalize(self.linear.weight)
        x = F.normalize(input)
        cosine = self.linear(x)
        phi = cosine - self.m
        # --------------------------- convert label to one-hot ---------------------------
        label = label.astype(dtype='int64').flatten()
        one_hot = F.one_hot(label, num_classes=phi.shape[1])
        # ------------- torch.where(out_i = x_i if condition_i else y_i) -------------
        output = (one_hot * phi) + (
            (1.0 - one_hot) * cosine
        )  # you can use torch.where if your torch.__version__ is 0.4
        output *= self.s

        return output