Example #1
    def __call__(self, proposals, mask_logits, targets):
        """
        Arguments:
            proposals (list[BoxList])
            mask_logits (Tensor)
            targets (list[BoxList])

        Return:
            mask_loss (Tensor): scalar tensor containing the loss
        """
        labels, mask_targets = self.prepare_targets(proposals, targets)

        labels = cat(labels, dim=0)
        mask_targets = cat(mask_targets, dim=0)

        positive_inds = jt.nonzero(labels > 0).squeeze(1)
        labels_pos = labels[positive_inds]

        # torch.mean (in binary_cross_entropy_with_logits) doesn't
        # accept empty tensors, so handle it separately
        if mask_targets.numel() == 0:
            return mask_logits.sum() * 0
        binary_cross_entropy_with_logits = nn.BCEWithLogitsLoss()
        mask_loss = binary_cross_entropy_with_logits(
            mask_logits[positive_inds, labels_pos], mask_targets)
        return mask_loss
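
For reference, BCEWithLogitsLoss fuses a sigmoid with binary cross-entropy, and the `mask_logits.sum() * 0` guard above returns a zero loss that stays attached to the graph when there are no positive targets. A minimal sketch (toy logits and targets invented for illustration) comparing the module against a manual sigmoid-plus-BCE computation:

import jittor as jt
from jittor import nn

logits = jt.array([0.3, -1.2, 2.0])    # invented values
targets = jt.array([1.0, 0.0, 1.0])

loss_module = nn.BCEWithLogitsLoss()(logits, targets)

# manual equivalent: sigmoid followed by binary cross-entropy, mean-reduced
probs = logits.sigmoid()
loss_manual = -(targets * jt.log(probs) + (1 - targets) * jt.log(1 - probs)).mean()

print(loss_module.numpy(), loss_manual.numpy())  # the two values should agree closely
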
Example #2
    def __call__(self, proposals, mask_logits, targets):
        """
        Arguments:
            proposals (list[BoxList])
            mask_logits (Tensor)
            targets (list[BoxList])

        Return:
            mask_loss (Tensor): scalar tensor containing the loss
        """
        labels, mask_targets, mask_ratios = self.prepare_targets(
            proposals, targets)

        labels = cat(labels, dim=0)
        mask_targets = cat(mask_targets, dim=0)

        positive_inds = jt.nonzero(labels > 0).squeeze(1)
        labels_pos = labels[positive_inds]

        # torch.mean (in binary_cross_entropy_with_logits) doesn't
        # accept empty tensors, so handle it separately
        if mask_targets.numel() == 0:
            if not self.maskiou_on:
                return mask_logits.sum() * 0
            else:
                selected_index = jt.arange(mask_logits.shape[0])
                selected_mask = mask_logits[selected_index, labels]
                mask_num, mask_h, mask_w = selected_mask.shape
                selected_mask = selected_mask.reshape(mask_num, 1, mask_h,
                                                      mask_w)
                return mask_logits.sum() * 0, selected_mask, labels, None
        if self.maskiou_on:
            mask_ratios = cat(mask_ratios, dim=0)
            value_eps = 1e-10 * jt.ones((mask_targets.shape[0], ))
            mask_ratios = jt.maximum(mask_ratios, value_eps)
            pred_masks = mask_logits[positive_inds, labels_pos]
            pred_masks[:] = pred_masks > 0
            mask_targets_full_area = mask_targets.sum(
                dims=[1, 2]) / mask_ratios
            mask_ovr = pred_masks * mask_targets
            mask_ovr_area = mask_ovr.sum(dims=[1, 2])
            mask_union_area = pred_masks.sum(
                dims=[1, 2]) + mask_targets_full_area - mask_ovr_area
            value_1 = jt.ones((pred_masks.shape[0], ))
            value_0 = jt.zeros((pred_masks.shape[0], ))
            mask_union_area = jt.maximum(mask_union_area, value_1)
            mask_ovr_area = jt.maximum(mask_ovr_area, value_0)
            maskiou_targets = mask_ovr_area / mask_union_area

        binary_cross_entropy_with_logits = nn.BCEWithLogitsLoss()
        mask_loss = binary_cross_entropy_with_logits(
            mask_logits[positive_inds, labels_pos], mask_targets)
        if not self.maskiou_on:
            return mask_loss
        else:
            selected_index = jt.index((mask_logits.shape[0], ), dim=0)
            selected_mask = mask_logits[selected_index, labels]
            mask_num, mask_h, mask_w = selected_mask.shape
            selected_mask = selected_mask.reshape(mask_num, 1, mask_h, mask_w)
            selected_mask = selected_mask.sigmoid()
            return mask_loss, selected_mask, labels, maskiou_targets
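
The maskiou branch above builds a mask IoU regression target by binarizing the predicted mask at logit 0 and comparing it with the ground-truth mask, recovering the target's full area from the crop ratio. A toy sketch of the same intersection-over-union arithmetic (values invented, crop-ratio correction omitted):

import jittor as jt

pred_logits = jt.array([[0.8, -0.5], [1.2, -2.0]])   # one 2x2 predicted mask, made-up logits
gt_mask = jt.array([[1.0, 1.0], [1.0, 0.0]])         # matching 2x2 ground-truth mask

pred_mask = (pred_logits > 0).float()                # binarize at logit 0 (probability 0.5)
inter = (pred_mask * gt_mask).sum()                  # overlap area
union = pred_mask.sum() + gt_mask.sum() - inter      # union area
maskiou_target = inter / jt.maximum(union, jt.ones_like(union))  # avoid division by zero
print(maskiou_target.numpy())
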
Example #3
    def semantic_segmentation_loss(self,
                                   segment_data,
                                   mask_t,
                                   class_t,
                                   interpolation_mode='bilinear'):
        # Note num_classes here is without the background class so cfg.num_classes-1
        batch_size, num_classes, mask_h, mask_w = segment_data.shape
        loss_s = 0

        for idx in range(batch_size):
            cur_segment = segment_data[idx]
            cur_class_t = class_t[idx]

            with jt.no_grad():
                downsampled_masks = nn.interpolate(
                    mask_t[idx].unsqueeze(0), (mask_h, mask_w),
                    mode=interpolation_mode,
                    align_corners=False).squeeze(0)
                downsampled_masks = (downsampled_masks > 0.5).float()

                # Construct Semantic Segmentation
                segment_t = jt.zeros_like(cur_segment)
                segment_t.stop_grad()
                for obj_idx in range(downsampled_masks.shape[0]):
                    segment_t[cur_class_t[obj_idx]] = jt.maximum(
                        segment_t[cur_class_t[obj_idx]],
                        downsampled_masks[obj_idx])

            loss_s += nn.BCEWithLogitsLoss(size_average=False)(cur_segment,
                                                               segment_t)

        return loss_s / mask_h / mask_w * cfg.semantic_segmentation_alpha
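
The inner loop above rasterizes a per-class semantic target by taking an elementwise maximum over all instance masks of that class. A small illustration with invented masks and class ids:

import jittor as jt

num_classes, h, w = 3, 2, 2
inst_masks = jt.array([[[1.0, 0.0], [0.0, 0.0]],      # instance 0
                       [[0.0, 1.0], [0.0, 0.0]]])     # instance 1
inst_classes = [2, 2]                                 # both instances belong to class 2

segment_t = jt.zeros((num_classes, h, w))
for obj_idx in range(inst_masks.shape[0]):
    c = inst_classes[obj_idx]
    segment_t[c] = jt.maximum(segment_t[c], inst_masks[obj_idx])
# segment_t[2] now covers the union of the two instance masks
print(segment_t.numpy())
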
Example #4
    def test_bce_with_logits_loss(self):
        jt_loss = jnn.BCEWithLogitsLoss()
        tc_loss = tnn.BCEWithLogitsLoss()
        output = np.random.randn(100).astype(np.float32)
        target = np.random.randint(2, size=(100, )).astype(np.float32)
        jt_y = jt_loss(jt.array(output), jt.array(target))
        tc_y = tc_loss(torch.from_numpy(output), torch.from_numpy(target))
        assert np.allclose(jt_y.numpy(), tc_y.numpy())
Example #5
    def __init__(self, cfg):
        self.cls_loss_func = SigmoidFocalLoss(
            cfg.MODEL.FCOS.LOSS_GAMMA,
            cfg.MODEL.FCOS.LOSS_ALPHA
        )
        self.center_sample = cfg.MODEL.FCOS.CENTER_SAMPLE
        self.strides = cfg.MODEL.FCOS.FPN_STRIDES
        self.radius = cfg.MODEL.FCOS.POS_RADIUS
        self.loc_loss_type = cfg.MODEL.FCOS.LOC_LOSS_TYPE
        # we make use of IoU loss for bounding box regression,
        # but we found that L1 in log scale can yield a similar performance
        self.box_reg_loss_func = IOULoss(self.loc_loss_type)
        self.centerness_loss_func = nn.BCEWithLogitsLoss()
        self.dense_points = cfg.MODEL.FCOS.DENSE_POINTS
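
Note that the centerness head configured above is trained with BCEWithLogitsLoss against soft targets in [0, 1] rather than hard 0/1 labels; binary cross-entropy is well defined for fractional targets. A minimal sketch with invented values:

import jittor as jt
from jittor import nn

centerness_logits = jt.array([2.0, -1.0, 0.5])    # invented predictions
centerness_targets = jt.array([0.9, 0.1, 0.5])    # soft targets in [0, 1], not hard labels

loss = nn.BCEWithLogitsLoss()(centerness_logits, centerness_targets)
print(loss.numpy())
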
Example #6
    def __init__(self, cfg):
        self.cls_loss_func = SigmoidFocalLoss(cfg.MODEL.EMBED_MASK.LOSS_GAMMA,
                                              cfg.MODEL.EMBED_MASK.LOSS_ALPHA)
        self.fpn_strides = cfg.MODEL.EMBED_MASK.FPN_STRIDES
        self.center_on = cfg.MODEL.EMBED_MASK.CENTER_ON
        self.center_sampling_radius = cfg.MODEL.EMBED_MASK.CENTER_POS_RADIOS
        self.iou_loss_type = cfg.MODEL.EMBED_MASK.IOU_LOSS_TYPE
        self.norm_reg_targets = cfg.MODEL.EMBED_MASK.NORM_REG_TARGETS

        self.box_reg_loss_func = IOULoss(self.iou_loss_type)
        self.centerness_loss_func = nn.BCEWithLogitsLoss(size_average=False)

        ########## mask prediction ############
        self.mask_loss_func = LovaszHinge(reduction='none')

        self.mask_scale_factor = cfg.MODEL.EMBED_MASK.MASK_SCALE_FACTOR
        self.object_sizes_of_interest = [
            [-1, cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[0]],
            [
                cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[0],
                cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[1]
            ],
            [
                cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[1],
                cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[2]
            ],
            [
                cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[2],
                cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[3]
            ], [cfg.MODEL.EMBED_MASK.FPN_INTEREST_SPLIT[3], INF]
        ]
        self.sample_in_mask = cfg.MODEL.EMBED_MASK.SAMPLE_IN_MASK

        self.box_padding = cfg.MODEL.EMBED_MASK.BOX_PADDING

        self.fix_margin = cfg.MODEL.EMBED_MASK.FIX_MARGIN
        prior_margin = cfg.MODEL.EMBED_MASK.PRIOR_MARGIN
        self.init_margin = -math.log(0.5) / (prior_margin**2)

        self.loss_mask_alpha = cfg.MODEL.EMBED_MASK.LOSS_MASK_ALPHA
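
The init_margin above is the margin for which a squared distance of prior_margin**2 maps to probability 0.5 under an exp(-margin * d**2) assignment. A quick numeric check (the prior_margin value is invented):

import math

prior_margin = 1.5                                    # illustrative value
init_margin = -math.log(0.5) / (prior_margin ** 2)    # same formula as above
# plugging the prior margin back in recovers probability 0.5
print(math.exp(-init_margin * prior_margin ** 2))     # 0.5
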
Example #7
        def discriminator_block(in_filters, out_filters, bn=True):
            block = [
                nn.Conv(in_filters, out_filters, 3, stride=2, padding=1),
                nn.LeakyReLU(scale=0.2),
                nn.Dropout(p=0.25),
            ]
            if bn:
                block.append(nn.BatchNorm(out_filters, eps=0.8))
            return block
        self.model = nn.Sequential(
            *discriminator_block(opt.channels, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )
        ds_size = (opt.img_size // (2 ** 4))
        self.adv_layer = nn.Sequential(nn.Linear((128 * (ds_size ** 2)), 1))

    def execute(self, img):
        out = self.model(img)
        out = out.view((out.shape[0], -1))
        validity = self.adv_layer(out)
        return validity

adversarial_loss = nn.BCEWithLogitsLoss()

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

# Configure data loader
transform = transform.Compose([
    transform.Resize(size=opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
dataloader = MNIST(train=True, transform=transform).set_attrs(batch_size=opt.batch_size, shuffle=True)

# Optimizers
optimizer_G = jt.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
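
A sketch of how adversarial_loss is typically applied to discriminator and generator updates (the logits below are random stand-ins, not outputs of the models defined above):

import jittor as jt
from jittor import nn

bce = nn.BCEWithLogitsLoss()
batch_size = 4
d_out_real = jt.randn(batch_size, 1)   # stand-in discriminator logits on real images
d_out_fake = jt.randn(batch_size, 1)   # stand-in discriminator logits on generated images

valid = jt.ones((batch_size, 1))       # target 1 for real samples
fake = jt.zeros((batch_size, 1))       # target 0 for generated samples

d_loss = (bce(d_out_real, valid) + bce(d_out_fake, fake)) / 2
g_loss = bce(d_out_fake, valid)        # the generator wants fakes to be judged real
print(d_loss.numpy(), g_loss.numpy())
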
Example #8
    min_ = img.min()
    max_ = img.max()
    img = (img - min_) / (max_ - min_) * 255
    img = img.transpose((1, 2, 0))
    if C == 3:
        # img = img[:,:,::-1]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.imwrite(path, img)


c_dim = len(opt.selected_attrs)
img_shape = (opt.channels, opt.img_height, opt.img_width)

# Loss functions
criterion_cycle = nn.L1Loss()
bce_with_logits_loss = nn.BCEWithLogitsLoss(size_average=False)


def criterion_cls(logit, target):
    return bce_with_logits_loss(logit, target) / logit.size(0)


# Loss weights
lambda_cls = 1
lambda_rec = 10
lambda_gp = 10

# Initialize generator and discriminator
generator = GeneratorResNet(img_shape=img_shape,
                            res_blocks=opt.residual_blocks,
                            c_dim=c_dim)
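
criterion_cls above sums the BCE over every element and divides by the batch size, so each sample contributes the sum of its c_dim attribute losses and the result is averaged over the batch. A quick check with invented values:

import jittor as jt
from jittor import nn

logits = jt.array([[0.2, -1.0], [1.5, 0.3]])   # 2 samples, 2 attribute classes (invented)
targets = jt.array([[1.0, 0.0], [0.0, 1.0]])

summed = nn.BCEWithLogitsLoss(size_average=False)(logits, targets)
per_batch = summed / logits.shape[0]           # same normalization as criterion_cls

mean_all = nn.BCEWithLogitsLoss()(logits, targets)
print(per_batch.numpy(), (mean_all * logits.shape[1]).numpy())  # identical up to float error
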
Example #9
    def class_existence_loss(self, class_data, class_existence_t):
        return cfg.class_existence_alpha * nn.BCEWithLogitsLoss(
            size_average=False)(class_data, class_existence_t)
Example #10
def compute_loss(p, targets, model):  # predictions, targets, model
    lcls, lbox, lobj = jt.zeros((1, )), jt.zeros((1, )), jt.zeros((1, ))
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=jt.array(
        [h['cls_pw']]))  # weight=model.class_weights)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=jt.array([h['obj_pw']]))

    # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # Focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # Losses
    balance = [4.0, 1.0, 0.4, 0.1]  # P3-P6
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = jt.zeros_like(pi[..., 0])  # target obj

        n = b.shape[0]  # number of targets
        if n:
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # Regression
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2)**2 * anchors[i]
            pbox = jt.contrib.concat((pxy, pwh), 1)  # predicted box
            iou = bbox_iou(pbox.transpose(1, 0),
                           tbox[i],
                           x1y1x2y2=False,
                           CIoU=True)  # iou(prediction, target)
            lbox += (1.0 - iou).mean()  # iou loss

            # Objectness
            tobj[b, a, gj, gi] = (1.0 - model.gr) + \
                model.gr * iou.detach().clamp(0).cast(tobj.dtype)  # iou ratio

            # Classification
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = jt.full_like(ps[:, 5:], cn)  # targets
                t[list(range(n)), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in jt.contrib.concat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj) * balance[i]  # obj loss

    lbox *= h['box']
    lobj *= h['obj']
    lcls *= h['cls']
    bs = tobj.shape[0]  # batch size

    loss = lbox + lobj + lcls
    return loss * bs, jt.contrib.concat((lbox, lobj, lcls, loss)).detach()
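
The pos_weight argument passed to BCEcls and BCEobj above rescales only the positive (target = 1) term of the loss, compensating for the imbalance between object and background cells. A minimal illustration with made-up numbers:

import jittor as jt
from jittor import nn

logits = jt.array([0.5, -0.5])
targets = jt.array([1.0, 0.0])

plain = nn.BCEWithLogitsLoss()(logits, targets)
weighted = nn.BCEWithLogitsLoss(pos_weight=jt.array([2.0]))(logits, targets)
# only the loss term of the positive target is doubled, so weighted > plain
print(plain.numpy(), weighted.numpy())
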
Example #11
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(
            reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha
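
Only the constructor is shown above. In the upstream YOLOv5 formulation, the element-wise losses obtained with reduction='none' are down-weighted where the prediction is confidently positive but the label is 0 (a likely missing annotation) before mean reduction; the following is a rough sketch of that idea (function name and values are hypothetical, not code from this repository):

import jittor as jt
from jittor import nn

def bce_blur_sketch(pred_logits, true, alpha=0.05):
    # element-wise BCE, then damp terms where the prediction is confidently positive
    # while the label is 0
    loss = nn.BCEWithLogitsLoss(reduction='none')(pred_logits, true)
    pred = pred_logits.sigmoid()
    dx = pred - true                                   # dx near 1 => confident false positive
    alpha_factor = 1 - jt.exp((dx - 1) / (alpha + 1e-4))
    return (loss * alpha_factor).mean()

print(bce_blur_sketch(jt.array([3.0, -2.0]), jt.array([0.0, 0.0])).numpy())
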