Example #1
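    # CenterNetHead.__init__ (mmdet): builds the center-heatmap, width-height
    # and offset branches, and constructs their losses via build_loss.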
    def __init__(self,
                 in_channel,
                 feat_channel,
                 num_classes,
                 loss_center_heatmap=dict(type='GaussianFocalLoss',
                                          loss_weight=1.0),
                 loss_wh=dict(type='L1Loss', loss_weight=0.1),
                 loss_offset=dict(type='L1Loss', loss_weight=1.0),
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=None):
        super(CenterNetHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.heatmap_head = self._build_head(in_channel, feat_channel,
                                             num_classes)
        self.wh_head = self._build_head(in_channel, feat_channel, 2)
        self.offset_head = self._build_head(in_channel, feat_channel, 2)

        self.loss_center_heatmap = build_loss(loss_center_heatmap)
        self.loss_wh = build_loss(loss_wh)
        self.loss_offset = build_loss(loss_offset)

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.fp16_enabled = False
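For reference, a minimal instantiation sketch (the channel and class counts are illustrative assumptions, not values from this snippet, and the import path assumes mmdet 2.x):

# Hypothetical usage: 64-channel neck features and 80 COCO classes are assumed.
from mmdet.models.dense_heads import CenterNetHead

head = CenterNetHead(in_channel=64, feat_channel=64, num_classes=80)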
Example #2
import pytest
import torch

from mmdet.models import build_loss


def test_kd_loss():
    # test that the temperature T must be at least 1
    with pytest.raises(AssertionError):
        loss_cfg = dict(type='KnowledgeDistillationKLDivLoss',
                        loss_weight=1.0,
                        T=0.5)
        build_loss(loss_cfg)

    # test that pred and target should be of the same size
    loss_cls_cfg = dict(type='KnowledgeDistillationKLDivLoss',
                        loss_weight=1.0,
                        T=1)
    loss_cls = build_loss(loss_cls_cfg)
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[100, -100]])
        fake_label = torch.Tensor([1]).long()
        loss_cls(fake_pred, fake_label)

    # test the calculation
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100.0, 100.0]])
    fake_target = torch.Tensor([[1.0, 1.0]])
    assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))

    # test the loss with weights
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100.0, -100.0], [100.0, 100.0]])
    fake_target = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
    fake_weight = torch.Tensor([0.0, 1.0])
    assert torch.allclose(loss_cls(fake_pred, fake_target, fake_weight),
                          torch.tensor(0.0))
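
Both zero-loss checks hold because the loss applies a temperature-scaled softmax to the prediction and to the soft target before computing KL divergence: rows with identical logits produce identical distributions (KL divergence 0), and in the weighted case the only mismatched row is zeroed out by its 0.0 weight.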
Example #3
import pytest
import torch

from mmdet.models import build_loss


def test_ce_loss():
    # use_mask and use_sigmoid cannot be true at the same time
    with pytest.raises(AssertionError):
        loss_cfg = dict(
            type='CrossEntropyLoss',
            use_mask=True,
            use_sigmoid=True,
            loss_weight=1.0)
        build_loss(loss_cfg)

    # test loss with class weights
    loss_cls_cfg = dict(
        type='CrossEntropyLoss',
        use_sigmoid=False,
        class_weight=[0.8, 0.2],
        loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100, -100]])
    fake_label = torch.Tensor([1]).long()
    assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))

    loss_cls_cfg = dict(
        type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))
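
Where the expected values come from: with logits [100, -100] and label 1, the cross-entropy is -log(e^{-100} / (e^{100} + e^{-100})) ≈ 200. The class_weight entry for class 1 (0.2) scales this to 40; without class weights the unscaled 200 is returned.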
Example #4
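    # QuasiDenseEmbedHead.__init__ (mmtracking): adds an embedding FC layer and
    # builds the main track loss plus an optional auxiliary loss via build_loss.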
    def __init__(self,
                 embed_channels=256,
                 softmax_temp=-1,
                 loss_track=dict(
                     type='MultiPosCrossEntropyLoss', loss_weight=0.25),
                 loss_track_aux=dict(
                     type='L2Loss',
                     sample_ratio=3,
                     margin=0.3,
                     loss_weight=1.0,
                     hard_mining=True),
                 init_cfg=dict(
                     type='Xavier',
                     layer='Linear',
                     distribution='uniform',
                     bias=0,
                     override=dict(
                         type='Normal',
                         name='fc_embed',
                         mean=0,
                         std=0.01,
                         bias=0)),
                 *args,
                 **kwargs):
        super(QuasiDenseEmbedHead, self).__init__(
            init_cfg=init_cfg, *args, **kwargs)

        self.fc_embed = nn.Linear(self.last_layer_dim, embed_channels)

        self.softmax_temp = softmax_temp
        self.loss_track = build_loss(loss_track)
        if loss_track_aux is not None:
            self.loss_track_aux = build_loss(loss_track_aux)
        else:
            self.loss_track_aux = None
Example #5
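    # SiameseRPNHead.__init__ (mmtracking): per scale, builds one correlation
    # head with 2 * num_base_anchors cls outputs and one with 4 * num_base_anchors
    # bbox-delta outputs, plus the anchor generator, coder, assigner and losses.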
    def __init__(self,
                 anchor_generator,
                 in_channels,
                 kernel_size=3,
                 norm_cfg=dict(type='BN'),
                 weighted_sum=False,
                 bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                 target_means=[0., 0., 0., 0.],
                                 target_stds=[1., 1., 1., 1.]),
                 loss_cls=dict(type='CrossEntropyLoss',
                               reduction='sum',
                               loss_weight=1.0),
                 loss_bbox=dict(type='L1Loss',
                                reduction='sum',
                                loss_weight=1.2),
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=None,
                 *args,
                 **kwargs):
        super(SiameseRPNHead, self).__init__(init_cfg)
        self.anchor_generator = build_prior_generator(anchor_generator)
        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.assigner = build_assigner(self.train_cfg.assigner)
        self.sampler = build_sampler(self.train_cfg.sampler)
        self.fp16_enabled = False

        self.cls_heads = nn.ModuleList()
        self.reg_heads = nn.ModuleList()
        for i in range(len(in_channels)):
            self.cls_heads.append(
                CorrelationHead(in_channels[i], in_channels[i],
                                2 * self.anchor_generator.num_base_anchors[0],
                                kernel_size, norm_cfg))
            self.reg_heads.append(
                CorrelationHead(in_channels[i], in_channels[i],
                                4 * self.anchor_generator.num_base_anchors[0],
                                kernel_size, norm_cfg))

        self.weighted_sum = weighted_sum
        if self.weighted_sum:
            self.cls_weight = nn.Parameter(torch.ones(len(in_channels)))
            self.reg_weight = nn.Parameter(torch.ones(len(in_channels)))

        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
Example #6
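    # Another QuasiDenseEmbedHead.__init__ variant: builds its own conv/fc
    # branch and embedding FC, then constructs the track losses via build_loss.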
    def __init__(self,
                 num_convs=4,
                 num_fcs=1,
                 roi_feat_size=7,
                 in_channels=256,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 embed_channels=256,
                 conv_cfg=None,
                 norm_cfg=None,
                 softmax_temp=-1,
                 loss_track=dict(type='MultiPosCrossEntropyLoss',
                                 loss_weight=0.25),
                 loss_track_aux=dict(type='L2Loss',
                                     sample_ratio=3,
                                     margin=0.3,
                                     loss_weight=1.0,
                                     hard_mining=True)):
        super(QuasiDenseEmbedHead, self).__init__()
        self.num_convs = num_convs
        self.num_fcs = num_fcs
        self.roi_feat_size = roi_feat_size
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.embed_channels = embed_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.relu = nn.ReLU(inplace=True)
        self.convs, self.fcs, last_layer_dim = self._add_conv_fc_branch(
            self.num_convs, self.num_fcs, self.in_channels)
        self.fc_embed = nn.Linear(last_layer_dim, embed_channels)

        self.softmax_temp = softmax_temp
        self.loss_track = build_loss(loss_track)
        if loss_track_aux is not None:
            self.loss_track_aux = build_loss(loss_track_aux)
        else:
            self.loss_track_aux = None
Example #7
import pytest
import torch

from mmdet.models import build_loss


def test_varifocal_loss():
    # only the sigmoid version of VarifocalLoss is implemented
    with pytest.raises(AssertionError):
        loss_cfg = dict(
            type='VarifocalLoss', use_sigmoid=False, loss_weight=1.0)
        build_loss(loss_cfg)

    # test that alpha should be greater than 0
    with pytest.raises(AssertionError):
        loss_cfg = dict(
            type='VarifocalLoss',
            alpha=-0.75,
            gamma=2.0,
            use_sigmoid=True,
            loss_weight=1.0)
        build_loss(loss_cfg)

    # test that pred and target should be of the same size
    loss_cls_cfg = dict(
        type='VarifocalLoss',
        use_sigmoid=True,
        alpha=0.75,
        gamma=2.0,
        iou_weighted=True,
        reduction='mean',
        loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[100.0, -100.0]])
        fake_target = torch.Tensor([[1.0]])
        loss_cls(fake_pred, fake_target)

    # test the calculation
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100.0, -100.0]])
    fake_target = torch.Tensor([[1.0, 0.0]])
    assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))

    # test the loss with weights
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[0.0, 100.0]])
    fake_target = torch.Tensor([[1.0, 1.0]])
    fake_weight = torch.Tensor([0.0, 1.0])
    assert torch.allclose(
        loss_cls(fake_pred, fake_target, fake_weight), torch.tensor(0.0))
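
Both zero-loss checks rely on saturated logits: with target 1.0 and logit 100, sigmoid(pred) ≈ 1, so the binary cross-entropy term vanishes; with target 0.0 and logit -100, both the BCE term and the |sigmoid(pred) - target|^gamma modulating factor are ≈ 0. In the weighted case, the only nonzero element (logit 0.0 vs target 1.0) is masked by its 0.0 weight.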
Example #8
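    # RoIEmbedHead.__init__ (mmtracking): optional average pooling plus a
    # conv/fc branch; the matching loss is built via build_loss.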
    def __init__(self,
                 num_convs=0,
                 num_fcs=0,
                 roi_feat_size=7,
                 in_channels=256,
                 conv_out_channels=256,
                 with_avg_pool=False,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 loss_match=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=False,
                     loss_weight=1.0),
                 init_cfg=None,
                 **kwargs):
        super(RoIEmbedHead, self).__init__(init_cfg=init_cfg)
        self.num_convs = num_convs
        self.num_fcs = num_fcs
        self.roi_feat_size = _pair(roi_feat_size)
        self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.with_avg_pool = with_avg_pool
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.loss_match = build_loss(loss_match)
        self.fp16_enabled = False

        if self.with_avg_pool:
            self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
        # add convs and fcs
        self.convs, self.fcs, self.last_layer_dim = self._add_conv_fc_branch(
            self.num_convs, self.num_fcs, self.in_channels)
        self.relu = nn.ReLU(inplace=True)
Example #9
import pytest
import torch

from mmdet.models import build_loss


def test_seesaw_loss():
    # only the softmax version of SeesawLoss is implemented
    with pytest.raises(AssertionError):
        loss_cfg = dict(type='SeesawLoss', use_sigmoid=True, loss_weight=1.0)
        build_loss(loss_cfg)

    # test that cls_score.size(-1) == num_classes + 2
    loss_cls_cfg = dict(type='SeesawLoss',
                        p=0.0,
                        q=0.0,
                        loss_weight=1.0,
                        num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    # the length of fake_pred should be num_classes + 2 = 4
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[-100, 100]])
        fake_label = torch.Tensor([1]).long()
        loss_cls(fake_pred, fake_label)
    # the length of fake_pred should be num_classes + 2 = 4
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[-100, 100, -100]])
        fake_label = torch.Tensor([1]).long()
        loss_cls(fake_pred, fake_label)

    # test the calculation without p and q
    loss_cls_cfg = dict(type='SeesawLoss',
                        p=0.0,
                        q=0.0,
                        loss_weight=1.0,
                        num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[-100, 100, -100, 100]])
    fake_label = torch.Tensor([1]).long()
    loss = loss_cls(fake_pred, fake_label)
    assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
    assert torch.allclose(loss['loss_cls_classes'], torch.tensor(0.))

    # test the calculation with p and without q
    loss_cls_cfg = dict(type='SeesawLoss',
                        p=1.0,
                        q=0.0,
                        loss_weight=1.0,
                        num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[-100, 100, -100, 100]])
    fake_label = torch.Tensor([0]).long()
    loss_cls.cum_samples[0] = torch.exp(torch.Tensor([20]))
    loss = loss_cls(fake_pred, fake_label)
    assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
    assert torch.allclose(loss['loss_cls_classes'], torch.tensor(180.))
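
Rough intuition for the 180 (a reading of the implementation, not stated in the test): inflating cum_samples[0] to e^20 makes class 0 look about 20 nats more frequent than class 1, so the mitigation factor rescales the negative class-1 logit by log(N_1 / N_0) ≈ -20, and the class cross-entropy drops from 200 to 180.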

    # test the calculation with q and without p
    loss_cls_cfg = dict(type='SeesawLoss',
                        p=0.0,
                        q=1.0,
                        loss_weight=1.0,
                        num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[-100, 100, -100, 100]])
    fake_label = torch.Tensor([0]).long()
    loss = loss_cls(fake_pred, fake_label)
    assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
    assert torch.allclose(loss['loss_cls_classes'],
                          torch.tensor(200.) + torch.tensor(100.).log())

    # test return_dict=False plus the get_accuracy / get_activation helpers
    loss_cls_cfg = dict(type='SeesawLoss',
                        p=0.0,
                        q=1.0,
                        loss_weight=1.0,
                        num_classes=2,
                        return_dict=False)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100, -100, 100, -100]])
    fake_label = torch.Tensor([0]).long()
    loss = loss_cls(fake_pred, fake_label)
    acc = loss_cls.get_accuracy(fake_pred, fake_label)
    act = loss_cls.get_activation(fake_pred)
    assert torch.allclose(loss, torch.tensor(0.))
    assert torch.allclose(acc['acc_objectness'], torch.tensor(100.))
    assert torch.allclose(acc['acc_classes'], torch.tensor(100.))
    assert torch.allclose(act, torch.tensor([1., 0., 0.]))
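
The activation assertion reflects SeesawLoss's split head: the last two logits are objectness scores (foreground vs background). get_activation softmaxes the class and objectness logits separately and returns num_classes + 1 = 3 scores, the class scores scaled by the foreground probability followed by the background probability, hence [1., 0., 0.] here.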