def init_weights(self):
    super(CornerHead, self).init_weights()
    bias_init = bias_init_with_prob(0.1)
    for i in range(self.num_feat_levels):
        # The initialization of parameters is different between
        # nn.Conv2d and ConvModule. Our experiments show that
        # using the original initialization of nn.Conv2d increases
        # the final mAP by about 0.2%.
        self.tl_heat[i][-1].conv.reset_parameters()
        self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
        self.br_heat[i][-1].conv.reset_parameters()
        self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
        self.tl_off[i][-1].conv.reset_parameters()
        self.br_off[i][-1].conv.reset_parameters()
        if self.with_corner_emb:
            self.tl_emb[i][-1].conv.reset_parameters()
            self.br_emb[i][-1].conv.reset_parameters()
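All of these snippets use bias_init_with_prob to bias the classification or heatmap logits toward a small prior probability, so the head starts out predicting mostly background and focal-loss training stays stable. As a minimal sketch of what the helper computes (the inverse sigmoid of the prior probability; the library implementation may differ in details):

import numpy as np


def bias_init_with_prob(prior_prob):
    """Return a bias b such that sigmoid(b) == prior_prob.

    Solving sigmoid(b) = p gives b = -log((1 - p) / p); for p = 0.01
    this is about -4.6, so nearly every location is initially
    classified as background.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))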
Example n. 2
    def init_weights(self):
        """Initialize weights of the head."""
        bias_cls = bias_init_with_prob(0.01)
        for m in self.inter_convs:
            normal_init(m.conv, std=0.01)
        for m in self.cls_prob_module:
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.01)
        for m in self.reg_offset_module:
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001)
        normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls)

        self.cls_decomp.init_weights()
        self.reg_decomp.init_weights()

        normal_init(self.tood_cls, std=0.01, bias=bias_cls)
        normal_init(self.tood_reg, std=0.01)
Example n. 3
def test_truncnormalinit():
    """test TruncNormalInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))

    func = TruncNormalInit(mean=100,
                           std=1e-5,
                           bias=200,
                           a=0,
                           b=200,
                           layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))

    func = TruncNormalInit(mean=300,
                           std=1e-5,
                           a=100,
                           b=400,
                           bias_prob=0.01,
                           layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))

    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))

    func = TruncNormalInit(mean=300,
                           std=1e-5,
                           a=100,
                           b=400,
                           bias_prob=0.01,
                           layer='_ConvNd')
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
Example n. 4
def test_normalinit():
    """test Normalinit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))

    func = NormalInit(mean=100, std=1e-5, bias=200, layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))

    func = NormalInit(
        mean=300, std=1e-5, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))
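The tests above call NormalInit (and TruncNormalInit) directly, but in config-driven code these initializers are usually built from an init_cfg dict and applied to a module. A rough sketch of the equivalent call, assuming mmcv's initialize helper and the 'Normal' registry name:

import torch.nn as nn
from mmcv.cnn import initialize

model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))

# Roughly equivalent to NormalInit(mean=0, std=0.01, bias_prob=0.01,
# layer=['Conv2d', 'Linear'])(model): the dict is resolved through the
# initializer registry and applied to every matching layer.
initialize(model, dict(
    type='Normal', mean=0, std=0.01, bias_prob=0.01,
    layer=['Conv2d', 'Linear']))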
Example n. 5
    def init_weights(self):
        for _, m in self.shortcut_layers.named_modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)

        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        for _, m in self.hm.named_modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.01)

        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.hm[-1], std=0.01, bias=bias_cls)

        for _, m in self.wh.named_modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001)
Example n. 6
def init_weights(self):
    for m in self.cls_convs:
        normal_init(m.conv, std=0.01)
    for m in self.reg_convs:
        normal_init(m.conv, std=0.01)
    for m in self.mask_convs:
        normal_init(m.conv, std=0.01)
    for m in self.stuff_convs:
        normal_init(m.conv, std=0.01)
    for m in self.cls_spatial_flow_convs:
        normal_init(m.conv, std=0.01)
    for m in self.mask_spatial_flow_convs:
        normal_init(m.conv, std=0.01)
    for m in self.stuff_spatial_flow_convs:
        normal_init(m.conv, std=0.01)
    for m in self.stuff_mask_flow_convs:
        normal_init(m.conv, std=0.01)
    bias_cls = bias_init_with_prob(0.01)
    normal_init(self.retina_cls, std=0.01, bias=bias_cls)
    normal_init(self.retina_reg, std=0.01)
Example n. 7
def init_weights(self):
    for conv in self.conv_x:
        nn.init.kaiming_normal_(conv.weight,
                                mode='fan_out',
                                nonlinearity='relu')
        nn.init.constant_(conv.bias, 0)
    for conv in self.conv_h:
        nn.init.kaiming_normal_(conv.weight,
                                mode='fan_out',
                                nonlinearity='relu')
        nn.init.constant_(conv.bias, 0)
    if self.loss_type == 3:
        # focal_loss is used; initialize according to loss_type
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.fc_out, std=0.01, bias=bias_cls)
    else:
        nn.init.normal_(self.fc_out.weight, 0, 0.01)
        nn.init.constant_(self.fc_out.bias, 0)
        if self.with_offset:
            nn.init.normal_(self.fc_offset.weight, 0, 0.01)
            nn.init.constant_(self.fc_offset.bias, 0)
Example n. 8
def test_constaninit():
    """test ConstantInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = ConstantInit(val=1, bias=2, layer='Conv2d')
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))

    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 1.))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))

    func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    func(model)
    res = bias_init_with_prob(0.01)

    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))

    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)

    # test bias input type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias='1')
    # test bias_prob type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias_prob='1')
    # test layer input type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, layer=1)
Example n. 9
def init_weights(self):
    bias_cls = bias_init_with_prob(0.01)
    normal_init(self.efficient_cls.pointwise_conv.conv,
                std=0.01,
                bias=bias_cls)
Example n. 10
def init_weights(self):
    """Initialize weights of the head."""
    bias_init = bias_init_with_prob(0.1)
    for i in range(self.num_feat_levels):
        self.tl_heat[i][-1].bias.data.fill_(bias_init)
        self.br_heat[i][-1].bias.data.fill_(bias_init)
Example n. 11
def init_weights(self):
    """Initialize the weights of the head."""
    bias_cls = bias_init_with_prob(0.01)
    normal_init(self.conv_cls, std=0.01, bias=bias_cls)
    normal_init(self.conv_reg, std=0.01)
Example n. 12
def init_weights(self):
    """Initialize weights of the head."""
    normal_init(self.rpn_conv, std=0.01)
    bias_cls = bias_init_with_prob(0.01)
    normal_init(self.rpn_cls, std=0.01, bias=bias_cls)
    normal_init(self.rpn_reg, std=0.01)
Example n. 13
def init_weights(self):
    bias_cls = bias_init_with_prob(0.01)
    normal_init(self.conv_logits, std=0.01, bias=bias_cls)