Example #1
0
    def init_weights(self):
        """Initialize the head: Kaiming init for shortcut convs, identity
        BatchNorm stats in the deconv stack, and small-std normal init for
        the hm/wh prediction branches (with a focal-loss style bias prior
        on the final heatmap layer).
        """
        for module in self.shortcut_layers.modules():
            if isinstance(module, nn.Conv2d):
                kaiming_init(module)

        for module in self.deconv_layers.modules():
            if isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

        for module in self.hm.modules():
            if isinstance(module, nn.Conv2d):
                normal_init(module, std=0.01)

        # Bias prior so the heatmap starts with low foreground probability.
        bias_cls = bias_init_with_prob(0.01)
        last_hm_conv = self.hm[-2] if self.hm_last_se3x3 else self.hm[-1]
        normal_init(last_hm_conv, std=0.01, bias=bias_cls)

        for module in self.wh.modules():
            if isinstance(module, nn.Conv2d):
                normal_init(module, std=0.001)

        if self.two_stage:
            for module in self.wh2.modules():
                if isinstance(module, nn.Conv2d):
                    normal_init(module, std=0.001)
Example #2
0
 def init_weights(self):
     """Initialize the RetinaNet-style head: normal init on both conv
     towers, a focal-loss bias prior on the classification output, and a
     bias of 1 on the regression output.
     """
     for conv_block in self.cls_convs:
         normal_init(conv_block.conv, std=0.01)
     for conv_block in self.reg_convs:
         normal_init(conv_block.conv, std=0.01)
     normal_init(self.retina_cls, std=0.01,
                 bias=bias_init_with_prob(0.01))
     normal_init(self.retina_reg, std=0.01, bias=1)
Example #3
0
    def init_weights(self):
        """Initialize head weights.

        Shortcut convs get Kaiming init; BatchNorm layers in the deconv
        stack are set to weight=1 / bias=0 (optionally also initializing
        deconv / trident convs).  Prediction branches use small-std normal
        init, with a focal-loss style bias prior on the final heatmap
        layer unless a fixed ``hm_init_value`` is configured.
        """
        for _, m in self.shortcut_layers.named_modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)

        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            if self.use_deconv_init and isinstance(m, nn.ConvTranspose2d):
                normal_init(m, std=0.01)
            if self.use_trident and isinstance(m, nn.Conv2d):
                kaiming_init(m)

        if not self.predict_together:
            for _, m in self.hm.named_modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.01)

            if self.hm_init_value is not None:
                # Fixed bias for the final heatmap layer when configured.
                self.hm[-1].bias.data.fill_(self.hm_init_value)
            else:
                bias_cls = bias_init_with_prob(0.01)
                normal_init(self.hm[-1], std=0.01, bias=bias_cls)

            # Fix: pick the wh std once instead of initializing every conv
            # twice (previously the branch was fully initialized at
            # std=0.001 and then re-initialized at std=0.01 when
            # ``wh_001`` was set -- same final distribution, wasted work).
            wh_std = 0.01 if self.wh_001 else 0.001
            for _, m in self.wh.named_modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=wh_std)
        else:
            for _, m in self.hmwh.named_modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)

            bias_cls = bias_init_with_prob(0.01)
            # NOTE(review): the stop of -1 excludes the last channel from
            # this strided bias fill; if every 5th channel (the hm logits)
            # should get the prior, ``[0::5]`` may be intended -- confirm
            # against the hmwh channel layout.
            self.hmwh[-1].bias.data[0:-1:5].fill_(bias_cls)

        if hasattr(self, 'centerness'):
            for _, m in self.centerness.named_modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.01)
Example #4
0
    def init_weights(self):
        """Initialize the auto head plus the hm/wh prediction branches."""
        self.auto_head.init_weight()

        for module in self.hm.modules():
            if isinstance(module, nn.Conv2d):
                normal_init(module, std=0.01)

        # Focal-loss style prior on the final heatmap layer.
        normal_init(self.hm[-1], std=0.01, bias=bias_init_with_prob(0.01))

        for module in self.wh.modules():
            if isinstance(module, nn.Conv2d):
                normal_init(module, std=0.001)
Example #5
0
    def init_weights(self):
        """Initialize BatchNorm stats in the DCN stack and the hm/wh
        prediction branches."""
        for module in self.dcn_layers.modules():
            if isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

        for module in self.hm.modules():
            if isinstance(module, nn.Conv2d):
                normal_init(module, std=0.01)

        # Focal-loss style prior on the final heatmap layer.
        normal_init(self.hm[-1], std=0.01, bias=bias_init_with_prob(0.01))

        for module in self.wh.modules():
            if isinstance(module, nn.Conv2d):
                normal_init(module, std=0.001)
Example #6
0
    def init_weights(self):
        """Initialize the multi-scale (s4/s8/s16) head: BN in the upsample
        paths, Kaiming shortcut convs, small-std normal hm/wh branches with
        a focal-loss bias prior, zeroed deformable offsets, and (optionally)
        the ASFF fusion modules.
        """
        for upsample_group in (self.upsample_layers, self.s8_upsample_layers,
                               self.s16_upsample_layers):
            for mod in upsample_group.modules():
                if isinstance(mod, nn.BatchNorm2d):
                    constant_init(mod, 1)

        for shortcut_group in (self.shortcut_layers, self.s8_shortcut_layers,
                               self.s16_shortcut_layers):
            for mod in shortcut_group.modules():
                if isinstance(mod, nn.Conv2d):
                    kaiming_init(mod)

        bias_cls = bias_init_with_prob(0.01)
        for hm_branch in (self.s4_hm, self.s8_hm, self.s16_hm):
            for mod in hm_branch.modules():
                if isinstance(mod, nn.Conv2d):
                    normal_init(mod, std=0.01)
            # Re-init the final layer with the classification bias prior.
            if len(hm_branch) > 0:
                normal_init(hm_branch[-1], std=0.01, bias=bias_cls)

        for wh_branch in (self.s4_wh, self.s8_wh, self.s16_wh):
            for mod in wh_branch.modules():
                if isinstance(mod, nn.Conv2d):
                    normal_init(mod, std=0.001)

        # Zero-init the offset(/mask) convs of deformable convolutions.
        for mod in self.modules():
            if isinstance(mod, ModulatedDeformConvPack):
                if hasattr(mod, 'conv_offset_mask'):
                    constant_init(mod.conv_offset_mask, 0)
                else:
                    constant_init(mod.conv_offset, 0)

        if self.use_asff:
            for asff_module in (self.s16_asff, self.s8_asff, self.s4_asff):
                for mod in asff_module.modules():
                    if isinstance(mod, nn.Conv2d):
                        normal_init(mod, std=0.01)
                    elif isinstance(mod, nn.BatchNorm2d):
                        constant_init(mod, 1)
Example #7
0
    def init_weights(self):
        """Initialize the head: shortcut/upsample layers (when upsampling is
        required) and the hm / hm_offset / wh prediction branches, each with
        its own normal-init std; the final heatmap layer gets a focal-loss
        style bias prior.
        """
        if self._require_upsampling:
            for mod in self.shortcut_layers.modules():
                if isinstance(mod, nn.Conv2d):
                    kaiming_init(mod)
            for mod in self.upsample_layers.modules():
                if isinstance(mod, nn.BatchNorm2d):
                    nn.init.constant_(mod.weight, 1)
                    nn.init.constant_(mod.bias, 0)

        # Per-branch init stds; iteration order matches the original
        # hm -> hm_offset -> wh sequence.
        for branch, std in ((self.hm, 0.01), (self.hm_offset, 0.001),
                            (self.wh, 0.001)):
            for mod in branch.modules():
                if isinstance(mod, nn.Conv2d):
                    normal_init(mod, std=std)

        normal_init(self.hm[-1], std=0.01, bias=bias_init_with_prob(0.01))
Example #8
0
    def init_weights(self):
        """Initialize the head: BN in the upsample path, Kaiming shortcut
        convs, normal-init hm branch with a focal-loss bias prior, both wh
        branches at std=0.001, and zeroed deformable-conv offsets.
        """
        for mod in self.upsample_layers.modules():
            if isinstance(mod, nn.BatchNorm2d):
                constant_init(mod, 1)

        for mod in self.shortcut_layers.modules():
            if isinstance(mod, nn.Conv2d):
                kaiming_init(mod)

        for mod in self.hm.modules():
            if isinstance(mod, nn.Conv2d):
                normal_init(mod, std=0.01)
        # Re-init the final heatmap layer with the classification prior.
        normal_init(self.hm[-1], std=0.01, bias=bias_init_with_prob(0.01))

        for wh_branch in (self.wh_b1, self.wh_b2):
            for mod in wh_branch.modules():
                if isinstance(mod, nn.Conv2d):
                    normal_init(mod, std=0.001)

        for mod in self.modules():
            if isinstance(mod, ModulatedDeformConvPack):
                constant_init(mod.conv_offset, 0)
Example #9
0
    def init_weights(self):
        """Initialize the two-branch head.

        BatchNorm layers in the upsample path get constant stats, shortcut
        convs use Kaiming init, and the hm/wh branches use a small-std
        normal init (or Kaiming everywhere when ``self.all_kaiming`` is
        set).  Optional s8 fusion layers, score-loss BNs, exchange convs,
        and the extra shortcut are handled by their config flags, and
        deformable-conv offset layers are zero-initialized last.
        """
        for mod in self.upsample_layers.modules():
            if isinstance(mod, nn.BatchNorm2d):
                constant_init(mod, 1)

        for mod in self.shortcut_layers.modules():
            if isinstance(mod, nn.Conv2d):
                kaiming_init(mod)

        # Focal-loss style prior so heatmaps start with low foreground prob.
        bias_cls = bias_init_with_prob(0.01)
        for hm_branch in (self.hm_b1, self.hm_b2):
            for mod in hm_branch.modules():
                if not isinstance(mod, nn.Conv2d):
                    continue
                if self.all_kaiming:
                    kaiming_init(mod)
                else:
                    normal_init(mod, std=0.01)
            # Final layer is re-initialized with the classification bias.
            normal_init(hm_branch[-1], std=0.01, bias=bias_cls)

        for wh_branch in (self.wh_b1, self.wh_b2):
            for mod in wh_branch.modules():
                if not isinstance(mod, nn.Conv2d):
                    continue
                if self.all_kaiming:
                    kaiming_init(mod)
                else:
                    normal_init(mod, std=0.001)

        if self.mdcn_before_s8 or self.ind_mdcn_for_s8:
            for mod in self.mdcn_s8_layer.modules():
                if isinstance(mod, nn.BatchNorm2d):
                    constant_init(mod, 1)

        if self.conv_before_s8:
            for mod in self.conv_s8_layer.modules():
                if isinstance(mod, nn.Conv2d):
                    if self.all_kaiming:
                        kaiming_init(mod)
                    else:
                        normal_init(mod, std=0.01)
                if isinstance(mod, nn.BatchNorm2d):
                    constant_init(mod, 1)

        if self.with_score_loss:
            for bn in self.hm_bns:
                constant_init(bn, 1)

        if self.conv_exchage:
            for conv in self.conv_ex:
                kaiming_init(conv)

        if self.extra_shortcut_cfg:
            for mod in self.extra_shortcut_layer.modules():
                if isinstance(mod, nn.Conv2d):
                    kaiming_init(mod)

        # Zero-init the offset(/mask) convs of deformable convolutions.
        for mod in self.modules():
            if isinstance(mod, ModulatedDeformConvPack):
                if hasattr(mod, 'conv_offset_mask'):
                    constant_init(mod.conv_offset_mask, 0)
                else:
                    constant_init(mod.conv_offset, 0)
Example #10
0
    def init_weights(self):
        """Initialize the two-branch (b1/b2) head.

        Upsample-path BN layers get constant stats, shortcut convs use
        Kaiming init, and the hm/wh branches use small-std normal init with
        a focal-loss style bias prior on the final heatmap layer.  Optional
        cross-branch s8/s16 fusion layers and b2 extra layers follow, and
        deformable-conv offset layers are zero-initialized last.
        """
        for upsample_layers in [self.upsample_layers_b1, self.upsample_layers_b2]:
            for m in upsample_layers.modules():
                if isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)

        for shortcut_layers in [self.shortcut_layers_b1, self.shortcut_layers_b2]:
            for m in shortcut_layers.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)

        # Bias prior so heatmaps start predicting low foreground probability.
        bias_cls = bias_init_with_prob(0.01)
        for hm in [self.hm_b1, self.hm_b2]:
            for m in hm.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.01)
            if self.depthwise_hm:
                # Depthwise final layer: which sub-conv receives the bias
                # prior depends on the depth_init_kaiming / depth_deform
                # configuration.
                if self.depth_init_kaiming:
                    normal_init(hm[-1].conv, std=0.01, bias=bias_cls)
                    if not self.depth_deform:
                        kaiming_init(hm[-1].depth_conv)
                else:
                    if self.depth_deform:
                        # The deformable depth conv may be built without a
                        # bias; only fill it when it exists.
                        if hasattr(hm[-1].depth_conv, 'bias') and hm[
                            -1].depth_conv.bias is not None:
                            nn.init.constant_(hm[-1].depth_conv.bias, bias_cls)
                    else:
                        normal_init(hm[-1].depth_conv, std=0.01, bias=bias_cls)
            else:
                normal_init(hm[-1], std=0.01, bias=bias_cls)

        for wh in [self.wh_b1, self.wh_b2]:
            for m in wh.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)

        # Optional cross-branch fusion layers at stride 8.
        if self.b1_s8_conv_num != 0:
            for m in self.b1b2_s8_layers.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.01)
                if isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
            if self.b1_concat:
                for m in self.b1b2_s8_down.modules():
                    if isinstance(m, nn.Conv2d):
                        normal_init(m, std=0.01)
                    if isinstance(m, nn.BatchNorm2d):
                        constant_init(m, 1)
        # Optional cross-branch fusion layers at stride 16.
        if self.b1_s16_conv_num != 0:
            for m in self.b1b2_s16_layers.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.01)
                if isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
            if self.b1_concat:
                for m in self.b1b2_s16_down.modules():
                    if isinstance(m, nn.Conv2d):
                        normal_init(m, std=0.01)
                    if isinstance(m, nn.BatchNorm2d):
                        constant_init(m, 1)

        if self.use_b2_extra_layers:
            for m in self.b2_extra_layers:
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.01)

        # Zero-init the offset convs of deformable convolutions.
        for m in self.modules():
            if isinstance(m, ModulatedDeformConvPack):
                constant_init(m.conv_offset, 0)