def init_weights(self):
    """Initialize head weights: N(0, 0.01) for all convs, with a
    prior-probability bias on the classification output."""
    for conv_module in (*self.cls_convs, *self.reg_convs):
        normal_init(conv_module.conv, std=0.01)
    # Bias the cls output so initial sigmoid scores sit near the 0.01 prior.
    cls_bias = bias_init_with_prob(0.01)
    normal_init(self.retina_cls, std=0.01, bias=cls_bias)
    normal_init(self.retina_bbox_reg, std=0.01)
    normal_init(self.retina_bbox_cls, std=0.01)
def init_weights(self):
    """Initialize weights of the head."""
    # Both stacked conv branches share the same initialization.
    for branch in (self.cls_convs, self.reg_convs):
        for module in branch:
            normal_init(module.conv, std=0.01)
    # Prior-probability bias keeps initial classification scores low.
    prior_bias = bias_init_with_prob(0.01)
    normal_init(self.atss_cls, std=0.01, bias=prior_bias)
    normal_init(self.atss_reg, std=0.01)
    normal_init(self.atss_centerness, std=0.01)
def init_weights(self):
    """Initialize weights of the head's fc and conv layers.

    Linear layers get Xavier-uniform init; the optional upsampling
    layers get Kaiming init; remaining layers use small Gaussians.
    """
    fc_groups = (self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs)
    for group in fc_groups:
        for layer in group.modules():
            if isinstance(layer, nn.Linear):
                xavier_init(layer, distribution='uniform')
    if self.reg_feat_up_ratio > 1:
        # Upsampling layers exist only when the reg feature is enlarged.
        kaiming_init(self.upsample_x, distribution='normal')
        kaiming_init(self.upsample_y, distribution='normal')
    normal_init(self.reg_conv_att_x, 0, 0.01)
    normal_init(self.reg_conv_att_y, 0, 0.01)
    # Offset branch uses a tighter std than the other output layers.
    normal_init(self.fc_reg_offset, 0, 0.001)
    normal_init(self.fc_reg_cls, 0, 0.01)
    normal_init(self.fc_cls, 0, 0.01)
def init_weights(self):
    """Initialize head weights and delegate to the feature adaption module."""
    normal_init(self.conv_cls, std=0.01)
    normal_init(self.conv_reg, std=0.01)
    # Location branch gets a prior-probability bias so its initial
    # outputs favor the negative class.
    loc_bias = bias_init_with_prob(0.01)
    normal_init(self.conv_loc, std=0.01, bias=loc_bias)
    normal_init(self.conv_shape, std=0.01)
    self.feature_adaption.init_weights()
def init_weights(self):
    """Initialize weights of the head."""
    # Only plain Conv2d layers are re-initialized here; other conv
    # variants (e.g. deformable) keep their own initialization.
    for branch in (self.cls_convs, self.reg_convs):
        for module in branch:
            if isinstance(module.conv, nn.Conv2d):
                normal_init(module.conv, std=0.01)
    cls_bias = bias_init_with_prob(0.01)
    normal_init(self.conv_cls, std=0.01, bias=cls_bias)
    normal_init(self.conv_reg, std=0.01)
def init_weights(self):
    """Initialize weights of the head."""
    for conv_module in (*self.cls_convs, *self.reg_convs):
        normal_init(conv_module.conv, std=0.01)
    # This bias initialization is crucial: there are very many anchors
    # and no sampling step (unlike Faster R-CNN), so negatives dwarf
    # positives (e.g. roughly 1000:1). With sigmoid classification a
    # negative bias pushes most initial outputs towards the negative
    # class, which keeps the initial classification loss small.
    cls_out_bias = bias_init_with_prob(0.01)
    normal_init(self.retina_cls, std=0.01, bias=cls_out_bias)
    normal_init(self.retina_reg, std=0.01)
def init_weights(self):
    """Initialize the offset-prediction and adaption conv weights."""
    # Offset conv uses a larger std than the adaption conv.
    normal_init(self.conv_offset, std=0.1)
    normal_init(self.conv_adaption, std=0.01)
def init_weights(self):
    """Initialize weights of the head."""
    for conv_module in (*self.cls_convs, *self.reg_convs):
        normal_init(conv_module.conv, std=0.01)
    cls_out_bias = bias_init_with_prob(0.01)
    # Classification branch — only the output conv gets the prior bias.
    normal_init(self.reppoints_cls_conv, std=0.01)
    normal_init(self.reppoints_cls_out, std=0.01, bias=cls_out_bias)
    # Point-initialization branch.
    normal_init(self.reppoints_pts_init_conv, std=0.01)
    normal_init(self.reppoints_pts_init_out, std=0.01)
    # Point-refinement branch.
    normal_init(self.reppoints_pts_refine_conv, std=0.01)
    normal_init(self.reppoints_pts_refine_out, std=0.01)
def init_weights(self):
    """Initialize weights of the head."""
    # Every prediction conv shares the same small-Gaussian init.
    for pred_conv in self.convs_pred:
        normal_init(pred_conv, std=0.01)
def init_weights(self):
    """Initialize weights of the head."""
    for output_conv in (self.conv_cls, self.conv_reg):
        normal_init(output_conv, std=0.01)
def init_weights(self):
    """Initialize weights of the head."""
    # The shared branches are handled by the parent implementation;
    # only the centerness conv is initialized here.
    super().init_weights()
    normal_init(self.conv_centerness, std=0.01)
def init_weights(self):
    """Initialize weights of the layer."""
    for branch in (self.cls_convs, self.reg_convs):
        for module in branch:
            normal_init(module.conv, std=0.01)
    # Feature adaption modules initialize themselves.
    self.feature_adaption_cls.init_weights()
    self.feature_adaption_reg.init_weights()
    # Both the anchor-location and cls outputs use the same prior bias.
    prior_bias = bias_init_with_prob(0.01)
    normal_init(self.conv_loc, std=0.01, bias=prior_bias)
    normal_init(self.conv_shape, std=0.01)
    normal_init(self.retina_cls, std=0.01, bias=prior_bias)
    normal_init(self.retina_reg, std=0.01)