def init_weights(self):
    """Initialize the projection conv, pixel decoder and transformer decoder.

    The input-projection layer is only initialized when it is a real
    conv (it may be an identity placeholder otherwise).
    """
    if isinstance(self.decoder_input_proj, Conv2d):
        caffe2_xavier_init(self.decoder_input_proj, bias=0)
    self.pixel_decoder.init_weights()
    # Xavier-init every matrix-shaped parameter of the transformer decoder;
    # 1-D parameters (biases, norm scales) keep their defaults.
    for param in self.transformer_decoder.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)
def init_weights(self):
    """Initialize lateral/FPN convs, their norms, and the encoder blocks."""
    for conv in (self.lateral_conv, self.fpn_conv):
        caffe2_xavier_init(conv)
    for norm_layer in (self.lateral_norm, self.fpn_norm):
        constant_init(norm_layer, 1)
    # Sweep the dilated encoder: convs get a small-std normal init,
    # norm layers are set to identity.
    for module in self.dilated_encoder_blocks.modules():
        if isinstance(module, nn.Conv2d):
            normal_init(module, mean=0, std=0.01)
        if is_norm(module):
            constant_init(module, 1)
def init_weights(self):
    """Initialize FPN cell output convs and the plain conv layers.

    Bug fix: the original tested ``hasattr(module, 'conv_out')`` but then
    accessed ``module.out_conv`` — the guard could never match the
    attribute actually used, so the out-conv init was silently skipped
    (or would raise if only ``conv_out`` existed).  The guard now names
    the attribute that is read.
    """
    for module in self.fpn.values():
        # Fixed: check the attribute we actually access (was 'conv_out').
        if hasattr(module, 'out_conv'):
            caffe2_xavier_init(module.out_conv.conv)
    for modules in [
            self.adapt_convs.modules(),
            self.extra_downsamples.modules()
    ]:
        for module in modules:
            if isinstance(module, nn.Conv2d):
                caffe2_xavier_init(module)
def init_weights(self):
    """Initialize FCOS head convolutions.

    Prediction heads use a small-std normal init; the classification
    head additionally gets a RetinaNet-style prior-probability bias so
    that training starts with rare positive predictions.
    """
    # retinanet_bias_init
    cls_bias = bias_init_with_prob(0.01)
    normal_init(self.fcos_reg, std=0.01)
    normal_init(self.fcos_centerness, std=0.01)
    normal_init(self.fcos_cls, std=0.01, bias=cls_bias)
    # Shared cls/reg towers: caffe2 Xavier on every plain Conv2d.
    for tower in (self.cls_convs, self.reg_convs):
        for module in tower.modules():
            is_plain_conv = (
                isinstance(module, ConvModule)
                and isinstance(module.conv, nn.Conv2d))
            if is_plain_conv:
                caffe2_xavier_init(module.conv)
def init_weights(self):
    """Initialize lateral/output convs, projections and the encoder."""
    # Only the first (num_inputs - 2) levels carry lateral/output convs.
    for idx in range(self.num_inputs - 2):
        caffe2_xavier_init(self.lateral_convs[idx].conv, bias=0)
        caffe2_xavier_init(self.output_convs[idx].conv, bias=0)
    caffe2_xavier_init(self.mask_feature, bias=0)
    caffe2_xavier_init(self.encoder_in_proj, bias=0)
    caffe2_xavier_init(self.encoder_out_proj.conv, bias=0)
    # Xavier-init matrix-shaped encoder parameters; 1-D ones keep defaults.
    for param in self.encoder.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)
def init_weights(self):
    """Initialize the weights of module.

    Bug fix: the original tested ``hasattr(module, 'conv_out')`` but then
    accessed ``module.out_conv`` — the guard could never match the
    attribute actually used, so the out-conv init was silently skipped
    (or would raise if only ``conv_out`` existed).  The guard now names
    the attribute that is read.
    """
    super(NASFCOS_FPN, self).init_weights()
    for module in self.fpn.values():
        # Fixed: check the attribute we actually access (was 'conv_out').
        if hasattr(module, 'out_conv'):
            caffe2_xavier_init(module.out_conv.conv)
    for modules in [
            self.adapt_convs.modules(),
            self.extra_downsamples.modules()
    ]:
        for module in modules:
            if isinstance(module, nn.Conv2d):
                caffe2_xavier_init(module)
def _init_hint_attn_layer(self):
    """Build one conv-ReLU-conv hint-attention block per anchor stride
    and caffe2-Xavier-init its convolutions.

    Improvements over the original: the unused ``xavier_init`` import is
    dropped, and each block is initialized as it is built instead of in a
    second pass over the finished list.

    NOTE(review): blocks are moved to CUDA unconditionally, which breaks
    CPU-only runs; kept for behavioral compatibility — consider relying
    on the parent module's ``.to(device)`` instead.
    """
    from mmcv.cnn import caffe2_xavier_init  # local import kept from original
    blocks = []
    for _ in self.anchor_generator.strides:
        block = nn.Sequential(
            nn.Conv2d(256, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1))
        block.cuda()  # hard-coded device; see NOTE in docstring
        for m in block.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)
        blocks.append(block)
    self.hint_attn_layer = nn.ModuleList(blocks)
def init_weights(self):
    """Initialize lateral/output convs and the final feature convs."""
    # Only the first (num_inputs - 2) levels carry lateral/output convs.
    for idx in range(self.num_inputs - 2):
        caffe2_xavier_init(self.lateral_convs[idx].conv, bias=0)
        caffe2_xavier_init(self.output_convs[idx].conv, bias=0)
    caffe2_xavier_init(self.mask_feature, bias=0)
    caffe2_xavier_init(self.last_feat_conv, bias=0)
def test_caffe_xavier_init():
    """Smoke test: caffe2 Xavier init runs on a plain Conv2d layer."""
    layer = nn.Conv2d(3, 16, 3)
    caffe2_xavier_init(layer)
def init_weights(self):
    """Initialize the weights of module."""
    # caffe2 Xavier on every Conv2d submodule; everything else untouched.
    conv_layers = (m for m in self.modules() if isinstance(m, nn.Conv2d))
    for conv in conv_layers:
        caffe2_xavier_init(conv)
def init_weights(self):
    """Apply caffe2-style Xavier initialization to all conv layers."""
    for submodule in self.modules():
        if not isinstance(submodule, nn.Conv2d):
            continue
        caffe2_xavier_init(submodule)
def init_weights(self):
    """Xavier-init convs and set normalization layers to identity."""
    for layer in self.modules():
        if isinstance(layer, nn.Conv2d):
            caffe2_xavier_init(layer)
        elif is_norm(layer):
            # Norm layers start as identity (scale = 1).
            constant_init(layer, 1.0)