def __init__(self, cl=32, cm=32, ch=16, nframes=7, input_nc=3, output_nc=3, upscale_factor=4):
    super(RBPN, self).__init__()
    self.nframes = nframes
    self.upscale_factor = upscale_factor
    # initial feature extraction
    self.conv1 = M.Sequential(
        M.Conv2d(input_nc, cl, kernel_size=3, stride=1, padding=1),
        M.PReLU(),
    )
    self.conv2 = M.Sequential(
        M.Conv2d(input_nc * 2, cm, kernel_size=3, stride=1, padding=1),
        M.PReLU(),
    )
    # projection module
    self.Projection = Projection_Module(cl, cm, ch)
    # reconstruction module
    self.reconstruction = M.Conv2d((self.nframes - 1) * ch, output_nc, kernel_size=3, stride=1, padding=1)
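# Shape bookkeeping for the RBPN modules above, written as a hedged sketch: conv1 encodes
# the target frame (cl channels), conv2 encodes each (target, neighbor) pair (cm channels),
# and reconstruction fuses the nframes-1 projected features. Projection_Module's
# (state, output) interface here is an assumption, not the repo's verified signature.
import megengine.functional as F

def _rbpn_forward_sketch(net, target, neighbors):
    feat_l = net.conv1(target)                              # B, cl, H, W
    feats = []
    for nb in neighbors:                                    # nframes - 1 neighbor frames
        feat_m = net.conv2(F.concat([target, nb], axis=1))  # B, cm, H, W
        feat_l, h = net.Projection(feat_l, feat_m)          # h: B, ch, H, W
        feats.append(h)
    return net.reconstruction(F.concat(feats, axis=1))      # (nframes-1)*ch -> output_nc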
def __init__(self, hidden):
    super(Upsample, self).__init__()
    # 1x1 conv shrinking the 4*hidden concatenated channels back to hidden
    # (identifier kept as in the source; presumably "shrinking")
    self.shirking = M.Conv2d(4 * hidden, hidden, kernel_size=1, stride=1, padding=0)
    self.sel = SEL(hidden)
    self.reconstruction = IMDModule(in_channels=hidden)
    self.conv_last = M.Conv2d(hidden, 3, kernel_size=3, stride=1, padding=1)
    self.conv_hr1 = M.Conv2d(hidden, hidden, kernel_size=3, stride=1, padding=1)
    self.conv_hr2 = M.Conv2d(hidden, hidden, kernel_size=3, stride=1, padding=1)
    self.lrelu = M.LeakyReLU(negative_slope=0.1)
    self.init_weights()
def __init__(self, in_ch=3, num_classes=1000):
    """The AlexNet.

    Args:
        in_ch: int, the number of channels of the input
        num_classes: int, the number of classes to predict

    Reference:
        "One weird trick for parallelizing convolutional neural networks"
        <https://arxiv.org/abs/1404.5997>
    """
    super(AlexNet, self).__init__()
    # feature extraction part
    self.features = M.Sequential(
        M.Conv2d(in_ch, 64, kernel_size=11, stride=4, padding=11 // 4),
        M.ReLU(),
        M.MaxPool2d(kernel_size=3, stride=2),
        M.Conv2d(64, 192, kernel_size=5, padding=2),
        M.ReLU(),
        M.MaxPool2d(kernel_size=3, stride=2),
        M.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
        M.ReLU(),
        M.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
        M.ReLU(),
        M.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
        M.ReLU(),
        M.MaxPool2d(kernel_size=3, stride=2),
    )
    # global average pooling to a fixed 6x6 map
    self.avgpool = M.AdaptiveAvgPool2d((6, 6))
    # classifier part
    self.classifier = M.Sequential(
        M.Dropout(),
        M.Linear(256 * 6 * 6, 4096),
        M.ReLU(),
        M.Dropout(),
        M.Linear(4096, 4096),
        M.ReLU(),
        M.Linear(4096, num_classes),
    )
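# Minimal usage sketch (assumptions: the enclosing `class AlexNet(M.Module)` exists and its
# forward is the usual features -> avgpool -> flatten -> classifier chain):
import numpy as np
import megengine as mge

net = AlexNet(in_ch=3, num_classes=1000)
x = mge.tensor(np.random.randn(2, 3, 224, 224).astype("float32"))
logits = net(x)  # expected shape: (2, 1000)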
def __init__(self, cfg):
    super().__init__()
    self.cfg = cfg
    self.box_coder = layers.BoxCoder()
    self.stride_list = cfg.rpn_stride
    rpn_channel = cfg.rpn_channel
    self.in_features = cfg.rpn_in_features
    self.anchors_generator = layers.DefaultAnchorGenerator(
        cfg.anchor_base_size,
        cfg.anchor_scales,
        cfg.anchor_aspect_ratios,
        cfg.anchor_offset,
    )
    self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
    self.rpn_cls_score = M.Conv2d(
        rpn_channel, cfg.num_cell_anchors * 2, kernel_size=1, stride=1
    )
    self.rpn_bbox_offsets = M.Conv2d(
        rpn_channel, cfg.num_cell_anchors * 4, kernel_size=1, stride=1
    )
    for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
        M.init.normal_(l.weight, std=0.01)
        M.init.fill_(l.bias, 0)
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell1, self).__init__()
    self.conv_prev_1x1 = []
    self.conv_prev_1x1.append(M.ReLU())
    self.conv_prev_1x1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
    self.conv_prev_1x1.append(M.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
    self.conv_prev_1x1 = M.Sequential(*self.conv_prev_1x1)

    self.conv_1x1 = []
    self.conv_1x1.append(M.ReLU())
    self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
    self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
    self.conv_1x1 = M.Sequential(*self.conv_1x1)

    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_1_left = M.MaxPool2d(3, stride=2, padding=1)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_2_left = M.AvgPool2d(3, stride=2, padding=1)
    self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = M.MaxPool2d(3, stride=2, padding=1)
def __init__(self, in_channels, out_channels, kernel_size=3):
    """MobileNeXt block.

    By default, coordinate attention is applied after the first depthwise conv.
    https://github.com/Andrew-Qibin/CoordAttention/blob/main/coordatt.py
    """
    super(MobileNeXt, self).__init__()
    self.dconv1 = M.ConvRelu2d(in_channels, out_channels, kernel_size=kernel_size,
                               stride=1, padding=(kernel_size // 2), groups=in_channels)
    self.CA = CoordAtt(inp=out_channels, oup=out_channels)
    self.conv1 = M.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
    self.conv2 = M.ConvRelu2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
    self.dconv2 = M.Conv2d(out_channels, out_channels, kernel_size=kernel_size,
                           stride=1, padding=(kernel_size // 2), groups=out_channels)
    self.init_weights()
def __init__(self, in_channels: int, out_channels: int):
    super().__init__()
    self.num_levels = 2
    self.in_feature = "res5"
    self.p6 = M.Conv2d(in_channels, out_channels, 3, 2, 1)
    self.p7 = M.Conv2d(out_channels, out_channels, 3, 2, 1)
    self.use_P5 = in_channels == out_channels
def __init__(self, in_channels, out_channels, hidden_channels=None, downsample=False):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.hidden_channels = hidden_channels if hidden_channels is not None else in_channels
    self.downsample = downsample
    self.learnable_sc = (in_channels != out_channels) or downsample

    # Build the layers
    self.c1 = M.Conv2d(self.in_channels, self.hidden_channels, 3, 1, 1)
    self.c2 = M.Conv2d(self.hidden_channels, self.out_channels, 3, 1, 1)
    self.activation = M.ReLU()
    M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
    M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))

    # Shortcut layer
    if self.learnable_sc:
        self.c_sc = M.Conv2d(in_channels, out_channels, 1, 1, 0)
        M.init.xavier_uniform_(self.c_sc.weight, 1.0)
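# How such a residual block is typically applied (a sketch of the standard SNGAN-style
# pre-activation wiring, not the repo's verified forward): `learnable_sc` decides whether
# the shortcut needs the 1x1 conv, and `downsample` pools both branches.
import megengine.functional as F

def _resblock_forward_sketch(blk, x):
    h = blk.c2(blk.activation(blk.c1(blk.activation(x))))
    sc = blk.c_sc(x) if blk.learnable_sc else x
    if blk.downsample:
        h = F.avg_pool2d(h, 2)
        sc = F.avg_pool2d(sc, 2)
    return h + sc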
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    super(BasicBlock, self).__init__()
    if groups != 1 or base_width != 64:
        raise ValueError("BasicBlock only supports groups=1 and base_width=64")
    if dilation > 1:
        raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    self.conv1 = M.Conv2d(in_channels, channels, 3, stride, padding=dilation, bias=False)
    self.bn1 = norm(channels)
    self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=False)
    self.bn2 = norm(channels)
    self.downsample = (
        M.Identity()
        if in_channels == channels and stride == 1
        else M.Sequential(
            M.Conv2d(in_channels, channels, 1, stride, bias=False),
            norm(channels),
        )
    )
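# The matching forward (a sketch following the standard torchvision-style BasicBlock; the
# repo's own forward may differ in detail). M.Identity() makes the no-op shortcut uniform:
import megengine.functional as F

def _basicblock_forward_sketch(blk, x):
    identity = blk.downsample(x)  # M.Identity() when shape and stride already match
    out = F.relu(blk.bn1(blk.conv1(x)))
    out = blk.bn2(blk.conv2(out))
    return F.relu(out + identity)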
def __init__(self, channel_num):
    super(CARBBlock, self).__init__()
    self.conv1 = M.Sequential(
        M.Conv2d(channel_num, channel_num, kernel_size=3, padding=1, stride=1),
        M.ReLU(),
        M.Conv2d(channel_num, channel_num, kernel_size=3, padding=1, stride=1),
    )
    # self.global_average_pooling = nn.AdaptiveAvgPool2d((1, 1))  # B,C,H,W -> B,C,1,1
    self.linear = M.Sequential(
        M.Linear(channel_num, channel_num // 2),
        M.ReLU(),
        M.Linear(channel_num // 2, channel_num),
        M.Sigmoid())
    self.conv2 = M.Conv2d(channel_num * 2, channel_num, kernel_size=1, padding=0, stride=1)
    self.lrelu = M.LeakyReLU()
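# One wiring consistent with the layer shapes above (an assumption, not the repo's verified
# forward): SE-style gates from `linear` scale conv1's output, which is concatenated with
# the input and fused by the 1x1 conv2 before the residual add.
import megengine.functional as F

def _carb_forward_sketch(blk, x):
    b, c = x.shape[0], x.shape[1]
    feat = blk.conv1(x)                                 # B, C, H, W
    w = F.adaptive_avg_pool2d(feat, 1).reshape(b, c)    # B, C (global average pooling)
    w = blk.linear(w).reshape(b, c, 1, 1)               # B, C, 1, 1 channel gates
    fused = blk.conv2(F.concat([x, feat * w], axis=1))  # (B, 2C, H, W) -> (B, C, H, W)
    return blk.lrelu(fused) + x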
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    super().__init__()
    width = int(channels * (base_width / 64.0)) * groups
    self.conv1 = M.Conv2d(in_channels, width, 1, 1, bias=False)
    self.bn1 = norm(width)
    self.conv2 = M.Conv2d(
        width,
        width,
        3,
        stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
    self.bn2 = norm(width)
    self.conv3 = M.Conv2d(width, channels * self.expansion, 1, 1, bias=False)
    self.bn3 = norm(channels * self.expansion)
    self.downsample = (
        M.Identity()
        if in_channels == channels * self.expansion and stride == 1
        else M.Sequential(
            M.Conv2d(in_channels, channels * self.expansion, 1, stride, bias=False),
            norm(channels * self.expansion),
        )
    )
def __init__(self, bottom_up):
    super(FPN, self).__init__()
    in_channels_list = [256, 512, 1024, 2048]
    fpn_dim = 256
    use_bias = True

    lateral_convs, output_convs = [], []
    for in_channels in in_channels_list:
        lateral_conv = M.Conv2d(in_channels, fpn_dim, kernel_size=1, bias=use_bias)
        output_conv = M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
        M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
        M.init.msra_normal_(output_conv.weight, mode="fan_in")
        if use_bias:
            M.init.fill_(lateral_conv.bias, 0)
            M.init.fill_(output_conv.bias, 0)
        lateral_convs.append(lateral_conv)
        output_convs.append(output_conv)
    # store top-down (res5 first), matching the order the features are consumed
    self.lateral_convs = lateral_convs[::-1]
    self.output_convs = output_convs[::-1]
    self.bottom_up = bottom_up
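# Top-down pathway sketch explaining why the lateral/output convs are stored reversed
# (assumptions: `features` arrive bottom-up as res2..res5, and interpolate lives under
# F.vision in MegEngine >= 1.x; older releases expose it as F.nn.interpolate):
import megengine.functional as F

def _fpn_topdown_sketch(fpn, features):
    results, prev = [], None
    for lateral, output, feat in zip(fpn.lateral_convs, fpn.output_convs, features[::-1]):
        lat = lateral(feat)
        if prev is not None:
            lat = lat + F.vision.interpolate(prev, scale_factor=2, mode="bilinear")
        prev = lat
        results.insert(0, output(lat))
    return results  # p2..p5, fpn_dim channels each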
def __init__(self, inp, ksize, stride):
    super().__init__()
    self.M = 2  # expansion ratio of the weight-generating layer (WeightNet)
    self.G = 2  # group factor of the weight-generating layer
    self.pad = ksize // 2
    inp_gap = max(16, inp // 16)
    self.inp = inp
    self.ksize = ksize
    self.stride = stride
    # two grouped 1x1 convs generate a depthwise kernel from the reduced GAP vector
    self.wn_fc1 = M.Conv2d(inp_gap, self.M // self.G * inp, 1, 1, 0, groups=1, bias=True)
    self.sigmoid = M.Sigmoid()
    self.wn_fc2 = M.Conv2d(self.M // self.G * inp, inp * ksize * ksize, 1, 1, 0, groups=inp, bias=False)
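# Sketch of the WeightNet idea these layers implement (hedged: the reshape/group details
# follow the paper's depthwise variant and may differ from the repo). `x_gap` is the
# reduced GAP vector of shape (B, inp_gap, 1, 1); note that MegEngine grouped-conv
# weights are 5D: (groups, out/g, in/g, kh, kw).
import megengine.functional as F

def _weightnet_forward_sketch(wn, x, x_gap):
    b = x.shape[0]
    w = wn.wn_fc2(wn.sigmoid(wn.wn_fc1(x_gap)))          # B, inp*k*k, 1, 1
    w = w.reshape(b * wn.inp, 1, 1, wn.ksize, wn.ksize)  # per-sample depthwise kernels
    x = x.reshape(1, b * wn.inp, x.shape[2], x.shape[3])
    out = F.conv2d(x, w, stride=wn.stride, padding=wn.pad, groups=b * wn.inp)
    return out.reshape(b, wn.inp, out.shape[2], out.shape[3])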
def __init__(self, in_planes, planes, stride=1):
    super(BasicBlock, self).__init__()
    self.conv1 = M.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
    self.bn1 = M.BatchNorm2d(planes)
    self.conv2 = M.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn2 = M.BatchNorm2d(planes)
    self.shortcut = M.Sequential()
    # `expansion` is assumed to be a class attribute (1 for BasicBlock)
    if stride != 1 or in_planes != planes:
        self.shortcut = M.Sequential(
            M.Conv2d(
                in_planes,
                self.expansion * planes,
                kernel_size=1,
                stride=stride,
                bias=False,
            ),
            M.BatchNorm2d(self.expansion * planes),
        )
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(FirstCell, self).__init__()
    self.conv_1x1 = []
    self.conv_1x1.append(M.ReLU())
    self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
    self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
    self.conv_1x1 = M.Sequential(*self.conv_1x1)

    self.relu = M.ReLU()
    self.path_1 = []
    self.path_1.append(M.AvgPool2d(1, stride=2))
    self.path_1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
    self.path_1 = M.Sequential(*self.path_1)

    self.path_2 = []
    # self.path_2.append(M.ZeroPad2d((0, 1, 0, 1)))
    self.path_2.append(M.AvgPool2d(1, stride=2))
    self.path_2.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
    self.path_2 = M.Sequential(*self.path_2)

    self.final_path_bn = M.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)

    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_2_left = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_3_left = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
def __init__(self, cfg, input_shape: List[layers.ShapeSpec]):
    super().__init__()
    in_channels = input_shape[0].channels
    num_classes = cfg.num_classes
    num_convs = 4
    prior_prob = cfg.cls_prior_prob
    num_anchors = [
        len(cfg.anchor_scales[i]) * len(cfg.anchor_ratios[i])
        for i in range(len(input_shape))
    ]
    assert (
        len(set(num_anchors)) == 1
    ), "different numbers of anchors between levels are not supported"
    num_anchors = num_anchors[0]

    cls_subnet = []
    bbox_subnet = []
    for _ in range(num_convs):
        cls_subnet.append(
            M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
        cls_subnet.append(M.ReLU())
        bbox_subnet.append(
            M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
        bbox_subnet.append(M.ReLU())
    self.cls_subnet = M.Sequential(*cls_subnet)
    self.bbox_subnet = M.Sequential(*bbox_subnet)
    self.cls_score = M.Conv2d(
        in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1)
    self.bbox_pred = M.Conv2d(
        in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)

    # Initialization
    for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]:
        for layer in modules.modules():
            if isinstance(layer, M.Conv2d):
                M.init.normal_(layer.weight, mean=0, std=0.01)
                M.init.fill_(layer.bias, 0)

    # Use prior in model initialization to improve stability
    bias_value = -math.log((1 - prior_prob) / prior_prob)
    M.init.fill_(self.cls_score.bias, bias_value)
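# Why the cls bias is initialized this way: with b = -log((1 - p) / p), a zero-weight
# layer outputs sigmoid(b) = p, so every anchor starts at foreground probability
# cls_prior_prob and focal loss is not swamped by easy background early on. A quick
# self-contained check (p = 0.01 is a typical cls_prior_prob):
import math

p = 0.01
b = -math.log((1 - p) / p)
assert abs(1.0 / (1.0 + math.exp(-b)) - p) < 1e-12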
def __init__(self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d):
    super(SingleStage, self).__init__()
    self.down = ResnetBody(block, init_channel, layers, channels, norm)
    channel = block.expansion * channels[-1]
    self.up1 = M.Sequential(
        M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
    self.deconv1 = M.Sequential(
        M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel))
    channel = block.expansion * channels[-2]
    self.up2 = M.Sequential(
        M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
    self.deconv2 = M.Sequential(
        M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel))
    channel = block.expansion * channels[-3]
    self.up3 = M.Sequential(
        M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
    self.deconv3 = M.Sequential(
        M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel))
    channel = block.expansion * channels[-4]
    self.up4 = M.Sequential(
        M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel))
def __init__(self, in_channels, out_channels, kernel_size=3, activation='prelu'):
    super(ResBlock, self).__init__()
    if activation == 'relu':
        self.act = M.ReLU()
    elif activation == 'prelu':
        self.act = M.PReLU(num_parameters=1, init=0.25)
    else:
        raise NotImplementedError("activation '%s' is not implemented" % activation)
    m = []
    m.append(
        M.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                 stride=1, padding=(kernel_size // 2)))
    m.append(self.act)
    m.append(
        M.Conv2d(out_channels, out_channels, kernel_size=kernel_size,
                 stride=1, padding=(kernel_size // 2)))
    self.body = M.Sequential(*m)
def __init__(self, in_channels, out_channels, dw_kernel, dw_stride, dw_padding, bias=False):
    super(SeparableConv2d, self).__init__()
    self.depthwise_conv2d = M.Conv2d(in_channels, in_channels, dw_kernel,
                                     stride=dw_stride, padding=dw_padding,
                                     bias=bias, groups=in_channels)
    self.pointwise_conv2d = M.Conv2d(in_channels, out_channels, 1, stride=1, bias=bias)
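# What the depthwise/pointwise factorization buys, as self-contained arithmetic: a k x k
# depthwise conv plus a 1x1 pointwise conv replaces one dense k x k conv.
cin, cout, k = 128, 256, 3
dense = cin * cout * k * k             # 294,912 weights
separable = cin * k * k + cin * cout   # 1,152 + 32,768 = 33,920 weights
assert dense / separable > 8           # ~8.7x fewer parameters at this size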
def __init__(self, ch):
    super(AU, self).__init__()
    self.conv = M.Sequential(
        M.Conv2d(2 * ch, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
    )
def __init__(self, in_channels: int, out_channels: int, in_feature="res5"):
    super().__init__()
    self.num_levels = 2
    if in_feature == "p5":
        assert in_channels == out_channels
    self.in_feature = in_feature
    self.p6 = M.Conv2d(in_channels, out_channels, 3, 2, 1)
    self.p7 = M.Conv2d(out_channels, out_channels, 3, 2, 1)
def __init__(self, in_ch, out_ch):
    super(DoubleConv, self).__init__()
    self.conv = M.Sequential(
        M.Conv2d(in_ch, out_ch, 3, padding=1),
        M.BatchNorm2d(out_ch),
        M.ReLU(),
        M.Conv2d(out_ch, out_ch, 3, padding=1),
        M.BatchNorm2d(out_ch),
        M.ReLU())
def conv_dw(inp, oup, stride):
    return M.Sequential(
        M.Conv2d(inp, inp, 3, stride, 1, groups=inp),
        M.BatchNorm2d(inp),
        M.ReLU(),
        M.Conv2d(inp, oup, 1, 1, 0),
        M.BatchNorm2d(oup),
        M.ReLU(),
    )
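# conv_dw is self-contained, so it can be exercised directly (assuming the module-level
# `import megengine.module as M` this file already relies on):
import numpy as np
import megengine as mge

block = conv_dw(32, 64, stride=2)
y = block(mge.tensor(np.random.randn(1, 32, 56, 56).astype("float32")))
print(y.shape)  # (1, 64, 28, 28): depthwise stride-2 halves H/W, pointwise maps 32 -> 64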
def __init__(self, inp, oup, reduction=32):
    super(CoordAtt, self).__init__()
    # self.pool_h = M.AdaptiveAvgPool2d((None, 1))
    # self.pool_w = M.AdaptiveAvgPool2d((1, None))
    mip = max(16, inp // reduction)  # floored at 16: any inp <= 512 gives mip = 16
    self.conv1 = M.Conv2d(inp, mip, kernel_size=1, stride=1, padding=0)
    # self.bn1 = M.BatchNorm2d(mip)
    self.act = h_swish()
    self.conv_h = M.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
    self.conv_w = M.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
    self.init_weights()
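# Coordinate-attention forward sketch using these layers (hedged: the commented-out
# adaptive pools suggest directional pooling is emulated, here with mean reductions;
# the elementwise product at the end assumes oup == inp):
import megengine.functional as F

def _coordatt_forward_sketch(ca, x):
    b, c, h, w = x.shape
    x_h = F.mean(x, axis=3, keepdims=True)                             # B, C, H, 1: pool along W
    x_w = F.transpose(F.mean(x, axis=2, keepdims=True), (0, 1, 3, 2))  # B, C, W, 1: pool along H
    y = ca.act(ca.conv1(F.concat([x_h, x_w], axis=2)))                 # shared 1x1 over the H+W strip
    a_h = F.sigmoid(ca.conv_h(y[:, :, :h]))                            # B, oup, H, 1
    a_w = F.sigmoid(ca.conv_w(F.transpose(y[:, :, h:], (0, 1, 3, 2)))) # B, oup, 1, W
    return x * a_h * a_w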
def __init__(self, cl, ch):
    super(SISR_Block, self).__init__()
    self.num_stages = 3
    self.pre_deal = M.Conv2d(cl, ch, kernel_size=1, stride=1, padding=0)
    self.prelu = M.PReLU(num_parameters=1, init=0.25)
    self.UPU1 = UPU(ch, 8, stride=4, padding=2)
    self.UPU2 = UPU(ch, 8, stride=4, padding=2)
    self.UPU3 = UPU(ch, 8, stride=4, padding=2)
    self.DPU1 = DPU(ch, 8, stride=4, padding=2)
    self.DPU2 = DPU(ch, 8, stride=4, padding=2)
    self.reconstruction = M.Conv2d(self.num_stages * ch, ch, kernel_size=1, stride=1, padding=0)
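# The three UPUs and two DPUs fit the DBPN-style up/down projection chain, and
# reconstruction's input width (num_stages * ch) matches concatenating the three HR
# features (a sketch; the repo's forward may order the concat differently):
import megengine.functional as F

def _sisr_forward_sketch(blk, x):
    l0 = blk.prelu(blk.pre_deal(x))  # cl -> ch
    h1 = blk.UPU1(l0)
    h2 = blk.UPU2(blk.DPU1(h1))
    h3 = blk.UPU3(blk.DPU2(h2))
    return blk.reconstruction(F.concat([h3, h2, h1], axis=1))  # 3*ch -> ch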
def __init__(self, ch=128, nframes=7, input_nc=3, output_nc=3, upscale_factor=4,
             blocknums1=5, blocknums2=15, non_local=True):
    super(MUCANV2, self).__init__()
    self.nframes = nframes
    self.upscale_factor = upscale_factor
    # build three feature scales for each LR frame
    self.feature_encoder_carb = M.Sequential(
        M.Conv2d(input_nc, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(negative_slope=0.05),
        CARBBlocks(channel_num=ch, block_num=blocknums1)
    )
    self.fea_L1_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L1_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.fea_L2_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L2_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.lrelu = M.LeakyReLU(negative_slope=0.05)
    self.AU0 = AU(ch=ch)
    self.AU1 = AU(ch=ch)
    self.AU2 = AU(ch=ch)
    self.UP0 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.UP1 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    if non_local:
        self.non_local = Separate_non_local(ch, nframes)
    else:
        self.non_local = Identi()
    self.aggre = M.Conv2d(ch * self.nframes, ch, kernel_size=3, stride=1, padding=1)
    self.carbs = M.Sequential(
        CARBBlocks(channel_num=ch, block_num=blocknums2),
    )
    self.main_conv = M.Sequential(
        M.Conv2d(ch, ch * 4, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        PixelShuffle(scale=2),  # channels: ch (128 when ch=128)
        M.Conv2d(ch, ch * 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        PixelShuffle(scale=2),  # channels: ch // 2 (64 when ch=128)
        M.Conv2d(ch // 2, ch // 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch // 2, 3, kernel_size=3, stride=1, padding=1)
    )
def __init__(self):
    super().__init__()
    self.conv0 = M.Conv2d(1, 20, kernel_size=5, bias=False)
    self.bn0 = M.BatchNorm2d(20)
    self.relu0 = M.ReLU()
    self.pool0 = M.MaxPool2d(2)
    self.conv1 = M.Conv2d(20, 20, kernel_size=5, bias=False)
    self.bn1 = M.BatchNorm2d(20)
    self.relu1 = M.ReLU()
    self.pool1 = M.MaxPool2d(2)
    # 500 = 20 * 5 * 5, which assumes a 32x32 input: two unpadded 5x5 convs and two
    # 2x2 pools give 32 -> 28 -> 14 -> 10 -> 5
    self.fc0 = M.Linear(500, 64, bias=True)
    self.relu2 = M.ReLU()
    self.fc1 = M.Linear(64, 10, bias=True)
def __init__(self, in_size, out_size, downsample, relu_slope):
    super(UNetConvBlock, self).__init__()
    self.block = nn.Sequential(
        nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
        nn.LeakyReLU(relu_slope),
        nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
        nn.LeakyReLU(relu_slope))
    self.downsample = downsample
    if downsample:
        self.downsample = conv_down(out_size, out_size, bias=False)
    self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
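# The matching forward in HINet-style blocks (a sketch: `self.downsample` doubles as a
# flag and, when truthy, holds the strided conv returned by conv_down):
def _unet_conv_block_forward_sketch(blk, x):
    out = blk.block(x) + blk.shortcut(x)  # residual over the double conv
    if blk.downsample:
        return blk.downsample(out), out   # (downsampled tensor, skip connection)
    return out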
def __init__(self, in_channels):
    super().__init__()
    self.conv_frelu1 = M.Conv2d(in_channels, in_channels, (1, 3), 1, (0, 1),
                                groups=in_channels, bias=False)
    self.conv_frelu2 = M.Conv2d(in_channels, in_channels, (3, 1), 1, (1, 0),
                                groups=in_channels, bias=False)
    self.bn1 = M.BatchNorm2d(in_channels)
    self.bn2 = M.BatchNorm2d(in_channels)
def __init__(self, rpn_channel=256):
    super().__init__()
    self.anchors_generator = AnchorGenerator(
        config.anchor_base_size,
        config.anchor_aspect_ratios,
        config.anchor_base_scale)
    self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
    self.rpn_cls_score = M.Conv2d(rpn_channel, config.num_cell_anchors * 2,
                                  kernel_size=1, stride=1)
    self.rpn_bbox_offsets = M.Conv2d(rpn_channel, config.num_cell_anchors * 4,
                                     kernel_size=1, stride=1)
    for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
        M.init.normal_(l.weight, std=0.01)
        M.init.fill_(l.bias, 0)