def __init__(self, in_channels, inner_channels=128, fpem_repeat=2, **kwargs):
    """
    PANnet
    :param in_channels: output channel dimensions of the backbone network
    """
    super().__init__()
    self.conv_out = inner_channels
    inplace = True
    # reduce layers
    self.reduce_conv_c2 = ConvBnRelu(in_channels[0], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c3 = ConvBnRelu(in_channels[1], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c4 = ConvBnRelu(in_channels[2], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c5 = ConvBnRelu(in_channels[3], inner_channels, kernel_size=1, inplace=inplace)
    # stacked FPEM blocks
    self.fpems = nn.ModuleList()
    for i in range(fpem_repeat):
        self.fpems.append(FPEM(self.conv_out))
    # the four fused feature maps are concatenated, hence conv_out * 4
    self.out_channels = self.conv_out * 4
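# Hedged sketch (not part of the original source): one plausible forward pass for the layers
# defined above, following the usual PANnet FPEM + FFM wiring. It assumes that `torch` and
# `torch.nn.functional as F` are imported at module level and that FPEM.forward accepts and
# returns the four feature maps (c2, c3, c4, c5); the fusion step (summing the FPEM outputs,
# upsampling to c2's resolution and concatenating to conv_out * 4 channels) is an assumption
# chosen to match self.out_channels above.
def forward(self, x):
    c2, c3, c4, c5 = x
    # reduce every backbone level to inner_channels
    c2 = self.reduce_conv_c2(c2)
    c3 = self.reduce_conv_c3(c3)
    c4 = self.reduce_conv_c4(c4)
    c5 = self.reduce_conv_c5(c5)
    # run the stacked FPEM blocks and accumulate their outputs (FFM)
    for i, fpem in enumerate(self.fpems):
        c2, c3, c4, c5 = fpem(c2, c3, c4, c5)
        if i == 0:
            c2_ffm, c3_ffm, c4_ffm, c5_ffm = c2, c3, c4, c5
        else:
            c2_ffm = c2_ffm + c2
            c3_ffm = c3_ffm + c3
            c4_ffm = c4_ffm + c4
            c5_ffm = c5_ffm + c5
    # upsample everything to c2's resolution and concatenate -> conv_out * 4 channels
    size = c2_ffm.size()[2:]
    c3_ffm = F.interpolate(c3_ffm, size=size, mode='bilinear', align_corners=False)
    c4_ffm = F.interpolate(c4_ffm, size=size, mode='bilinear', align_corners=False)
    c5_ffm = F.interpolate(c5_ffm, size=size, mode='bilinear', align_corners=False)
    return torch.cat([c2_ffm, c3_ffm, c4_ffm, c5_ffm], dim=1)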
def __init__(self, in_channels, inner_channels=256, **kwargs):
    """
    :param in_channels: output channel dimensions of the backbone network
    :param kwargs:
    """
    super().__init__()
    inplace = True
    self.conv_out = inner_channels
    inner_channels = inner_channels // 4
    # reduce layers
    self.reduce_conv_c2 = ConvBnRelu(in_channels[0], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c3 = ConvBnRelu(in_channels[1], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c4 = ConvBnRelu(in_channels[2], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c5 = ConvBnRelu(in_channels[3], inner_channels, kernel_size=1, inplace=inplace)
    # Smooth layers
    self.smooth_p4 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    self.smooth_p3 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    self.smooth_p2 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    # fuse the concatenated pyramid levels back to conv_out channels
    self.conv = nn.Sequential(
        nn.Conv2d(self.conv_out, self.conv_out, kernel_size=3, padding=1, stride=1),
        nn.BatchNorm2d(self.conv_out),
        nn.ReLU(inplace=inplace)
    )
    self.out_channels = self.conv_out
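# Hedged sketch (not in the original source): a plausible forward pass for this FPN head,
# following the standard top-down pathway used by DBNet-style detectors. It assumes `torch`
# and `torch.nn.functional as F` are imported; the helper names _upsample_add / _upsample_cat
# and the bilinear interpolation mode are illustrative choices, not confirmed by the source.
def forward(self, x):
    c2, c3, c4, c5 = x
    # lateral 1x1 convolutions plus top-down fusion and smoothing
    p5 = self.reduce_conv_c5(c5)
    p4 = self.smooth_p4(self._upsample_add(p5, self.reduce_conv_c4(c4)))
    p3 = self.smooth_p3(self._upsample_add(p4, self.reduce_conv_c3(c3)))
    p2 = self.smooth_p2(self._upsample_add(p3, self.reduce_conv_c2(c2)))
    # bring every level to p2's resolution, concatenate and fuse back to conv_out channels
    x = self._upsample_cat(p2, p3, p4, p5)
    return self.conv(x)

def _upsample_add(self, x, y):
    # upsample the coarser map to y's spatial size and add element-wise
    return F.interpolate(x, size=y.size()[2:], mode='bilinear', align_corners=False) + y

def _upsample_cat(self, p2, p3, p4, p5):
    # upsample p3..p5 to p2's resolution and concatenate along the channel axis
    h, w = p2.size()[2:]
    p3 = F.interpolate(p3, size=(h, w), mode='bilinear', align_corners=False)
    p4 = F.interpolate(p4, size=(h, w), mode='bilinear', align_corners=False)
    p5 = F.interpolate(p5, size=(h, w), mode='bilinear', align_corners=False)
    return torch.cat([p2, p3, p4, p5], dim=1)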
def __init__(self, in_channels, inner_channels=256, **kwargs):
    """
    :param in_channels: output channel dimensions of the backbone network, e.g. [64, 128, 256, 512]
    :param kwargs:
    """
    super().__init__()
    inplace = True
    self.conv_out = inner_channels
    inner_channels = inner_channels // 4  # 256 // 4 = 64
    # reduce layers
    self.reduce_conv_c2 = ConvBnRelu(in_channels[0], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c3 = ConvBnRelu(in_channels[1], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c4 = ConvBnRelu(in_channels[2], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c5 = ConvBnRelu(in_channels[3], inner_channels, kernel_size=1, inplace=inplace)
    # Smooth layers (3x3 conv, padding 1, stride 1)
    self.smooth_p4 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    self.smooth_p3 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    self.smooth_p2 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    # self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
    # fuse the concatenated pyramid levels back to conv_out channels
    self.conv = nn.Sequential(
        nn.Conv2d(self.conv_out, self.conv_out, kernel_size=3, padding=1, stride=1),
        nn.BatchNorm2d(self.conv_out),
        nn.ReLU(inplace=inplace)
    )
    self.out_channels = self.conv_out
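# Hedged usage sketch (not in the original source): feeding dummy ResNet-18-style feature maps
# through this neck to check that the fused output keeps conv_out (= 256) channels at the
# stride-4 resolution of c2. The class name `FPN`, the input shapes, and the tuple-of-features
# calling convention are assumptions for illustration, and rely on a forward pass like the
# sketch above.
if __name__ == '__main__':
    import torch
    neck = FPN(in_channels=[64, 128, 256, 512], inner_channels=256)
    c2 = torch.randn(1, 64, 160, 160)   # stride 4
    c3 = torch.randn(1, 128, 80, 80)    # stride 8
    c4 = torch.randn(1, 256, 40, 40)    # stride 16
    c5 = torch.randn(1, 512, 20, 20)    # stride 32
    out = neck((c2, c3, c4, c5))
    print(out.shape)  # expected: torch.Size([1, 256, 160, 160])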
def __init__(self, model_config: dict, layers=[2, 2, 2, 2], in_channels=3, inner_channels=256, k=50):
    """
    PANnet
    :param model_config: model configuration
    """
    super().__init__()
    self.name = 'resnet18_fpn_db'
    self.inplanes = 64
    # ResNet-18 backbone (BasicBlock, expansion = 1): stage output channels
    self.backbone_out_channels = [64, 128, 256, 512]
    self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(BasicBlock, 64, layers[0])
    self.layer2 = self._make_layer(BasicBlock, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(BasicBlock, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(BasicBlock, 512, layers[3], stride=2)

    inplace = True
    self.conv_out = inner_channels
    inner_channels = inner_channels // 4
    # reduce layers
    self.reduce_conv_c2 = ConvBnRelu(self.backbone_out_channels[0], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c3 = ConvBnRelu(self.backbone_out_channels[1], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c4 = ConvBnRelu(self.backbone_out_channels[2], inner_channels, kernel_size=1, inplace=inplace)
    self.reduce_conv_c5 = ConvBnRelu(self.backbone_out_channels[3], inner_channels, kernel_size=1, inplace=inplace)
    # Smooth layers
    self.smooth_p4 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    self.smooth_p3 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    self.smooth_p2 = ConvBnRelu(inner_channels, inner_channels, kernel_size=3, padding=1, inplace=inplace)
    # fuse the concatenated pyramid levels back to conv_out channels
    self.conv = nn.Sequential(
        nn.Conv2d(self.conv_out, self.conv_out, kernel_size=3, padding=1, stride=1),
        nn.BatchNorm2d(self.conv_out),
        nn.ReLU(inplace=inplace)
    )
    self.out_channels = self.conv_out

    # DB heads: probability (shrink) map and threshold map, steepness factor k
    self.k = k
    self.binarize = nn.Sequential(
        nn.Conv2d(self.out_channels, self.out_channels // 4, 3, padding=1),
        nn.BatchNorm2d(self.out_channels // 4),
        nn.ReLU(inplace=True),
        nn.ConvTranspose2d(self.out_channels // 4, self.out_channels // 4, 2, 2),
        nn.BatchNorm2d(self.out_channels // 4),
        nn.ReLU(inplace=True),
        nn.ConvTranspose2d(self.out_channels // 4, 1, 2, 2),
        nn.Sigmoid()
    )
    self.binarize.apply(self.weights_init)
    self.thresh = self._init_thresh(self.out_channels)
    self.thresh.apply(self.weights_init)
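# Hedged sketch (not in the original source): one plausible forward pass wiring the ResNet-18
# backbone, the FPN fusion, and the DB heads defined above. It assumes `torch` is imported and
# that the same _upsample_add/_upsample_cat helpers sketched for the FPN head are available on
# this class. The step_function follows the standard DB approximate binarization
# 1 / (1 + exp(-k * (P - T))); returning all three maps concatenated is an assumption.
def forward(self, x):
    # backbone
    x = self.relu(self.bn1(self.conv1(x)))
    x = self.maxpool(x)
    c2 = self.layer1(x)
    c3 = self.layer2(c2)
    c4 = self.layer3(c3)
    c5 = self.layer4(c4)
    # FPN top-down fusion
    p5 = self.reduce_conv_c5(c5)
    p4 = self.smooth_p4(self._upsample_add(p5, self.reduce_conv_c4(c4)))
    p3 = self.smooth_p3(self._upsample_add(p4, self.reduce_conv_c3(c3)))
    p2 = self.smooth_p2(self._upsample_add(p3, self.reduce_conv_c2(c2)))
    fuse = self.conv(self._upsample_cat(p2, p3, p4, p5))
    # DB heads
    shrink_maps = self.binarize(fuse)
    threshold_maps = self.thresh(fuse)
    binary_maps = self.step_function(shrink_maps, threshold_maps)
    return torch.cat([shrink_maps, threshold_maps, binary_maps], dim=1)

def step_function(self, x, y):
    # differentiable (approximate) binarization: 1 / (1 + exp(-k * (P - T)))
    return torch.reciprocal(1 + torch.exp(-self.k * (x - y)))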