def __init__(self, config):
    super(ResNetUNet, self).__init__()
    self.n_classes = config.NUM_CLASSES
    self.padding = 1
    self.up_mode = 'upconv'
    assert self.up_mode in ('upconv', 'upsample')
    # ResNet101v2 encoder; its last stage outputs 2048 channels
    self.encode = ResNet101v2()
    prev_channels = 2048
    # three decoder blocks, each halving the channel count: 2048 -> 1024 -> 512 -> 256
    self.up_path = nn.ModuleList()
    for i in range(3):
        self.up_path.append(
            UNetUpBlock(prev_channels, prev_channels // 2, self.up_mode, self.padding)
        )
        prev_channels //= 2
    # classifier head: 3x3 conv blocks down to 16 channels, then 1x1 conv to class logits
    self.cls_conv_block1 = Block(prev_channels, 32)
    self.cls_conv_block2 = Block(32, 16)
    self.last = nn.Conv2d(16, self.n_classes, kernel_size=1)
    # Kaiming init for conv layers, constant init for batch norm
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
def __init__(self, args):
    super(ResNetUNet, self).__init__()
    # network hyper-parameters should match deeplabv3p (same config)
    self.n_classes = 8
    self.padding = 1
    self.up_mode = 'upconv'
    assert self.up_mode in ('upconv', 'upsample')
    # encoder switched to ResNet101v2
    self.encode = ResNet101v2()
    # the last encoder stage outputs 2048 channels
    prev_channels = 2048
    self.up_path = nn.ModuleList()
    self.batch_norm = True
    for i in range(3):
        self.up_path.append(
            UNetUpBlock(prev_channels, prev_channels // 2, self.up_mode, self.padding, self.batch_norm)
        )
        prev_channels //= 2
    self.cls_conv_block1 = Block(prev_channels, 32)
    self.cls_conv_block2 = Block(32, 16)
    self.last = nn.Conv2d(16, self.n_classes, kernel_size=1)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
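# A minimal sketch (assumption) of a UNetUpBlock matching the
# (in_ch, out_ch, up_mode, padding, batch_norm) call above: 2x upsampling,
# concatenation with the encoder skip feature, then a conv block.
# The actual UNetUpBlock in this repo may differ.
import torch
import torch.nn as nn

class UNetUpBlockSketch(nn.Module):
    def __init__(self, in_ch, out_ch, up_mode='upconv', padding=1, batch_norm=True):
        super(UNetUpBlockSketch, self).__init__()
        if up_mode == 'upconv':
            # learnable 2x upsampling via transposed convolution
            self.up = nn.ConvTranspose2d(in_ch, out_ch, kernel_size=2, stride=2)
        else:  # 'upsample'
            self.up = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
                nn.Conv2d(in_ch, out_ch, kernel_size=1),
            )
        layers = [nn.Conv2d(out_ch * 2, out_ch, kernel_size=3, padding=padding)]
        if batch_norm:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU(inplace=True))
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x, skip):
        x = self.up(x)
        # concatenate with the same-resolution encoder feature along channels
        x = torch.cat([x, skip], dim=1)
        return self.conv_block(x)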
def __init__(self, config):
    super(ResNetUNet, self).__init__()
    self.n_classes = config.NUM_CLASSES
    self.encode = ResNet101v2()
    prev_channels = 2048
    self.up_path = nn.ModuleList()
    for i in range(3):
        self.up_path.append(UNetUpBlock(prev_channels, prev_channels // 2))
        prev_channels //= 2
    self.cls_conv_block1 = Block(prev_channels, 32)
    self.cls_conv_block2 = Block(32, 16)
    self.last = nn.Conv2d(16, self.n_classes, kernel_size=1)
    self.init_weight()
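# init_weight() is called above but not shown here. A sketch under the
# assumption that it mirrors the explicit initialization loop used in the
# other variants of this constructor:
def init_weight(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            # Kaiming init for conv weights (ReLU non-linearity)
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)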
def __init__(self):
    super(ResNet101v2, self).__init__()
    # stem: 7x7 conv, stride 2, followed by a 3x3 max pool
    self.conv1 = Block(3, 64, 7, 3, 2)
    self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
    # conv2_x: bottleneck blocks with 256 output channels
    self.conv2_1 = DownBottleneck(64, 256, stride=1)
    self.conv2_2 = Bottleneck(256, 256)
    self.conv2_3 = Bottleneck(256, 256)
    # deeper stages with 512, 1024 and 2048 output channels
    self.layer3 = Layer(256, [512] * 2, "resnet")
    self.layer4 = Layer(512, [1024] * 23, "resnet")
    self.layer5 = Layer(1024, [2048] * 3, "resnet")
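# Hedged sketch (assumption): a forward pass for ResNet101v2 that returns the
# intermediate stage outputs so the U-Net decoder can use them as skip
# connections. The repo's actual forward may return a different set of features.
def forward(self, x):
    f1 = self.conv1(x)                                              # /2,  64 ch
    f2 = self.conv2_3(self.conv2_2(self.conv2_1(self.pool1(f1))))  # /4,  256 ch
    f3 = self.layer3(f2)                                            # /8,  512 ch
    f4 = self.layer4(f3)                                            # /16, 1024 ch
    f5 = self.layer5(f4)                                            # /32, 2048 ch
    return f2, f3, f4, f5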
def __init__(self, config):
    super(ResNetUNet, self).__init__()
    # network hyper-parameters should match deeplabv3p (same config)
    self.n_classes = config.NUM_CLASSES
    self.padding = 1
    self.up_mode = 'upconv'
    assert self.up_mode in ('upconv', 'upsample')
    # encoder switched to ResNet101v2
    self.encode = ResNet101v2()
    # the last encoder stage outputs 2048 channels
    prev_channels = 2048
    # self.up_path is an nn.ModuleList holding three UNetUpBlock upsampling modules,
    # each halving the channel count:
    # [UNetUpBlock(2048, 1024), UNetUpBlock(1024, 512), UNetUpBlock(512, 256)]
    self.up_path = nn.ModuleList()
    for i in range(3):
        self.up_path.append(
            UNetUpBlock(prev_channels, prev_channels // 2, self.up_mode, self.padding)
        )
        prev_channels //= 2
    # Block: input --> conv --> bn --> relu --> out (default 3x3 conv)
    # cls_conv_block1 / cls_conv_block2 use this pattern to reduce channels to 32, then 16
    self.cls_conv_block1 = Block(prev_channels, 32)
    self.cls_conv_block2 = Block(32, 16)
    # self.last: 1x1 conv projecting down to self.n_classes channels
    self.last = nn.Conv2d(16, self.n_classes, kernel_size=1)
    # Kaiming initialization for every conv layer, constant init for batch norm
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
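# Hedged sketch (assumption): how ResNetUNet.forward() might wire the encoder
# features through the three UNetUpBlocks and the classifier head defined above.
# It assumes the encoder returns /4, /8, /16, /32 features and that UNetUpBlock
# takes (x, skip); the final interpolation back to input resolution is also an
# assumption. Requires: import torch.nn.functional as F
def forward(self, x):
    f2, f3, f4, f5 = self.encode(x)              # /4, /8, /16, /32 features
    out = f5                                     # 2048 channels at 1/32 resolution
    for up, skip in zip(self.up_path, (f4, f3, f2)):
        out = up(out, skip)                      # 2048 -> 1024 -> 512 -> 256 channels
    out = self.cls_conv_block1(out)              # 256 -> 32
    out = self.cls_conv_block2(out)              # 32 -> 16
    out = self.last(out)                         # 16 -> n_classes logits
    # upsample logits back to the input resolution
    return F.interpolate(out, size=x.shape[2:], mode='bilinear', align_corners=False)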