def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
             padding=0, dilation=1, bias=False, activate_first=True,
             inplace=True):
    super(SeparableConv2d, self).__init__()
    self.relu0 = nn.ReLU(inplace=inplace)
    # Depthwise conv: groups=in_channels gives one spatial filter per channel.
    self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                               padding, dilation, groups=in_channels,
                               bias=bias)
    self.bn1 = SynchronizedBatchNorm2d(in_channels, momentum=bn_mom)
    self.relu1 = nn.ReLU(inplace=True)
    # Pointwise 1x1 conv mixes information across channels.
    self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1,
                               bias=bias)
    self.bn2 = SynchronizedBatchNorm2d(out_channels, momentum=bn_mom)
    self.relu2 = nn.ReLU(inplace=True)
    self.activate_first = activate_first
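# Hedged sketch (not part of the model): why the depthwise + pointwise pair
# above is cheap. For in=128, out=256, k=3 a dense conv needs 128*256*9 =
# 294,912 weights, while depthwise (128*9) + pointwise (128*256) needs only
# 33,920 -- roughly 8.7x fewer, assuming bias=False as in the module.
def _separable_param_count(c_in, c_out, k):
    depthwise = c_in * k * k   # groups=c_in: one k x k filter per channel
    pointwise = c_in * c_out   # 1x1 conv mixes channels
    return depthwise + pointwise

assert _separable_param_count(128, 256, 3) == 33920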
def __init__(self, cfg):
    super().__init__(cfg)
    # Image-reconstruction head: maps the (num_classes + 1)-channel prediction
    # back to a 3-channel image; trained with an MSE reconstruction loss.
    self.img_rec = nn.Sequential(
        nn.Conv2d(in_channels=cfg.MODEL_NUM_CLASSES + 1, out_channels=16,
                  kernel_size=3, padding=1, stride=1, bias=True),
        SynchronizedBatchNorm2d(16, momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1,
                  stride=1, bias=True),
        # BN width must match the 16-channel conv above (was
        # cfg.MODEL_ASPP_OUTDIM, which breaks unless that happens to be 16).
        SynchronizedBatchNorm2d(16, momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, padding=1,
                  stride=1, bias=True),
    )
    self.apply(self.init_bn)
    self.rec_loss = nn.MSELoss()
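# Hedged shape check for the img_rec head (assumes MODEL_NUM_CLASSES=21 and
# plain BatchNorm2d standing in for SynchronizedBatchNorm2d): every conv uses
# k=3, p=1, s=1, so the head is a per-pixel decoder that keeps H and W and
# can be compared to the RGB input with the MSE loss directly.
import torch
import torch.nn as nn

_head = nn.Sequential(
    nn.Conv2d(22, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU(inplace=True),
    nn.Conv2d(16, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU(inplace=True),
    nn.Conv2d(16, 3, 3, padding=1),
)
assert _head(torch.randn(2, 22, 64, 64)).shape == (2, 3, 64, 64)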
def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
    super(BasicBlock, self).__init__()
    self.conv1 = conv3x3(inplanes, planes, stride, atrous)
    # Module-level bn_mom, matching Bottleneck (the instance has no bn_mom
    # attribute of its own).
    self.bn1 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)
    self.relu = nn.ReLU(inplace=True)
    self.conv2 = conv3x3(planes, planes)
    self.bn2 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)
    self.downsample = downsample
    self.stride = stride
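# Hedged sketch of the residual pattern BasicBlock implements in its forward
# (not shown here): the identity branch is `downsample(x)` when shapes
# change and x otherwise, so the add requires matching shapes.
import torch
import torch.nn as nn

def _residual(x, f, downsample=None):
    identity = downsample(x) if downsample is not None else x
    return torch.relu(f(x) + identity)

_f = nn.Conv2d(16, 16, 3, padding=1, bias=False)
assert _residual(torch.randn(1, 16, 8, 8), _f).shape == (1, 16, 8, 8)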
def __init__(self, block, layers, atrous=None, os=16):
    # `atrous` must be a list of per-block dilations for layer4.
    super(ResNet_Atrous, self).__init__()
    if os == 8:
        stride_list = [2, 1, 1]
    elif os == 16:
        stride_list = [2, 2, 1]
    else:
        raise ValueError(
            'resnet_atrous.py: output stride=%d is not supported.' % os)

    self.inplanes = 64
    # Standard 7x7 stem, stride 2.
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = SynchronizedBatchNorm2d(64, momentum=bn_mom)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, 64, layers[0])
    self.layer2 = self._make_layer(block, 256, 128, layers[1],
                                   stride=stride_list[0])
    # Strides removed to reach the requested output stride are traded for
    # dilation, preserving the receptive field.
    self.layer3 = self._make_layer(block, 512, 256, layers[2],
                                   stride=stride_list[1], atrous=16 // os)
    self.layer4 = self._make_layer(block, 1024, 512, layers[3],
                                   stride=stride_list[2],
                                   atrous=[item * 16 // os for item in atrous])
    self.layers = []

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(m, SynchronizedBatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
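# Hedged sketch of how stride_list realizes the requested output stride: the
# stem (conv1 + maxpool) downsamples by 4, layer1 keeps stride 1, and
# stride_list drives layers 2-4; dilation stands in for the dropped strides.
def _output_stride(stride_list):
    s = 2 * 2  # conv1 (stride 2) * maxpool (stride 2); layer1 is stride 1
    for st in stride_list:
        s *= st
    return s

assert _output_stride([2, 2, 1]) == 16  # os=16
assert _output_stride([2, 1, 1]) == 8   # os=8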
def __init__(self, in_filters, out_filters, strides=1, atrous=None,
             grow_first=True, activate_first=True, inplace=True):
    super(Block, self).__init__()
    # Normalize the dilation argument: None -> no dilation, int -> the same
    # dilation for all three separable convs.
    if atrous is None:
        atrous = [1] * 3
    elif isinstance(atrous, int):
        atrous = [atrous] * 3

    self.head_relu = True
    if out_filters != in_filters or strides != 1:
        # Projection shortcut when channels or resolution change.
        self.skip = nn.Conv2d(in_filters, out_filters, 1, stride=strides,
                              bias=False)
        self.skipbn = SynchronizedBatchNorm2d(out_filters, momentum=bn_mom)
        self.head_relu = False
    else:
        self.skip = None

    self.hook_layer = None
    filters = out_filters if grow_first else in_filters
    # padding == dilation keeps H and W unchanged for the 3x3 kernels.
    self.sepconv1 = SeparableConv2d(in_filters, filters, 3, stride=1,
                                    padding=atrous[0], dilation=atrous[0],
                                    bias=False, activate_first=activate_first,
                                    inplace=self.head_relu)
    self.sepconv2 = SeparableConv2d(filters, out_filters, 3, stride=1,
                                    padding=atrous[1], dilation=atrous[1],
                                    bias=False, activate_first=activate_first)
    self.sepconv3 = SeparableConv2d(out_filters, out_filters, 3,
                                    stride=strides, padding=atrous[2],
                                    dilation=atrous[2], bias=False,
                                    activate_first=activate_first,
                                    inplace=inplace)
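# Hedged check of the padding = dilation convention used for the separable
# convs above: for a 3x3 kernel, out = (H + 2p - d*(k-1) - 1) + 1, which
# equals H whenever p == d, so dilation never changes the feature size.
import torch
import torch.nn as nn

_conv = nn.Conv2d(8, 8, 3, stride=1, padding=4, dilation=4)
assert _conv(torch.randn(1, 8, 32, 32)).shape[-2:] == (32, 32)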
def __init__(self, ninput, noutput):
    super().__init__()
    self.conv = nn.ConvTranspose2d(ninput, noutput, 3, stride=2, padding=1,
                                   output_padding=1, bias=True)
    self.bn = SynchronizedBatchNorm2d(noutput, eps=1e-3)
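# Hedged shape check: with k=3, stride=2, padding=1, output_padding=1 the
# transposed conv above doubles H and W exactly:
# H_out = (H - 1) * 2 - 2 * 1 + (3 - 1) + 1 + 1 = 2 * H.
import torch
import torch.nn as nn

_up = nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1)
assert _up(torch.randn(1, 64, 20, 20)).shape[-2:] == (40, 40)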
def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
    super(Bottleneck, self).__init__()
    # 1x1 reduce -> 3x3 (optionally dilated) -> 1x1 expand.
    self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
    self.bn1 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                           padding=atrous, dilation=atrous, bias=False)
    self.bn2 = SynchronizedBatchNorm2d(planes, momentum=bn_mom)
    self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                           bias=False)
    self.bn3 = SynchronizedBatchNorm2d(planes * self.expansion,
                                       momentum=bn_mom)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
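# Hedged channel-flow sketch for the bottleneck (assumes the standard ResNet
# expansion = 4): 1x1 reduce to `planes`, 3x3 at the working width (dilated
# here), then 1x1 expand to planes * 4; padding = dilation keeps H and W.
import torch
import torch.nn as nn

_planes = 64
_c1 = nn.Conv2d(256, _planes, 1, bias=False)
_c2 = nn.Conv2d(_planes, _planes, 3, padding=2, dilation=2, bias=False)
_c3 = nn.Conv2d(_planes, _planes * 4, 1, bias=False)
assert _c3(_c2(_c1(torch.randn(1, 256, 28, 28)))).shape == (1, 256, 28, 28)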
def _make_layer(self, block, inplanes, planes, blocks, stride=1, atrous=None):
    # Normalize the per-block dilation list.
    if atrous is None:
        atrous = [1] * blocks
    elif isinstance(atrous, int):
        atrous = [atrous] * blocks

    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        # 1x1 projection on the residual path when the shape changes.
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1,
                      stride=stride, bias=False),
            SynchronizedBatchNorm2d(planes * block.expansion,
                                    momentum=bn_mom),
        )

    layers = []
    # Only the first block carries the stride and the downsample.
    layers.append(block(inplanes, planes, stride=stride, atrous=atrous[0],
                        downsample=downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(block(planes * block.expansion, planes, stride=1,
                            atrous=atrous[i]))
    return nn.Sequential(*layers)
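# Hedged sketch of the dilation bookkeeping in _make_layer: an int is
# replicated once per block so the whole stage dilates equally, while a list
# assigns dilations block by block (the multi-grid pattern used for layer4).
def _expand_atrous(atrous, blocks):
    if atrous is None:
        return [1] * blocks
    if isinstance(atrous, int):
        return [atrous] * blocks
    return atrous

assert _expand_atrous(2, 3) == [2, 2, 2]
assert _expand_atrous([1, 2, 4], 3) == [1, 2, 4]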
def __init__(self, cfg):
    super(deeplabv3plus, self).__init__()
    self.cfg = cfg
    self.backbone = None
    self.backbone_layers = None
    input_channel = 2048

    self.aspp = ASPP(dim_in=input_channel,
                     dim_out=cfg.MODEL_ASPP_OUTDIM,
                     rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                     bn_mom=cfg.TRAIN_BN_MOM)
    self.dropout1 = nn.Dropout(0.5)
    self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
    # Brings the ASPP output (stride = MODEL_OUTPUT_STRIDE) up to the
    # stride-4 resolution of the shortcut features.
    self.upsample_sub = nn.UpsamplingBilinear2d(
        scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

    indim = 256
    self.shortcut_conv = nn.Sequential(
        nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL,
                  1, padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
    )
    self.cat_conv = nn.Sequential(
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM,
                  cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1,
                  padding=1, bias=True),
        SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                momentum=cfg.TRAIN_BN_MOM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
    )
    self.offset_conv = Decoder(cfg.MODEL_AUX_OUT)
    self.seed_map_conv = Decoder(cfg.MODEL_NUM_CLASSES)

    # Sigmoid attention maps conditioned on the (num_classes + 1)-channel
    # bounding-box input, applied to low-level and backbone features.
    self.bbox_attention1 = nn.Sequential(
        nn.Conv2d(cfg.MODEL_NUM_CLASSES + 1, indim, 3, 1, padding=1,
                  bias=True),
        nn.Sigmoid())
    self.bbox_attention2 = nn.Sequential(
        nn.Conv2d(cfg.MODEL_NUM_CLASSES + 1, input_channel, 3, 1, padding=1,
                  bias=True),
        nn.Sigmoid())

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                    nonlinearity='relu')
        elif isinstance(m, SynchronizedBatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    self.backbone = build_backbone(cfg.MODEL_BACKBONE,
                                   os=cfg.MODEL_OUTPUT_STRIDE)
    self.backbone_layers = self.backbone.get_layers()
    self.init_output()
    self.apply(self.init_bn)
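# Hedged shape sketch of the decoder fusion: the ASPP output sits at stride
# MODEL_OUTPUT_STRIDE while the shortcut features sit at stride 4, so
# upsample_sub scales by os // 4 before the channel-wise concatenation that
# cat_conv consumes (assumes os=16, ASPP dim 256, shortcut dim 48).
import torch
import torch.nn as nn

_os = 16
_up = nn.UpsamplingBilinear2d(scale_factor=_os // 4)
_aspp = torch.randn(1, 256, 32, 32)       # H/16 features for a 512x512 input
_shortcut = torch.randn(1, 48, 128, 128)  # H/4 features
assert torch.cat([_up(_aspp), _shortcut], dim=1).shape == (1, 304, 128, 128)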
def __init__(self, dim_in, dim_out, rate=1, bn_mom=0.1):
    """Atrous Spatial Pyramid Pooling.

    Branches 1-4 are convolutions with dilation rates [1 (1x1 conv), 6, 12,
    18], each multiplied by `rate`; branch 5 is global average pooling.

    :param dim_in: input channels
    :param dim_out: output channels per branch
    :param rate: dilation-rate multiplier (16 // output_stride)
    :param bn_mom: batch-norm momentum
    """
    super(ASPP, self).__init__()
    self.branch1 = nn.Sequential(
        nn.Conv2d(dim_in, dim_out, 1, 1, padding=0, dilation=rate, bias=True),
        SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
        nn.ReLU(inplace=True),
    )
    self.branch2 = nn.Sequential(
        nn.Conv2d(dim_in, dim_out, 3, 1, padding=6 * rate, dilation=6 * rate,
                  bias=True),
        SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
        nn.ReLU(inplace=True),
    )
    self.branch3 = nn.Sequential(
        nn.Conv2d(dim_in, dim_out, 3, 1, padding=12 * rate,
                  dilation=12 * rate, bias=True),
        SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
        nn.ReLU(inplace=True),
    )
    self.branch4 = nn.Sequential(
        nn.Conv2d(dim_in, dim_out, 3, 1, padding=18 * rate,
                  dilation=18 * rate, bias=True),
        SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
        nn.ReLU(inplace=True),
    )
    # Branch 5: image-level features (global pooling happens in forward).
    self.branch5_conv = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=True)
    self.branch5_bn = SynchronizedBatchNorm2d(dim_out, momentum=bn_mom)
    self.branch5_relu = nn.ReLU(inplace=True)
    # Fuse the five concatenated branches back to dim_out channels.
    self.conv_cat = nn.Sequential(
        nn.Conv2d(dim_out * 5, dim_out, 1, 1, padding=0, bias=True),
        SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
        nn.ReLU(inplace=True),
    )
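# Hedged sketch of the ASPP fan-out: four conv branches plus the pooled
# branch each emit dim_out channels, which is why conv_cat expects
# dim_out * 5 inputs (the concatenation itself happens in forward).
import torch

_dim_out = 256
_branches = [torch.randn(1, _dim_out, 32, 32) for _ in range(5)]
assert torch.cat(_branches, dim=1).shape[1] == _dim_out * 5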
def __init__(self, os):
    """Xception backbone for DeepLab.

    Args:
        os: output stride of the backbone (8 or 16).
    """
    super(Xception, self).__init__()
    if os == 8:
        stride_list = [2, 1, 1]
    elif os == 16:
        stride_list = [2, 2, 1]
    else:
        raise ValueError(
            'xception.py: output stride=%d is not supported.' % os)

    # Entry flow.
    self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False)  # stride = 2
    self.bn1 = SynchronizedBatchNorm2d(32, momentum=bn_mom)
    self.relu = nn.ReLU(inplace=True)
    self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False)
    self.bn2 = SynchronizedBatchNorm2d(64, momentum=bn_mom)
    # do relu here

    self.block1 = Block(64, 128, 2)
    self.block2 = Block(128, 256, stride_list[0], inplace=False)  # stride = 4
    self.block3 = Block(256, 728, stride_list[1])  # stride = 8

    # Middle flow: the atrous rate dilates the conv kernels to compensate
    # for the strides removed to reach the requested output stride.
    rate = 16 // os
    self.block4 = Block(728, 728, 1, atrous=rate)
    self.block5 = Block(728, 728, 1, atrous=rate)
    self.block6 = Block(728, 728, 1, atrous=rate)
    self.block7 = Block(728, 728, 1, atrous=rate)
    self.block8 = Block(728, 728, 1, atrous=rate)
    self.block9 = Block(728, 728, 1, atrous=rate)
    self.block10 = Block(728, 728, 1, atrous=rate)
    self.block11 = Block(728, 728, 1, atrous=rate)
    self.block12 = Block(728, 728, 1, atrous=rate)
    self.block13 = Block(728, 728, 1, atrous=rate)
    self.block14 = Block(728, 728, 1, atrous=rate)
    self.block15 = Block(728, 728, 1, atrous=rate)
    self.block16 = Block(728, 728, 1, atrous=[rate, rate, rate])
    self.block17 = Block(728, 728, 1, atrous=[rate, rate, rate])
    self.block18 = Block(728, 728, 1, atrous=[rate, rate, rate])
    self.block19 = Block(728, 728, 1, atrous=[rate, rate, rate])

    # Exit flow.
    self.block20 = Block(728, 1024, stride_list[2], atrous=rate,
                         grow_first=False)
    self.conv3 = SeparableConv2d(1024, 1536, 3, 1, rate, dilation=rate,
                                 activate_first=False)
    self.conv4 = SeparableConv2d(1536, 1536, 3, 1, rate, dilation=rate,
                                 activate_first=False)
    # do relu here
    self.conv5 = SeparableConv2d(1536, 2048, 3, 1, rate, dilation=rate,
                                 activate_first=False)
    self.layers = []

    # ------- init weights --------
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, SynchronizedBatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
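# Hedged note on the manual init loop above: it is He (Kaiming) normal
# initialization written out by hand -- n is the conv's fan_out
# (k*k*out_channels) and the weights are drawn from N(0, 2/n), the same
# recipe nn.init.kaiming_normal_ applies with mode='fan_out' and
# nonlinearity='relu'; only the random samples differ between the two.
import math
import torch.nn as nn

_conv = nn.Conv2d(32, 64, 3, bias=False)
_n = _conv.kernel_size[0] * _conv.kernel_size[1] * _conv.out_channels
_conv.weight.data.normal_(0, math.sqrt(2. / _n))        # hand-written form
nn.init.kaiming_normal_(_conv.weight, mode='fan_out',   # library form
                        nonlinearity='relu')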