Example #1
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=1,
              stride=1,
              padding=0,
              dilation=1,
              bias=False,
              activate_first=True,
              inplace=True):
     super(SeparableConv2d, self).__init__()
     self.relu0 = nn.ReLU(inplace=inplace)
     self.depthwise = nn.Conv2d(in_channels,
                                in_channels,
                                kernel_size,
                                stride,
                                padding,
                                dilation,
                                groups=in_channels,
                                bias=bias)
     self.bn1 = SynchronizedBatchNorm2d(in_channels)
     self.relu1 = nn.ReLU(inplace=True)
     self.pointwise = nn.Conv2d(in_channels,
                                out_channels,
                                1,
                                1,
                                0,
                                1,
                                1,
                                bias=bias)
     self.bn2 = SynchronizedBatchNorm2d(out_channels)
     self.relu2 = nn.ReLU(inplace=True)
     self.activate_first = activate_first
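Only the constructor is shown above. The following self-contained sketch is an assumption, not the original code (the class name SeparableConv2dSketch is invented and nn.BatchNorm2d stands in for SynchronizedBatchNorm2d): it pairs the same layer layout with a plausible forward pass in which the depthwise and pointwise convolutions run back to back, and activate_first decides whether a single ReLU is applied before the block or ReLUs follow each convolution.

import torch
import torch.nn as nn

class SeparableConv2dSketch(nn.Module):
    """Depthwise conv + BN followed by a 1x1 pointwise conv + BN."""
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, activate_first=True):
        super().__init__()
        self.activate_first = activate_first
        self.relu0 = nn.ReLU(inplace=True)
        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                                   padding, dilation, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)   # stand-in for SynchronizedBatchNorm2d
        self.relu1 = nn.ReLU(inplace=True)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        if self.activate_first:          # single pre-activation for the whole block
            x = self.relu0(x)
        x = self.bn1(self.depthwise(x))
        if not self.activate_first:      # otherwise activate after each conv
            x = self.relu1(x)
        x = self.bn2(self.pointwise(x))
        if not self.activate_first:
            x = self.relu2(x)
        return x

x = torch.randn(2, 32, 64, 64)
print(SeparableConv2dSketch(32, 64, kernel_size=3, padding=1)(x).shape)  # torch.Size([2, 64, 64, 64])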
Example #2
 def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride, atrous)
     #self.bn1 = nn.BatchNorm2d(planes)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     #self.bn2 = nn.BatchNorm2d(planes)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
Example #3
 def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     #self.bn1 = nn.BatchNorm2d(planes)
     self.bn1 = SynchronizedBatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1*atrous, dilation=atrous, bias=False)
     #self.bn2 = nn.BatchNorm2d(planes)
     self.bn2 = SynchronizedBatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
     # self.bn3 = nn.BatchNorm2d(planes * self.expansion)
     self.bn3 = SynchronizedBatchNorm2d(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
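The expansion attribute and the forward pass are not part of the snippet above. The sketch below is an assumption that fills them in with the standard residual wiring these layers imply (BottleneckSketch is an invented name and nn.BatchNorm2d stands in for SynchronizedBatchNorm2d): 1x1 reduce, 3x3 with optional dilation, 1x1 expand, plus a downsample projection on the skip path when shapes differ.

import torch
import torch.nn as nn

class BottleneckSketch(nn.Module):
    expansion = 4   # matches planes * self.expansion above

    def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)            # stand-in for SynchronizedBatchNorm2d
        self.conv2 = nn.Conv2d(planes, planes, 3, stride=stride,
                               padding=atrous, dilation=atrous, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:              # project the skip path to match shape
            identity = self.downsample(x)
        return self.relu(out + identity)

down = nn.Sequential(nn.Conv2d(64, 256, 1, bias=False), nn.BatchNorm2d(256))
print(BottleneckSketch(64, 64, downsample=down)(torch.randn(1, 64, 56, 56)).shape)
# torch.Size([1, 256, 56, 56])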
Example #4
    def __init__(self,
                 in_filters,
                 out_filters,
                 strides=1,
                 atrous=None,
                 grow_first=True,
                 activate_first=True,
                 inplace=True):
        super(Block, self).__init__()
        if atrous is None:
            atrous = [1] * 3
        elif isinstance(atrous, int):
            atrous_list = [atrous] * 3
            atrous = atrous_list
        idx = 0
        self.head_relu = True
        if out_filters != in_filters or strides != 1:
            self.skip = nn.Conv2d(in_filters,
                                  out_filters,
                                  1,
                                  stride=strides,
                                  bias=False)
            self.skipbn = SynchronizedBatchNorm2d(out_filters, momentum=0.0003)
            self.head_relu = False
        else:
            self.skip = None

        self.hook_layer = None
        if grow_first:
            filters = out_filters
        else:
            filters = in_filters
        self.sepconv1 = SeparableConv2d(in_filters,
                                        filters,
                                        3,
                                        stride=1,
                                        padding=1 * atrous[0],
                                        dilation=atrous[0],
                                        bias=False,
                                        activate_first=activate_first,
                                        inplace=self.head_relu)
        self.sepconv2 = SeparableConv2d(filters,
                                        out_filters,
                                        3,
                                        stride=1,
                                        padding=1 * atrous[1],
                                        dilation=atrous[1],
                                        bias=False,
                                        activate_first=activate_first)
        self.sepconv3 = SeparableConv2d(out_filters,
                                        out_filters,
                                        3,
                                        stride=strides,
                                        padding=1 * atrous[2],
                                        dilation=atrous[2],
                                        bias=False,
                                        activate_first=activate_first,
                                        inplace=inplace)
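Block's forward method is not shown here. The free function below is a plausible reconstruction (an assumption, consistent only with the attributes defined above): the three separable convolutions run in sequence, hook_layer stores the intermediate feature, and the skip/skipbn projection (or the identity) is added at the end.

def block_forward(block, x):
    # Projection shortcut when channels or stride change, identity otherwise.
    if block.skip is not None:
        skip = block.skipbn(block.skip(x))
    else:
        skip = x
    out = block.sepconv1(x)
    out = block.sepconv2(out)
    block.hook_layer = out          # stash the mid-block feature (e.g. for decoder skips)
    out = block.sepconv3(out)       # this conv carries the block's stride
    return out + skip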
Example #5
    def __init__(self, block, layers, atrous=None, num_classes=1000):
        super(ResNet_Atrous, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(4,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        #self.bn1 = nn.BatchNorm2d(64)
        self.bn1 = SynchronizedBatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, 64, layers[0])
        self.layer2 = self._make_layer(block, 256, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 512, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block,
                                       1024,
                                       512,
                                       layers[3],
                                       stride=1,
                                       atrous=atrous)
        self.layer5 = self._make_layer(block,
                                       2048,
                                       512,
                                       layers[3],
                                       stride=1,
                                       atrous=atrous)
        self.layer6 = self._make_layer(block,
                                       2048,
                                       512,
                                       layers[3],
                                       stride=1,
                                       atrous=atrous)
        self.layer7 = self._make_layer(block,
                                       2048,
                                       512,
                                       layers[3],
                                       stride=1,
                                       atrous=atrous)
        self.layers = []

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):  # note: this check may not catch SynchronizedBatchNorm2d layers
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example #6
 def __init__(self, dim_in, dim_out, rate=1, bn_mom=0.1):
     super(ASPP, self).__init__()
     self.branch1 = nn.Sequential(
         nn.Conv2d(dim_in,
                   dim_out,
                   1,
                   1,
                   padding=0,
                   dilation=rate,
                   bias=True),
         SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
         nn.ReLU(inplace=True),
     )
     self.branch2 = nn.Sequential(
         nn.Conv2d(dim_in,
                   dim_out,
                   3,
                   1,
                   padding=6 * rate,
                   dilation=6 * rate,
                    bias=True),  # dilated (atrous) convolution whose padding keeps the output size unchanged
         SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
         nn.ReLU(inplace=True),
     )
     self.branch3 = nn.Sequential(
         nn.Conv2d(dim_in,
                   dim_out,
                   3,
                   1,
                   padding=12 * rate,
                   dilation=12 * rate,
                   bias=True),
         SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
         nn.ReLU(inplace=True),
     )
     self.branch4 = nn.Sequential(
         nn.Conv2d(dim_in,
                   dim_out,
                   3,
                   1,
                   padding=18 * rate,
                   dilation=18 * rate,
                   bias=True),
         SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
         nn.ReLU(inplace=True),
     )
     self.branch5_conv = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=True)
     self.branch5_bn = SynchronizedBatchNorm2d(dim_out, momentum=bn_mom)
     self.branch5_relu = nn.ReLU(inplace=True)
     self.conv_cat = nn.Sequential(
         nn.Conv2d(dim_out * 5, dim_out, 1, 1, padding=0, bias=True),
         SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
         nn.ReLU(inplace=True),
     )
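branch5_conv, branch5_bn and branch5_relu suggest a fifth, image-pooling branch that is applied to a globally averaged feature and upsampled back before concatenation. The function below sketches that forward flow under this assumption; it is not the original implementation and expects an ASPP instance built as above.

import torch
import torch.nn.functional as F

def aspp_forward(aspp, x):
    b, c, h, w = x.shape
    feats = [aspp.branch1(x), aspp.branch2(x), aspp.branch3(x), aspp.branch4(x)]
    g = x.mean(dim=(2, 3), keepdim=True)                          # global average pooling to 1x1
    g = aspp.branch5_relu(aspp.branch5_bn(aspp.branch5_conv(g)))
    g = F.interpolate(g, size=(h, w), mode='bilinear', align_corners=True)
    feats.append(g)                                               # 5 maps, dim_out channels each
    return aspp.conv_cat(torch.cat(feats, dim=1))                 # 5*dim_out -> dim_out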
Example #7
 def __init__(self, dim_in, dim_out, rate=1):
     super(ASPP, self).__init__()
     self.branch1 = nn.Sequential(
         nn.Conv2d(dim_in, dim_out, 1, 1, padding=0, dilation=rate),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch2 = nn.Sequential(
         nn.Conv2d(dim_in,
                   dim_out,
                   3,
                   1,
                   padding=6 * rate,
                   dilation=6 * rate),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch3 = nn.Sequential(
         nn.Conv2d(dim_in,
                   dim_out,
                   3,
                   1,
                   padding=12 * rate,
                   dilation=12 * rate),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch4 = nn.Sequential(
         nn.Conv2d(dim_in,
                   dim_out,
                   3,
                   1,
                   padding=18 * rate,
                   dilation=18 * rate),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch5_conv = nn.Conv2d(dim_in, dim_out, 1, 1, 0)
     self.branch5_bn = SynchronizedBatchNorm2d(dim_out)
     self.branch5_relu = nn.ReLU(inplace=True)
     self.conv_cat = nn.Sequential(
         nn.Conv2d(dim_out * 5, dim_out, 1, 1, padding=0),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
Example #8
    def _make_layer(self,
                    block,
                    inplanes,
                    planes,
                    blocks,
                    stride=1,
                    atrous=None):
        downsample = None
        if atrous is None:
            atrous = [1] * blocks
        elif isinstance(atrous, int):
            atrous_list = [atrous] * blocks
            atrous = atrous_list
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          dilation=atrous[0],
                          bias=False),
                SynchronizedBatchNorm2d(planes * block.expansion,
                                        momentum=0.0003),
            )

        layers = []
        layers.append(
            block(inplanes,
                  planes,
                  stride=stride,
                  atrous=atrous[0],
                  downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(planes * block.expansion,
                      planes,
                      stride=1,
                      atrous=atrous[i]))

        return nn.Sequential(*layers)
    def __init__(self, block, layers, atrous=None, os=16):
        super(ResNet_Atrous, self).__init__()
        stride_list = None
        if os == 8:
            stride_list = [2,1,1]
        elif os == 16:
            stride_list = [2,2,1]
        else:
            raise ValueError('resnet_atrous.py: output stride=%d is not supported.'%os) 
            
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
#        self.conv1 =  nn.Sequential(
#                          nn.Conv2d(3,64,kernel_size=3, stride=2, padding=1),
#                          nn.Conv2d(64,64,kernel_size=3, stride=1, padding=1),
#                          nn.Conv2d(64,64,kernel_size=3, stride=1, padding=1),
#                      )
        self.bn1 = SynchronizedBatchNorm2d(64, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, 64, layers[0])
        self.layer2 = self._make_layer(block, 256, 128, layers[1], stride=stride_list[0])
        self.layer3 = self._make_layer(block, 512, 256, layers[2], stride=stride_list[1], atrous=16//os)
        self.layer4 = self._make_layer(block, 1024, 512, layers[3], stride=stride_list[2], atrous=[item*16//os for item in atrous])
        #self.layer5 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[item*16//os for item in atrous])
        #self.layer6 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[item*16//os for item in atrous])
        #self.layer7 = self._make_layer(block, 2048, 512, layers[3], stride=1, atrous=[item*16//os for item in atrous])
        self.layers = []

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
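Inside _make_layer, the atrous argument is first normalized into one dilation rate per block, and a 1x1 downsample projection is created whenever the stride or channel count changes. The helper below isolates the normalization step for illustration (normalize_atrous is a hypothetical name, not part of the source).

def normalize_atrous(atrous, blocks):
    if atrous is None:
        return [1] * blocks        # no dilation: rate 1 for every block
    if isinstance(atrous, int):
        return [atrous] * blocks   # one rate broadcast to all blocks
    return atrous                  # already a per-block list

print(normalize_atrous(None, 3))        # [1, 1, 1]
print(normalize_atrous(2, 3))           # [2, 2, 2]
print(normalize_atrous([1, 2, 1], 3))   # [1, 2, 1]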
Example #10
 def __init__(self, dim_in, dim_out, resolution_in):
     super(ASPP, self).__init__()
     self.branch1 = nn.Sequential(
         nn.Conv2d(dim_in, dim_out, 1, 1, padding=0),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch2 = nn.Sequential(
         nn.Conv2d(dim_in, dim_out, 3, 1, padding=6, dilation=6),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch3 = nn.Sequential(
         nn.Conv2d(dim_in, dim_out, 3, 1, padding=12, dilation=12),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch4 = nn.Sequential(
         nn.Conv2d(dim_in, dim_out, 3, 1, padding=18, dilation=18),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
     self.branch5 = nn.Sequential(
         nn.AvgPool2d(resolution_in, stride=resolution_in),
         nn.Conv2d(dim_in, dim_out, 1, 1, padding=0),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.UpsamplingBilinear2d(scale_factor=resolution_in),
         nn.ReLU(inplace=True),
     )
     self.conv_cat = nn.Sequential(
         nn.Conv2d(dim_out * 5, dim_out, 1, 1, padding=0),
         #nn.BatchNorm2d(dim_out),
         SynchronizedBatchNorm2d(dim_out),
         nn.ReLU(inplace=True),
     )
    def __init__(self, cfg):
        super(deeplabv3plus, self).__init__()
        self.backbone = None
        self.backbone_layers = None
        input_channel = 2048
        self.aspp = ASPP(dim_in=input_channel,
                         dim_out=cfg.MODEL_ASPP_OUTDIM,
                         rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                         bn_mom=cfg.TRAIN_BN_MOM)
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        self.upsample_sub = nn.UpsamplingBilinear2d(
            scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

        indim = 256
        self.shortcut_conv = nn.Sequential(
            nn.Conv2d(indim,
                      cfg.MODEL_SHORTCUT_DIM,
                      cfg.MODEL_SHORTCUT_KERNEL,
                      1,
                      padding=cfg.MODEL_SHORTCUT_KERNEL // 2,
                      bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM,
                                    momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
        )
        self.cat_conv = nn.Sequential(
            nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM,
                      cfg.MODEL_ASPP_OUTDIM,
                      3,
                      1,
                      padding=1,
                      bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                    momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(cfg.MODEL_ASPP_OUTDIM,
                      cfg.MODEL_ASPP_OUTDIM,
                      3,
                      1,
                      padding=1,
                      bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                    momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )
        self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM,
                                  cfg.MODEL_NUM_CLASSES,
                                  1,
                                  1,
                                  padding=0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = build_backbone(cfg.MODEL_BACKBONE,
                                       os=cfg.MODEL_OUTPUT_STRIDE)
        self.backbone_layers = self.backbone.get_layers()
    def __init__(self, cfg):
        super(deeplabv3plus, self).__init__()
        self.backbone = None
        self.backbone_layers = None
        #input_channel = 2368
        #self.aspp = ASPP(dim_in=input_channel,
        #                 dim_out=cfg.MODEL_ASPP_OUTDIM,
        #                 rate=16//cfg.MODEL_OUTPUT_STRIDE,
        #                 bn_mom=cfg.TRAIN_BN_MOM)
        self.aspp = DenseASPP()
        self.dropout1 = nn.Dropout(0.5)
        #self.dropout1 = nn.Dropout(0.4)
        ###### modified dropout #######
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

        indim = 256
        self.shortcut_conv = nn.Sequential(
            nn.Conv2d(128, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL, 1,
                      padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
        )
        self.shortcut_conv1 = nn.Sequential(
            nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL, 1,
                      padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
        )
        self.cat_conv = nn.Sequential(
            #nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
            nn.Conv2d(2 * cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            ###### modified dropout #######
            nn.Dropout(0.5),
            #nn.Dropout(0.4),
            nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
        )
        self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0)
        self.end_conv = nn.Conv2d(cfg.MODEL_NUM_CLASSES, 1, 1, 1, padding=0)

        self.conv3 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv4 = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv5 = nn.Conv2d(1, 1, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv6 = nn.Conv2d(1, 1, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn4 = SynchronizedBatchNorm2d(48)
        self.bn5 = SynchronizedBatchNorm2d(48)
        self.bn6 = SynchronizedBatchNorm2d(1)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv0 = nn.Conv2d(3, 3, kernel_size=7, stride=4, padding=3, bias=False)
        self.conv00 = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0, bias=False)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = build_backbone(cfg.MODEL_BACKBONE, os=cfg.MODEL_OUTPUT_STRIDE)
        self.backbone_layers = self.backbone.get_layers()
        # added manually
        self.upsample_2 = nn.UpsamplingBilinear2d(size=(57, 76), scale_factor=None)
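For the first deeplabv3plus constructor in this example (the one built around aspp, shortcut_conv, cat_conv and cls_conv), a decoder flow along these lines is implied: ASPP on the deepest backbone feature, upsampling, fusion with a projected low-level feature, classification, and a final x4 upsampling. The function below is a hedged sketch of that flow; it assumes backbone.get_layers() returns feature maps ordered from shallow to deep, which the code above does not spell out.

import torch

def deeplab_forward(model, x):
    model.backbone(x)
    layers = model.backbone.get_layers()               # assumed: shallow -> deep feature maps
    feat = model.dropout1(model.aspp(layers[-1]))      # ASPP on the deepest feature
    feat = model.upsample_sub(feat)                    # up to the low-level feature resolution
    low = model.shortcut_conv(layers[0])               # project the shallow feature
    out = model.cat_conv(torch.cat([feat, low], dim=1))
    out = model.cls_conv(out)                          # per-class logits
    return model.upsample4(out)                        # final x4 upsample to input resolution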
Example #13
    def __init__(self, os):
        """ Constructor
        Args:
            num_classes: number of classes
        """
        super(Xception, self).__init__()

        stride_list = None
        if os == 8:
            stride_list = [2, 1, 1]
        elif os == 16:
            stride_list = [2, 2, 1]
        else:
            raise ValueError(
                'xception.py: output stride=%d is not supported.' % os)
        self.conv1 = nn.Conv2d(4, 32, 3, 2, 1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(32, momentum=0.0003)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False)
        self.bn2 = SynchronizedBatchNorm2d(64, momentum=0.0003)
        #do relu here

        self.block1 = Block(64, 128, 2)
        self.block2 = Block(128, 256, stride_list[0], inplace=False)
        self.block3 = Block(256, 728, stride_list[1])

        rate = 16 // os
        self.block4 = Block(728, 728, 1, atrous=rate)
        self.block5 = Block(728, 728, 1, atrous=rate)
        self.block6 = Block(728, 728, 1, atrous=rate)
        self.block7 = Block(728, 728, 1, atrous=rate)

        self.block8 = Block(728, 728, 1, atrous=rate)
        self.block9 = Block(728, 728, 1, atrous=rate)
        self.block10 = Block(728, 728, 1, atrous=rate)
        self.block11 = Block(728, 728, 1, atrous=rate)

        self.block12 = Block(728, 728, 1, atrous=rate)
        self.block13 = Block(728, 728, 1, atrous=rate)
        self.block14 = Block(728, 728, 1, atrous=rate)
        self.block15 = Block(728, 728, 1, atrous=rate)

        self.block16 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 2 * rate, 1 * rate])
        self.block17 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 2 * rate, 1 * rate])
        self.block18 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 2 * rate, 1 * rate])
        self.block19 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 2 * rate, 1 * rate])

        self.block20 = Block(728,
                             1024,
                             stride_list[2],
                             atrous=rate,
                             grow_first=False)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)

        self.conv3 = SeparableConv2d(1024,
                                     1536,
                                     3,
                                     1,
                                     1 * rate,
                                     dilation=rate,
                                     activate_first=False)
        # self.bn3 = SynchronizedBatchNorm2d(1536, momentum=0.0003)

        self.conv4 = SeparableConv2d(1536,
                                     1536,
                                     3,
                                     1,
                                     1 * rate,
                                     dilation=rate,
                                     activate_first=False)
        # self.bn4 = SynchronizedBatchNorm2d(1536, momentum=0.0003)

        #do relu here
        self.conv5 = SeparableConv2d(1536,
                                     2048,
                                     3,
                                     1,
                                     1 * rate,
                                     dilation=rate,
                                     activate_first=False)
        # self.bn5 = SynchronizedBatchNorm2d(2048, momentum=0.0003)
        self.layers = []

        #------- init weights --------
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #14
    def __init__(self, cfg, test=False):
        super(deeplabv3plus, self).__init__()
        if test:
            self.batch_size = cfg.TEST_BATCHES
        else:
            self.batch_size = cfg.TRAIN_BATCHES
        self.backbone = None
        self.backbone_layers = None
        input_channel = 2048
        self.aspp = ASPP(dim_in=input_channel,
                         dim_out=cfg.MODEL_ASPP_OUTDIM,
                         rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                         bn_mom=cfg.TRAIN_BN_MOM)
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        self.upsample_sub = nn.UpsamplingBilinear2d(
            scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

        indim = 256
        self.shortcut_conv = nn.Sequential(
            nn.Conv2d(indim,
                      cfg.MODEL_SHORTCUT_DIM,
                      cfg.MODEL_SHORTCUT_KERNEL,
                      1,
                      padding=cfg.MODEL_SHORTCUT_KERNEL // 2,
                      bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM,
                                    momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
        )
        self.cat_conv = nn.Sequential(
            nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM,
                      cfg.MODEL_ASPP_OUTDIM,
                      3,
                      1,
                      padding=1,
                      bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                    momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(cfg.MODEL_ASPP_OUTDIM,
                      cfg.MODEL_ASPP_OUTDIM,
                      3,
                      1,
                      padding=1,
                      bias=True),
            SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM,
                                    momentum=cfg.TRAIN_BN_MOM),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )
        self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM,
                                  cfg.MODEL_NUM_CLASSES,
                                  1,
                                  1,
                                  padding=0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone1 = build_backbone(cfg.MODEL_BACKBONE,
                                        os=cfg.MODEL_OUTPUT_STRIDE)
        self.backbone2 = build_backbone(cfg.MODEL_BACKBONE,
                                        os=cfg.MODEL_OUTPUT_STRIDE)
        self.backbone_layers1 = self.backbone1.get_layers()
        self.backbone_layers2 = self.backbone2.get_layers()
        self.tc_conv = nn.Sequential(
            nn.Conv2d(6, 96, 7, stride=3),   # -> 96 channels
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),                 # halves the spatial size
            nn.Conv2d(96, 256, 5),           # -> 256 channels
            nn.ReLU(),
            nn.MaxPool2d(2),                 # halves the spatial size
            nn.Conv2d(256, 512, 3),          # -> 512 channels
            nn.ReLU()
        )
        self.liner = nn.Sequential(nn.Linear(41472, 256), nn.ReLU())
        self.out = nn.Linear(256, 1)
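The nn.Linear(41472, 256) in self.liner fixes the flattened output of tc_conv at 41472 = 512 x 9 x 9 features, which is what a 6-channel 160x160 input would produce; that input size is an assumption chosen to be consistent with 41472. A quick shape check:

import torch
import torch.nn as nn

tc_conv = nn.Sequential(
    nn.Conv2d(6, 96, 7, stride=3),   # 160 -> 52
    nn.ReLU(inplace=True),
    nn.MaxPool2d(2),                 # 52 -> 26
    nn.Conv2d(96, 256, 5),           # 26 -> 22
    nn.ReLU(),
    nn.MaxPool2d(2),                 # 22 -> 11
    nn.Conv2d(256, 512, 3),          # 11 -> 9
    nn.ReLU(),
)
y = tc_conv(torch.randn(1, 6, 160, 160))
print(y.shape, y.flatten(1).shape[1])  # torch.Size([1, 512, 9, 9]) 41472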