Example 1
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=1,
              stride=1,
              padding=0,
              dilation=1,
              bias=False,
              activate_first=True,
              inplace=True):
     super(SeparableConv2d, self).__init__()
     self.relu0 = nn.ReLU(inplace=inplace)
     self.depthwise = nn.Conv2d(in_channels,
                                in_channels,
                                kernel_size,
                                stride,
                                padding,
                                dilation,
                                groups=in_channels,
                                bias=bias)
     self.bn1 = SynchronizedBatchNorm2d(in_channels, momentum=bn_mom)
     self.relu1 = nn.ReLU(inplace=True)
     self.pointwise = nn.Conv2d(in_channels,
                                out_channels,
                                1,
                                1,
                                0,
                                1,
                                1,
                                bias=bias)
     self.bn2 = SynchronizedBatchNorm2d(out_channels, momentum=bn_mom)
     self.relu2 = nn.ReLU(inplace=True)
     self.activate_first = activate_first
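
A minimal, self-contained sketch of the same depthwise-separable pattern (plain nn.BatchNorm2d stands in for SynchronizedBatchNorm2d, and bn_mom in the snippet above is a module-level momentum constant from the original repository):

    import torch
    import torch.nn as nn

    in_channels, out_channels = 32, 64
    # groups=in_channels makes the first conv depthwise: one 3x3 filter per input channel
    depthwise = nn.Conv2d(in_channels, in_channels, 3, stride=1, padding=1,
                          groups=in_channels, bias=False)
    # the 1x1 pointwise conv then mixes channels
    pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=False)
    block = nn.Sequential(depthwise, nn.BatchNorm2d(in_channels), nn.ReLU(),
                          pointwise, nn.BatchNorm2d(out_channels), nn.ReLU())

    x = torch.randn(2, in_channels, 16, 16)
    print(block(x).shape)  # torch.Size([2, 64, 16, 16])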
Example 2
            def __init__(self, in_channel1, in_channel2, out_channel, batch_norm, pool, detachout):
                super(RNN_cell, self).__init__()
                self.outchannel = out_channel
                conv_R = nn.Conv2d(in_channels=in_channel1,
                                   out_channels=out_channel,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
                conv_T = nn.Conv2d(in_channels=in_channel2,
                                   out_channels=out_channel,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
                layers_data = [
                    conv_R, SynchronizedBatchNorm2d(out_channel)
                ]  #,nn.BatchNorm2d(out_channel * 4), nn.Dropout2d(p=dropout)]
                layers_ctrl = [conv_T, SynchronizedBatchNorm2d(out_channel)]

                self.conv_data = nn.Sequential(*layers_data)
                self.conv_ctrl = nn.Sequential(*layers_ctrl)
                self.pool = nn.MaxPool2d(kernel_size=2,
                                         stride=2,
                                         padding=0,
                                         ceil_mode=True)
                self.ispool = pool
                self.detachout = detachout
Example 3
    def __init__(self,
                 nInputChannels=3,
                 n_classes=21,
                 os=16,
                 pretrained=False,
                 _print=True):
        if _print:
            print("Constructing DeepLabv3+ model...")
            print("Number of classes: {}".format(n_classes))
            print("Output stride: {}".format(os))
            print("Number of Input Channels: {}".format(nInputChannels))
        super(DeepLabv3_plus_multi_set, self).__init__()

        # Atrous Conv
        self.xception_features = Xception(nInputChannels, os, pretrained)

        # ASPP
        if os == 16:
            rates = [1, 6, 12, 18]
        elif os == 8:
            rates = [1, 12, 24, 36]
        else:
            raise NotImplementedError

        self.aspp1 = ASPP_module_rate0(2048, 256, rate=rates[0])
        self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
        self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
        self.aspp4 = ASPP_module(2048, 256, rate=rates[3])

        self.relu = nn.ReLU()

        self.global_avg_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(2048, 256, 1, stride=1, bias=False),
            SynchronizedBatchNorm2d(256), nn.ReLU())

        self.concat_projection_conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.concat_projection_bn1 = SynchronizedBatchNorm2d(256)

        # adopt [1x1, 48] for channel reduction.
        self.feature_projection_conv1 = nn.Conv2d(256, 48, 1, bias=False)
        self.feature_projection_bn1 = SynchronizedBatchNorm2d(48)

        self.decoder = nn.Sequential(Decoder_module(304, 256),
                                     Decoder_module(256, 256))

        self.semantic_aux_cihp = nn.Conv2d(256, 20, kernel_size=1, stride=1)
        self.semantic_aux_pascal = nn.Conv2d(256, 7, kernel_size=1, stride=1)
        self.semantic_aux_atr = nn.Conv2d(256, 18, kernel_size=1, stride=1)
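
The 1280 input channels of concat_projection_conv1 come from concatenating the four ASPP branches with the global-average-pooling branch, each 256 channels wide. A small shape check under that assumption:

    import torch

    branches = [torch.randn(1, 256, 33, 33) for _ in range(5)]  # aspp1..aspp4 + pooled branch
    cat = torch.cat(branches, dim=1)
    print(cat.shape)  # torch.Size([1, 1280, 33, 33]) -> fed to nn.Conv2d(1280, 256, 1)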
Example 4
    def __init__(self, config_text, norm_nc, label_nc):
        super().__init__()

        assert config_text.startswith('spade')
        parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        ks = int(parsed.group(2))

        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'syncbatch':
            self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % param_free_norm_type)

        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128

        pw = ks // 2
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.ReLU()
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
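
The forward pass is not part of this snippet; in the SPADE formulation the predicted gamma and beta modulate the parameter-free normalization roughly as follows (a sketch only, with the segmentation map resized to the feature resolution):

    import torch.nn.functional as F

    def spade_modulate(x, segmap, param_free_norm, mlp_shared, mlp_gamma, mlp_beta):
        normalized = param_free_norm(x)                                    # normalize without affine params
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')  # match feature resolution
        actv = mlp_shared(segmap)
        gamma, beta = mlp_gamma(actv), mlp_beta(actv)
        return normalized * (1 + gamma) + beta                             # spatially-varying scale and shift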
Example 5
    def add_norm_layer(layer):
        nonlocal norm_type
        subnorm_type = norm_type
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]

        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer

        # remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)

        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'sync_batch':
            norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        else:
            raise ValueError('normalization layer %s is not recognized' % subnorm_type)

        return nn.Sequential(layer, norm_layer)
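
A self-contained sketch of what the returned layer amounts to for norm_type='spectralbatch' (get_out_channel and the enclosing factory come from the original source and are not shown here):

    import torch
    import torch.nn as nn
    from torch.nn.utils import spectral_norm

    # 'spectral' prefix: wrap the conv with spectral norm; the bias is dropped
    # because it is redundant in front of the following normalization layer
    conv = spectral_norm(nn.Conv2d(3, 64, 3, padding=1, bias=False))
    layer = nn.Sequential(conv, nn.BatchNorm2d(conv.out_channels, affine=True))

    x = torch.randn(1, 3, 32, 32)
    print(layer(x).shape)  # torch.Size([1, 64, 32, 32])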
Example 6
    def __init__(self, opt, norm_nc):
        super().__init__()

        self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)

        # number of internal filters for generating scale/bias
        nhidden = 128
        # size of kernels
        kernel_size = 3
        # padding size
        padding = kernel_size // 2

        self.mlp_shared = nn.Sequential(
            nn.Conv2d(opt['label_nc'],
                      nhidden,
                      kernel_size=kernel_size,
                      padding=padding), nn.ReLU())
        self.mlp_gamma = nn.Conv2d(nhidden,
                                   norm_nc,
                                   kernel_size=kernel_size,
                                   padding=padding)
        self.mlp_beta = nn.Conv2d(nhidden,
                                  norm_nc,
                                  kernel_size=kernel_size,
                                  padding=padding)
Example 7
    def _make_layer(self, net_dict, batch_norm=False):

        layers = []
        length = len(net_dict)
        for i in range(length):
            one_layer = net_dict[i]
            key = list(one_layer.keys())[0]
            v = one_layer[key]

            if 'pool' in key:
                layers += [
                    nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
                ]
            else:
                conv2d = nn.Conv2d(in_channels=v[0],
                                   out_channels=v[1],
                                   kernel_size=v[2],
                                   stride=v[3],
                                   padding=v[4])
                if batch_norm:
                    layers += [
                        conv2d,
                        SynchronizedBatchNorm2d(v[1]),
                        nn.ReLU(inplace=True)
                    ]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
        return nn.Sequential(*layers)
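
net_dict is a list of single-key dicts: keys containing 'pool' map to [kernel, stride, padding], all others to [in_channels, out_channels, kernel, stride, padding]. A hypothetical configuration in that format (the layer names are made up for illustration):

    net_dict = [
        {'conv1_1': [3, 64, 3, 1, 1]},   # in_ch, out_ch, kernel, stride, padding
        {'pool1': [2, 2, 0]},            # kernel, stride, padding
        {'conv2_1': [64, 128, 3, 1, 1]},
    ]
    # stage = self._make_layer(net_dict, batch_norm=True)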
Example 8
    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        is_batchnorm=True,
    ):
        super(conv2DBatchNorm, self).__init__()

        conv_mod = nn.Conv2d(int(in_channels),
                             int(n_filters),
                             kernel_size=k_size,
                             padding=padding,
                             stride=stride,
                             bias=bias,
                             dilation=dilation,)

        if is_batchnorm:
            self.cb_unit = nn.Sequential(conv_mod, SynchronizedBatchNorm2d(int(n_filters)))
        else:
            self.cb_unit = nn.Sequential(conv_mod)
Example 9
    def __init__(self,
                 in_filters,
                 out_filters,
                 strides=1,
                 atrous=None,
                 grow_first=True,
                 activate_first=True,
                 inplace=True):
        super(Block, self).__init__()
        if atrous is None:
            atrous = [1] * 3
        elif isinstance(atrous, int):
            atrous = [atrous] * 3
        idx = 0
        self.head_relu = True
        if out_filters != in_filters or strides != 1:
            self.skip = nn.Conv2d(in_filters,
                                  out_filters,
                                  1,
                                  stride=strides,
                                  bias=False)
            self.skipbn = SynchronizedBatchNorm2d(out_filters, momentum=bn_mom)
            self.head_relu = False
        else:
            self.skip = None

        self.hook_layer = None
        if grow_first:
            filters = out_filters
        else:
            filters = in_filters
        self.sepconv1 = SeparableConv2d(in_filters,
                                        filters,
                                        3,
                                        stride=1,
                                        padding=1 * atrous[0],
                                        dilation=atrous[0],
                                        bias=False,
                                        activate_first=activate_first,
                                        inplace=self.head_relu)
        self.sepconv2 = SeparableConv2d(filters,
                                        out_filters,
                                        3,
                                        stride=1,
                                        padding=1 * atrous[1],
                                        dilation=atrous[1],
                                        bias=False,
                                        activate_first=activate_first)
        self.sepconv3 = SeparableConv2d(out_filters,
                                        out_filters,
                                        3,
                                        stride=strides,
                                        padding=1 * atrous[2],
                                        dilation=atrous[2],
                                        bias=False,
                                        activate_first=activate_first,
                                        inplace=inplace)
Example 10
    def testSyncBatchNorm2DSyncTrain(self):
        bn = nn.BatchNorm2d(10)
        sync_bn = SynchronizedBatchNorm2d(10)
        sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])

        bn.cuda()
        sync_bn.cuda()

        self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True)
Example 11
    def __init__(self, in_size, out_size, is_batchnorm):
        super(unetConv2, self).__init__()

        if is_batchnorm:
            self.conv1 = nn.Sequential(
                nn.Conv2d(in_size, out_size, 3, 1, 0),
                SynchronizedBatchNorm2d(out_size),
                nn.ReLU(),
            )
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_size, out_size, 3, 1, 0),
                SynchronizedBatchNorm2d(out_size),
                nn.ReLU(),
            )
        else:
            self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 0), nn.ReLU())
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_size, out_size, 3, 1, 0), nn.ReLU()
            )
Example 12
            def __init__(self, in_channel1, in_channel2, out_channel, pool, detachout,
                         kernel_size, stride, padding, syn_bn=True):
                super(RNN_cell, self).__init__()
                self.outchannel = out_channel
                conv_data = nn.Conv2d(in_channels=in_channel1, out_channels=out_channel, kernel_size=kernel_size, stride=stride, padding=padding, bias=True)
                conv_ctrl = nn.Conv2d(in_channels=in_channel2, out_channels=out_channel, kernel_size=3, stride=1, padding=1, bias=True)
                self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)

                if syn_bn:
                    layers_data = [conv_data, SynchronizedBatchNorm2d(out_channel), torch.nn.ReLU()]
                    layers_ctrl = [conv_ctrl, SynchronizedBatchNorm2d(out_channel), torch.nn.Sigmoid()]
                else:
                    layers_data = [conv_data, nn.BatchNorm2d(out_channel), torch.nn.ReLU()]
                    layers_ctrl = [conv_ctrl, nn.BatchNorm2d(out_channel), torch.nn.Sigmoid()]

                self.conv_data = nn.Sequential(*layers_data)
                self.conv_ctrl = nn.Sequential(*layers_ctrl)

                self.ispool = pool
                self.detachout = detachout
                self.stride = stride
Example 13
    def __init__(self,
                 inplanes,
                 planes,
                 kernel_size=3,
                 stride=1,
                 dilation=1,
                 bias=False,
                 padding=0):
        super(SeparableConv2d_same, self).__init__()

        self.depthwise = nn.Conv2d(inplanes,
                                   inplanes,
                                   kernel_size,
                                   stride,
                                   padding,
                                   dilation,
                                   groups=inplanes,
                                   bias=bias)
        self.depthwise_bn = SynchronizedBatchNorm2d(inplanes)
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = SynchronizedBatchNorm2d(planes)
Example 14
    def __init__(self, dim_in, dim_out, rate=1, bn_mom=0.1):
        super(ASPP, self).__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, 1, 1, padding=0, dilation=rate, bias=True),
            SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
            nn.ReLU(inplace=True),
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, 3, 1, padding=6 * rate, dilation=6 * rate, bias=True),
            SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
            nn.ReLU(inplace=True),
        )
        self.branch3 = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, 3, 1, padding=12 * rate, dilation=12 * rate, bias=True),
            SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
            nn.ReLU(inplace=True),
        )
        self.branch4 = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, 3, 1, padding=18 * rate, dilation=18 * rate, bias=True),
            SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
            nn.ReLU(inplace=True),
        )
        self.branch5_conv = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=True)
        self.branch5_bn = SynchronizedBatchNorm2d(dim_out, momentum=bn_mom)
        self.branch5_relu = nn.ReLU(inplace=True)
        self.conv_cat = nn.Sequential(
            nn.Conv2d(dim_out * 5, dim_out, 1, 1, padding=0, bias=True),
            SynchronizedBatchNorm2d(dim_out, momentum=bn_mom),
            nn.ReLU(inplace=True),
        )
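
The forward pass is not included above; in this ASPP variant the fifth branch pools the input globally, projects it with branch5_conv, upsamples it back, and concatenates it with the other four branches before conv_cat. A sketch of that logic, assuming the module layout shown above:

    import torch
    import torch.nn.functional as F

    def aspp_forward(self, x):
        n, c, h, w = x.size()
        b1, b2, b3, b4 = self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)
        # branch 5: global average pooling -> 1x1 conv -> BN -> ReLU -> upsample back to (h, w)
        g = x.mean(dim=(2, 3), keepdim=True)
        g = self.branch5_relu(self.branch5_bn(self.branch5_conv(g)))
        g = F.interpolate(g, size=(h, w), mode='bilinear', align_corners=True)
        # five dim_out-channel branches -> dim_out*5 channels, projected back by conv_cat
        return self.conv_cat(torch.cat([b1, b2, b3, b4, g], dim=1))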
Example 15
    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(deconv2DBatchNorm, self).__init__()

        self.dcb_unit = nn.Sequential(
            nn.ConvTranspose2d(
                int(in_channels),
                int(n_filters),
                kernel_size=k_size,
                padding=padding,
                stride=stride,
                bias=bias,
            ),
            SynchronizedBatchNorm2d(int(n_filters)),
        )
Example 16
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=(3, 3),
                 stride=(1, 1),
                 padding=(1, 1)):
        super(ConvBn2d, self).__init__()

        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              bias=False)
        #self.dropout = nn.Dropout2d(p=0.1, inplace=False)
        self.bn = SynchronizedBatchNorm2d(out_channels, eps=1e-5, affine=False)
Example 17
    def __init__(self, args, ch, emb_dim):
        super().__init__()
        if args.norm_g == 'syncbatch':
            from sync_batchnorm import SynchronizedBatchNorm2d
            self.norm = SynchronizedBatchNorm2d(ch, affine=False)
        elif args.norm_g == 'batch':
            self.norm = nn.BatchNorm2d(ch, affine=False)
        elif args.norm_g == 'instance':
            self.norm = nn.InstanceNorm2d(ch, affine=False)
        elif args.norm_g == 'none':
            self.norm = lambda x: x  # Identity
        else:
            raise ValueError('%s is not a recognized norm type' % args.norm_g)

        self.fc_gamma = nn.Linear(emb_dim, ch)
        self.fc_beta = nn.Linear(emb_dim, ch)
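
The forward pass is not shown; a common conditional-normalization formulation computes a per-channel scale and shift from the embedding and broadcasts them over the spatial dimensions (a sketch under that assumption):

    def conditional_norm_forward(self, x, emb):
        # x: (N, ch, H, W), emb: (N, emb_dim)
        normalized = self.norm(x)
        gamma = self.fc_gamma(emb).unsqueeze(-1).unsqueeze(-1)  # (N, ch, 1, 1)
        beta = self.fc_beta(emb).unsqueeze(-1).unsqueeze(-1)
        return normalized * (1 + gamma) + beta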
Example 18
 def __init__(self, inplanes, planes, rate=1):
     super(ASPP_module_rate0, self).__init__()
     if rate == 1:
         kernel_size = 1
         padding = 0
         self.atrous_convolution = nn.Conv2d(inplanes,
                                             planes,
                                             kernel_size=kernel_size,
                                             stride=1,
                                             padding=padding,
                                             dilation=rate,
                                             bias=False)
         self.bn = SynchronizedBatchNorm2d(planes, eps=1e-5, affine=True)
         self.relu = nn.ReLU()
     else:
         raise RuntimeError()
Example 19
    def __init__(self, os):
        """ Constructor
        Args:
            os: output stride (8 or 16)
        """
        super(Xception, self).__init__()

        stride_list = None
        if os == 8:
            stride_list = [2, 1, 1]
        elif os == 16:
            stride_list = [2, 2, 1]
        else:
            raise ValueError(
                'xception.py: output stride=%d is not supported.' % os)
        self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(32, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False)
        self.bn2 = SynchronizedBatchNorm2d(64, momentum=bn_mom)
        #do relu here

        self.block1 = Block(64, 128, 2)
        self.block2 = Block(128, 256, stride_list[0], inplace=False)
        self.block3 = Block(256, 728, stride_list[1])

        rate = 16 // os
        self.block4 = Block(728, 728, 1, atrous=rate)
        self.block5 = Block(728, 728, 1, atrous=rate)
        self.block6 = Block(728, 728, 1, atrous=rate)
        self.block7 = Block(728, 728, 1, atrous=rate)

        self.block8 = Block(728, 728, 1, atrous=rate)
        self.block9 = Block(728, 728, 1, atrous=rate)
        self.block10 = Block(728, 728, 1, atrous=rate)
        self.block11 = Block(728, 728, 1, atrous=rate)

        self.block12 = Block(728, 728, 1, atrous=rate)
        self.block13 = Block(728, 728, 1, atrous=rate)
        self.block14 = Block(728, 728, 1, atrous=rate)
        self.block15 = Block(728, 728, 1, atrous=rate)

        self.block16 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 1 * rate, 1 * rate])
        self.block17 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 1 * rate, 1 * rate])
        self.block18 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 1 * rate, 1 * rate])
        self.block19 = Block(728,
                             728,
                             1,
                             atrous=[1 * rate, 1 * rate, 1 * rate])

        self.block20 = Block(728,
                             1024,
                             stride_list[2],
                             atrous=rate,
                             grow_first=False)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)

        self.conv3 = SeparableConv2d(1024,
                                     1536,
                                     3,
                                     1,
                                     1 * rate,
                                     dilation=rate,
                                     activate_first=False)
        # self.bn3 = SynchronizedBatchNorm2d(1536, momentum=bn_mom)

        self.conv4 = SeparableConv2d(1536,
                                     1536,
                                     3,
                                     1,
                                     1 * rate,
                                     dilation=rate,
                                     activate_first=False)
        # self.bn4 = SynchronizedBatchNorm2d(1536, momentum=bn_mom)

        #do relu here
        self.conv5 = SeparableConv2d(1536,
                                     2048,
                                     3,
                                     1,
                                     1 * rate,
                                     dilation=rate,
                                     activate_first=False)
        # self.bn5 = SynchronizedBatchNorm2d(2048, momentum=bn_mom)
        self.layers = []

        #------- init weights --------
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example 20
    def __init__(self, inplanes=3, os=16, pretrained=False):
        super(Xception, self).__init__()

        if os == 16:
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
        elif os == 8:
            entry_block3_stride = 1
            middle_block_rate = 2
            exit_block_rates = (2, 4)
        else:
            raise NotImplementedError

        # Entry flow
        self.conv1 = nn.Conv2d(inplanes,
                               32,
                               3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn1 = SynchronizedBatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = SynchronizedBatchNorm2d(64)

        self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
        self.block2 = Block2(128,
                             256,
                             reps=2,
                             stride=2,
                             start_with_relu=True,
                             grow_first=True)
        self.block3 = Block(256,
                            728,
                            reps=2,
                            stride=entry_block3_stride,
                            start_with_relu=True,
                            grow_first=True)

        # Middle flow
        self.block4 = Block(728,
                            728,
                            reps=3,
                            stride=1,
                            dilation=middle_block_rate,
                            start_with_relu=True,
                            grow_first=True)
        self.block5 = Block(728,
                            728,
                            reps=3,
                            stride=1,
                            dilation=middle_block_rate,
                            start_with_relu=True,
                            grow_first=True)
        self.block6 = Block(728,
                            728,
                            reps=3,
                            stride=1,
                            dilation=middle_block_rate,
                            start_with_relu=True,
                            grow_first=True)
        self.block7 = Block(728,
                            728,
                            reps=3,
                            stride=1,
                            dilation=middle_block_rate,
                            start_with_relu=True,
                            grow_first=True)
        self.block8 = Block(728,
                            728,
                            reps=3,
                            stride=1,
                            dilation=middle_block_rate,
                            start_with_relu=True,
                            grow_first=True)
        self.block9 = Block(728,
                            728,
                            reps=3,
                            stride=1,
                            dilation=middle_block_rate,
                            start_with_relu=True,
                            grow_first=True)
        self.block10 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block11 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block12 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block13 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block14 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block15 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block16 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block17 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block18 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)
        self.block19 = Block(728,
                             728,
                             reps=3,
                             stride=1,
                             dilation=middle_block_rate,
                             start_with_relu=True,
                             grow_first=True)

        # Exit flow
        self.block20 = Block(728,
                             1024,
                             reps=2,
                             stride=1,
                             dilation=exit_block_rates[0],
                             start_with_relu=True,
                             grow_first=False,
                             is_last=True)

        self.conv3 = SeparableConv2d_aspp(1024,
                                          1536,
                                          3,
                                          stride=1,
                                          dilation=exit_block_rates[1],
                                          padding=exit_block_rates[1])
        # self.bn3 = nn.BatchNorm2d(1536)

        self.conv4 = SeparableConv2d_aspp(1536,
                                          1536,
                                          3,
                                          stride=1,
                                          dilation=exit_block_rates[1],
                                          padding=exit_block_rates[1])
        # self.bn4 = nn.BatchNorm2d(1536)

        self.conv5 = SeparableConv2d_aspp(1536,
                                          2048,
                                          3,
                                          stride=1,
                                          dilation=exit_block_rates[1],
                                          padding=exit_block_rates[1])
        # self.bn5 = nn.BatchNorm2d(2048)

        # Init weights
        # self.__init_weight()

        # Load pretrained model
        if pretrained:
            self.__load_xception_pretrained()
Example 21
    def __init__(self, pretrained=True, num_classes=-1):
        super(deeplabv3plus, self).__init__()
        self.backbone = None
        self.backbone_layers = None
        input_channel = 2048
        self.aspp = ASPP(dim_in=input_channel,
                         dim_out=256,
                         rate=16 // 16,
                         bn_mom=0.0003)
        self.dropout1 = nn.Dropout(0.5)
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=16 // 8)  # 16//4

        indim = 728
        shallow1_dim = 64
        self.shortcut_conv1_1 = nn.Sequential(
            nn.Conv2d(indim, shallow1_dim, 1, 1, padding=1 // 2, bias=True),
            SynchronizedBatchNorm2d(shallow1_dim, momentum=0.0003),
            nn.ReLU(inplace=True),
        )
        self.cat_conv1_1 = nn.Sequential(
            nn.Conv2d(256 + shallow1_dim, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )
        indim = 256
        shallow2_dim = 32
        self.shortcut_conv1_2 = nn.Sequential(
            nn.Conv2d(indim, shallow2_dim, 1, 1, padding=1 // 2, bias=True),
            SynchronizedBatchNorm2d(shallow2_dim, momentum=0.0003),
            nn.ReLU(inplace=True),
        )
        self.cat_conv1_2 = nn.Sequential(
            nn.Conv2d(256 + shallow2_dim, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )
        # self.predict5x5 = nn.Conv2d(256, 256, 5, 1, padding=2)
        self.predict5x5 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            SynchronizedBatchNorm2d(256, momentum=0.0003),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )

        self.cls_conv = nn.Conv2d(256, num_classes, 1, 1, padding=0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        self.backbone = xception.xception(pretrained=pretrained, os=16)
        self.backbone_layers = self.backbone.get_layers()
Example 22
    def __init__(self,
                 inplanes,
                 planes,
                 reps,
                 stride=1,
                 dilation=1,
                 start_with_relu=True,
                 grow_first=True,
                 is_last=False):
        super(Block2, self).__init__()

        if planes != inplanes or stride != 1:
            self.skip = nn.Conv2d(inplanes,
                                  planes,
                                  1,
                                  stride=stride,
                                  bias=False)
            self.skipbn = SynchronizedBatchNorm2d(planes)
        else:
            self.skip = None

        self.relu = nn.ReLU(inplace=True)
        rep = []

        filters = inplanes
        if grow_first:
            rep.append(self.relu)
            rep.append(
                SeparableConv2d_same(inplanes,
                                     planes,
                                     3,
                                     stride=1,
                                     dilation=dilation))
            #             rep.append(nn.BatchNorm2d(planes))
            filters = planes

        for i in range(reps - 1):
            rep.append(self.relu)
            rep.append(
                SeparableConv2d_same(filters,
                                     filters,
                                     3,
                                     stride=1,
                                     dilation=dilation))
            # rep.append(nn.BatchNorm2d(filters))

        if not grow_first:
            rep.append(self.relu)
            rep.append(
                SeparableConv2d_same(inplanes,
                                     planes,
                                     3,
                                     stride=1,
                                     dilation=dilation))
            # rep.append(nn.BatchNorm2d(planes))

        if not start_with_relu:
            rep = rep[1:]

        if stride != 1:
            self.block2_lastconv = nn.Sequential(*[
                self.relu,
                SeparableConv2d_same(
                    planes, planes, 3, stride=2, dilation=dilation)
            ])

        if is_last:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))

        self.rep = nn.Sequential(*rep)