Example #1
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 bn_momentum=0.1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(planes, momentum=bn_momentum)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=dilation,
                               dilation=dilation,
                               bias=False)
        self.bn2 = SynchronizedBatchNorm2d(planes, momentum=bn_momentum)
        self.conv3 = nn.Conv2d(planes,
                               planes * self.expansion,
                               kernel_size=1,
                               bias=False)
        self.bn3 = SynchronizedBatchNorm2d(planes * self.expansion,
                                           momentum=bn_momentum)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
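The snippet omits the class header and forward pass. In the standard ResNet Bottleneck (assumed here, not shown in the source) expansion = 4, and the shortcut is added before the final ReLU; a minimal sketch:

class Bottleneck(nn.Module):
    expansion = 4  # conv3 widens planes -> planes * expansion

    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            # reshape the shortcut when stride or channel count changes
            residual = self.downsample(x)
        return self.relu(out + residual)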
Example #2
def double_conv(in_channels, out_channels):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        SynchronizedBatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, 3, padding=1),
        SynchronizedBatchNorm2d(out_channels),
        nn.ReLU(inplace=True)
    )
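A minimal usage sketch, assuming torch, torch.nn as nn, and SynchronizedBatchNorm2d are importable as in the snippets:

import torch

block = double_conv(3, 64)         # 3-channel input -> 64 feature maps
x = torch.randn(1, 3, 128, 128)    # dummy NCHW batch
y = block(x)                       # padding=1 keeps H and W: [1, 64, 128, 128]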
Example #3
    def __init__(self, class_num, bn_momentum=0.1):
        super(Decoder, self).__init__()
        self.conv1 = nn.Conv2d(256, 48, kernel_size=1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(48, momentum=bn_momentum)
        self.relu = nn.ReLU()
        # self.conv2 = SeparableConv2d(304, 256, kernel_size=3)
        # self.conv3 = SeparableConv2d(256, 256, kernel_size=3)
        self.conv2 = nn.Conv2d(304, 256, kernel_size=3, padding=1, bias=False)
        self.bn2 = SynchronizedBatchNorm2d(256, momentum=bn_momentum)
        self.dropout2 = nn.Dropout(0.5)
        self.conv3 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False)
        self.bn3 = SynchronizedBatchNorm2d(256, momentum=bn_momentum)
        self.dropout3 = nn.Dropout(0.1)
        self.conv4 = nn.Conv2d(256, class_num, kernel_size=1)

        self._init_weight()
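conv2 takes 304 channels because the usual DeepLabv3+ decoder concatenates the 256-channel encoder output with the 48-channel projected low-level features (256 + 48 = 304). A hedged sketch of the forward pass; the bilinear resize and F (torch.nn.functional) are assumptions, not shown in the source:

    def forward(self, x, low_level_feature):
        # project low-level backbone features down to 48 channels
        low_level_feature = self.relu(self.bn1(self.conv1(low_level_feature)))
        # upsample encoder output to match, then concatenate: 256 + 48 = 304
        x = F.interpolate(x, size=low_level_feature.shape[2:],
                          mode='bilinear', align_corners=True)
        x = torch.cat((x, low_level_feature), dim=1)
        x = self.dropout2(self.relu(self.bn2(self.conv2(x))))
        x = self.dropout3(self.relu(self.bn3(self.conv3(x))))
        return self.conv4(x)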
Example #4
    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        is_batchnorm=True,
    ):
        super(conv2DBatchNorm, self).__init__()

        conv_mod = nn.Conv2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            padding=padding,
            stride=stride,
            bias=bias,
            dilation=dilation,
        )

        if is_batchnorm:
            self.cb_unit = nn.Sequential(
                conv_mod, SynchronizedBatchNorm2d(int(n_filters)))
        else:
            self.cb_unit = nn.Sequential(conv_mod)
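The snippet stops before the forward method; presumably it just applies the sequential unit (a sketch, assuming nothing else happens in between):

    def forward(self, inputs):
        # conv followed by (optional) synchronized batch norm
        return self.cb_unit(inputs)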
Example #5
    def __init__(self, bn_momentum=0.1, output_stride=16):
        super(Encoder, self).__init__()
        self.ASPP = AsppModule(bn_momentum=bn_momentum,
                               output_stride=output_stride)
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(256, momentum=bn_momentum)
        self.dropout = nn.Dropout(0.5)

        self.__init_weight()
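A plausible forward pass, inferred from the layers built above; the 1280 in-channels match the five concatenated 256-channel ASPP branches of Example #9, though this ordering is an assumption:

    def forward(self, x):
        x = self.ASPP(x)
        # 1x1 projection: 1280 -> 256 channels
        x = self.dropout(self.relu(self.bn1(self.conv1(x))))
        return x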
Example #6
    def __init__(self, in_size, out_size, is_batchnorm):
        super(unetConv2, self).__init__()

        if is_batchnorm:
            self.conv1 = nn.Sequential(
                nn.Conv2d(in_size, out_size, 3, 1, 0),
                SynchronizedBatchNorm2d(out_size),
                nn.ReLU(),
            )
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_size, out_size, 3, 1, 0),
                SynchronizedBatchNorm2d(out_size),
                nn.ReLU(),
            )
        else:
            self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 0),
                                       nn.ReLU())
            self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 0),
                                       nn.ReLU())
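The matching forward presumably just chains the two blocks; note that with padding 0 each 3x3 conv trims one pixel per border (a sketch):

    def forward(self, inputs):
        outputs = self.conv1(inputs)
        outputs = self.conv2(outputs)
        return outputs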
Example #7
    def __init__(self,
                 block,
                 layers,
                 bn_momentum=0.1,
                 pretrained=False,
                 output_stride=16):
        if output_stride == 16:
            dilations = [1, 1, 1, 2]
            strides = [1, 2, 2, 1]
        elif output_stride == 8:
            dilations = [1, 1, 2, 4]
            strides = [1, 2, 1, 1]
        else:
            raise Warning("output_stride must be 8 or 16!")
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = SynchronizedBatchNorm2d(64, momentum=bn_momentum)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block,
                                       64,
                                       layers[0],
                                       stride=strides[0],
                                       dilation=dilations[0],
                                       bn_momentum=bn_momentum)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=strides[1],
                                       dilation=dilations[1],
                                       bn_momentum=bn_momentum)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=strides[2],
                                       dilation=dilations[2],
                                       bn_momentum=bn_momentum)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=strides[3],
                                       dilation=dilations[3],
                                       bn_momentum=bn_momentum)

        self._init_weight()

        if pretrained:
            self._load_pretrained_model()
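Assuming the Bottleneck block from Example #1, the usual depth configurations apply; e.g. a ResNet-101 backbone (the [3, 4, 23, 3] layout is the standard one, not taken from this snippet):

backbone = ResNet(Bottleneck, [3, 4, 23, 3], bn_momentum=0.1, output_stride=16)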
Example #8
def _AsppConv(in_channels,
              out_channels,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              bn_momentum=0.1):
    asppconv = nn.Sequential(
        nn.Conv2d(in_channels,
                  out_channels,
                  kernel_size,
                  stride,
                  padding,
                  dilation,
                  bias=False),
        SynchronizedBatchNorm2d(out_channels, momentum=bn_momentum), nn.ReLU())
    return asppconv
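For instance, a 3x3 ASPP branch with dilation 12 keeps the spatial size by matching padding to dilation:

branch = _AsppConv(2048, 256, kernel_size=3, stride=1,
                   padding=12, dilation=12, bn_momentum=0.1)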
Example #9
    def __init__(self, bn_momentum=0.1, output_stride=16):
        super(AsppModule, self).__init__()

        # output_stride choice
        if output_stride == 16:
            atrous_rates = [0, 6, 12, 18]
        elif output_stride == 8:
            atrous_rates = [0, 12, 24, 36]  # rates doubled for stride 8; 2 * [list] would repeat the list, not scale it
        else:
            raise Warning("output_stride must be 8 or 16!")
        # atrous_spatial_pyramid_pooling part
        self._atrous_convolution1 = _AsppConv(2048,
                                              256,
                                              1,
                                              1,
                                              bn_momentum=bn_momentum)
        self._atrous_convolution2 = _AsppConv(2048,
                                              256,
                                              3,
                                              1,
                                              padding=atrous_rates[1],
                                              dilation=atrous_rates[1],
                                              bn_momentum=bn_momentum)
        self._atrous_convolution3 = _AsppConv(2048,
                                              256,
                                              3,
                                              1,
                                              padding=atrous_rates[2],
                                              dilation=atrous_rates[2],
                                              bn_momentum=bn_momentum)
        self._atrous_convolution4 = _AsppConv(2048,
                                              256,
                                              3,
                                              1,
                                              padding=atrous_rates[3],
                                              dilation=atrous_rates[3],
                                              bn_momentum=bn_momentum)

        # image pooling part
        self._image_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(2048, 256, kernel_size=1, bias=False),
            SynchronizedBatchNorm2d(256, momentum=bn_momentum), nn.ReLU())

        self.__init_weight()
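A hedged sketch of how the five branches would be fused; the bilinear upsampling of the pooled branch and F (torch.nn.functional) are assumptions, but the 5 x 256 = 1280-channel concat matches Encoder.conv1 in Example #5:

    def forward(self, x):
        branch1 = self._atrous_convolution1(x)
        branch2 = self._atrous_convolution2(x)
        branch3 = self._atrous_convolution3(x)
        branch4 = self._atrous_convolution4(x)
        # broadcast the 1x1 pooled features back to the input resolution
        pooled = F.interpolate(self._image_pool(x), size=x.shape[2:],
                               mode='bilinear', align_corners=True)
        return torch.cat([branch1, branch2, branch3, branch4, pooled], dim=1)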
Example #10
    def __init__(self,
                 in_channels,
                 n_filters,
                 k_size,
                 stride,
                 padding,
                 bias=True):
        super(deconv2DBatchNorm, self).__init__()

        self.dcb_unit = nn.Sequential(
            nn.ConvTranspose2d(
                int(in_channels),
                int(n_filters),
                kernel_size=k_size,
                padding=padding,
                stride=stride,
                bias=bias,
            ),
            SynchronizedBatchNorm2d(int(n_filters)),
        )
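As with conv2DBatchNorm (Example #4), the forward presumably just runs the unit (a sketch):

    def forward(self, inputs):
        # transposed conv followed by synchronized batch norm
        return self.dcb_unit(inputs)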
Example #11
    def __init__(self, config_text, norm_nc=128, label_nc=128):
        super().__init__()

        #assert config_text.startswith('spade')
        #parsed = re.search('spade(\D+)(\d)x\d', config_text)
        #param_free_norm_type = str(parsed.group(1))
        #ks = int(parsed.group(2))
        param_free_norm_type = config_text

        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'syncbatch':
            self.param_free_norm = SynchronizedBatchNorm2d(norm_nc,
                                                           affine=False)
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError(
                '%s is not a recognized param-free norm type in SPADE' %
                param_free_norm_type)

        # The dimension of the intermediate embedding space. Yes, hardcoded.
        self.nhidden = 128

        ks = 3
        pw = ks // 2
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, self.nhidden, kernel_size=ks, padding=pw),
            nn.ReLU())
        self.mlp_gamma = nn.Conv2d(self.nhidden,
                                   norm_nc,
                                   kernel_size=ks,
                                   padding=pw)
        self.mlp_beta = nn.Conv2d(self.nhidden,
                                  norm_nc,
                                  kernel_size=ks,
                                  padding=pw)
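In SPADE, the parameter-free norm output is modulated by spatially-varying gamma/beta predicted from the segmentation map. A sketch following the SPADE paper; the nearest-neighbor resize and F (torch.nn.functional) are assumptions, not shown in this snippet:

    def forward(self, x, segmap):
        normalized = self.param_free_norm(x)
        # predict per-pixel modulation from the resized segmentation map
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)
        return normalized * (1 + gamma) + beta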
Example #12
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    dilation=1,
                    bn_momentum=0.1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                SynchronizedBatchNorm2d(planes * block.expansion,
                                        momentum=bn_momentum),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  dilation,
                  downsample,
                  bn_momentum=bn_momentum))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      dilation=dilation,
                      bn_momentum=bn_momentum))

        return nn.Sequential(*layers)
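Only the first block of each layer receives the stride and downsample module; the remaining blocks - 1 reuse the updated self.inplanes. E.g. for layer3 of a ResNet-101 (the standard count of 23 is assumed here):

        # inside ResNet.__init__ from Example #7
        self.layer3 = self._make_layer(Bottleneck, 256, 23, stride=2,
                                       dilation=1, bn_momentum=bn_momentum)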