示例#1
0
 def __init__(self,
              num_input_features,
              growth_rate,
              bn_size,
              drop_rate,
              dilation_rate=1):
     """Dense layer: BN-ReLU-1x1 bottleneck, then BN-ReLU-dilated 3x3 conv."""
     super(_DenseLayer, self).__init__()
     bottleneck_width = bn_size * growth_rate
     # Register the two conv stages in order; padding == dilation keeps the
     # spatial size of the 3x3 conv unchanged.
     stages = [
         ("norm1", bn(num_input_features)),
         ("relu1", nn.ReLU(inplace=True)),
         ("conv1",
          nn.Conv2d(num_input_features,
                    bottleneck_width,
                    kernel_size=1,
                    stride=1,
                    bias=False)),
         ("norm2", bn(bottleneck_width)),
         ("relu2", nn.ReLU(inplace=True)),
         ("conv2",
          nn.Conv2d(bottleneck_width,
                    growth_rate,
                    kernel_size=3,
                    stride=1,
                    dilation=dilation_rate,
                    padding=dilation_rate,
                    bias=False)),
     ]
     for module_name, module in stages:
         self.add_module(module_name, module)
     self.drop_rate = drop_rate
示例#2
0
    def __init__(self,
                 num_input_features,
                 growth_rate,
                 bn_size,
                 drop_rate,
                 dilation_rate=1):
        """Dense layer built from two BN-ReLU-depthwise-separable-conv stages."""
        super(_DenseLayer, self).__init__()
        bottleneck_width = bn_size * growth_rate

        # Stage 1: normalize, activate, reduce to the bottleneck width.
        self.add_module('norm.1', bn(num_input_features))
        self.add_module('relu.1', nn.ReLU(inplace=True))
        self.add_module(
            'depthwise_conv.1',
            depthwise_separable_conv(num_input_features,
                                     bottleneck_width,
                                     kernel_size=3,
                                     stride=1,
                                     dilation=1))

        # Stage 2: normalize, activate, emit growth_rate channels using the
        # (possibly dilated) depthwise-separable conv.
        self.add_module('norm.2', bn(bottleneck_width))
        self.add_module('relu.2', nn.ReLU(inplace=True))
        self.add_module(
            'depthwise_conv.2',
            depthwise_separable_conv(bottleneck_width,
                                     growth_rate,
                                     kernel_size=3,
                                     stride=1,
                                     dilation=dilation_rate))
        self.drop_rate = drop_rate
示例#3
0
    def __init__(self,
                 input_num,
                 num1,
                 num2,
                 dilation_rate,
                 drop_out,
                 bn_start=True):
        """ASPP branch: optional leading BN, 1x1 reduction, dilated 3x3 conv."""
        super(_DenseAsppBlock, self).__init__()
        if bn_start:
            self.add_module("norm1", bn(input_num, momentum=0.0003))

        # 1x1 conv squeezes input_num channels down to num1.
        self.add_module("relu1", nn.ReLU(inplace=True))
        self.add_module(
            "conv1",
            nn.Conv2d(in_channels=input_num,
                      out_channels=num1,
                      kernel_size=1))

        # Dilated 3x3 conv maps num1 -> num2; padding == dilation preserves
        # the spatial resolution.
        self.add_module("norm2", bn(num1, momentum=0.0003))
        self.add_module("relu2", nn.ReLU(inplace=True))
        self.add_module(
            "conv2",
            nn.Conv2d(in_channels=num1,
                      out_channels=num2,
                      kernel_size=3,
                      dilation=dilation_rate,
                      padding=dilation_rate))

        self.drop_rate = drop_out
    def __init__(self,
                 input_num,
                 num1,
                 num2,
                 dilation_rate,
                 drop_out,
                 bn_start=True):
        """ASPP branch: optional BN, then a 1x1 reduction and a dilated 3x3 conv."""
        # The number of output features is reduced from num1 to num2 (num1 > num2).
        super(_DenseAsppBlock, self).__init__()
        # The first branch of the dense ASPP head skips this BN (bn_start=False).
        if bn_start:
            self.add_module('norm.1', bn(input_num, momentum=0.0003)),

        self.add_module('relu.1', nn.ReLU(inplace=True)),
        # 1x1 conv: squeeze input_num channels down to num1.
        self.add_module(
            'conv.1',
            nn.Conv2d(in_channels=input_num, out_channels=num1,
                      kernel_size=1)),

        self.add_module('norm.2', bn(num1, momentum=0.0003)),
        self.add_module('relu.2', nn.ReLU(inplace=True)),
        # Dilated 3x3 conv: num1 -> num2; padding == dilation keeps spatial size.
        self.add_module(
            'conv.2',
            nn.Conv2d(in_channels=num1,
                      out_channels=num2,
                      kernel_size=3,
                      dilation=dilation_rate,
                      padding=dilation_rate)),

        # Dropout probability; presumably applied in forward() — not visible here.
        self.drop_rate = drop_out
 def __init__(self,
              input_num,
              num1,
              num2,
              dilation_rate,
              drop_out,
              bn_start=True,
              modulation=True):
     """ASPP branch using a (modulated) deformable conv before the dilated conv.

     Note: `drop_out` is accepted but not stored here, and `bn_start` is kept
     as a flag only — presumably consulted in forward(); confirm with callers.
     """
     super(_DenseAsppBlock, self).__init__()
     self.modulation = modulation
     self.bn_start = bn_start
     # 1x1 reduction: input_num -> num1 channels.
     self.bn1 = bn(input_num, momentum=0.0003)
     self.relu1 = nn.ReLU(inplace=True)
     self.conv_1 = nn.Conv2d(in_channels=input_num,
                             out_channels=num1,
                             kernel_size=1)
     self.bn2 = bn(num1, momentum=0.0003)
     self.relu2 = nn.ReLU(inplace=True)
     # Current DeformConv v2 integration (modulated deformable conv).
     self.deform_conv = DeformConv2d(num1,
                                     num2,
                                     3,
                                     padding=1,
                                     modulation=self.modulation)
     # Original DeformConv v1 code:
     #self.offset = ConvOffset2D(num1)
     #self.conv_2 = nn.Conv2d(in_channels=num1, out_channels=num2, kernel_size=3,padding=1)
     # Final dilated 3x3 conv at constant width num2.
     self.conv_3 = nn.Conv2d(in_channels=num2,
                             out_channels=num2,
                             kernel_size=3,
                             dilation=dilation_rate,
                             padding=dilation_rate)
示例#6
0
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Two-conv residual basic block; `downsample` adapts the identity branch."""
     super(BasicBlock, self).__init__()
     # First 3x3 conv carries the stride (may reduce spatial size).
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = bn(planes)
     self.relu = nn.ReLU(inplace=True)
     # Second 3x3 conv keeps the channel count constant.
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = bn(planes)
     # Optional projection for the shortcut, plus the stride for reference.
     self.downsample = downsample
     self.stride = stride
示例#7
0
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Bottleneck residual block: 1x1 reduce, 3x3 (strided), 1x1 expand."""
     super(Bottleneck, self).__init__()
     expanded_planes = planes * self.expansion
     # 1x1 reduction to the bottleneck width.
     self.conv1 = conv1x1(inplanes, planes)
     self.bn1 = bn(planes)
     # 3x3 spatial conv; this is the layer that carries the stride.
     self.conv2 = conv3x3(planes, planes, stride)
     self.bn2 = bn(planes)
     # 1x1 expansion back to planes * expansion channels.
     self.conv3 = conv1x1(planes, expanded_planes)
     self.bn3 = bn(expanded_planes)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
 def __init__(self,
              input_num,
              num1,
              num2,
              dilation_rate,
              drop_out,
              bn_start=True,
              modulation=True,
              adaptive_d=True):
     """ASPP branch whose dilated conv is a deformable conv with adaptive dilation.

     Note: `drop_out` is accepted but not stored; `bn_start` is kept as a flag
     only — presumably used in forward(); confirm with callers.
     """
     super(_DenseAsppBlock, self).__init__()
     self.modulation = modulation
     self.adaptive_d = adaptive_d
     self.bn_start = bn_start
     # 1x1 reduction: input_num -> num1 channels.
     self.bn1 = bn(input_num, momentum=0.0003)
     self.relu1 = nn.ReLU(inplace=True)
     self.conv_1 = nn.Conv2d(in_channels=input_num,
                             out_channels=num1,
                             kernel_size=1)
     self.bn2 = bn(num1, momentum=0.0003)
     self.relu2 = nn.ReLU(inplace=True)

     # Modulated deformable conv (v2) carrying this branch's dilation rate.
     self.deform_conv = DeformConv2d(num1,
                                     num2,
                                     3,
                                     padding=1,
                                     dilation=dilation_rate,
                                     modulation=self.modulation,
                                     adaptive_d=self.adaptive_d)
示例#9
0
 def __init__(self):
     """ASPP-style encoder head: 1x1 branch, three atrous branches, image pool."""
     super(Encoder, self).__init__()
     # Plain 1x1 projection branch (256 -> 64).
     self.conv1 = nn.Sequential(nn.Conv2d(256, 64, kernel_size=1), bn(64),
                                nn.ReLU())

     def atrous_branch(rate):
         # Rate-1 reduction (256 -> 64) followed by the dilated module.
         return nn.Sequential(atrous_module(256, 64, rate=1),
                              atrous_module(64, 64, rate=rate))

     self.atrous_6 = atrous_branch(6)
     self.atrous_12 = atrous_branch(12)
     self.atrous_18 = atrous_branch(18)
     # Image-level branch: global average pool, then upsample back
     # (scale_factor=32 — assumes a fixed input size; TODO confirm).
     self.avg_pool = nn.Sequential(
         atrous_module(256, 64, rate=1), nn.AdaptiveAvgPool2d((1, 1)),
         nn.Upsample(scale_factor=32, mode='bilinear'))
     # Fuse the concatenated 5 x 64 = 320 channels down to 128.
     self.conv2 = nn.Sequential(nn.Conv2d(320, 128, kernel_size=1), bn(128),
                                nn.ReLU())
示例#10
0
    def __init__(self, block, layers, num_classes=1000, num_input_images=2):
        """ResNet whose stem accepts `num_input_images` stacked RGB frames.

        `num_classes` is kept for signature compatibility with the base
        constructor; only the stem and residual stages are rebuilt here.
        """
        super(ResNetMultiImageInput, self).__init__(block, layers)
        self.inplanes = 64
        # Stem: 3 channels per input image, 7x7 stride-2 conv.
        self.conv1 = nn.Conv2d(num_input_images * 3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = bn(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # He initialization for convs; BN starts out as the identity map.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    pruning_rate,
                    stride=1,
                    dilation=1,
                    padding=1):
        """Build one stage of `blocks` pruned residual blocks.

        Only the first block receives `stride` and the projection shortcut;
        the remaining blocks run at stride 1 but share the same
        `pruning_rate`, `dilation` and `padding`.
        """
        downsample = None
        # Projection shortcut when the identity branch would not match the
        # block output in spatial size or channel count.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                bn(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, pruning_rate, stride, dilation,
                  padding, downsample))
        # Subsequent blocks in this stage consume the expanded channel count.
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      pruning_rate,
                      dilation=dilation,
                      padding=padding))

        return nn.Sequential(*layers)
示例#12
0
    def __init__(self, block, layers, num_classes=1000):
        """Standard ResNet: 7x7 stem, four residual stages, avg-pool + FC head.

        `block` is the residual block class (must expose `expansion`);
        `layers` gives the number of blocks per stage.
        """
        super(ResNet, self).__init__()
        self.inplanes = 64
        # Stem: 7x7 stride-2 conv followed by stride-2 max pooling.
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = bn(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He initialization for convs; BN weights/biases start as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            # NOTE(review): this checks isinstance against `bn` itself, while
            # sibling code checks nn.BatchNorm2d — works only if `bn` is a
            # class (e.g. an alias of nn.BatchNorm2d), not a factory; confirm.
            elif isinstance(m, bn):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
示例#13
0
    def __init__(self, input_num, num1, num2, dilation_rate):
        """ASPP branch: 1x1 reduction, then a dilated 3x3 conv (conv-BN-ReLU pairs)."""
        super(_DenseAsppBlock, self).__init__()

        # 1x1 conv reduces input_num channels to num1.
        self.conv1 = nn.Conv2d(in_channels=input_num,
                               out_channels=num1,
                               kernel_size=1)
        self.bn1 = bn(num1, momentum=0.0003)
        self.relu1 = nn.ReLU(inplace=True)

        # Dilated 3x3 conv maps num1 -> num2; padding == dilation keeps
        # the spatial size unchanged.
        self.conv2 = nn.Conv2d(in_channels=num1,
                               out_channels=num2,
                               kernel_size=3,
                               dilation=dilation_rate,
                               padding=dilation_rate)
        self.bn2 = bn(num2, momentum=0.0003)
        self.relu2 = nn.ReLU(inplace=True)
示例#14
0
 def __init__(self, num_input_features, num_output_features, stride=2):
     """Transition: BN -> ReLU -> 1x1 conv, plus 2x2 avg-pool when stride == 2."""
     super(_Transition, self).__init__()
     self.add_module('norm', bn(num_input_features))
     self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module(
         'conv',
         nn.Conv2d(num_input_features,
                   num_output_features,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     # Downsample only when a stride-2 transition is requested.
     if stride == 2:
         self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=stride))
示例#15
0
    def __init__(self,
                 n_channels1,
                 n_channels2,
                 n_channels3,
                 det_features=256):
        """Squeeze three feature maps to a common width, refine each, then fuse.

        :param n_channels1: channels of the first input feature map
        :param n_channels2: channels of the second input feature map
        :param n_channels3: channels of the third input feature map
        :param det_features: common channel width used throughout the block
        """
        super(AggregationBlock, self).__init__()

        def squeeze(in_channels):
            # 1x1 conv projecting an input map to det_features channels.
            return nn.Conv2d(in_channels=in_channels,
                             out_channels=det_features,
                             kernel_size=1)

        def node():
            # 3x3 conv + BN + ReLU refinement at constant width.
            return nn.Sequential(
                nn.Conv2d(in_channels=det_features,
                          out_channels=det_features,
                          kernel_size=3,
                          padding=1), bn(num_features=det_features),
                nn.ReLU(inplace=True))

        self.squeeze1 = squeeze(n_channels1)
        self.squeeze2 = squeeze(n_channels2)
        self.squeeze3 = squeeze(n_channels3)

        self.node1 = node()
        self.node2 = node()
        self.node3 = node()

        # Concatenated branches (3 * det_features) fused back to det_features.
        self.fusing = nn.Sequential(
            nn.Conv2d(in_channels=det_features * 3,
                      out_channels=det_features,
                      kernel_size=1))
示例#16
0
 def __init__(self, in_channels, out_channels, use_refl=True):
     """3x3 conv preceded by 1-pixel padding, with an accompanying BN layer."""
     super(Conv3x3, self).__init__()
     # Reflection padding avoids border artifacts; zero padding is the fallback.
     self.pad = nn.ReflectionPad2d(1) if use_refl else nn.ZeroPad2d(1)
     # int() guards against float channel counts from upstream arithmetic.
     self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
     self.bn = bn(int(out_channels))
 def __init__(self,
              inplanes,
              planes,
              pruning_rate,
              stride=1,
              dilation=1,
              padding=1,
              downsample=None):
     """Basic residual block whose first conv output is channel-pruned.

     `pruning_rate` is the fraction of `planes` removed from the first
     conv's output; the second conv restores the full `planes` width.
     """
     super(BasicBlock, self).__init__()
     # Channels kept after pruning: planes - floor(planes * pruning_rate).
     self.pruned_channel_planes = int(planes -
                                      math.floor(planes * pruning_rate))
     # First 3x3 conv carries stride/dilation/padding and emits the pruned width.
     self.conv1 = conv3x3(inplanes, self.pruned_channel_planes, stride,
                          dilation, padding)
     self.bn1 = bn(self.pruned_channel_planes)
     self.relu = nn.ReLU(inplace=True)
     # Second 3x3 conv expands back to the full `planes` width.
     self.conv2 = conv3x3(self.pruned_channel_planes, planes)
     self.bn2 = bn(planes)
     self.downsample = downsample
     self.stride = stride
    def __init__(self, num_input_features, num_output_features, stride=2):
        """Transition layer: BN, ReLU, 1x1 conv, and avg-pooling when stride is 2."""
        super(_Transition, self).__init__()
        self.add_module('norm', bn(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module(
            'conv',
            nn.Conv2d(num_input_features,
                      num_output_features,
                      kernel_size=1,
                      stride=1,
                      bias=False))
        # Pool only for the standard stride-2 transition.
        if stride == 2:
            self.add_module('pool',
                            nn.AvgPool2d(kernel_size=2, stride=stride))


# if __name__ == "__main__":
#     model = DenseASPP(model_cfg=DenseASPP161.Model_CFG)
#     print(model)
示例#19
0
 def __init__(self, input_num, out_num, stride=2):
     """Strided 3x3 conv followed by BN and ReLU (downsampling by convolution)."""
     super(StridePoolBlock, self).__init__()
     self.add_module('conv3',
                     nn.Conv2d(in_channels=input_num,
                               out_channels=out_num,
                               kernel_size=3,
                               stride=stride,
                               padding=1))
     self.add_module('bn', bn(num_features=out_num))
     self.add_module('relu', nn.ReLU(inplace=True))
示例#20
0
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; only the first may downsample."""
        # A projection shortcut is needed whenever the identity branch would
        # not match the block output in stride or channel count.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                bn(planes * block.expansion),
            )

        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))

        return nn.Sequential(*stage)
示例#21
0
 def __init__(self, num_input_features, num_output_features, stride=2):
     """Transition: BN -> ReLU -> 1x1 conv, with avg-pooling when stride == 2."""
     super(_Transition, self).__init__()
     self.add_module("norm", bn(num_input_features))
     self.add_module("relu", nn.ReLU(inplace=True))
     # 1x1 conv changes only the channel count (typically halving it).
     self.add_module(
         "conv",
         nn.Conv2d(
             num_input_features,
             num_output_features,
             kernel_size=1,
             stride=1,
             bias=False,
         ),
     )
     # Spatial downsampling only for the standard stride-2 transition.
     if stride == 2:
         self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=stride))
示例#22
0
    def __init__(self, n_class=19, output_stride=8):
        """DenseASPP segmentation network: DenseNet-style backbone + dense ASPP head.

        :param n_class: number of classes produced by the classifier
        :param output_stride: backbone output stride (controls transition
            strides and the dilation of dense blocks 3 and 4)
        """
        super(DenseASPP, self).__init__()
        # bn_size = model_cfg['bn_size']
        bn_size = 4
        drop_rate = 0
        growth_rate = 32
        num_init_features = 64
        block_config = (6, 12, 48, 32)

        dropout0 = 0.1
        dropout1 = 0.1
        d_feature0 = 480
        d_feature1 = 240

        feature_size = int(output_stride / 8)

        # First convolution
        self.features = nn.Sequential(
            OrderedDict([
                (
                    "conv0",
                    nn.Conv2d(
                        3,
                        num_init_features,
                        kernel_size=7,
                        stride=2,
                        padding=3,
                        bias=False,
                    ),
                ),
                ("norm0", bn(num_init_features)),
                ("relu0", nn.ReLU(inplace=True)),
                ("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        # block1*****************************************************************************************************
        block = _DenseBlock(
            num_layers=block_config[0],
            num_input_features=num_features,
            bn_size=bn_size,
            growth_rate=growth_rate,
            drop_rate=drop_rate,
        )
        self.features.add_module("denseblock%d" % 1, block)
        num_features = num_features + block_config[0] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2)
        self.features.add_module("transition%d" % 1, trans)
        num_features = num_features // 2

        # block2*****************************************************************************************************
        block = _DenseBlock(
            num_layers=block_config[1],
            num_input_features=num_features,
            bn_size=bn_size,
            growth_rate=growth_rate,
            drop_rate=drop_rate,
        )
        self.features.add_module("denseblock%d" % 2, block)
        num_features = num_features + block_config[1] * growth_rate

        trans = _Transition(
            num_input_features=num_features,
            num_output_features=num_features // 2,
            stride=feature_size,
        )
        self.features.add_module("transition%d" % 2, trans)
        num_features = num_features // 2

        # block3*****************************************************************************************************
        # Blocks 3 and 4 trade stride for dilation to keep resolution high.
        block = _DenseBlock(
            num_layers=block_config[2],
            num_input_features=num_features,
            bn_size=bn_size,
            growth_rate=growth_rate,
            drop_rate=drop_rate,
            dilation_rate=int(2 / feature_size),
        )
        self.features.add_module("denseblock%d" % 3, block)
        num_features = num_features + block_config[2] * growth_rate

        trans = _Transition(
            num_input_features=num_features,
            num_output_features=num_features // 2,
            stride=1,
        )
        self.features.add_module("transition%d" % 3, trans)
        num_features = num_features // 2

        # block4*****************************************************************************************************
        block = _DenseBlock(
            num_layers=block_config[3],
            num_input_features=num_features,
            bn_size=bn_size,
            growth_rate=growth_rate,
            drop_rate=drop_rate,
            dilation_rate=int(4 / feature_size),
        )
        self.features.add_module("denseblock%d" % 4, block)
        num_features = num_features + block_config[3] * growth_rate

        trans = _Transition(
            num_input_features=num_features,
            num_output_features=num_features // 2,
            stride=1,
        )
        self.features.add_module("transition%d" % 4, trans)
        num_features = num_features // 2

        # Final batch norm
        self.features.add_module("norm5", bn(num_features))
        if feature_size > 1:
            self.features.add_module(
                "upsample", nn.Upsample(scale_factor=2, mode="bilinear"))

        # Dense ASPP head: each branch's input grows by d_feature1 because the
        # previous branches' outputs are concatenated onto the features.
        self.ASPP_3 = _DenseAsppBlock(
            input_num=num_features,
            num1=d_feature0,
            num2=d_feature1,
            dilation_rate=3,
            drop_out=dropout0,
            bn_start=False,
        )

        self.ASPP_6 = _DenseAsppBlock(
            input_num=num_features + d_feature1 * 1,
            num1=d_feature0,
            num2=d_feature1,
            dilation_rate=6,
            drop_out=dropout0,
            bn_start=True,
        )

        self.ASPP_12 = _DenseAsppBlock(
            input_num=num_features + d_feature1 * 2,
            num1=d_feature0,
            num2=d_feature1,
            dilation_rate=12,
            drop_out=dropout0,
            bn_start=True,
        )

        self.ASPP_18 = _DenseAsppBlock(
            input_num=num_features + d_feature1 * 3,
            num1=d_feature0,
            num2=d_feature1,
            dilation_rate=18,
            drop_out=dropout0,
            bn_start=True,
        )

        self.ASPP_24 = _DenseAsppBlock(
            input_num=num_features + d_feature1 * 4,
            num1=d_feature0,
            num2=d_feature1,
            dilation_rate=24,
            drop_out=dropout0,
            bn_start=True,
        )
        num_features = num_features + 5 * d_feature1

        self.classification = nn.Sequential(
            nn.Dropout2d(p=dropout1),
            nn.Conv2d(in_channels=num_features,
                      out_channels=n_class,
                      kernel_size=1,
                      padding=0),
            nn.Upsample(scale_factor=8, mode="bilinear"),
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # FIX: nn.init.kaiming_uniform (no underscore) is deprecated
                # and removed in current PyTorch; use the in-place variant on
                # the parameter itself rather than on .data.
                nn.init.kaiming_uniform_(m.weight)

            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def __init__(self, model_cfg, n_class=19, output_stride=8):
        """DenseASPP configured from `model_cfg`, with extra MK-variant ASPP branches.

        :param model_cfg: dict supplying backbone/head hyper-parameters
        :param n_class: number of classes produced by the classifier
        :param output_stride: backbone output stride (controls transition
            strides and the dilation of dense blocks 3 and 4)
        """
        super(DenseASPP, self).__init__()
        bn_size = model_cfg['bn_size']
        drop_rate = model_cfg['drop_rate']
        growth_rate = model_cfg['growth_rate']
        num_init_features = model_cfg['num_init_features']
        block_config = model_cfg['block_config']

        dropout0 = model_cfg['dropout0']
        dropout1 = model_cfg['dropout1']
        d_feature0 = model_cfg['d_feature0']
        d_feature1 = model_cfg['d_feature1']

        feature_size = int(output_stride / 8)

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', bn(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        # block1*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[0], num_input_features=num_features,
                            bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 1, block)

        # Each dense block concatenates `num_layers * growth_rate` new
        # feature maps onto its input, hence the running channel count.
        num_features = num_features + block_config[0] * growth_rate

        trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        self.features.add_module('transition%d' % 1, trans)
        num_features = num_features // 2

        # block2*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[1], num_input_features=num_features,
                            bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 2, block)
        num_features = num_features + block_config[1] * growth_rate

        trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, stride=feature_size)
        self.features.add_module('transition%d' % 2, trans)
        num_features = num_features // 2

        # block3*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[2], num_input_features=num_features, bn_size=bn_size,
                            growth_rate=growth_rate, drop_rate=drop_rate, dilation_rate=int(2 / feature_size))
        self.features.add_module('denseblock%d' % 3, block)
        num_features = num_features + block_config[2] * growth_rate

        trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, stride=1)
        self.features.add_module('transition%d' % 3, trans)
        num_features = num_features // 2

        # block4*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[3], num_input_features=num_features, bn_size=bn_size,
                            growth_rate=growth_rate, drop_rate=drop_rate, dilation_rate=int(4 / feature_size))
        self.features.add_module('denseblock%d' % 4, block)
        num_features = num_features + block_config[3] * growth_rate

        trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, stride=1)
        self.features.add_module('transition%d' % 4, trans)

        num_features = num_features // 2
        # #################################### remove the last two pooling layers ######################################
        # #################################### remove the last classification layer ####################################
        # #################################### set the dilation rates 2 and 4 ##########################################

        # Final batch norm
        self.features.add_module('norm5', bn(num_features))
        if feature_size > 1:
            self.features.add_module('upsample', nn.Upsample(scale_factor=2, mode='bilinear'))

        # Dense ASPP head: each branch's input grows by d_feature1 because the
        # previous branches' outputs are concatenated onto the features.
        self.ASPP_3 = _DenseAsppBlock(input_num=num_features, num1=d_feature0, num2=d_feature1,
                                      dilation_rate=3, drop_out=dropout0, bn_start=False)

        self.ASPP_6 = _DenseAsppBlock(input_num=num_features + d_feature1 * 1, num1=d_feature0, num2=d_feature1,
                                      dilation_rate=6, drop_out=dropout0, bn_start=True)

        self.ASPP_12 = _DenseAsppBlock(input_num=num_features + d_feature1 * 2, num1=d_feature0, num2=d_feature1,
                                       dilation_rate=12, drop_out=dropout0, bn_start=True)

        self.ASPP_18 = _DenseAsppBlock(input_num=num_features + d_feature1 * 3, num1=d_feature0, num2=d_feature1,
                                       dilation_rate=18, drop_out=dropout0, bn_start=True)

        self.ASPP_24 = _DenseAsppBlock(input_num=num_features + d_feature1 * 4, num1=d_feature0, num2=d_feature1,
                                       dilation_rate=24, drop_out=dropout0, bn_start=True)
        """================================================MK========================================================"""
        # MK variant: three extra fixed-dilation (rate 2) v3 ASPP branches.
        self.ASPP_v2_3 = _DenseAsppBlock_v3(input_num=num_features, num1=d_feature0, num2=d_feature1,
                                            dilation_rate=2, drop_out=dropout0, bn_start=True)
        self.ASPP_v2_4 = _DenseAsppBlock_v3(input_num=num_features + d_feature1 * 1, num1=d_feature0, num2=d_feature1,
                                            dilation_rate=2, drop_out=dropout0, bn_start=True)
        self.ASPP_v2_5 = _DenseAsppBlock_v3(input_num=num_features + d_feature1 * 2, num1=d_feature0, num2=d_feature1,
                                            dilation_rate=2, drop_out=dropout0, bn_start=True)
        """------------------------------------------------MK--------------------------------------------------------"""
        # self.concat_conv = nn.Conv2d(in_channels=832 * 2, out_channels=832, kernel_size=1, stride=1, padding=0,
        #                              bias=False)
        # NOTE(review): 1536 -> 832 is hard-coded for one specific backbone
        # configuration; verify it against model_cfg before reuse.
        self.concat_conv = nn.Conv2d(in_channels=1536, out_channels=832, kernel_size=1, stride=1, padding=0,
                                     bias=False)
        """================================================MK========================================================"""
        num_features = num_features + 5 * d_feature1

        self.classification = nn.Sequential(
            nn.Dropout2d(p=dropout1),
            nn.Conv2d(in_channels=num_features, out_channels=n_class, kernel_size=1, padding=0),
            nn.Upsample(scale_factor=8, mode='bilinear'),
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # FIX: nn.init.kaiming_uniform (no underscore) is deprecated
                # and removed in current PyTorch; use the in-place variant on
                # the parameter itself rather than on .data.
                nn.init.kaiming_uniform_(m.weight)

            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def __init__(self, n_class=19, output_stride=8):
        """Build this DenseASPP variant: a DenseNet-style backbone followed
        by a densely-connected ASPP head, auxiliary fusion convs, and a 1x1
        classifier with 8x bilinear upsampling.

        Args:
            n_class (int): number of output segmentation classes.
            output_stride (int): backbone output stride; 8 keeps blocks 3/4
                at dilation rates 2/4, 16 halves those rates and appends a
                2x bilinear upsample after the backbone.
        """
        super(DenseASPP, self).__init__()
        # DenseNet hyper-parameters, hard-coded for this variant
        # (corresponds to a DenseNet-121-style block layout).
        bn_size = 4
        drop_rate = 0
        growth_rate = 16
        num_init_features = 64
        block_config = (6, 12, 24, 16)

        dropout0 = 0.1   # dropout inside each ASPP branch
        dropout1 = 0.1   # dropout before the final classifier
        d_feature0 = 128  # ASPP intermediate (bottleneck) channels
        d_feature1 = 64   # channels produced by each ASPP branch

        # 1 when output_stride == 8, 2 when output_stride == 16.
        # Integer floor division replaces int(output_stride / 8): identical
        # results for the supported values, without the float round-trip.
        feature_size = output_stride // 8

        # First convolution: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', bn(num_init_features)),
                ('relu0', nn.ReLU(inplace=False)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        # block1*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[0],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 1, block)
        # Each dense block grows the channel count by num_layers * growth_rate.
        num_features = num_features + block_config[0] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2)
        self.features.add_module('transition%d' % 1, trans)
        num_features = num_features // 2

        # block2*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[1],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 2, block)
        num_features = num_features + block_config[1] * growth_rate

        # stride=feature_size: no downsampling at output_stride=8,
        # 2x downsampling at output_stride=16.
        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=feature_size)
        self.features.add_module('transition%d' % 2, trans)
        num_features = num_features // 2

        # block3*****************************************************************************************************
        # dilation 2 at output_stride=8, 1 at output_stride=16.
        block = _DenseBlock(num_layers=block_config[2],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate,
                            dilation_rate=int(2 / feature_size))
        self.features.add_module('denseblock%d' % 3, block)
        num_features = num_features + block_config[2] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=1)
        self.features.add_module('transition%d' % 3, trans)
        num_features = num_features // 2

        # block4*****************************************************************************************************
        # dilation 4 at output_stride=8, 2 at output_stride=16.
        block = _DenseBlock(num_layers=block_config[3],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate,
                            dilation_rate=int(4 / feature_size))
        self.features.add_module('denseblock%d' % 4, block)
        num_features = num_features + block_config[3] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=1)
        self.features.add_module('transition%d' % 4, trans)
        num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', bn(num_features))
        if feature_size > 1:
            self.features.add_module(
                'upsample', nn.Upsample(scale_factor=2, mode='bilinear'))

        # NOTE(review): num_features is overridden to 2048 although the
        # backbone above produces a different width — presumably the ASPP
        # head is fed a 2048-channel tensor in forward(); confirm against
        # the forward pass.
        num_features = 2048
        self.ASPP_3 = _DenseAsppBlock(input_num=num_features,
                                      num1=d_feature0,
                                      num2=d_feature1,
                                      dilation_rate=3,
                                      drop_out=dropout0,
                                      bn_start=False)

        # NOTE(review): branches 6-24 take a 64-channel input rather than
        # the cumulative DenseASPP concatenation used by the other variants
        # in this file — confirm the forward() wiring matches.
        self.ASPP_6 = _DenseAsppBlock(input_num=64,
                                      num1=d_feature0,
                                      num2=d_feature1,
                                      dilation_rate=6,
                                      drop_out=dropout0,
                                      bn_start=True)

        self.ASPP_12 = _DenseAsppBlock(input_num=64,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=12,
                                       drop_out=dropout0,
                                       bn_start=True)

        self.ASPP_18 = _DenseAsppBlock(input_num=64,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=18,
                                       drop_out=dropout0,
                                       bn_start=True)

        self.ASPP_24 = _DenseAsppBlock(input_num=64,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=24,
                                       drop_out=dropout0,
                                       bn_start=True)
        # Classifier input width: backbone features plus the five ASPP branches.
        num_features = num_features + 5 * d_feature1

        self.conv3 = nn.Conv2d(578,
                               258,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.conv4 = nn.Conv2d(52,
                               1,
                               kernel_size=5,
                               stride=2,
                               padding=2,
                               bias=False)

        # Fusion convs: 1x1 / 3x3 reductions, each followed by BN (+ ReLU).
        self.conv11 = nn.Sequential(
            OrderedDict([
                ('convQ',
                 nn.Conv2d(2368,
                           1024,
                           kernel_size=1,
                           stride=1,
                           padding=0,
                           bias=False)),
                ('normQ', bn(1024)),
                ('reluQ', nn.ReLU(inplace=False)),
            ]))
        self.conv33 = nn.Sequential(
            OrderedDict([
                ('convQ',
                 nn.Conv2d(1024,
                           512,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False)),
                ('normQ', bn(512)),
                ('reluQ', nn.ReLU(inplace=False)),
            ]))
        self.conv111 = nn.Sequential(
            OrderedDict([
                ('convQ',
                 nn.Conv2d(512,
                           64,
                           kernel_size=1,
                           stride=1,
                           padding=0,
                           bias=False)),
                ('normQ', bn(64)),
            ]))
        self.conv22 = nn.Sequential(
            OrderedDict([
                ('convQ',
                 nn.Conv2d(2048,
                           1024,
                           kernel_size=1,
                           stride=1,
                           padding=0,
                           bias=False)),
                ('normQ', bn(1024)),
                ('reluQ', nn.ReLU(inplace=False)),
            ]))

        # NOTE(review): oheight/owidth are not defined in this scope —
        # presumably module-level globals; confirm they exist at import time.
        self.bilinear = nn.Upsample(size=(oheight, owidth), mode='bilinear')
        self.classification = nn.Sequential(
            nn.Dropout2d(p=dropout1),
            nn.Conv2d(in_channels=num_features,
                      out_channels=n_class,
                      kernel_size=1,
                      padding=0),
            nn.Upsample(scale_factor=8, mode='bilinear'),
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # kaiming_uniform_ is the in-place, non-deprecated spelling
                # of nn.init.kaiming_uniform (matches the other variants in
                # this file).
                nn.init.kaiming_uniform_(m.weight.data)

            elif isinstance(m, nn.BatchNorm2d):
                # BN starts as identity: unit scale, zero shift.
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #25
0
    def __init__(self,
                 model_cfg=Model_CFG_Dense161,
                 in_channels=3,
                 n_class=1,
                 output_stride=8):
        """Build a DenseASPP segmentation network from a config dict.

        Args:
            model_cfg: hyper-parameter dict providing 'bn_size',
                'drop_rate', 'growth_rate', 'num_init_features',
                'block_config', 'dropout0', 'dropout1', 'd_feature0'
                and 'd_feature1'.
            in_channels: channels of the input image (3 for RGB).
            n_class: number of output channels of the classifier.
            output_stride: 8 or 16; with 16 the block-3/4 dilation rates
                are halved and a 2x bilinear upsample is appended to the
                backbone.
        """
        super(DenseASPP, self).__init__()
        bn_size = model_cfg['bn_size']
        drop_rate = model_cfg['drop_rate']
        growth_rate = model_cfg['growth_rate']
        num_init_features = model_cfg['num_init_features']
        block_config = model_cfg['block_config']

        dropout0 = model_cfg['dropout0']  # dropout inside each ASPP branch
        dropout1 = model_cfg['dropout1']  # dropout before the classifier
        d_feature0 = model_cfg['d_feature0']  # ASPP bottleneck channels
        d_feature1 = model_cfg['d_feature1']  # channels added per ASPP branch

        # 1 when output_stride == 8, 2 when output_stride == 16.
        feature_size = int(output_stride / 8)

        # First convolution: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(in_channels,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', bn(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        # block1*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[0],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 1, block)
        # Each dense block grows the width by num_layers * growth_rate;
        # the following transition halves it.
        num_features = num_features + block_config[0] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2)
        self.features.add_module('transition%d' % 1, trans)
        num_features = num_features // 2

        # block2*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[1],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 2, block)
        num_features = num_features + block_config[1] * growth_rate

        # stride=feature_size: no downsampling at output_stride=8,
        # 2x downsampling at output_stride=16.
        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=feature_size)
        self.features.add_module('transition%d' % 2, trans)
        num_features = num_features // 2

        # block3*****************************************************************************************************
        # dilation 2 at output_stride=8, 1 at output_stride=16.
        block = _DenseBlock(num_layers=block_config[2],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate,
                            dilation_rate=int(2 / feature_size))
        self.features.add_module('denseblock%d' % 3, block)
        num_features = num_features + block_config[2] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=1)
        self.features.add_module('transition%d' % 3, trans)
        num_features = num_features // 2

        # block4*****************************************************************************************************
        # dilation 4 at output_stride=8, 2 at output_stride=16.
        block = _DenseBlock(num_layers=block_config[3],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate,
                            dilation_rate=int(4 / feature_size))
        self.features.add_module('denseblock%d' % 4, block)
        num_features = num_features + block_config[3] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=1)
        self.features.add_module('transition%d' % 4, trans)
        num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', bn(num_features))
        if feature_size > 1:
            # Restore stride-8 resolution before the ASPP head.
            self.features.add_module(
                'upsample',
                nn.Upsample(scale_factor=2,
                            mode='bilinear',
                            align_corners=True))

        # DenseASPP head: each branch consumes the backbone features
        # concatenated with all previous branch outputs (input_num grows by
        # d_feature1 per branch), with increasing dilation rates.
        self.ASPP_3 = _DenseAsppBlock(input_num=num_features,
                                      num1=d_feature0,
                                      num2=d_feature1,
                                      dilation_rate=3,
                                      drop_out=dropout0,
                                      bn_start=False)

        self.ASPP_6 = _DenseAsppBlock(input_num=num_features + d_feature1 * 1,
                                      num1=d_feature0,
                                      num2=d_feature1,
                                      dilation_rate=6,
                                      drop_out=dropout0,
                                      bn_start=True)

        self.ASPP_12 = _DenseAsppBlock(input_num=num_features + d_feature1 * 2,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=12,
                                       drop_out=dropout0,
                                       bn_start=True)

        self.ASPP_18 = _DenseAsppBlock(input_num=num_features + d_feature1 * 3,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=18,
                                       drop_out=dropout0,
                                       bn_start=True)

        self.ASPP_24 = _DenseAsppBlock(input_num=num_features + d_feature1 * 4,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=24,
                                       drop_out=dropout0,
                                       bn_start=True)
        # Classifier input width: backbone features plus all five branches.
        num_features = num_features + 5 * d_feature1

        # 1x1 classifier followed by 8x upsampling back to input resolution.
        self.classification = nn.Sequential(
            nn.Dropout2d(p=dropout1),
            nn.Conv2d(in_channels=num_features,
                      out_channels=n_class,
                      kernel_size=1,
                      padding=0),
            nn.Upsample(scale_factor=8, mode='bilinear', align_corners=True),
        )

        # He-uniform init for convs; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)

            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #26
0
    def __init__(self, model_cfg, n_class=19, output_stride=8):
        """Build DenseASPP and initialize the backbone from ImageNet
        DenseNet-121 weights.

        Args:
            model_cfg: hyper-parameter dict ('bn_size', 'drop_rate',
                'growth_rate', 'num_init_features', 'block_config',
                'dropout0', 'dropout1', 'd_feature0', 'd_feature1').
            n_class: number of output segmentation classes.
            output_stride: 8 or 16; with 16 the block-3/4 dilation rates
                are halved and a 2x upsample is appended to the backbone.
        """
        super(DenseASPP, self).__init__()
        bn_size = model_cfg['bn_size']
        drop_rate = model_cfg['drop_rate']
        growth_rate = model_cfg['growth_rate']
        num_init_features = model_cfg['num_init_features']
        block_config = model_cfg['block_config']

        dropout0 = model_cfg['dropout0']  # dropout inside each ASPP branch
        dropout1 = model_cfg['dropout1']  # dropout before the classifier
        d_feature0 = model_cfg['d_feature0']  # ASPP bottleneck channels
        d_feature1 = model_cfg['d_feature1']  # channels added per ASPP branch

        # 1 when output_stride == 8, 2 when output_stride == 16.
        feature_size = int(output_stride / 8)
        # Directory for cached pretrained weights (only used by the
        # commented-out loading path below).
        self.pretrainedModelPath = "ASPP_pretrainedModel"
        # First convolution: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', bn(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        # block1*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[0],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 1, block)
        # Each dense block grows the width by num_layers * growth_rate;
        # the following transition halves it.
        num_features = num_features + block_config[0] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2)
        self.features.add_module('transition%d' % 1, trans)
        num_features = num_features // 2

        # block2*****************************************************************************************************
        block = _DenseBlock(num_layers=block_config[1],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate)
        self.features.add_module('denseblock%d' % 2, block)
        num_features = num_features + block_config[1] * growth_rate

        # stride=feature_size: no downsampling at output_stride=8,
        # 2x downsampling at output_stride=16.
        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=feature_size)
        self.features.add_module('transition%d' % 2, trans)
        num_features = num_features // 2

        # block3*****************************************************************************************************
        # dilation 2 at output_stride=8, 1 at output_stride=16.
        block = _DenseBlock(num_layers=block_config[2],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate,
                            dilation_rate=int(2 / feature_size))
        self.features.add_module('denseblock%d' % 3, block)
        num_features = num_features + block_config[2] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=1)
        self.features.add_module('transition%d' % 3, trans)
        num_features = num_features // 2

        # block4*****************************************************************************************************
        # dilation 4 at output_stride=8, 2 at output_stride=16.
        block = _DenseBlock(num_layers=block_config[3],
                            num_input_features=num_features,
                            bn_size=bn_size,
                            growth_rate=growth_rate,
                            drop_rate=drop_rate,
                            dilation_rate=int(4 / feature_size))
        self.features.add_module('denseblock%d' % 4, block)
        num_features = num_features + block_config[3] * growth_rate

        trans = _Transition(num_input_features=num_features,
                            num_output_features=num_features // 2,
                            stride=1)
        self.features.add_module('transition%d' % 4, trans)
        num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', bn(num_features))

        # if not os.path.exists(os.path.join(self.pretrainedModelPath, 'denseASPP161_795.pkl')):
        #     weight = torch.load(os.path.join(self.pretrainedModelPath, "denseASPP161.pkl"), map_location=lambda storage, loc: storage)
        #     renamed_weight = OrderedDict()
        #     for key in weight:
        #         if 'norm.' in key and 'transition' not in key:
        #             newkey = key.replace('norm.', "norm_")
        #         elif 'conv.' in key and 'transition' not in key:
        #             newkey = key.replace('conv.', "conv_")
        #         elif 'relu.' in key and 'transition' not in key:
        #             newkey = key.replace('relu.', "relu_")
        #         else:
        #             newkey = key
        #         if 'features' in newkey:
        #             renamed_weight[newkey[16:]] = weight[key]
        #     self.features.load_state_dict(renamed_weight)
        # else:
        #     raise FileExistsError("Weights not found")

        if feature_size > 1:
            # Restore stride-8 resolution before the ASPP head.
            self.features.add_module(
                'upsample', nn.Upsample(scale_factor=2, mode='bilinear'))

        # DenseASPP head: each branch consumes the backbone features
        # concatenated with all previous branch outputs (input_num grows by
        # d_feature1 per branch), with increasing dilation rates.
        self.ASPP_3 = _DenseAsppBlock(input_num=num_features,
                                      num1=d_feature0,
                                      num2=d_feature1,
                                      dilation_rate=3,
                                      drop_out=dropout0,
                                      bn_start=False)

        self.ASPP_6 = _DenseAsppBlock(input_num=num_features + d_feature1 * 1,
                                      num1=d_feature0,
                                      num2=d_feature1,
                                      dilation_rate=6,
                                      drop_out=dropout0,
                                      bn_start=True)

        self.ASPP_12 = _DenseAsppBlock(input_num=num_features + d_feature1 * 2,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=12,
                                       drop_out=dropout0,
                                       bn_start=True)

        self.ASPP_18 = _DenseAsppBlock(input_num=num_features + d_feature1 * 3,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=18,
                                       drop_out=dropout0,
                                       bn_start=True)

        self.ASPP_24 = _DenseAsppBlock(input_num=num_features + d_feature1 * 4,
                                       num1=d_feature0,
                                       num2=d_feature1,
                                       dilation_rate=24,
                                       drop_out=dropout0,
                                       bn_start=True)
        # Classifier input width: backbone features plus all five branches.
        num_features = num_features + 5 * d_feature1

        # 1x1 classifier followed by 8x upsampling back to input resolution.
        self.classification = nn.Sequential(
            nn.Dropout2d(p=dropout1),
            nn.Conv2d(in_channels=num_features,
                      out_channels=n_class,
                      kernel_size=1,
                      padding=0),
            nn.Upsample(scale_factor=8, mode='bilinear'),
        )

        # He-uniform init for convs; BN starts as identity.  Done before
        # loading pretrained weights, which then overwrite the backbone.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)

            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        # load
        # Copy ImageNet DenseNet-121 weights into the backbone: strip the
        # 'features.' prefix (key[9:]) and keep only denseblock/transition/
        # conv0/norm0 parameters.  self.features[:-2] excludes the last two
        # registered modules (transition4 and norm5 here), which have no
        # counterpart in DenseNet-121's exported keys.
        # NOTE(review): this assumes model_cfg matches DenseNet-121
        # (growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64);
        # load_state_dict is strict and will fail otherwise — confirm.
        densenet121 = torchvision.models.densenet121(pretrained=True)
        dsStateDict = densenet121.state_dict()
        renamedDsStateDict = OrderedDict()
        for key in dsStateDict:
            if 'denseblock' in key or 'transition' in key or 'conv0' in key or 'norm0' in key:
                newkey = key[9:]
                renamedDsStateDict[newkey] = dsStateDict[key]
                # print(newkey)
        self.features[:-2].load_state_dict(renamedDsStateDict)

        # Per-channel input mean/std scaled to [0, 1], shaped (1, 3, 1, 1)
        # for broadcasting; moved to GPU at construction time (requires CUDA).
        # NOTE(review): presumably used to normalize inputs in forward() —
        # confirm against the forward pass.
        self.ms = (torch.Tensor([125.3, 123.0, 113.9]) / 255).view(1, 3, 1,
                                                                   1).cuda()
        # self.meanChange = self.meanChange.view(3,1,1).repeat(1,semanTrain_rgb.shape[1], semanTrain_rgb.shape[2])
        self.vs = (torch.Tensor([63.0, 62.1, 66.7]) / 255).view(1, 3, 1,
                                                                1).cuda()