Example #1
 def __init__(self, inchannels, bn_type):
     super(EmbedModule, self).__init__()
     inter_channels = inchannels // 4
     self.conv = nn.Sequential(
         nn.Conv2d(inchannels,
                   inter_channels,
                   kernel_size=1,
                   padding=0,
                   bias=False),
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(inter_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(inter_channels,
                   inter_channels,
                   kernel_size=7,
                   padding=3,
                   stride=1,
                   bias=False),
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(inter_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(inter_channels,
                   inchannels,
                   kernel_size=1,
                   padding=0,
                   bias=False),
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(inchannels),
         nn.ReLU(inplace=True),
     )
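Note: every snippet on this page resolves normalization through ModuleHelper.BatchNorm2d(bn_type=...) (or norm_type=...), which returns a normalization class rather than an instance; the class is then called with the channel count. A minimal sketch of such a helper, assuming only stock PyTorch backends (the real module_helper in these repos also wires up synchronized BatchNorm variants):

import torch.nn as nn

class ModuleHelper(object):
    """Hypothetical sketch: map a bn_type/norm_type string to a norm class."""

    @staticmethod
    def BatchNorm2d(bn_type=None, norm_type=None, ret_cls=False):
        # some call sites pass bn_type, others norm_type; accept both here.
        # ret_cls is accepted for API compatibility; this sketch always
        # returns the class, which also makes isinstance() checks work.
        kind = bn_type or norm_type
        if kind in (None, 'torchbn'):
            return nn.BatchNorm2d
        if kind == 'instancenorm':
            return nn.InstanceNorm2d
        raise NotImplementedError('norm type {} not supported'.format(kind))

    @staticmethod
    def BNReLU(num_features, bn_type=None, norm_type=None, **kwargs):
        # norm followed by ReLU, as used by the segmentation heads below
        return nn.Sequential(
            ModuleHelper.BatchNorm2d(bn_type=bn_type,
                                     norm_type=norm_type)(num_features),
            nn.ReLU(inplace=True))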
Example #2
    def __init__(self, block, layers, num_classes=1000, bn_type=None):
        self.inplanes = 128
        super(CaffeResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(64, momentum=0.1)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(64, momentum=0.1)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(128, momentum=0.1)
        self.relu3 = nn.ReLU(inplace=True)

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0], bn_type=bn_type)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, bn_type=bn_type)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, bn_type=bn_type)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, bn_type=bn_type)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, ModuleHelper.BatchNorm2d(bn_type=bn_type)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
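The manual Gaussian initialization above is He (Kaiming) initialization in fan-out mode: std = sqrt(2 / n) with n = k_h * k_w * out_channels. In current PyTorch the same effect is usually obtained with nn.init (equivalent for groups=1 convolutions):

import math
import torch.nn as nn

conv = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False)

# manual form used in the example: N(0, sqrt(2 / fan_out))
n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels
conv.weight.data.normal_(0, math.sqrt(2. / n))

# idiomatic equivalent
nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')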
Example #3
 def __init__(self, num_class=150, fc_dim=4096, bn_type=None):
     super(PPMBilinearDeepsup, self).__init__()
     pool_scales = (1, 2, 3, 6)
     self.ppm = []
     for scale in pool_scales:
         self.ppm.append(
             nn.Sequential(
                 nn.AdaptiveAvgPool2d(scale),
                 nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                 ModuleHelper.BatchNorm2d(bn_type=bn_type)(512),
                 nn.ReLU(inplace=True)))
     self.ppm = nn.ModuleList(self.ppm)
     self.cbr_deepsup = _ConvBatchNormReluBlock(fc_dim // 2,
                                                fc_dim // 4,
                                                3,
                                                1,
                                                bn_type=bn_type)
     self.conv_last = nn.Sequential(
         nn.Conv2d(fc_dim + len(pool_scales) * 512,
                   512,
                   kernel_size=3,
                   padding=1,
                   bias=False),
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(512),
         nn.ReLU(inplace=True), nn.Dropout2d(0.1),
         nn.Conv2d(512, num_class, kernel_size=1))
     self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
     self.dropout_deepsup = nn.Dropout2d(0.1)
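conv_last expects fc_dim + len(pool_scales) * 512 input channels because each pooled branch is upsampled back to the feature resolution and concatenated with the backbone output. A sketch of the matching forward pass (a method of the class above; the argument name is assumed):

import torch
import torch.nn.functional as F

def forward(self, conv5):
    # conv5: backbone feature map with fc_dim channels
    input_size = conv5.size()
    ppm_out = [conv5]
    for pool_scale in self.ppm:
        # each branch: adaptive pool -> 1x1 conv -> BN -> ReLU -> upsample
        ppm_out.append(F.interpolate(pool_scale(conv5),
                                     size=input_size[2:],
                                     mode='bilinear',
                                     align_corners=False))
    ppm_out = torch.cat(ppm_out, 1)  # fc_dim + 4 * 512 channels
    return self.conv_last(ppm_out)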
Example #4
 def __init__(self, num_input_features, growth_rate, bn_size, drop_rate,
              bn_type):
     super(_DenseLayer, self).__init__()
     self.add_module(
         'norm1',
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(num_input_features))
     self.add_module('relu1', nn.ReLU(inplace=True))
     self.add_module(
         'conv1',
         nn.Conv2d(num_input_features,
                   bn_size * growth_rate,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module(
         'norm2',
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(bn_size * growth_rate))
     self.add_module('relu2', nn.ReLU(inplace=True))
     self.add_module(
         'conv2',
         nn.Conv2d(bn_size * growth_rate,
                   growth_rate,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias=False))
     self.drop_rate = drop_rate
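The dense connectivity itself happens in forward, where the layer's growth_rate new feature maps are concatenated onto its input; a sketch of the standard torchvision-style forward that pairs with this constructor (_DenseLayer subclasses nn.Sequential):

import torch
import torch.nn.functional as F

def forward(self, x):
    # run the norm1-relu1-conv1-norm2-relu2-conv2 stack registered above
    new_features = super(_DenseLayer, self).forward(x)
    if self.drop_rate > 0:
        new_features = F.dropout(new_features, p=self.drop_rate,
                                 training=self.training)
    # dense connection: append the new maps to the input along channels
    return torch.cat([x, new_features], 1)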
Example #5
    def __init__(self, input_num, num1, num2, dilation_rate, drop_out,
                 norm_type):
        super(_DenseAsppBlock, self).__init__()
        self.add_module('relu1', nn.ReLU(inplace=False))
        self.add_module(
            'conv1',
            nn.Conv2d(in_channels=input_num, out_channels=num1,
                      kernel_size=1))

        self.add_module(
            'norm2',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num1))
        self.add_module('relu2', nn.ReLU(inplace=False))
        self.add_module(
            'conv2',
            nn.Conv2d(in_channels=num1,
                      out_channels=num2,
                      kernel_size=3,
                      dilation=dilation_rate,
                      padding=dilation_rate))
        self.add_module(
            'norm3',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                num_features=num2))

        self.drop_rate = drop_out
Example #6
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_type=None):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_type (str) -- the type of normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        use_bias = (norm_type == 'instancenorm')
        kw = 4
        padw = 1
        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)
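A quick smoke test (hypothetical; assumes 'torchbn' is a norm type the helper understands and the usual forward that applies self.model): three stride-2 convolutions plus two stride-1, kernel-4 convolutions turn a 256x256 image into a 30x30 one-channel patch map.

import torch

netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3, norm_type='torchbn')
x = torch.randn(1, 3, 256, 256)
print(netD(x).shape)  # torch.Size([1, 1, 30, 30]) -- one score per patch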
Example #7
 def __init__(self, inplanes, planes, stride=1, downsample=None, bn_type=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.downsample = downsample
     self.stride = stride
Example #8
    def freeze_bn(net, norm_type=None):
        for m in net.modules():
            if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
                m.eval()

            if norm_type is not None:
                from models.tools.module_helper import ModuleHelper
                if isinstance(m, ModuleHelper.BatchNorm2d(norm_type=norm_type, ret_cls=True)) \
                        or isinstance(m, ModuleHelper.BatchNorm1d(norm_type=norm_type, ret_cls=True)) \
                        or isinstance(m, ModuleHelper.BatchNorm3d(norm_type=norm_type, ret_cls=True)):
                    m.eval()
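freeze_bn only flips the normalization layers to eval(); everything else keeps its current mode. This is the usual pattern when fine-tuning with small batches (a sketch, with freeze_bn assumed callable as defined above):

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
net.train()     # convs, dropout, etc. stay in training mode
freeze_bn(net)  # but BatchNorm layers now use their frozen running stats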
Example #9
 def __init__(self, inplanes, planes, stride=1, downsample=None, bn_type=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, bias=False)
     self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
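The matching forward is the standard ResNet bottleneck: three convolutions on the main branch, an optional downsample on the identity branch, then addition and a final ReLU (a sketch):

def forward(self, x):
    residual = x

    out = self.relu(self.bn1(self.conv1(x)))
    out = self.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))

    if self.downsample is not None:
        # 1x1 conv + BN matching the channels/stride of the main branch
        residual = self.downsample(x)

    return self.relu(out + residual)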
Example #10
    def __init__(self,
                 outer_nc,
                 inner_nc,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 norm_type=None,
                 use_dropout=False):
        super(UNetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        downconv = nn.Conv2d(outer_nc,
                             inner_nc,
                             kernel_size=4,
                             stride=2,
                             padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
Example #11
    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_type=None, use_dropout=False):
        """Construct a Unet submodule with skip connections.
        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)    -- if this module is the outermost module
            innermost (bool)    -- if this module is the innermost module
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- whether to use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        use_bias = (norm_type == 'instancenorm')
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
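The skip connection itself lives in forward: every block except the outermost concatenates its input with its output along the channel axis, which is why the non-innermost upconvs take inner_nc * 2 input channels (standard pix2pix forward; a sketch):

import torch

def forward(self, x):
    if self.outermost:
        return self.model(x)
    # U-Net skip: stack the block's input with its upsampled output
    return torch.cat([x, self.model(x)], 1)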
Example #12
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 key_channels,
                 value_channels,
                 out_channels=None,
                 scale=1,
                 norm_type=None,
                 psp_size=(1, 3, 6, 8)):
        super(_SelfAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = low_in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        if out_channels is None:
            self.out_channels = high_in_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_query = nn.Sequential(
            nn.Conv2d(in_channels=high_in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_value = nn.Conv2d(in_channels=self.in_channels,
                                 out_channels=self.value_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.W = nn.Conv2d(in_channels=self.value_channels,
                           out_channels=self.out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)

        self.psp = PSPModule(psp_size)
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
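Because W is zero-initialized, the attention branch contributes nothing at the start of training and the surrounding network behaves as if the block were absent. A sketch of the asymmetric attention forward this constructor supports (shapes follow the ANN formulation; names assumed):

import torch
import torch.nn.functional as F

def forward(self, low_feats, high_feats):
    batch_size, _, h, w = high_feats.size()

    # keys/values come from low-level features, spatially compressed by PSP
    value = self.psp(self.f_value(low_feats)).permute(0, 2, 1)  # B x S x C_v
    key = self.psp(self.f_key(low_feats))                       # B x C_k x S
    query = self.f_query(high_feats).view(batch_size, self.key_channels, -1)
    query = query.permute(0, 2, 1)                              # B x HW x C_k

    sim_map = torch.matmul(query, key) * (self.key_channels ** -0.5)
    sim_map = F.softmax(sim_map, dim=-1)

    context = torch.matmul(sim_map, value)                      # B x HW x C_v
    context = context.permute(0, 2, 1).contiguous()
    context = context.view(batch_size, self.value_channels, h, w)
    return self.W(context)  # zero-init: starts out as a no-op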
Example #13
 def squeezenet_dilated8(self):
     model = DilatedSqueezeNet()
     model = ModuleHelper.load_model(model,
                                     pretrained=self.configer.get(
                                         'network', 'pretrained'),
                                     all_match=False)
     return model
Example #14
    def __init__(self, configer):
        self.inplanes = 128
        super(DeepLabV3, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()

        self.head = nn.Sequential(
            ASPPModule(2048, bn_type=self.configer.get('network', 'bn_type')),
            nn.Conv2d(512,
                      self.num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512,
                                bn_type=self.configer.get(
                                    'network', 'bn_type')), nn.Dropout2d(0.1),
            nn.Conv2d(512,
                      self.num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #15
    def _make_layer(self, block, planes, blocks, stride=1, norm_type=None):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes *
                                                              block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  norm_type=norm_type))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_type=norm_type))

        return nn.Sequential(*layers)
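Concretely, with a Bottleneck block (expansion = 4) only the first block of a stage receives the 1x1 downsample; the rest are plain residual blocks. A hypothetical trace:

# hypothetical trace of _make_layer(Bottleneck, planes=64, blocks=3)
# starting from self.inplanes == 64, with Bottleneck.expansion == 4:
#
#   downsample: Conv2d(64, 256, kernel_size=1, bias=False) + BatchNorm2d(256)
#   block 1: Bottleneck(64,  64, downsample=downsample)  # 64  -> 256 channels
#   block 2: Bottleneck(256, 64)                         # 256 -> 256 channels
#   block 3: Bottleneck(256, 64)                         # 256 -> 256 channels
#
# self.inplanes is left at 256 for the next stage.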
Example #16
 def mobilenetv2(self):
     model = MobileNetV2()
     model = ModuleHelper.load_model(model,
                                     pretrained=self.configer.get(
                                         'network', 'pretrained'),
                                     all_match=False)
     return model
Example #17
    def __init__(self, configer):
        super(PSPNet, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()
        num_features = self.backbone.get_num_features()
        self.dsn = nn.Sequential(
            _ConvBatchNormReluBlock(num_features // 2,
                                    num_features // 4,
                                    3,
                                    1,
                                    bn_type=self.configer.get(
                                        'network', 'bn_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(num_features // 4, self.num_classes, 1, 1, 0))
        self.ppm = PPMBilinearDeepsup(fc_dim=num_features,
                                      bn_type=self.configer.get(
                                          'network', 'bn_type'))

        self.cls = nn.Sequential(
            nn.Conv2d(num_features + 4 * 512,
                      512,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            ModuleHelper.BNReLU(512,
                                bn_type=self.configer.get(
                                    'network', 'bn_type')), nn.Dropout2d(0.1),
            nn.Conv2d(512, self.num_classes, kernel_size=1))
Example #18
    def __init__(self, configer):
        super(DeepLabV3, self).__init__()
        self.configer = configer
        self.backbone = BackboneSelector(configer).get_backbone()

        self.backbone = nn.Sequential(
            self.backbone.conv1, self.backbone.bn1, self.backbone.relu1,
            self.backbone.conv2, self.backbone.bn2, self.backbone.relu2,
            self.backbone.conv3, self.backbone.bn3, self.backbone.relu3,
            self.backbone.maxpool, self.backbone.layer1, self.backbone.layer2,
            self.backbone.layer3)
        self.MG_features = _ResidualBlockMulGrid(
            inplanes=1024,
            midplanes=512,
            outplanes=2048,
            stride=1,
            dilation=2,
            mulgrid=self.configer.get('network', 'multi_grid'),
            bn_type=self.configer.get('network', 'bn_type'))
        pyramids = [6, 12, 18]
        self.aspp = _ASPPModule(2048,
                                256,
                                pyramids,
                                bn_type=self.configer.get(
                                    'network', 'bn_type'))

        self.fc1 = nn.Sequential(
            nn.Conv2d(1280, 256, kernel_size=1),  # 256 * 5 = 1280
            ModuleHelper.BatchNorm2d(
                bn_type=self.configer.get('network', 'bn_type'))(256))
        self.fc2 = nn.Conv2d(256,
                             self.configer.get('data', 'num_classes'),
                             kernel_size=1)
Example #19
 def __init__(self,
              low_in_channels,
              high_in_channels,
              out_channels,
              key_channels,
              value_channels,
              dropout,
              sizes=([1]),
              norm_type=None,
              psp_size=(1, 3, 6, 8)):
     super(AFNB, self).__init__()
     self.stages = []
     self.norm_type = norm_type
     self.psp_size = psp_size
     self.stages = nn.ModuleList([
         self._make_stage([low_in_channels, high_in_channels], out_channels,
                          key_channels, value_channels, size)
         for size in sizes
     ])
     self.conv_bn_dropout = nn.Sequential(
         nn.Conv2d(out_channels + high_in_channels,
                   out_channels,
                   kernel_size=1,
                   padding=0),
         ModuleHelper.BatchNorm2d(norm_type=self.norm_type)(out_channels),
         nn.Dropout2d(dropout))
Example #20
 def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, dilation=1, groups=1, bn_type=None):
     super(ConvBnRelu, self).__init__()
     self.conv_bn_relu = nn.Sequential(
         nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, dilation, groups,
                   False),
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(out_channel),
         nn.ReLU(True))
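A usage sketch (assumes the class also defines the obvious forward that applies self.conv_bn_relu; here the Sequential is called directly, and bn_type=None is assumed to fall back to plain nn.BatchNorm2d):

import torch

block = ConvBnRelu(in_channel=64, out_channel=128, kernel_size=3,
                   stride=1, padding=1)
y = block.conv_bn_relu(torch.randn(2, 64, 32, 32))
print(y.shape)  # torch.Size([2, 128, 32, 32])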
Example #21
    def build_conv_block(self, dim, padding_type, norm_type, use_dropout,
                         use_bias):
        """Construct a convolutional block.
        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not
        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim),
            nn.ReLU(True)
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim)
        ]

        return nn.Sequential(*conv_block)
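Note that the block deliberately ends on a norm layer with no final activation: in the pix2pix/CycleGAN ResnetBlock it is applied additively, so the forward is just the skip connection (a sketch):

def forward(self, x):
    # residual connection around the conv block built above
    return x + self.conv_block(x)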
Example #22
 def __init__(self, inplanes, outplanes, kernel_size, stride, padding, dilation, relu=True, bn_type=None):
     super(_ConvBatchNormReluBlock, self).__init__()
     self.relu = relu
      self.conv = nn.Conv2d(in_channels=inplanes, out_channels=outplanes,
                            kernel_size=kernel_size, stride=stride,
                            padding=padding, dilation=dilation, bias=False)
     self.bn = ModuleHelper.BatchNorm2d(bn_type=bn_type)(num_features=outplanes)
     self.relu_f = nn.ReLU()
Example #23
 def squeezenet(self):
     """Constructs a ResNet-18 model.
     Args:
         pretrained (bool): If True, returns a model pre-trained on Places
     """
     model = SqueezeNet()
     model = ModuleHelper.load_model(model, pretrained=self.configer.get('network', 'pretrained'), all_match=False)
     return model
Example #24
 def vgg_bn(self, vgg_cfg=None):
     backbone = self.configer.get('network', 'backbone')
     model = VGG(cfg_name=backbone, vgg_cfg=vgg_cfg, bn=True)
     model = ModuleHelper.load_model(model,
                                     pretrained=self.configer.get(
                                         'network', 'pretrained'),
                                     all_match=False)
     return model
Example #25
    def __init__(self, input_nc, output_nc, ngf=64, norm_type=None, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct a Resnet-based generator
        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResNetGenerator, self).__init__()
        use_bias = (norm_type == 'instancenorm')

        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 ModuleHelper.BatchNorm2d(norm_type=norm_type)(ngf),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      ModuleHelper.BatchNorm2d(norm_type=norm_type)(ngf * mult * 2),
                      nn.ReLU(True)]

        mult = 2 ** n_downsampling
        for i in range(n_blocks):       # add ResNet blocks

            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_type=norm_type, use_dropout=use_dropout, use_bias=use_bias)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      ModuleHelper.BatchNorm2d(norm_type=norm_type)(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
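A hypothetical smoke test (assumes the usual forward that applies self.model): the generator is fully convolutional and shape-preserving, with Tanh bounding the output to [-1, 1].

import torch

netG = ResNetGenerator(input_nc=3, output_nc=3, ngf=64,
                       norm_type='instancenorm', n_blocks=6)
x = torch.randn(1, 3, 256, 256)
print(netG(x).shape)  # torch.Size([1, 3, 256, 256])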
Example #26
 def __init__(self, configer):
     super(asymmetric_non_local_network, self).__init__()
     self.configer = configer
     self.num_classes = self.configer.get('data', 'num_classes')
     self.backbone = BackboneSelector(configer).get_backbone()
     # low_in_channels, high_in_channels, out_channels, key_channels, value_channels, dropout
     self.fusion = AFNB(1024,
                        2048,
                        2048,
                        256,
                        256,
                        dropout=0.05,
                        sizes=([1]),
                        norm_type=self.configer.get('network', 'norm_type'))
     # extra added layers
     self.context = nn.Sequential(
         nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(512,
                             norm_type=self.configer.get(
                                 'network', 'norm_type')),
         APNB(in_channels=512,
              out_channels=512,
              key_channels=256,
              value_channels=256,
              dropout=0.05,
              sizes=([1]),
              norm_type=self.configer.get('network', 'norm_type')))
     self.cls = nn.Conv2d(512,
                          self.num_classes,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=True)
     self.dsn = nn.Sequential(
         nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(512,
                             norm_type=self.configer.get(
                                 'network', 'norm_type')),
         nn.Dropout2d(0.05),
         nn.Conv2d(512,
                   self.num_classes,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=True))
Example #27
 def darknet53(self):
     """Constructs a darknet-53 model.
     """
     model = DarkNet([1, 2, 8, 8, 4])
     model = ModuleHelper.load_model(model,
                                     pretrained=self.configer.get(
                                         'network', 'pretrained'),
                                     all_match=False)
     return model
Example #28
 def resnet34(self, **kwargs):
     """Constructs a ResNet-34 model.
     Args:
         pretrained (bool): If True, returns a model pre-trained on Places
     """
     model = ResNet(BasicBlock, [3, 4, 6, 3], deep_base=False,
                    bn_type=self.configer.get('network', 'bn_type'), **kwargs)
     model = ModuleHelper.load_model(model, pretrained=self.configer.get('network', 'pretrained'))
     return model
Example #29
 def deepbase_resnet101(self, **kwargs):
     """Constructs a ResNet-101 model.
     Args:
         pretrained (bool): If True, returns a model pre-trained on Places
     """
     model = ResNet(Bottleneck, [3, 4, 23, 3], deep_base=True,
                    bn_type=self.configer.get('network', 'bn_type'), **kwargs)
     model = ModuleHelper.load_model(model, pretrained=self.configer.get('network', 'pretrained'))
     return model
Example #30
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, bn_type=None):

        super(DenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', ModuleHelper.BatchNorm2d(bn_type=bn_type)(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, bn_type=bn_type)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2, bn_type=bn_type)
                avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
                self.features.add_module('transition%d' % (i + 1), trans)
                self.features.add_module('transition%s_pool' % (i + 1), avg_pool)
                num_features = num_features // 2

        self.num_features = num_features
        # Final batch norm
        self.features.add_module('norm5', ModuleHelper.BatchNorm2d(bn_type=bn_type)(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, ModuleHelper.BatchNorm2d(bn_type=bn_type)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
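self.features ends on the bare norm5, so the classification path still needs a ReLU and a global pool; a sketch of the standard torchvision-style forward:

import torch.nn.functional as F

def forward(self, x):
    features = self.features(x)
    out = F.relu(features, inplace=True)
    # global average pool, flatten, classify
    out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
    return self.classifier(out)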