Example #1
    def __init__(self, in_channels, out_channels, num_classes, configer):
        super(GCBModule, self).__init__()
        inter_channels = in_channels // 4
        self.configer = configer
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')))
        self.ctb = ContextBlock(inter_channels, ratio=1. / 4)
        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False),
            ModuleHelper.BNReLU(inter_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False),
            ModuleHelper.BNReLU(out_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(out_channels,  # was hard-coded 512; must match the bottleneck output
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
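The snippet stops at __init__; a plausible forward for this head, matching the layer shapes declared above (a sketch, not repo-verified, and assuming torch is imported):

    def forward(self, x):
        output = self.conva(x)
        output = self.ctb(output)   # global-context refinement
        output = self.convb(output)
        # concatenation yields in_channels + inter_channels, as self.bottleneck expects
        return self.bottleneck(torch.cat([x, output], 1))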
Example #2
    def __init__(self, low_in_channels, high_in_channels, key_channels,
                 value_channels, out_channels=None, scale=1, norm_type=None,
                 psp_size=(1, 3, 6, 8)):
        super(_SelfAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = low_in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        if out_channels is None:
            self.out_channels = high_in_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_query = nn.Sequential(
            nn.Conv2d(in_channels=high_in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_value = nn.Conv2d(in_channels=self.in_channels, out_channels=self.value_channels,
                                 kernel_size=1, stride=1, padding=0)
        self.W = nn.Conv2d(in_channels=self.value_channels, out_channels=self.out_channels,
                           kernel_size=1, stride=1, padding=0)

        self.psp = PSPModule(psp_size)
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
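The forward pass is omitted; the sketch below follows the asymmetric non-local design (APNB/AFNB) that these layers imply: queries come from the high-level features, while keys and values come from the pyramid-pooled low-level features, which shrinks the attention matrix. Assumptions: F is torch.nn.functional, PSPModule returns flattened (batch, channels, bins) tensors, and the optional self.pool down-scaling is skipped for brevity.

    def forward(self, low_feats, high_feats):
        batch_size, _, h, w = high_feats.size()
        value = self.psp(self.f_value(low_feats)).permute(0, 2, 1)  # (B, S, value_channels)
        key = self.psp(self.f_key(low_feats))                       # (B, key_channels, S)
        query = self.f_query(high_feats).view(
            batch_size, self.key_channels, -1).permute(0, 2, 1)     # (B, HW, key_channels)
        sim_map = torch.matmul(query, key) * (self.key_channels ** -0.5)
        sim_map = F.softmax(sim_map, dim=-1)
        context = torch.matmul(sim_map, value)                      # (B, HW, value_channels)
        context = context.permute(0, 2, 1).contiguous().view(
            batch_size, self.value_channels, h, w)
        return self.W(context)  # W is zero-initialized, so the block starts as a no-op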
Example #3

    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer, norm_type=None):  # norm_type added; undefined in the excerpt
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                            num_channels_cur_layer[i]),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i+1-num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        ModuleHelper.BatchNorm2d(norm_type=norm_type)(outchannels),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)
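As a concrete illustration with hypothetical values that match the HRNet configuration later on this page, going from stage 1's single 256-channel output to stage 2's [48, 96] branches produces one same-resolution conv and one stride-2 conv that opens the new branch:

    # hypothetical call from inside the network's __init__
    transitions = self._make_transition_layer([256], [48, 96])
    # transitions[0]: Conv2d(256, 48, 3, 1, 1) + BN + ReLU  (same resolution)
    # transitions[1]: Conv2d(256, 96, 3, 2, 1) + BN + ReLU  (new downsampled branch)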
Example #4
    def __init__(self, input_num, num1, num2, dilation_rate, drop_out,
                 norm_type):
        super(_DenseAsppBlock, self).__init__()
        self.add_module(
            'conv1',
            nn.Conv2d(in_channels=input_num, out_channels=num1,
                      kernel_size=1))

        self.add_module(
            'norm1',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num1))
        self.add_module('relu1', nn.ReLU(inplace=False))
        self.add_module(
            'conv2',
            nn.Conv2d(in_channels=num1,
                      out_channels=num2,
                      kernel_size=3,
                      dilation=dilation_rate,
                      padding=dilation_rate))
        self.add_module(
            'norm2',
            # 'norm2' follows 'conv2', which outputs num2 channels; the original
            # num_features=input_num would fail at runtime.
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num2))
        self.add_module('relu2', nn.ReLU(inplace=False))

        self.drop_rate = drop_out
Example #5

    def __init__(self, in_channels, out_channels, num_classes, configer):
        super(NLModule_nowd, self).__init__()
        inter_channels = in_channels // 4
        self.configer = configer

        self.conva = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
                                   ModuleHelper.BNReLU(inter_channels, norm_type=self.configer.get('network', 'norm_type')))

        self.ctb = NonLocal2d_nowd(inter_channels, inter_channels // 2,
                                   downsample=self.configer.get('nonlocal', 'downsample'),
                                   whiten_type=self.configer.get('nonlocal', 'whiten_type'), 
                                   weight_init_scale=self.configer.get('nonlocal', 'weight_init_scale'),
                                   with_gc=self.configer.get('nonlocal', 'with_gc'),
                                   with_nl=self.configer.get('nonlocal', 'with_nl'),
                                   nowd=self.configer.get('nonlocal', 'nowd'),
                                   use_out=self.configer.get('nonlocal', 'use_out'),
                                   out_bn=self.configer.get('nonlocal', 'out_bn'))

        self.convb = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
                                   ModuleHelper.BNReLU(inter_channels, norm_type=self.configer.get('network', 'norm_type')))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels+inter_channels, out_channels, kernel_size=3, padding=1, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_channels, norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(out_channels, num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True)  # was hard-coded 512; must match the bottleneck output
            )
Example #6
 def __init__(self, num_input_features, growth_rate, bn_size, drop_rate,
              norm_type):
     super(_DenseLayer, self).__init__()
     self.add_module(
         'norm1',
         ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_input_features))
     self.add_module('relu1', nn.ReLU(inplace=True))
     self.add_module(
         'conv1',
         nn.Conv2d(num_input_features,
                   bn_size * growth_rate,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module(
         'norm2',
         ModuleHelper.BatchNorm2d(norm_type=norm_type)(bn_size *
                                                       growth_rate))
     self.add_module('relu2', nn.ReLU(inplace=True))
     self.add_module(
         'conv2',
         nn.Conv2d(bn_size * growth_rate,
                   growth_rate,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias=False))
     self.drop_rate = drop_rate
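The forward is not shown; in torchvision's DenseNet, which this layer mirrors, _DenseLayer subclasses nn.Sequential and concatenates its output with its input. A sketch under that assumption, with F standing for torch.nn.functional:

 def forward(self, x):
     new_features = super(_DenseLayer, self).forward(x)
     if self.drop_rate > 0:
         new_features = F.dropout(new_features, p=self.drop_rate,
                                  training=self.training)
     # dense connectivity: concatenate input and new feature maps
     return torch.cat([x, new_features], 1)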
Example #7

 def __init__(self, inplanes, planes, stride=1, downsample=None,
              norm_type=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
     self.downsample = downsample
     self.stride = stride
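BasicBlock's forward follows the standard residual pattern (a sketch; the example omits it):

 def forward(self, x):
     residual = x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     if self.downsample is not None:
         residual = self.downsample(x)  # match channels / spatial size
     return self.relu(out + residual)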
Example #8

    def __init__(self, norm_type=None, **kwargs):  # norm_type added; undefined in the excerpt
        super(HighResolutionNet, self).__init__()
        
        self.num_features = 720
        # stem net
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)
        self.relu = nn.ReLU(inplace=True)
        
        self.stage1_cfg = {'NUM_MODULES': 1, 'NUM_BRANCHES': 1, 'BLOCK': 'BOTTLENECK',
                           'NUM_BLOCKS': [4], 'NUM_CHANNELS': [64], 'FUSE_METHOD': 'SUM'}
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion*num_channels

        self.stage2_cfg = {'NUM_MODULES': 1, 'NUM_BRANCHES': 2, 'BLOCK': 'BASIC',
                           'NUM_BLOCKS': [4, 4], 'NUM_CHANNELS': [48, 96], 'FUSE_METHOD': 'SUM'}
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        self.stage3_cfg = {'NUM_MODULES': 4, 'NUM_BRANCHES': 3, 'BLOCK': 'BASIC',
                           'NUM_BLOCKS': [4, 4, 4], 'NUM_CHANNELS': [48, 96, 192], 'FUSE_METHOD': 'SUM'}
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        self.stage4_cfg = {'NUM_MODULES': 3, 'NUM_BRANCHES': 4, 'BLOCK': 'BASIC',
                           'NUM_BLOCKS': [4, 4, 4, 4], 'NUM_CHANNELS': [48, 96, 192, 384], 'FUSE_METHOD': 'SUM'}
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        
        last_inp_channels = int(np.sum(pre_stage_channels))  # 48+96+192+384 = 720; np.int was removed from NumPy
Example #9
    def freeze_bn(net, norm_type=None):
        for m in net.modules():
            if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
                m.eval()

            if norm_type is not None:
                from model.tools.module_helper import ModuleHelper
                if isinstance(m, ModuleHelper.BatchNorm2d(norm_type=norm_type, ret_cls=True)) \
                        or isinstance(m, ModuleHelper.BatchNorm1d(norm_type=norm_type, ret_cls=True)) \
                        or isinstance(m, ModuleHelper.BatchNorm3d(norm_type=norm_type, ret_cls=True)):
                    m.eval()
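A hypothetical call site (the missing self suggests freeze_bn is a static method of ModuleHelper), useful when fine-tuning with frozen BatchNorm statistics:

import torchvision.models as models
net = models.resnet18()
net.train()                  # parameters still train
ModuleHelper.freeze_bn(net)  # BN layers are switched back to eval(), freezing running stats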
Example #10

    def __init__(self, in_channels, out_channels, num_classes, configer):
        super(NLModule, self).__init__()
        inter_channels = in_channels // 2
        self.configer = configer
        self.down = self.configer.get('nonlocal', 'downsample')
        self.whiten_type = self.configer.get('nonlocal', 'whiten_type')
        self.temp = self.configer.get('nonlocal', 'temp')
        self.with_gc = self.configer.get('nonlocal', 'with_gc')
        self.with_unary = self.configer.get('nonlocal',
                                            'with_unary',
                                            default=False)
        self.use_out = self.configer.get('nonlocal', 'use_out')
        self.out_bn = self.configer.get('nonlocal', 'out_bn')
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')))

        self.ctb = NonLocal2d_bn(inter_channels,
                                 inter_channels // 2,
                                 downsample=self.down,
                                 whiten_type=self.whiten_type,
                                 temperature=self.temp,
                                 with_gc=self.with_gc,
                                 with_unary=self.with_unary,
                                 use_out=self.use_out,
                                 out_bn=self.out_bn)

        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False),
            ModuleHelper.BNReLU(inter_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False),
            ModuleHelper.BNReLU(out_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(out_channels,  # was hard-coded 512; must match the bottleneck output
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #11

 def __init__(self, inplanes, planes, stride=1, downsample=None,
              norm_type=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, bias=False)
     self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
     self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                            bias=False)
     self.bn3 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #12

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_type=None):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_type (str) -- the normalization type (resolved via ModuleHelper)
        """
        super(NLayerDiscriminator, self).__init__()
        use_bias = (norm_type == 'instancenorm')
        kw = 4
        padw = 1
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        # gradually increase the number of filters
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev,
                          ndf * nf_mult,
                          kernel_size=kw,
                          stride=2,
                          padding=padw,
                          bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev,
                      ndf * nf_mult,
                      kernel_size=kw,
                      stride=1,
                      padding=padw,
                      bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)
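A quick shape check, assuming the class is importable and its forward simply applies self.model; 'batchnorm' is a hypothetical norm_type value:

import torch
netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3, norm_type='batchnorm')
out = netD(torch.randn(1, 3, 256, 256))
print(out.shape)  # torch.Size([1, 1, 30, 30]): one logit per (roughly 70x70) input patch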
Example #13
    def _make_layer(self, block, planes, blocks, stride=1, norm_type=None):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes *
                                                              block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  norm_type=norm_type))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_type=norm_type))

        return nn.Sequential(*layers)
Example #14

    def __init__(self, input_nc, ndf=64, norm_type=None):
        """Construct a 1x1 PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_type (str) -- the normalization type (resolved via ModuleHelper)
        """
        super(PixelDiscriminator, self).__init__()
        use_bias = (norm_type == 'instancenorm')

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf,
                      ndf * 2,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2,
                      1,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=use_bias)
        ]

        self.net = nn.Sequential(*self.net)
Example #15
    def __init__(self, configer):
        super(PSPNet, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()
        num_features = self.backbone.get_num_features()
        self.dsn = nn.Sequential(
            _ConvBatchNormReluBlock(num_features // 2,
                                    num_features // 4,
                                    3,
                                    1,
                                    norm_type=self.configer.get(
                                        'network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(num_features // 4, self.num_classes, 1, 1, 0))
        self.ppm = PPMBilinearDeepsup(fc_dim=num_features,
                                      norm_type=self.configer.get(
                                          'network', 'norm_type'))

        self.cls = nn.Sequential(
            nn.Conv2d(num_features + 4 * 512,
                      512,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            ModuleHelper.BNReLU(512,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, self.num_classes, kernel_size=1))
        self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                            configer.get('loss.loss_type'))
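The heads above imply a forward along these lines (a sketch, not repo-verified; it assumes the backbone returns its stage feature maps as a list and omits the final upsampling of the logits):

    def forward(self, x):
        feats = self.backbone(x)
        aux_out = self.dsn(feats[-2])        # deep-supervision branch
        out = self.cls(self.ppm(feats[-1]))  # pyramid pooling + classifier
        return aux_out, out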
Example #16
    def __init__(self, configer):
        self.inplanes = 128
        super(DeepLabV3, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()

        self.head = nn.Sequential(
            ASPPModule(self.backbone.get_num_features(),
                       norm_type=self.configer.get('network', 'norm_type')),
            nn.Conv2d(512,
                      self.num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512,
                      self.num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                            configer.get('loss.loss_type'))
Example #17
 def mobilenetv2(self):
     model = MobileNetV2()
     model = ModuleHelper.load_model(model,
                                     pretrained=self.configer.get(
                                         'network', 'pretrained'),
                                     all_match=False)
     return model
Example #18
 def __init__(self, configer):
     super(asymmetric_non_local_network, self).__init__()
     self.configer = configer
     self.num_classes = self.configer.get('data', 'num_classes')
     self.backbone = BackboneSelector(configer).get_backbone()
     # low_in_channels, high_in_channels, out_channels, key_channels, value_channels, dropout
     self.fusion = AFNB(1024,
                        2048,
                        2048,
                        256,
                        256,
                        dropout=0.05,
                         sizes=[1],
                        norm_type=self.configer.get('network', 'norm_type'))
     # extra added layers
     self.context = nn.Sequential(
         nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(512,
                             norm_type=self.configer.get(
                                 'network', 'norm_type')),
         APNB(in_channels=512,
              out_channels=512,
              key_channels=256,
              value_channels=256,
              dropout=0.05,
               sizes=[1],
              norm_type=self.configer.get('network', 'norm_type')))
     self.cls = nn.Conv2d(512,
                          self.num_classes,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=True)
     self.dsn = nn.Sequential(
         nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(512,
                             norm_type=self.configer.get(
                                 'network', 'norm_type')),
         nn.Dropout2d(0.05),
         nn.Conv2d(512,
                   self.num_classes,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=True))
     self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                         configer.get('loss.loss_type'))
Example #19
    def build_conv_block(self, dim, padding_type, norm_type, use_dropout,
                         use_bias):
        """Construct a convolutional block.
        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_type (str)     -- the normalization type (resolved via ModuleHelper)
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not
        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim),
            nn.ReLU(True)
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim)
        ]

        return nn.Sequential(*conv_block)
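In the CycleGAN/pix2pix generator this block is used inside a residual wrapper; the enclosing ResnetBlock forward is a one-liner (sketch):

    def forward(self, x):
        return x + self.conv_block(x)  # skip connection around the conv block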
Example #20
def dfnetv2(pretrained=None):
    """Constructs a DFNetV2 model.
    Args:
        pretrained: if set, weights are loaded via ModuleHelper.load_model
    """
    model = DFNetV2(num_classes=1000)
    model = ModuleHelper.load_model(model, pretrained=pretrained, all_match=False)
    return model
Example #21
 def squeezenet(self):
     """Constructs a SqueezeNet model; pre-trained weights are loaded from
     the 'network'/'pretrained' config entry when it is set.
     """
     model = SqueezeNet()
     model = ModuleHelper.load_model(model, pretrained=self.configer.get('network', 'pretrained'), all_match=False)
     return model
Example #22
 def vgg(self, vgg_cfg=None):
     """Constructs a VGG model; the variant comes from the 'network'/'backbone'
     config entry, and pre-trained weights from 'network'/'pretrained' when set.
     """
     backbone = self.configer.get('network', 'backbone')
     model = VGG(cfg_name=backbone, vgg_cfg=vgg_cfg, bn=False)
     model = ModuleHelper.load_model(model, pretrained=self.configer.get('network', 'pretrained'), all_match=False)
     return model
Example #23
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, norm_type=None):

        super(DenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, norm_type=norm_type)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2, norm_type=norm_type)
                avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
                self.features.add_module('transition%d' % (i + 1), trans)
                self.features.add_module('transition%s_pool' % (i + 1), avg_pool)
                num_features = num_features // 2

        self.num_features = num_features
        # Final batch norm
        self.features.add_module('norm5', ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, ModuleHelper.BatchNorm2d(norm_type=norm_type, ret_cls=True)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
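The classifier path is not shown; torchvision's classic DenseNet forward, which this mirrors, is sketched below (the 7x7 pooling assumes 224x224 inputs; F is torch.nn.functional):

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        return self.classifier(out)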
Example #24

 def mobilenetv2_dilated8(self, pretrained=None):
     """Constructs a MobileNetV2 model with dilated (output stride 8) convolutions.
     Args:
         pretrained: if set, weights are loaded via ModuleHelper.load_model
     """
     model = MobileNetV2Dilated8()
     model = ModuleHelper.load_model(model,
                                     pretrained=pretrained,
                                     all_match=False)
     return model
Example #25
 def vgg(self, backbone=None, pretrained=None):
     """Constructs a VGG model.
     Args:
         backbone: VGG configuration name
         pretrained: if set, weights are loaded via ModuleHelper.load_model
     """
     model = VGG(cfg_name=backbone, bn=False)
     model = ModuleHelper.load_model(model,
                                     pretrained=pretrained,
                                     all_match=False)
     return model
Example #26
 def densenet161(self, pretrained=None, **kwargs):
     r"""Densenet-161 model from
     `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
     model = DenseNet(num_init_features=96, growth_rate=48,
                      block_config=(6, 12, 36, 24), norm_type=self.configer.get('network', 'norm_type'), **kwargs)
     model = ModuleHelper.load_model(model, pretrained=pretrained)
     return model
Example #27
    def __init__(self, in_channels, out_channels, num_classes, configer):
        super(UNModule, self).__init__()
        inter_channels = in_channels // 4
        self.configer = configer
        self.down = self.configer.get('unary', 'downsample')
        self.use_out = self.configer.get('unary', 'use_out')
        self.out_bn = self.configer.get('unary', 'out_bn')
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')))

        self.ctb = Unary2d(inter_channels,
                           inter_channels // 2,
                           downsample=self.down,
                           use_out=self.use_out,
                           out_bn=self.out_bn)

        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False),
            ModuleHelper.BNReLU(inter_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels,
                      out_channels,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False),
            ModuleHelper.BNReLU(out_channels,
                                norm_type=self.configer.get(
                                    'network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(out_channels,  # was hard-coded 512; must match the bottleneck output
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
Example #28

 def squeezenet(self, pretrained=None):
     """Constructs a SqueezeNet model.
     Args:
         pretrained: if set, weights are loaded via ModuleHelper.load_model
     """
     model = SqueezeNet()
     model = ModuleHelper.load_model(model,
                                     pretrained=pretrained,
                                     all_match=False)
     return model
Example #29

    def _make_fuse_layers(self, norm_type=None):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_inchannels[i])))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_outchannels_conv3x3)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_outchannels_conv3x3),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
Example #30
    def __init__(self, features, inner_features=512, out_features=512, dilations=(12, 24, 36), norm_type=None):
        super(ASPPModule, self).__init__()

        self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                   nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1,
                                             bias=False),
                                   ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv3 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_features, norm_type=norm_type),
            nn.Dropout2d(0.1)
        )
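A forward consistent with the five branches above (a sketch; F is torch.nn.functional): the pooled branch is upsampled back to the feature resolution, the branches are concatenated to inner_features * 5 channels, and the bottleneck fuses them.

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(self.conv1(x), size=(h, w),
                              mode='bilinear', align_corners=True)
        out = torch.cat((feat1, self.conv2(x), self.conv3(x),
                         self.conv4(x), self.conv5(x)), 1)
        return self.bottleneck(out)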