    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        ModuleHelper.BatchNorm2d(norm_type=self.norm_type)(
                            num_channels_cur_layer[i]),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i+1-num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        ModuleHelper.BatchNorm2d(norm_type=self.norm_type)(outchannels),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)
    def __init__(self, input_num, num1, num2, dilation_rate, drop_out,
                 norm_type):
        super(_DenseAsppBlock, self).__init__()
        self.add_module(
            'conv1',
            nn.Conv2d(in_channels=input_num, out_channels=num1,
                      kernel_size=1))
        self.add_module(
            'norm1',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num1))
        self.add_module('relu1', nn.ReLU(inplace=False))
        self.add_module(
            'conv2',
            nn.Conv2d(in_channels=num1,
                      out_channels=num2,
                      kernel_size=3,
                      dilation=dilation_rate,
                      padding=dilation_rate))
        self.add_module(
            'norm2',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                num_features=num2))  # normalize conv2's num2 output channels
        self.add_module('relu2', nn.ReLU(inplace=False))

        self.drop_rate = drop_out
Example #3
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate,
                 norm_type):
        super(_DenseLayer, self).__init__()
        self.add_module(
            'norm1',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module(
            'conv1',
            nn.Conv2d(num_input_features,
                      bn_size * growth_rate,
                      kernel_size=1,
                      stride=1,
                      bias=False))
        self.add_module(
            'norm2',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(bn_size *
                                                          growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module(
            'conv2',
            nn.Conv2d(bn_size * growth_rate,
                      growth_rate,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False))
        self.drop_rate = drop_rate
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 norm_type=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
        self.downsample = downsample
        self.stride = stride
    def __init__(self, norm_type=None, **kwargs):
        super(HighResolutionNet, self).__init__()
        self.norm_type = norm_type
        
        self.num_features = 720
        # stem net
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)
        self.relu = nn.ReLU(inplace=True)
        
        self.stage1_cfg = {'NUM_MODULES':1, 'NUM_BRANCHES':1, 'BLOCK':'BOTTLENECK', 
                           'NUM_BLOCKS':[4], 'NUM_CHANNELS':[64], 'FUSE_METHOD':'SUM'}
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion*num_channels

        self.stage2_cfg = {'NUM_MODULES':1, 'NUM_BRANCHES':2, 'BLOCK':'BASIC', 
                           'NUM_BLOCKS':[4,4], 'NUM_CHANNELS':[48,96], 'FUSE_METHOD':'SUM'}
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        self.stage3_cfg = {'NUM_MODULES':4, 'NUM_BRANCHES':3, 'BLOCK':'BASIC', 
                           'NUM_BLOCKS':[4,4,4], 'NUM_CHANNELS':[48,96,192], 'FUSE_METHOD':'SUM'}
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        self.stage4_cfg = {'NUM_MODULES':3, 'NUM_BRANCHES':4, 'BLOCK':'BASIC', 
                           'NUM_BLOCKS':[4,4,4,4], 'NUM_CHANNELS':[48,96,192,384], 'FUSE_METHOD':'SUM'}
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        
        last_inp_channels = int(np.sum(pre_stage_channels))
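
# A minimal arithmetic check, not from the source: with the stage-4 widths
# configured above and BASIC blocks (expansion = 1), the concatenated branch
# channels should match the 720 features declared in self.num_features.
stage4_widths = [48, 96, 192, 384]
assert sum(stage4_widths) == 720  # last_inp_channels for this configuration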
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 norm_type=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(
            planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_type=None):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_type (str) -- the type of normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        use_bias = (norm_type == 'instancenorm')
        kw = 4
        padw = 1
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev,
                          ndf * nf_mult,
                          kernel_size=kw,
                          stride=2,
                          padding=padw,
                          bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev,
                      ndf * nf_mult,
                      kernel_size=kw,
                      stride=1,
                      padding=padw,
                      bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)
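
# A hedged usage sketch, not part of the source: the same PatchGAN layer pattern
# rebuilt with plain nn.BatchNorm2d in place of ModuleHelper, to show that the
# default 3-layer discriminator maps a 256x256 RGB image to a 30x30 map of
# one-channel patch logits.
import torch
import torch.nn as nn

def patchgan_sketch(input_nc=3, ndf=64, n_layers=3):
    kw, padw = 4, 1
    layers = [nn.Conv2d(input_nc, ndf, kw, 2, padw), nn.LeakyReLU(0.2, True)]
    nf_mult = 1
    for n in range(1, n_layers):  # stride-2 blocks double the filter count (capped at 8x)
        nf_mult_prev, nf_mult = nf_mult, min(2 ** n, 8)
        layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kw, 2, padw, bias=False),
                   nn.BatchNorm2d(ndf * nf_mult), nn.LeakyReLU(0.2, True)]
    nf_mult_prev, nf_mult = nf_mult, min(2 ** n_layers, 8)
    layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kw, 1, padw, bias=False),
               nn.BatchNorm2d(ndf * nf_mult), nn.LeakyReLU(0.2, True),
               nn.Conv2d(ndf * nf_mult, 1, kw, 1, padw)]  # 1-channel prediction map
    return nn.Sequential(*layers)

print(patchgan_sketch()(torch.randn(1, 3, 256, 256)).shape)  # torch.Size([1, 1, 30, 30])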
    def __init__(self, input_nc, ndf=64, norm_type=None):
        """Construct a 1x1 PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_type (str) -- the type of normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        use_bias = (norm_type == 'instancenorm')

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf,
                      ndf * 2,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2,
                      1,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=use_bias)
        ]

        self.net = nn.Sequential(*self.net)
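
# A hedged sketch, not part of the source: because every convolution here is 1x1,
# the PixelDiscriminator classifies each pixel independently and its prediction
# map keeps the input resolution. Plain nn.BatchNorm2d stands in for ModuleHelper.
import torch
import torch.nn as nn

pixel_d = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=1), nn.LeakyReLU(0.2, True),
    nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128),
    nn.LeakyReLU(0.2, True),
    nn.Conv2d(128, 1, kernel_size=1, bias=False))
print(pixel_d(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 1, 64, 64])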
Example #9
    def _make_layer(self, block, planes, blocks, stride=1, norm_type=None):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes *
                                                              block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  norm_type=norm_type))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_type=norm_type))

        return nn.Sequential(*layers)
    def build_conv_block(self, dim, padding_type, norm_type, use_dropout,
                         use_bias):
        """Construct a convolutional block.
        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not
        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim),
            nn.ReLU(True)
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim)
        ]

        return nn.Sequential(*conv_block)
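
# A hedged sketch, not part of the source: ResnetBlock is assumed to wrap the block
# returned above in a residual connection, i.e. forward(x) = x + conv_block(x).
# A minimal standalone version with reflect padding and plain nn.BatchNorm2d:
import torch
import torch.nn as nn

class TinyResnetBlock(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.conv_block = nn.Sequential(
            nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, 3), nn.BatchNorm2d(dim),
            nn.ReLU(True),
            nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, 3), nn.BatchNorm2d(dim))

    def forward(self, x):
        return x + self.conv_block(x)  # channel count and spatial size are preserved

print(TinyResnetBlock(64)(torch.randn(1, 64, 32, 32)).shape)  # torch.Size([1, 64, 32, 32])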
Example #11
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, norm_type=None):

        super(DenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, norm_type=norm_type)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2, norm_type=norm_type)
                avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
                self.features.add_module('transition%d' % (i + 1), trans)
                self.features.add_module('transition%s_pool' % (i + 1), avg_pool)
                num_features = num_features // 2

        self.num_features = num_features
        # Final batch norm
        self.features.add_module('norm5', ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, ModuleHelper.BatchNorm2d(norm_type=norm_type, ret_cls=True)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
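
# A minimal arithmetic sketch, not from the source, of the feature bookkeeping above
# for the default DenseNet-121 configuration: growth_rate=32,
# block_config=(6, 12, 24, 16), num_init_features=64.
num_features = 64
for i, num_layers in enumerate((6, 12, 24, 16)):
    num_features += num_layers * 32   # each dense block adds num_layers * growth_rate channels
    if i != 3:
        num_features //= 2            # each transition halves the channel count
print(num_features)  # 1024, the input width of the final classifier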
    def _make_fuse_layers(self, norm_type=None):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_inchannels[i])))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_outchannels_conv3x3)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_outchannels_conv3x3),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
Example #13
    def freeze_bn(net, norm_type=None):
        for m in net.modules():
            if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
                m.eval()

            if norm_type is not None:
                from model.tools.module_helper import ModuleHelper
                if isinstance(m, ModuleHelper.BatchNorm2d(norm_type=norm_type, ret_cls=True)) \
                        or isinstance(m, ModuleHelper.BatchNorm1d(norm_type=norm_type, ret_cls=True)) \
                        or isinstance(m, ModuleHelper.BatchNorm3d(norm_type=norm_type, ret_cls=True)):
                    m.eval()
Example #14
    def __init__(self, num_input_features, num_output_features, norm_type):
        super(_Transition, self).__init__()
        self.add_module(
            'norm',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module(
            'conv',
            nn.Conv2d(num_input_features,
                      num_output_features,
                      kernel_size=1,
                      stride=1,
                      bias=False))
Example #15
    def __init__(self, low_in_channels, high_in_channels, out_channels,
                 key_channels, value_channels, dropout, sizes=([1]),
                 norm_type=None, psp_size=(1, 3, 6, 8)):
        super(AFNB, self).__init__()
        self.stages = []
        self.norm_type = norm_type
        self.psp_size = psp_size
        self.stages = nn.ModuleList(
            [self._make_stage([low_in_channels, high_in_channels], out_channels,
                              key_channels, value_channels, size)
             for size in sizes])
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(out_channels + high_in_channels, out_channels,
                      kernel_size=1, padding=0),
            ModuleHelper.BatchNorm2d(norm_type=self.norm_type)(out_channels),
            nn.Dropout2d(dropout)
        )
Example #16
    def __init__(self,
                 in_channel,
                 out_channel,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 norm_type=None):
        super(ConvBn, self).__init__()
        self.conv_bn = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding,
                      dilation, groups, bias=False),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(out_channel))
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1, norm_type=None):
        downsample = None
        if stride != 1 or \
           self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_channels[branch_index] * block.expansion),
            )

        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index]))

        return nn.Sequential(*layers)
Example #18
    def __init__(self,
                 block,
                 layers,
                 num_classes=1000,
                 deep_base=False,
                 norm_type=None):
        super(ResNet, self).__init__()
        self.inplanes = 128 if deep_base else 64
        if deep_base:
            self.prefix = nn.Sequential(
                OrderedDict([
                    ('conv1',
                     nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)),
                    ('bn1', ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)),
                    ('relu1', nn.ReLU(inplace=False)),
                    ('conv2',
                     nn.Conv2d(64,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)),
                    ('bn2', ModuleHelper.BatchNorm2d(norm_type=norm_type)(64)),
                    ('relu2', nn.ReLU(inplace=False)),
                    ('conv3',
                     nn.Conv2d(64,
                               128,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)),
                    ('bn3',
                     ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                         self.inplanes)),
                    ('relu3', nn.ReLU(inplace=False))
                ]))
        else:
            self.prefix = nn.Sequential(
                OrderedDict([('conv1',
                              nn.Conv2d(3,
                                        64,
                                        kernel_size=7,
                                        stride=2,
                                        padding=3,
                                        bias=False)),
                             ('bn1',
                              ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                                  self.inplanes)),
                             ('relu', nn.ReLU(inplace=False))]))

        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # ceil_mode=True differs from the torchvision default

        self.layer1 = self._make_layer(block,
                                       64,
                                       layers[0],
                                       norm_type=norm_type)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       norm_type=norm_type)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       norm_type=norm_type)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       norm_type=norm_type)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(
                    m,
                    ModuleHelper.BatchNorm2d(norm_type=norm_type,
                                             ret_cls=True)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 norm_type=None,
                 use_dropout=False,
                 n_blocks=6,
                 padding_type='reflect'):
        """Construct a Resnet-based generator
        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert (n_blocks >= 0)
        super(ResNetGenerator, self).__init__()
        use_bias = (norm_type == 'instancenorm')

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ngf),
            nn.ReLU(True)
        ]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            model += [
                nn.Conv2d(ngf * mult,
                          ngf * mult * 2,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(ngf * mult * 2),
                nn.ReLU(True)
            ]

        mult = 2**n_downsampling
        for i in range(n_blocks):  # add ResNet blocks

            model += [
                ResnetBlock(ngf * mult,
                            padding_type=padding_type,
                            norm_type=norm_type,
                            use_dropout=use_dropout,
                            use_bias=use_bias)
            ]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2**(n_downsampling - i)
            model += [
                nn.ConvTranspose2d(ngf * mult,
                                   int(ngf * mult / 2),
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1,
                                   bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                    int(ngf * mult / 2)),
                nn.ReLU(True)
            ]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
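
# A hedged sketch, not part of the source: the generator preserves spatial size.
# Each stride-2 downsampling conv (k=3, p=1) halves H and W, and each mirrored
# ConvTranspose2d (k=3, s=2, p=1, output_padding=1) doubles them again, while the
# channel width goes ngf -> 2*ngf -> 4*ngf and back.
def down(h):  # output size of Conv2d(kernel_size=3, stride=2, padding=1)
    return (h + 2 - 3) // 2 + 1

def up(h):    # output size of ConvTranspose2d(kernel_size=3, stride=2, padding=1, output_padding=1)
    return (h - 1) * 2 - 2 + 3 + 1

h = 256
for _ in range(2):
    h = down(h)   # 256 -> 128 -> 64
for _ in range(2):
    h = up(h)     # 64 -> 128 -> 256
print(h)  # 256, the input resolution is restored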
    def __init__(self,
                 outer_nc,
                 inner_nc,
                 input_nc=None,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 norm_type=None,
                 use_dropout=False):
        """Construct a Unet submodule with skip connections.
        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)    -- if this module is the outermost module
            innermost (bool)    -- if this module is the innermost module
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- if use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        use_bias = (norm_type == 'instancenorm')
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc,
                             inner_nc,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
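
# A hedged sketch, not part of the source: in the non-outermost blocks the skip
# connection is assumed to concatenate the block input with the submodule output
# along the channel axis (the forward pass is not shown here), which is why the
# up-convolutions take inner_nc * 2 input channels.
import torch

inner_nc = 64
x = torch.randn(1, inner_nc, 32, 32)        # block input
sub_out = torch.randn(1, inner_nc, 32, 32)  # submodule output at the same resolution
skip = torch.cat([x, sub_out], dim=1)
print(skip.shape)  # torch.Size([1, 128, 32, 32]) -> feeds ConvTranspose2d(inner_nc * 2, ...)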