Example #1
 def __init__(self, cin, cout, up_f=2, enable_dcn=False):
     super(IDAUp, self).__init__()
     self.enable_dcn = enable_dcn
     if enable_dcn:
         self.proj = DeformConv(cin, cout)
         self.node = DeformConv(cout, cout)
     else:
         self.proj = nn.Conv2dBnAct(cin,
                                    cout,
                                    kernel_size=1,
                                    stride=1,
                                    pad_mode='same',
                                    has_bias=False,
                                    has_bn=True,
                                    momentum=BN_MOMENTUM,
                                    activation='relu',
                                    after_fake=False)
         self.node = nn.Conv2dBnAct(2 * cout,
                                    cout,
                                    kernel_size=3,
                                    stride=1,
                                    pad_mode='same',
                                    has_bias=False,
                                    has_bn=True,
                                    momentum=BN_MOMENTUM,
                                    activation='relu',
                                    after_fake=False)
     self.up = nn.Conv2dTranspose(cout,
                                  cout,
                                  up_f * 2,
                                  stride=up_f,
                                  pad_mode='pad',
                                  padding=up_f // 2)
     self.concat = ops.Concat(axis=1)
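A minimal usage sketch for the upsampling branch above (assumptions: the enclosing IDAUp class is in scope, and BN_MOMENTUM is a module-level constant, commonly 0.9 in MindSpore ports of DLA-style networks):

    import numpy as np
    from mindspore import Tensor

    BN_MOMENTUM = 0.9  # assumed value; must exist in the module where IDAUp is defined
    ida = IDAUp(cin=128, cout=64, up_f=2)  # enable_dcn=False, so DeformConv is not required
    x = Tensor(np.ones((1, 64, 16, 16), np.float32))
    # kernel = up_f * 2, stride = up_f, padding = up_f // 2 exactly doubles H and W
    print(ida.up(x).shape)  # (1, 64, 32, 32)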
Example #2
File: model.py Project: yrpang/mindspore
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'

        layers = OrderedDict()
        for i in range(num_layers):
            kernel, padding, _ = \
                self._get_deconv_cfg(num_kernels[i])

            planes = num_filters[i]
            layers['deconv_{}'.format(i)] = nn.SequentialCell(
                OrderedDict([
                    ('deconv',
                     nn.Conv2dTranspose(
                         in_channels=self.inplanes,
                         out_channels=planes,
                         kernel_size=kernel,
                         stride=2,
                         pad_mode='pad',
                         padding=padding,
                         has_bias=self.deconv_with_bias,
                         weight_init=Normal(0.001),
                     )),
                    ('bn', nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)),
                    ('relu', nn.ReLU()),
                ]))
            self.inplanes = planes

        return nn.SequentialCell(layers)
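The helper _get_deconv_cfg is not shown; a plausible definition (following the Simple Baseline pose-estimation convention, an assumption here) maps each kernel size to its padding and output padding. The loop above discards the third value, since MindSpore's nn.Conv2dTranspose takes no output_padding argument:

    def _get_deconv_cfg(self, deconv_kernel):
        """Return (kernel, padding, output_padding) for a given deconv kernel size."""
        if deconv_kernel == 4:
            return 4, 1, 0
        if deconv_kernel == 3:
            return 3, 1, 1
        if deconv_kernel == 2:
            return 2, 0, 0
        raise ValueError('unsupported deconv kernel: {}'.format(deconv_kernel))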
Example #3
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=4,
              stride=2,
              alpha=0.2,
              norm_mode='batch',
              pad_mode='CONSTANT',
              use_relu=True,
              padding=None):
     super(ConvTransposeNormReLU, self).__init__()
     # 'conv' is constructed in the pad-mode branches below
     norm = nn.BatchNorm2d(out_planes)
     if norm_mode == 'instance':
         # Use BatchNorm2d with batchsize=1, affine=False, training=True instead of InstanceNorm2d
         norm = nn.BatchNorm2d(out_planes, affine=False)
     has_bias = (norm_mode == 'instance')
     if padding is None:
         padding = (kernel_size - 1) // 2
     if pad_mode == 'CONSTANT':
         conv = nn.Conv2dTranspose(in_planes,
                                   out_planes,
                                   kernel_size,
                                   stride,
                                   pad_mode='same',
                                   has_bias=has_bias)
         layers = [conv, norm]
     else:
         paddings = ((0, 0), (0, 0), (padding, padding), (padding, padding))
         pad = nn.Pad(paddings=paddings, mode=pad_mode)
         conv = nn.Conv2dTranspose(in_planes,
                                   out_planes,
                                   kernel_size,
                                   stride,
                                   pad_mode='pad',
                                   has_bias=has_bias)
         layers = [pad, conv, norm]
     if use_relu:
         relu = nn.ReLU()
         if alpha > 0:
             relu = nn.LeakyReLU(alpha)
         layers.append(relu)
     self.features = nn.SequentialCell(layers)
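Note that with the default alpha=0.2 the activation is actually LeakyReLU, not ReLU, despite the class name. A minimal sketch of the default ('CONSTANT') branch, calling self.features directly since construct is not part of the excerpt:

    import numpy as np
    from mindspore import Tensor

    block = ConvTransposeNormReLU(64, 32)  # kernel 4, stride 2, BatchNorm, LeakyReLU(0.2)
    x = Tensor(np.ones((1, 64, 8, 8), np.float32))
    print(block.features(x).shape)  # (1, 32, 16, 16): pad_mode='same' gives H_out = H_in * stride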
Example #4
 def __init__(self, in_channels, out_channels, bilinear=True):
     super().__init__()
     self.concat = F.Concat(axis=1)
     self.factor = 104.0 / 136.0
     self.center_crop = CentralCrop(central_fraction=self.factor)
     self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
     self.up = nn.Conv2dTranspose(in_channels, in_channels // 2, kernel_size=2, stride=2)
     self.relu = nn.ReLU()
Example #5
 def __init__(self, in_channel, out_channel, use_deconv, n_concat=2):
     super(UnetUp, self).__init__()
     self.conv = UnetConv2d(in_channel + (n_concat - 2) * out_channel, out_channel, False)
     self.concat = P.Concat(axis=1)
     self.use_deconv = use_deconv
     if use_deconv:
         self.up_conv = nn.Conv2dTranspose(in_channel, out_channel, kernel_size=2, stride=2, pad_mode="same")
     else:
         self.up_conv = nn.Conv2d(in_channel, out_channel, 1)
Example #6
def _convTranspose(in_channels, out_channels, kernel_size=1, stride=1, padding=0, pad_mode='pad'):
    """ConvTranspose wrapper."""
    shape = (out_channels, in_channels, kernel_size, kernel_size)
    weights = initializer("XavierUniform", shape=shape, dtype=mstype.float32)
    shape_bias = (out_channels,)
    bias = Tensor(np.zeros(shape_bias, np.float32))
    return nn.Conv2dTranspose(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride, padding=padding,
                              pad_mode=pad_mode, weight_init=weights, has_bias=True, bias_init=bias)
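A quick geometry check for this wrapper (a sketch; it assumes the imports the wrapper itself relies on: numpy as np, mindspore.nn as nn, mindspore.common.dtype as mstype, Tensor, and initializer from mindspore.common.initializer). With pad_mode='pad', H_out = (H_in - 1) * stride - 2 * padding + kernel_size:

    up = _convTranspose(64, 32, kernel_size=4, stride=2, padding=1)
    x = Tensor(np.ones((1, 64, 8, 8), np.float32))
    print(up(x).shape)  # (1, 32, 16, 16): (8 - 1) * 2 - 2 * 1 + 4 = 16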
Example #7
File: face_qa.py Project: yrpang/mindspore
    def __init__(self):
        super(FaceQABackbone, self).__init__()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.cast = P.Cast()

        self.conv0 = conv3x3(3, 64, stride=2, padding=1)
        self.bn0 = bn_with_initialize(64)
        self.relu0 = P.ReLU()
        self.conv1 = conv3x3(64, 64, stride=2, padding=1)
        self.bn1 = bn_with_initialize(64)
        self.relu1 = P.ReLU()
        self.backbone = nn.SequentialCell([Block1(), Block2()])

        # branch euler
        self.euler_conv = conv3x3(128, 128, stride=2, padding=1)
        self.euler_bn = bn_with_initialize(128)
        self.euler_relu = P.ReLU()
        self.euler_fc1 = fc_with_initialize(128 * 6 * 6, 256)
        self.euler_relu1 = P.ReLU()
        self.euler_fc2 = fc_with_initialize(256, 128)
        self.euler_relu2 = P.ReLU()
        self.euler_fc3 = fc_with_initialize(128, 3)

        # branch heatmap
        self.kps_deconv = nn.Conv2dTranspose(128,
                                             5,
                                             4,
                                             stride=2,
                                             pad_mode='pad',
                                             group=1,
                                             dilation=1,
                                             padding=1,
                                             has_bias=False)
        self.kps_up = nn.Conv2dTranspose(5,
                                         5,
                                         4,
                                         stride=2,
                                         pad_mode='pad',
                                         group=1,
                                         dilation=1,
                                         padding=1,
                                         has_bias=False)
Example #8
def convTranspose2x2(
        in_channels,
        out_channels,
        has_bias=False):  # Davinci devices only support group=1
    return nn.Conv2dTranspose(in_channels,
                              out_channels,
                              kernel_size=2,
                              stride=2,
                              has_bias=has_bias,
                              weight_init='normal',
                              bias_init='zeros')
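Since MindSpore's default pad_mode is 'same', this kernel-2, stride-2 transposed conv is an exact 2x upsampler; a minimal sketch:

    import numpy as np
    from mindspore import Tensor

    up = convTranspose2x2(16, 8)
    x = Tensor(np.ones((2, 16, 10, 10), np.float32))
    print(up(x).shape)  # (2, 8, 20, 20)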
Example #9
def _convTranspose(in_channels, out_channels, kernel_size=1, stride=1, padding=0, pad_mode='pad',
                   gain=1):
    """ConvTranspose wrapper."""
    shape = (out_channels, in_channels, kernel_size, kernel_size)
    # xavier_normal
    fan_in = in_channels * kernel_size * kernel_size
    fan_out = out_channels * kernel_size * kernel_size
    std = gain * (2 / (fan_in + fan_out)) ** 0.5
    weights = Tensor(np.random.normal(loc=0.0, scale=std, size=shape).astype(np.float32))
    shape_bias = (out_channels,)
    bias = Tensor(np.zeros(shape_bias, np.float32))
    return nn.Conv2dTranspose(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride, padding=padding,
                              pad_mode=pad_mode, weight_init=weights, has_bias=True, bias_init=bias)
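As a worked example of the hand-rolled Xavier normal initialization: with in_channels=64, out_channels=32, kernel_size=4 and gain=1, fan_in = 64 * 4 * 4 = 1024, fan_out = 32 * 4 * 4 = 512, and std = (2 / 1536) ** 0.5 ≈ 0.0361, so the weights are drawn from N(0, 0.0361**2) directly instead of being produced by a string initializer as in Example #6.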
Example #10
 def __init__(self,
              cin,
              cout,
              kernel_size,
              stride=1,
              pad_mode="same",
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(NetConv2dTranspose, self).__init__()
     self.conv = nn.Conv2dTranspose(cin, cout, kernel_size, stride,
                                    pad_mode, padding, dilation, group,
                                    has_bias, weight_init, bias_init)
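The constructor forwards its arguments positionally, matching the parameter order of nn.Conv2dTranspose in MindSpore 1.x. A usage sketch (calling self.conv directly, since the excerpt omits construct):

    import numpy as np
    from mindspore import Tensor

    net = NetConv2dTranspose(3, 8, 4, stride=2, pad_mode='pad', padding=1)
    x = Tensor(np.ones((1, 3, 5, 5), np.float32))
    print(net.conv(x).shape)  # (1, 8, 10, 10): (5 - 1) * 2 - 2 * 1 + 4 = 10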
Example #11
File: FCN8s.py Project: yrpang/mindspore
    def __init__(self, n_class):
        super().__init__()
        self.n_class = n_class
        self.conv1 = nn.SequentialCell(
            nn.Conv2d(in_channels=3,
                      out_channels=64,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(64),
            nn.ReLU())

        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = nn.SequentialCell(
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(in_channels=128,
                      out_channels=128,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(128),
            nn.ReLU())

        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = nn.SequentialCell(
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(256),
            nn.ReLU())

        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv4 = nn.SequentialCell(
            nn.Conv2d(in_channels=256,
                      out_channels=512,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(512),
            nn.ReLU())

        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv5 = nn.SequentialCell(
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=3,
                      weight_init='xavier_uniform'), nn.BatchNorm2d(512),
            nn.ReLU())

        self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv6 = nn.SequentialCell(
            nn.Conv2d(in_channels=512,
                      out_channels=4096,
                      kernel_size=7,
                      weight_init='xavier_uniform'),
            nn.BatchNorm2d(4096),
            nn.ReLU(),
        )

        self.conv7 = nn.SequentialCell(
            nn.Conv2d(in_channels=4096,
                      out_channels=4096,
                      kernel_size=1,
                      weight_init='xavier_uniform'),
            nn.BatchNorm2d(4096),
            nn.ReLU(),
        )

        self.score_fr = nn.Conv2d(in_channels=4096,
                                  out_channels=self.n_class,
                                  kernel_size=1,
                                  weight_init='xavier_uniform')

        self.upscore2 = nn.Conv2dTranspose(in_channels=self.n_class,
                                           out_channels=self.n_class,
                                           kernel_size=4,
                                           stride=2,
                                           weight_init='xavier_uniform')

        self.score_pool4 = nn.Conv2d(in_channels=512,
                                     out_channels=self.n_class,
                                     kernel_size=1,
                                     weight_init='xavier_uniform')

        self.upscore_pool4 = nn.Conv2dTranspose(in_channels=self.n_class,
                                                out_channels=self.n_class,
                                                kernel_size=4,
                                                stride=2,
                                                weight_init='xavier_uniform')

        self.score_pool3 = nn.Conv2d(in_channels=256,
                                     out_channels=self.n_class,
                                     kernel_size=1,
                                     weight_init='xavier_uniform')

        self.upscore8 = nn.Conv2dTranspose(in_channels=self.n_class,
                                           out_channels=self.n_class,
                                           kernel_size=16,
                                           stride=8,
                                           weight_init='xavier_uniform')
        self.shape = P.Shape()
        self.cast = P.Cast()
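The three transposed convs implement the 2-2-8 upsampling path of FCN-8s: upscore2 and upscore_pool4 (kernel 4, stride 2, default pad_mode='same') each exactly double H and W, and upscore8 (kernel 16, stride 8) restores the remaining factor of 8. A standalone sketch of one stage, using n_class=21 (the PASCAL VOC value) as an example:

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor

    up2 = nn.Conv2dTranspose(21, 21, kernel_size=4, stride=2, weight_init='xavier_uniform')
    x = Tensor(np.ones((1, 21, 12, 12), np.float32))
    print(up2(x).shape)  # (1, 21, 24, 24): pad_mode='same' gives H_out = H_in * stride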
Example #12
File: yolov3.py Project: yrpang/mindspore
    def __init__(self, num_classes, num_anchors_list, args):
        layer_index = {
            # backbone
            '0_conv_batch_relu': [3, 16],
            '1_conv_batch_relu': [16, 32],
            '2_conv_batch_relu': [32, 64],
            '3_conv_batch_relu': [64, 64],
            '4_conv_batch_relu': [64, 64],
            '5_conv_batch_relu': [64, 128],
            '6_conv_batch': [128, 64],
            '7_conv_batch': [64, 64],
            '8_conv_batch_relu': [64, 128],
            '9_conv_batch': [128, 64],
            '10_conv_batch': [64, 64],
            '11_conv_batch_relu': [64, 128],
            '12_conv_batch': [128, 128],
            '13_conv_batch_relu': [128, 256],
            '14_conv_batch': [256, 144],
            '15_conv_batch': [144, 128],
            '16_conv_batch_relu': [128, 256],
            '17_conv_batch': [256, 128],
            '18_conv_batch': [128, 128],
            '19_conv_batch_relu': [128, 256],
            '20_conv_batch': [256, 144],
            '21_conv_batch': [144, 256],
            '22_conv_batch_relu': [256, 512],
            '23_conv_batch': [512, 256],
            '24_conv_batch': [256, 256],
            '25_conv_batch_relu': [256, 512],
            '26_conv_batch': [512, 256],
            '27_conv_batch': [256, 256],
            '28_conv_batch_relu': [256, 512],
            '30_deconv_up': [512, 64],
            '31_conv_batch': [320, 160],
            '32_conv_batch_relu': [160, 96],
            '33_conv_batch_relu': [96, 96],
            '34_conv_batch_relu': [96, 96],
            '35_conv_batch': [96, 80],
            '36_conv_batch_relu': [80, 128],
            '37_conv_batch': [128, 96],
            '38_conv_batch': [96, 128],
            '39_conv_batch_relu': [128, 256],
            '41_deconv_up': [256, 64],
            '42_conv_batch_relu': [192, 64],
            '43_conv_batch_relu': [64, 64],
            '44_conv_batch_relu': [64, 64],
            '45_conv_batch_relu': [64, 64],
            '46_conv_batch_relu': [64, 96],
            '47_conv_batch': [96, 64],
            '48_conv_batch_relu': [64, 128],
            # head
            '29_conv': [512],
            '40_conv': [256],
            '49_conv': [128]
        }
        super(HwYolov3, self).__init__()

        layer0 = [
            (layer_index['0_conv_batch_relu'][0],
             layer_index['0_conv_batch_relu'][1], 3, 2),
            (layer_index['1_conv_batch_relu'][0],
             layer_index['1_conv_batch_relu'][1], 3, 2),
            (layer_index['2_conv_batch_relu'][0],
             layer_index['2_conv_batch_relu'][1], 3, 2),
            (layer_index['3_conv_batch_relu'][0],
             layer_index['3_conv_batch_relu'][1], 3, 1),
            (layer_index['4_conv_batch_relu'][0],
             layer_index['4_conv_batch_relu'][1], 3, 1),
        ]
        layer1 = [
            (layer_index['5_conv_batch_relu'][0],
             layer_index['5_conv_batch_relu'][1], 3, 2),
            (layer_index['6_conv_batch'][0], layer_index['6_conv_batch'][1], 1,
             1, 0),
            (layer_index['7_conv_batch'][0], layer_index['7_conv_batch'][1], 3,
             1, 1),
            (layer_index['8_conv_batch_relu'][0],
             layer_index['8_conv_batch_relu'][1], 1, 1),
            (layer_index['9_conv_batch'][0], layer_index['9_conv_batch'][1], 1,
             1, 0),
            (layer_index['10_conv_batch'][0], layer_index['10_conv_batch'][1],
             3, 1, 1),
            (layer_index['11_conv_batch_relu'][0],
             layer_index['11_conv_batch_relu'][1], 1, 1),
        ]
        layer2 = [
            (layer_index['12_conv_batch'][0], layer_index['12_conv_batch'][1],
             3, 2, 1),
            (layer_index['13_conv_batch_relu'][0],
             layer_index['13_conv_batch_relu'][1], 1, 1),
            (layer_index['14_conv_batch'][0], layer_index['14_conv_batch'][1],
             1, 1, 0),
            (layer_index['15_conv_batch'][0], layer_index['15_conv_batch'][1],
             3, 1, 1),
            (layer_index['16_conv_batch_relu'][0],
             layer_index['16_conv_batch_relu'][1], 1, 1),
            (layer_index['17_conv_batch'][0], layer_index['17_conv_batch'][1],
             1, 1, 0),
            (layer_index['18_conv_batch'][0], layer_index['18_conv_batch'][1],
             3, 1, 1),
            (layer_index['19_conv_batch_relu'][0],
             layer_index['19_conv_batch_relu'][1], 1, 1),
        ]
        layer3 = [
            (layer_index['20_conv_batch'][0], layer_index['20_conv_batch'][1],
             1, 1, 0),
            (layer_index['21_conv_batch'][0], layer_index['21_conv_batch'][1],
             3, 2, 1),
            (layer_index['22_conv_batch_relu'][0],
             layer_index['22_conv_batch_relu'][1], 1, 1),
            (layer_index['23_conv_batch'][0], layer_index['23_conv_batch'][1],
             1, 1, 0),
            (layer_index['24_conv_batch'][0], layer_index['24_conv_batch'][1],
             3, 1, 1),
            (layer_index['25_conv_batch_relu'][0],
             layer_index['25_conv_batch_relu'][1], 1, 1),
            (layer_index['26_conv_batch'][0], layer_index['26_conv_batch'][1],
             1, 1, 0),
            (layer_index['27_conv_batch'][0], layer_index['27_conv_batch'][1],
             3, 1, 1),
            (layer_index['28_conv_batch_relu'][0],
             layer_index['28_conv_batch_relu'][1], 1, 1),
        ]

        layer4 = [
            (layer_index['30_deconv_up'][0], layer_index['30_deconv_up'][1], 4,
             2, 1),
        ]

        layer5 = [
            (layer_index['31_conv_batch'][0], layer_index['31_conv_batch'][1],
             1, 1, 0),
            (layer_index['32_conv_batch_relu'][0],
             layer_index['32_conv_batch_relu'][1], 3, 1),
            (layer_index['33_conv_batch_relu'][0],
             layer_index['33_conv_batch_relu'][1], 3, 1),
            (layer_index['34_conv_batch_relu'][0],
             layer_index['34_conv_batch_relu'][1], 3, 1),
            (layer_index['35_conv_batch'][0], layer_index['35_conv_batch'][1],
             1, 1, 0),
            (layer_index['36_conv_batch_relu'][0],
             layer_index['36_conv_batch_relu'][1], 3, 1),
            (layer_index['37_conv_batch'][0], layer_index['37_conv_batch'][1],
             1, 1, 0),
            (layer_index['38_conv_batch'][0], layer_index['38_conv_batch'][1],
             3, 1, 1),
            (layer_index['39_conv_batch_relu'][0],
             layer_index['39_conv_batch_relu'][1], 1, 1),
        ]

        layer6 = [
            (layer_index['41_deconv_up'][0], layer_index['41_deconv_up'][1], 4,
             2, 1),
        ]

        layer7 = [
            (layer_index['42_conv_batch_relu'][0],
             layer_index['42_conv_batch_relu'][1], 1, 1),
            (layer_index['43_conv_batch_relu'][0],
             layer_index['43_conv_batch_relu'][1], 3, 1),
            (layer_index['44_conv_batch_relu'][0],
             layer_index['44_conv_batch_relu'][1], 3, 1),
            (layer_index['45_conv_batch_relu'][0],
             layer_index['45_conv_batch_relu'][1], 3, 1),
            (layer_index['46_conv_batch_relu'][0],
             layer_index['46_conv_batch_relu'][1], 3, 1),
            (layer_index['47_conv_batch'][0], layer_index['47_conv_batch'][1],
             3, 1, 1),
            (layer_index['48_conv_batch_relu'][0],
             layer_index['48_conv_batch_relu'][1], 1, 1),
        ]
        self.layer0 = MakeYoloLayer(layer0)
        self.layer1 = MakeYoloLayer(layer1)
        self.layer2 = MakeYoloLayer(layer2)
        self.layer3 = MakeYoloLayer(layer3)
        self.layer4 = nn.Conv2dTranspose(layer4[0][0],
                                         layer4[0][1],
                                         layer4[0][2],
                                         layer4[0][3],
                                         pad_mode='pad',
                                         padding=layer4[0][4],
                                         has_bias=True)
        self.args = args
        self.concat = P.Concat(1)

        self.layer5 = MakeYoloLayer(layer5)
        self.layer6 = nn.Conv2dTranspose(layer6[0][0],
                                         layer6[0][1],
                                         layer6[0][2],
                                         layer6[0][3],
                                         pad_mode='pad',
                                         padding=layer6[0][4],
                                         has_bias=True)

        self.layer7 = MakeYoloLayer(layer7)
        self.head1_conv = nn.Conv2d(layer_index['29_conv'][0],
                                    num_anchors_list[0] *
                                    (4 + 1 + num_classes),
                                    1,
                                    1,
                                    has_bias=True)
        self.head2_conv = nn.Conv2d(layer_index['40_conv'][0],
                                    num_anchors_list[1] *
                                    (4 + 1 + num_classes),
                                    1,
                                    1,
                                    has_bias=True)
        self.head3_conv = nn.Conv2d(layer_index['49_conv'][0],
                                    num_anchors_list[2] *
                                    (4 + 1 + num_classes),
                                    1,
                                    1,
                                    has_bias=True)

        self.relu = nn.ReLU()
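The channel bookkeeping around the two deconvs explains the otherwise odd input widths in the table: layer4 upsamples the 512-channel map to 64 channels, which (judging from the table, an inference here) is concatenated with the 256-channel output of layer2 to give the 320 inputs of '31_conv_batch'; likewise layer6's 64 channels plus layer1's 128-channel output give the 192 inputs of '42_conv_batch_relu'. A quick check of the concat arithmetic:

    import numpy as np
    import mindspore.ops as ops
    from mindspore import Tensor

    concat = ops.Concat(1)
    up_out = Tensor(np.ones((1, 64, 26, 26), np.float32))  # layer4 deconv output
    route = Tensor(np.ones((1, 256, 26, 26), np.float32))  # assumed skip feature from layer2
    print(concat((up_out, route)).shape)  # (1, 320, 26, 26), matching '31_conv_batch'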
Example #13
    def __init__(self,
                 outer_nc,
                 inner_nc,
                 in_planes=None,
                 dropout=False,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 alpha=0.2,
                 norm_mode='batch'):
        super(UnetSkipConnectionBlock, self).__init__()
        downnorm = nn.BatchNorm2d(inner_nc)
        upnorm = nn.BatchNorm2d(outer_nc)
        use_bias = False
        if norm_mode == 'instance':
            downnorm = nn.BatchNorm2d(inner_nc, affine=False)
            upnorm = nn.BatchNorm2d(outer_nc, affine=False)
            use_bias = True
        if in_planes is None:
            in_planes = outer_nc
        downconv = nn.Conv2d(in_planes,
                             inner_nc,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             has_bias=use_bias,
                             pad_mode='pad')
        downrelu = nn.LeakyReLU(alpha)
        uprelu = nn.ReLU()

        if outermost:
            upconv = nn.Conv2dTranspose(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        pad_mode='pad')
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.Conv2dTranspose(inner_nc,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        has_bias=use_bias,
                                        pad_mode='pad')
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.Conv2dTranspose(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        has_bias=use_bias,
                                        pad_mode='pad')
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            model = down + [submodule] + up
            if dropout:
                model.append(nn.Dropout(0.5))

        self.model = nn.SequentialCell(model)
        self.skip_connections = not outermost
        self.concat = ops.Concat(axis=1)
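A minimal sketch of the innermost block in isolation (calling self.model directly; the excerpt omits construct, which presumably concatenates input and output when skip_connections is True):

    import numpy as np
    from mindspore import Tensor

    inner = UnetSkipConnectionBlock(outer_nc=64, inner_nc=128, innermost=True)
    x = Tensor(np.ones((1, 64, 16, 16), np.float32))
    # down: LeakyReLU + 4/2/1 conv halves to 8x8; up: ReLU + 4/2/1 deconv restores 16x16
    print(inner.model(x).shape)  # (1, 64, 16, 16)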