Example #1
# Imports assumed by these snippets (standard MindSpore test/model-zoo code):
import numpy as np
import pytest
import mindspore
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, context
from mindspore.ops import operations as P


def test_mirror_pad():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    test1_arr_in = [[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]
    test_1_paddings = ((0, 0), (0, 0), (1, 1), (2, 2))
    test1_arr_exp = [[[[6, 5, 4, 5, 6, 5, 4], [3, 2, 1, 2, 3, 2, 1], [6, 5, 4, 5, 6, 5, 4],
                       [9, 8, 7, 8, 9, 8, 7], [6, 5, 4, 5, 6, 5, 4]]]]

    test2_arr_in = [[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]
    test_2_paddings = ((0, 0), (0, 0), (1, 1), (2, 2))
    test2_arr_exp = [[[[2, 1, 1, 2, 3, 3, 2], [2, 1, 1, 2, 3, 3, 2], [5, 4, 4, 5, 6, 6, 5],
                       [8, 7, 7, 8, 9, 9, 8], [8, 7, 7, 8, 9, 9, 8]]]]

    reflectOp = nn.Pad(mode='REFLECT', paddings=test_1_paddings)
    symmOp = nn.Pad(mode='SYMMETRIC', paddings=test_2_paddings)

    x_test_1 = Tensor(np.array(test1_arr_in), dtype=mindspore.float32)
    x_test_2 = Tensor(np.array(test2_arr_in), dtype=mindspore.float32)

    y_test_1 = reflectOp(x_test_1).asnumpy()
    y_test_2 = symmOp(x_test_2).asnumpy()

    print(np.array(test1_arr_in))
    print(y_test_1)

    np.testing.assert_equal(np.array(test1_arr_exp), y_test_1)
    np.testing.assert_equal(np.array(test2_arr_exp), y_test_2)
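The expected arrays above can be cross-checked with plain NumPy: np.pad's 'reflect' and 'symmetric' modes match nn.Pad's REFLECT and SYMMETRIC (Example #7 below relies on the same equivalence). A minimal sketch, reusing the arrays defined in this test:

    # NumPy cross-check of the hand-written expected arrays
    np.testing.assert_equal(np.pad(np.array(test1_arr_in), test_1_paddings, 'reflect'),
                            np.array(test1_arr_exp))
    np.testing.assert_equal(np.pad(np.array(test2_arr_in), test_2_paddings, 'symmetric'),
                            np.array(test2_arr_exp))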
Example #2
def test_pad_3d_pad():
    """
    Test full 3d padding with all three input dtypes (float32, float16, int32)
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    # float32
    test_arr = np.random.randn(5, 3, 30, 30).astype(np.float32)
    test_paddings = ((0, 0), (2, 1), (0, 1), (0, 2))  # padding 3 dims now
    pad_op_3d = nn.Pad(mode='CONSTANT', paddings=test_paddings)
    x_test = Tensor(np.array(test_arr), dtype=mindspore.float32)
    y_test = pad_op_3d(x_test).asnumpy()
    assert y_test.shape == (5, 6, 31, 32)
    np.testing.assert_equal(test_arr, y_test[:, 2:-1, :-1, :-2])

    # float16
    test_arr = np.random.randn(5, 3, 30, 30).astype(np.float16)
    test_paddings = ((0, 0), (2, 1), (0, 1), (0, 2))
    pad_op_3d = nn.Pad(mode='CONSTANT', paddings=test_paddings)
    x_test = Tensor(np.array(test_arr), dtype=mindspore.float16)
    y_test = pad_op_3d(x_test).asnumpy()
    assert y_test.shape == (5, 6, 31, 32)
    np.testing.assert_equal(test_arr, y_test[:, 2:-1, :-1, :-2])

    # int32
    test_arr = np.random.randint(1, 3000, (5, 3, 30, 30)).astype(np.int32)
    test_paddings = ((0, 0), (2, 1), (0, 1), (0, 2))
    pad_op_3d = nn.Pad(mode='CONSTANT', paddings=test_paddings)
    x_test = Tensor(np.array(test_arr), dtype=mindspore.int32)
    y_test = pad_op_3d(x_test).asnumpy()
    assert y_test.shape == (5, 6, 31, 32)
    np.testing.assert_equal(test_arr, y_test[:, 2:-1, :-1, :-2])
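Because CONSTANT mode fills with zeros, each sliced comparison above is equivalent to a full-array check against np.pad's 'constant' mode, which also zero-fills by default; a minimal sketch that works for any of the three dtype cases, reusing test_arr, test_paddings and y_test:

    expected = np.pad(test_arr, test_paddings, 'constant')  # zero fill, shape (5, 6, 31, 32)
    np.testing.assert_equal(expected, y_test)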
Example #3
def test_pad_row():
    # Confirm correct row padding
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

    test_arr_1 = np.random.rand(40, 40).astype(np.float32)
    test_paddings_1 = ((2, 3), (0, 0))

    test_arr_2 = np.random.randn(3, 10, 30, 30).astype(np.float32)
    test_paddings_2 = ((0, 0), (0, 0), (3, 0), (0, 0))

    pad_op_row_1 = nn.Pad(mode='CONSTANT', paddings=test_paddings_1)
    pad_op_row_2 = nn.Pad(mode='CONSTANT', paddings=test_paddings_2)

    x_test_1 = Tensor(np.array(test_arr_1), dtype=mindspore.float32)
    x_test_2 = Tensor(np.array(test_arr_2), dtype=mindspore.float32)

    y_test_1 = pad_op_row_1(x_test_1).asnumpy()
    y_test_2 = pad_op_row_2(x_test_2).asnumpy()

    # check size
    assert y_test_1.shape == (45, 40)
    assert y_test_2.shape == (3, 10, 33, 30)

    # check values - select correct sections
    np.testing.assert_equal(y_test_1[2:-3, :], test_arr_1)
    np.testing.assert_equal(y_test_2[:, :, 3:, :], test_arr_2)
Example #4
def test_pad_column():
    # Confirm correct column padding
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    test_arr_1 = np.random.randn(40, 40).astype(np.float32)
    test_paddings_1 = ((0, 0), (3, 3))

    test_arr_2 = np.random.randn(3, 10, 30, 30).astype(np.float32)
    test_paddings_2 = ((0, 0), (0, 0), (0, 0), (6, 1))

    pad_op_col_1 = nn.Pad(mode='CONSTANT', paddings=test_paddings_1)
    pad_op_col_2 = nn.Pad(mode='CONSTANT', paddings=test_paddings_2)

    x_test_1 = Tensor(np.array(test_arr_1), dtype=mindspore.float32)
    x_test_2 = Tensor(np.array(test_arr_2), dtype=mindspore.float32)

    y_test_1 = pad_op_col_1(x_test_1).asnumpy()
    y_test_2 = pad_op_col_2(x_test_2).asnumpy()

    # check size
    assert y_test_1.shape == (40, 46)
    assert y_test_2.shape == (3, 10, 30, 37)

    # check values - select correct sections - should match
    np.testing.assert_equal(y_test_1[:, 3:-3], test_arr_1)
    np.testing.assert_equal(y_test_2[:, :, :, 6:-1], test_arr_2)
Example #5
    def __init__(self, conv_out_dim):
        super(CNN, self).__init__()
        self.convRelu1 = ConvRelu(3, 64, (3, 3))
        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))

        self.convRelu2 = ConvRelu(64, 128, (3, 3))
        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))

        self.convBNRelu1 = ConvBNRelu(128, 256, (3, 3))
        self.convRelu3 = ConvRelu(256, 256, (3, 3))
        self.maxpool3 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))

        self.convBNRelu2 = ConvBNRelu(256, 384, (3, 3))
        self.convRelu4 = ConvRelu(384, 384, (3, 3))
        self.maxpool4 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))

        self.convBNRelu3 = ConvBNRelu(384, 384, (3, 3))
        self.convRelu5 = ConvRelu(384, 384, (3, 3))
        self.maxpool5 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))

        self.convBNRelu4 = ConvBNRelu(384, 384, (3, 3))
        self.convRelu6 = ConvRelu(384, 384, (3, 3))
        self.maxpool6 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))

        self.pad = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 1)))
        self.convBNRelu5 = ConvBNRelu(384, conv_out_dim, (2, 2), pad_mode='valid')
        self.dropout = nn.Dropout(keep_prob=0.5)

        self.squeeze = P.Squeeze(2)
        self.cast = P.Cast()
Example #6
    def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, has_bias=False, modulation=True):
        super(DeformConv2d, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        self.zero_padding = nn.Pad(((0, 0), (0, 0), (padding, padding), (padding, padding)))
        self.conv = nn.Conv2d(inc, outc, kernel_size=kernel_size, pad_mode='valid', padding=0,
                              stride=kernel_size, has_bias=has_bias)

        self.p_conv = nn.Conv2d(inc, 2*kernel_size*kernel_size, kernel_size=self.kernel_size,
                                pad_mode='pad', padding=self.padding, stride=self.stride)

        self.modulation = modulation
        if modulation:
            self.m_conv = nn.Conv2d(inc, kernel_size*kernel_size, kernel_size=self.kernel_size,
                                    pad_mode='valid', padding=0, stride=self.stride)
        if kernel_size % 2 == 0:
            raise ValueError("Only odd number is supported, but current kernel sizeis {}".format(kernel_size))
        self.N = kernel_size * kernel_size
        self.begin = kernel_size // 2
        self.sigmoid = ops.Sigmoid()
        self.dtype = ops.DType()
        self.perm_list = (0, 2, 3, 1)
        self.transpose = ops.Transpose()
        self.floor = ops.Floor()
        self.half = ops.Split(axis=-1, output_num=2)
        self.clip_value = ClipByValue()
        self.expand_dims = ops.ExpandDims()
        self.shape = ops.Shape()
        self.cast = ops.Cast()
        self._get_offset = GetOffsetPosition(self.begin, self.stride)
        self._get_surround = GetSurroundFeature()
        self._generate_fm = RegenerateFeatureMap(self.kernel_size)
Example #7
def test_mirror_pad_fwd_back_4d_int32_reflect():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    # set constants
    shape = (2, 3, 3, 5)
    pads = ((1, 0), (2, 0), (1, 2), (3, 4))
    total_val = np.prod(shape)
    test_arr_np = np.arange(total_val).reshape(shape) + 1
    test_arr_ms = Tensor(test_arr_np, dtype=mindspore.int32)
    # fwd_pass_check
    op = nn.Pad(mode="REFLECT", paddings=pads)
    expected_np_result = np.pad(test_arr_np, pads, 'reflect')
    obtained_ms_res = op(test_arr_ms).asnumpy()
    np.testing.assert_array_equal(expected_np_result, obtained_ms_res)
    # backwards pass check
    GradNet = Grad(Net(pads, "REFLECT"))
    dy_value = Tensor(np.ones(obtained_ms_res.shape), dtype=mindspore.int32)
    dx_value_obtained = GradNet(test_arr_ms, dy_value)[0].asnumpy()
    dx_value_expected = np.array(
        [[[[4, 6, 6, 6, 2], [6, 9, 9, 9, 3], [2, 3, 3, 3, 1]],
          [[8, 12, 12, 12, 4], [12, 18, 18, 18, 6], [4, 6, 6, 6, 2]],
          [[8, 12, 12, 12, 4], [12, 18, 18, 18, 6], [4, 6, 6, 6, 2]]],
         [[[8, 12, 12, 12, 4], [12, 18, 18, 18, 6], [4, 6, 6, 6, 2]],
          [[16, 24, 24, 24, 8], [24, 36, 36, 36, 12], [8, 12, 12, 12, 4]],
          [[16, 24, 24, 24, 8], [24, 36, 36, 36, 12], [8, 12, 12, 12, 4]]]],
        dtype=np.int32)
    np.testing.assert_array_equal(dx_value_expected, dx_value_obtained)
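With an all-ones dy, the REFLECT gradient at each input position is simply the number of output positions that element is reflected into, so the hand-written dx table can be cross-checked with NumPy alone. A minimal sketch, reusing shape, pads and total_val from above:

    ids = np.arange(total_val).reshape(shape)          # unique id per input element
    padded = np.pad(ids, pads, 'reflect').ravel()      # where each id lands in the output
    counts = np.bincount(padded, minlength=total_val)  # occurrences == gradient contribution
    np.testing.assert_array_equal(counts.reshape(shape), dx_value_expected)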
Example #8
def test_pad_basic():
    # confirm array is being padded with 0's
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    test_arr = np.array([[1, 2], [3, 4]]).astype(np.float32)
    test_arr_expected = np.array(
        [[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]]).astype(np.float32)
    x_test = Tensor(test_arr, dtype=mindspore.float32)

    pad_op = nn.Pad(mode='CONSTANT', paddings=((1, 1), (1, 1)))
    y_test = pad_op(x_test).asnumpy()

    np.testing.assert_array_equal(y_test, test_arr_expected)
Example #9
def test_pad_3d_pad():
    # Confirm correct 3d padding - row, column, channel
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

    test_arr = np.random.randn(5, 3, 30, 30).astype(np.float32)
    test_paddings = ((0, 0), (2, 1), (0, 1), (0, 2))  # padding 3 dims now

    pad_op_3d = nn.Pad(mode='CONSTANT', paddings=test_paddings)
    x_test = Tensor(np.array(test_arr), dtype=mindspore.float32)

    y_test = pad_op_3d(x_test).asnumpy()
    assert y_test.shape == (5, 6, 31, 32)
    np.testing.assert_equal(test_arr, y_test[:, 2:-1, :-1, :-2])
Example #10
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride,
              padding,
              z_padding=1,
              bias=False):
     BranchSeparables.__init__(self, in_channels, out_channels, kernel_size,
                               stride, padding, bias)
     self.padding = nn.Pad(paddings=((0, 0), (0, 0), (z_padding, 0),
                                     (z_padding, 0)),
                           mode="CONSTANT")
Example #11
def test_pad_error_cases():
    """
    Test against common erroneous inputs to ensure the correct errors are raised
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    # TEST 1 - Neg padding values
    test_op = nn.Pad(paddings=((0, 0), (-1, -1)), mode="CONSTANT")
    test_arr = np.random.randn(3, 3)
    test_arr_ms = Tensor(test_arr, dtype=mindspore.float32)

    with pytest.raises(ValueError):
        test_op(test_arr_ms)

    # TEST 2 - Mismatched input size and paddings - 1D tensor
    test_op = nn.Pad(paddings=((0, 0), (1, 0)), mode="CONSTANT")
    test_arr = np.random.randn(3)  # 1D Tensor
    test_arr_ms = Tensor(test_arr, dtype=mindspore.float32)

    with pytest.raises(ValueError):
        test_op(test_arr_ms)

    # TEST 3 - Mismatched input size and paddings - 2D tensor, 3D padding
    test_op = nn.Pad(paddings=((0, 0), (1, 0)), mode="CONSTANT")  # 2D Padding
    test_arr = np.random.randn(1, 3, 3)  # 3D Tensor
    test_arr_ms = Tensor(test_arr, dtype=mindspore.float32)

    with pytest.raises(ValueError):
        test_op(test_arr_ms)

    # TEST 4 - 1D paddings should not work: note that ((0, 2)) is just (0, 2),
    # since parentheses alone do not create a nested tuple
    with pytest.raises(TypeError):
        test_op = nn.Pad(paddings=((0, 2)), mode="CONSTANT")

    # TEST 5 - Padding beyond 4d - (added check in nn file in PR)
    with pytest.raises(ValueError):
        _ = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0),
                             (1, 0)), mode="CONSTANT")  # 5D padding
Example #12
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(FirstCell, self).__init__()
        self.conv_1x1 = nn.SequentialCell([
            nn.ReLU(),
            nn.Conv2d(in_channels=in_channels_right, out_channels=out_channels_right, kernel_size=1, stride=1,
                      pad_mode='pad', has_bias=False),
            nn.BatchNorm2d(num_features=out_channels_right, eps=0.001, momentum=0.9, affine=True)])

        self.relu = nn.ReLU()
        self.path_1 = nn.SequentialCell([
            nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid'),
            nn.Conv2d(in_channels=in_channels_left, out_channels=out_channels_left, kernel_size=1, stride=1,
                      pad_mode='pad', has_bias=False)])

        self.path_2 = nn.CellList([])
        self.path_2.append(nn.Pad(paddings=((0, 0), (0, 0), (0, 1), (0, 1)), mode="CONSTANT"))
        self.path_2.append(
            nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid')
        )
        self.path_2.append(
            nn.Conv2d(in_channels=in_channels_left, out_channels=out_channels_left, kernel_size=1, stride=1,
                      pad_mode='pad', has_bias=False)
        )

        self.final_path_bn = nn.BatchNorm2d(num_features=out_channels_left*2, eps=0.001, momentum=0.9, affine=True)

        self.comb_iter_0_left = BranchSeparables(
            out_channels_right, out_channels_right, 5, 1, 2, bias=False
        )
        self.comb_iter_0_right = BranchSeparables(
            out_channels_right, out_channels_right, 3, 1, 1, bias=False
        )

        self.comb_iter_1_left = BranchSeparables(
            out_channels_right, out_channels_right, 5, 1, 2, bias=False
        )
        self.comb_iter_1_right = BranchSeparables(
            out_channels_right, out_channels_right, 3, 1, 1, bias=False
        )

        self.comb_iter_2_left = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')

        self.comb_iter_3_left = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')
        self.comb_iter_3_right = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')

        self.comb_iter_4_left = BranchSeparables(
            out_channels_right, out_channels_right, 3, 1, 1, bias=False
        )
Example #13
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=4,
              stride=2,
              alpha=0.2,
              norm_mode='batch',
              pad_mode='CONSTANT',
              use_relu=True,
              padding=None):
     super(ConvTransposeNormReLU, self).__init__()
     conv = nn.Conv2dTranspose(in_planes,
                               out_planes,
                               kernel_size,
                               stride=stride,
                               pad_mode='same')
     norm = nn.BatchNorm2d(out_planes)
     if norm_mode == 'instance':
         # Use BatchNorm2d with batchsize=1, affine=False, training=True instead of InstanceNorm2d
         norm = nn.BatchNorm2d(out_planes, affine=False)
     has_bias = (norm_mode == 'instance')
     if padding is None:
         padding = (kernel_size - 1) // 2
     if pad_mode == 'CONSTANT':
         conv = nn.Conv2dTranspose(in_planes,
                                   out_planes,
                                   kernel_size,
                                   stride,
                                   pad_mode='same',
                                   has_bias=has_bias)
         layers = [conv, norm]
     else:
         paddings = ((0, 0), (0, 0), (padding, padding), (padding, padding))
         pad = nn.Pad(paddings=paddings, mode=pad_mode)
         conv = nn.Conv2dTranspose(in_planes,
                                   out_planes,
                                   kernel_size,
                                   stride,
                                   pad_mode='pad',
                                   has_bias=has_bias)
         layers = [pad, conv, norm]
     if use_relu:
         relu = nn.ReLU()
         if alpha > 0:
             relu = nn.LeakyReLU(alpha)
         layers.append(relu)
     self.features = nn.SequentialCell(layers)
Example #14
 def __init__(self,
              in_planes=3,
              ngf=64,
              n_layers=9,
              alpha=0.2,
              norm_mode='batch',
              dropout=False,
              pad_mode="CONSTANT"):
     super(ResNetGenerator, self).__init__()
     self.conv_in = ConvNormReLU(in_planes,
                                 ngf,
                                 7,
                                 1,
                                 alpha,
                                 norm_mode,
                                 pad_mode=pad_mode)
     self.down_1 = ConvNormReLU(ngf, ngf * 2, 3, 2, alpha, norm_mode)
     self.down_2 = ConvNormReLU(ngf * 2, ngf * 4, 3, 2, alpha, norm_mode)
      # build n_layers distinct residual blocks; multiplying a one-element list
      # by n_layers would reuse a single block (and its parameters) n_layers times
      layers = [
          ResidualBlock(
              ngf * 4, norm_mode, dropout=dropout, pad_mode=pad_mode)
          for _ in range(n_layers)
      ]
     self.residuals = nn.SequentialCell(layers)
     self.up_2 = ConvTransposeNormReLU(ngf * 4, ngf * 2, 3, 2, alpha,
                                       norm_mode)
     self.up_1 = ConvTransposeNormReLU(ngf * 2, ngf, 3, 2, alpha, norm_mode)
     if pad_mode == "CONSTANT":
         self.conv_out = nn.Conv2d(ngf,
                                   3,
                                   kernel_size=7,
                                   stride=1,
                                   pad_mode='pad',
                                   padding=3)
     else:
         pad = nn.Pad(paddings=((0, 0), (0, 0), (3, 3), (3, 3)),
                      mode=pad_mode)
         conv = nn.Conv2d(ngf, 3, kernel_size=7, stride=1, pad_mode='pad')
         self.conv_out = nn.SequentialCell([pad, conv])
     self.activate = ops.Tanh()
Example #15
 def __init__(self):
     super(Net, self).__init__()
     self.pad = nn.Pad(mode="REFLECT", paddings=((0, 0), (0, 0), (1, 0), (0, 2)))
Example #16
 def __init__(self):
     super(Net, self).__init__()
     self.pad = nn.Pad(mode="CONSTANT", paddings=(
         (0, 0), (4, 3), (1, 1), (0, 2)))
Example #17
 def __init__(self):
     super(Layer2, self).__init__()
     self.net = nn.Conv2d(3, 1, 7, pad_mode='same')
     self.pad = nn.Pad(
         paddings=((0, 0), (0, 2), (0, 0), (0, 0)), mode="CONSTANT")
Example #18
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes,
                 use_se=False,
                 res_base=False):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
        self.use_se = use_se
        self.res_base = res_base
        self.se_block = False
        if self.use_se:
            self.se_block = True

        if self.use_se:
            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
            self.bn1_0 = _bn(32)
            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
            self.bn1_1 = _bn(32)
            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
        else:
            self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base)
        self.bn1 = _bn(64, self.res_base)
        self.relu = P.ReLU()

        if self.res_base:
            self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)))
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
        else:
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       use_se=self.use_se)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       use_se=self.use_se)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       use_se=self.use_se,
                                       se_block=self.se_block)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       use_se=self.use_se,
                                       se_block=self.se_block)

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se)
Example #19
 def __init__(self, pads, mode_):
     super(Net, self).__init__()
     self.pad = nn.Pad(mode=mode_, paddings=pads)
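Example #7 above drives this Net through a Grad wrapper, but only __init__ is shown here. A minimal sketch of the presumed missing pieces, assuming the usual MindSpore ops.GradOperation pattern (the construct bodies are assumptions, not taken from the source):

class Net(nn.Cell):
    def __init__(self, pads, mode_):
        super(Net, self).__init__()
        self.pad = nn.Pad(mode=mode_, paddings=pads)

    def construct(self, x):
        return self.pad(x)


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        # get_all=True returns gradients for every input; sens_param=True accepts dy
        self.grad = ops.GradOperation(get_all=True, sens_param=True)
        self.network = network

    def construct(self, x, dy):
        return self.grad(self.network)(x, dy)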
Example #20
 def __init__(self, raw_paddings, mode):
     super(Net, self).__init__()
     self.pad = nn.Pad(raw_paddings, mode=mode)
Example #21
    def __init__(self, stem_filters, num_filters):
        super(CellStem1, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = nn.SequentialCell([
            nn.ReLU(),
            nn.Conv2d(in_channels=2 * self.num_filters,
                      out_channels=self.num_filters,
                      kernel_size=1,
                      stride=1,
                      pad_mode='pad',
                      has_bias=False),
            nn.BatchNorm2d(num_features=self.num_filters,
                           eps=0.001,
                           momentum=0.9,
                           affine=True)
        ])

        self.relu = nn.ReLU()
        self.path_1 = nn.SequentialCell([
            nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid'),
            nn.Conv2d(in_channels=self.stem_filters,
                      out_channels=self.num_filters // 2,
                      kernel_size=1,
                      stride=1,
                      pad_mode='pad',
                      has_bias=False)
        ])

        self.path_2 = nn.CellList([])
        self.path_2.append(
            nn.Pad(paddings=((0, 0), (0, 0), (0, 1), (0, 1)), mode="CONSTANT"))
        self.path_2.append(
            nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid'))
        self.path_2.append(
            nn.Conv2d(in_channels=self.stem_filters,
                      out_channels=self.num_filters // 2,
                      kernel_size=1,
                      stride=1,
                      pad_mode='pad',
                      has_bias=False))

        self.final_path_bn = nn.BatchNorm2d(num_features=self.num_filters,
                                            eps=0.001,
                                            momentum=0.9,
                                            affine=True)

        self.comb_iter_0_left = BranchSeparables(self.num_filters,
                                                 self.num_filters,
                                                 5,
                                                 2,
                                                 2,
                                                 bias=False)
        self.comb_iter_0_right = BranchSeparables(self.num_filters,
                                                  self.num_filters,
                                                  7,
                                                  2,
                                                  3,
                                                  bias=False)

        self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, pad_mode='same')
        self.comb_iter_1_right = BranchSeparables(self.num_filters,
                                                  self.num_filters,
                                                  7,
                                                  2,
                                                  3,
                                                  bias=False)

        self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, pad_mode='same')
        self.comb_iter_2_right = BranchSeparables(self.num_filters,
                                                  self.num_filters,
                                                  5,
                                                  2,
                                                  2,
                                                  bias=False)

        self.comb_iter_3_right = nn.AvgPool2d(kernel_size=3,
                                              stride=1,
                                              pad_mode='same')

        self.comb_iter_4_left = BranchSeparables(self.num_filters,
                                                 self.num_filters,
                                                 3,
                                                 1,
                                                 1,
                                                 bias=False)
        self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, pad_mode='same')
        self.shape = P.Shape()