Example #1
0
 def __init__(self, w1, w2):
     """Build the ops and weights for the in-place Conv2DBackpropInput net."""
     super(Conv2dBpropInputInplace, self).__init__()
     # Two backprop-input convolutions with identical geometry (256 out
     # channels, 1x1 kernel), each paired with its own weight Parameter.
     self.conv2d_1 = P.Conv2DBackpropInput(out_channel=256, kernel_size=1)
     self.conv2d_2 = P.Conv2DBackpropInput(out_channel=256, kernel_size=1)
     self.w1 = Parameter(initializer(w1, w1.shape), name='w1')
     self.w2 = Parameter(initializer(w2, w2.shape), name='w2')
     # Elementwise add plus a max-pool forward/gradient op pair that share
     # the same window configuration.
     self.add = P.TensorAdd()
     self.maxpool = P.MaxPool(kernel_size=3, strides=2, pad_mode='SAME')
     self.maxpool_grad = G.MaxPoolGrad(kernel_size=3, strides=2, pad_mode='SAME')
     # Target input shape handed to the backprop-input ops — presumably
     # NCHW; confirm against the construct() that uses it.
     self.shape = (32, 64, 56, 56)
 def __init__(self):
     """Create the backprop-input conv op and the constant w/x/out Parameters."""
     super(Net5, self).__init__()
     out_channel = 4
     kernel_size = 1
     self.conv_input = P.Conv2DBackpropInput(out_channel, kernel_size,
                                             pad_mode="valid", pad=0, mode=1,
                                             stride=1, dilation=1, group=1)
     # Fixed 1x1x3x3 kernel (vertical-edge style pattern).
     w_data = np.array([[[[1, 0, -1],
                          [1, 0, -1],
                          [1, 0, -1]]]]).astype(np.float32)
     self.w = Parameter(initializer(Tensor(w_data), [1, 1, 3, 3]), name='w')
     # Fixed 1x1x6x6 input sample.
     x_data = np.array([[[[3, 0, 1, 2, 7, 4],
                          [1, 5, 8, 9, 3, 1],
                          [2, 7, 2, 5, 1, 3],
                          [0, 1, 3, 1, 7, 8],
                          [4, 2, 1, 6, 2, 8],
                          [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)
     self.x = Parameter(initializer(Tensor(x_data), [1, 1, 6, 6]), name='x')
     # Fixed 1x1x4x4 tensor; note the attribute is `out` but the Parameter
     # is registered under the name 'y' (kept as-is from the original).
     out_data = np.array([[[[-5, -4, 0, 8],
                            [-10, -2, 2, 3],
                            [0, -2, -4, -7],
                            [-3, -2, -3, -16]]]]).astype(np.float32)
     self.out = Parameter(initializer(Tensor(out_data), [1, 1, 4, 4]),
                          name='y')
     self.get_shape = P.Shape()
Example #3
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        """Configure a 2-D transposed convolution built on Conv2DBackpropInput."""
        # Normalize scalar geometry arguments to (height, width) pairs.
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        # out_channels and in_channels swap.
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
        super(Conv2dTranspose, self).__init__(in_channels, out_channels,
                                              kernel_size, stride, pad_mode,
                                              padding, dilation, group,
                                              has_bias, weight_init, bias_init,
                                              transposed=True)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        allowed_modes = ('valid', 'same', 'pad')
        if pad_mode not in allowed_modes:
            raise ValueError("Attr 'pad_mode' of 'Conv2dTranspose' Op passed "
                             + str(pad_mode)
                             + ", should be one of values in 'valid', 'same', 'pad'.")
        # Pre-computed mode flags for use at construct() time.
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        # The bias Parameter is only created when explicitly requested.
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name='bias')
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
Example #4
0
    def __init__(self):
        """Create a single Conv2DBackpropInput op plus a Shape helper."""
        super(Conv2dInput, self).__init__()
        # Positional args: out_channel=1, kernel_size=3; no padding,
        # unit stride and dilation, single group.
        self.conv_input = P.Conv2DBackpropInput(1,
                                                3,
                                                pad_mode="valid",
                                                pad=0,
                                                mode=1,
                                                stride=1,
                                                dilation=1,
                                                group=1)
        self.get_shape = P.Shape()
Example #5
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        """Configure a 1-D transposed convolution implemented via Conv2DBackpropInput.

        Scalar geometry arguments are validated as positive ints and then
        lifted to 2-D `(1, value)` pairs so the underlying 2-D backprop-input
        op can be reused for the 1-D case.
        """
        # Validation order is significant: type checks first, then range
        # checks, so the first bad argument reports a type error.
        Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
        Validator.check_value_type("stride", stride, [int], self.cls_name)
        Validator.check_value_type("padding", padding, [int], self.cls_name)
        Validator.check_value_type("dilation", dilation, [int], self.cls_name)
        Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
        Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
        Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
        Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
        # Promote 1-D geometry to 2-D with a unit height dimension.
        kernel_size = (1, kernel_size)
        stride = (1, stride)
        dilation = (1, dilation)
        get_shape = P.Shape()
        get_dtype = P.DType()
        # A Tensor weight_init must be rank 3 (1-D conv weight); insert a
        # unit axis at position 2 so it matches the 2-D weight layout.
        if isinstance(weight_init, Tensor):
            weight_init_shape = get_shape(weight_init)
            Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
            weight_init_dtype = get_dtype(weight_init)
            weight_init_value = weight_init.asnumpy()
            weight_init_value = np.expand_dims(weight_init_value, 2)
            weight_init = Tensor(weight_init_value, weight_init_dtype)
        # out_channels and in_channels swap.
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # then Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
        super(Conv1dTranspose, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init,
            bias_init,
            transposed=True)
        # 4-tuple padding for the 2-D op: no padding on the unit height axis.
        self.padding = (0, 0, padding, padding)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
        # Pre-computed mode flags for use at construct() time.
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        # The bias Parameter is only created when explicitly requested.
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')

        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=self.padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
        # expand_dims/squeeze convert between the user-facing 1-D layout
        # (N, C, W) and the internal 2-D layout (N, C, 1, W).
        self.expand_dims = P.ExpandDims()
        self.squeeze = P.Squeeze(2)
        'skip': ['backward']
    }),
    # rank of x is not 4
    ('AvgPool2', {
        'block': (P.AvgPool(ksize=50, strides=1), {
            'exception': ValueError,
            'error_keywords': ['AvgPool']
        }),
        'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))],
        'skip': ['backward']
    }),

    # input is scalar
    ('Conv2DBackpropInput0', {
        'block':
        (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3)), {
            'exception': TypeError,
            'error_keywords': ['Conv2DBackpropInput']
        }),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']
    }),
    # input is Tensor(bool)
    ('Conv2DBackpropInput1', {
        'block':
        (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3)), {
            'exception': TypeError,
            'error_keywords': ['Conv2DBackpropInput']
        }),
        'desc_inputs': [
            Tensor(np.ones([5]).astype(np.bool_)),