def construct(self, input_x):
     """Callback method."""
     out = P.ReLU()(self.conv1(input_x))
     out = P.MaxPool(2, 2, 'valid')(out)
     out = P.ReLU()(self.conv2(out))
     out = P.MaxPool(2, 2, 'valid')(out)
     out = P.Reshape()(out, (
         P.Shape()(out)[0],
         -1,
     ))
     out = P.ReLU()(self.fc1(out))
     out = P.ReLU()(self.fc2(out))
     out = self.fc3(out)
     return out
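
A minimal standalone sketch of the pooling step used above (illustrative only; the channel count and spatial size are assumptions, and the positional arguments follow the older operator signature (ksize, strides, padding)):

import numpy as np
import mindspore as ms
from mindspore import context
from mindspore.ops import operations as P

# Run in PyNative mode so the primitive can be called eagerly.
context.set_context(mode=context.PYNATIVE_MODE)

# A 2x2 max pool with stride 2 and 'valid' padding halves the spatial dimensions.
x = ms.Tensor(np.random.randn(1, 6, 24, 24).astype(np.float32))
pool = P.MaxPool(2, 2, 'valid')
print(pool(x).shape)  # expected: (1, 6, 12, 12)
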
Example No. 2
    def __init__(self, in_channels, out_channels, num_outs):
        super(FeatPyramidNeck, self).__init__()
        self.num_outs = num_outs
        self.in_channels = in_channels
        self.fpn_layer = len(self.in_channels)

        assert self.num_outs >= len(in_channels)

        self.lateral_convs_list_ = []
        self.fpn_convs_ = []

        for channel in in_channels:
            l_conv = _conv(channel,
                           out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0,
                           pad_mode='valid')
            fpn_conv = _conv(out_channels,
                             out_channels,
                             kernel_size=3,
                             stride=1,
                             padding=0,
                             pad_mode='same')
            self.lateral_convs_list_.append(l_conv)
            self.fpn_convs_.append(fpn_conv)
        self.lateral_convs_list = nn.layer.CellList(self.lateral_convs_list_)
        self.fpn_convs_list = nn.layer.CellList(self.fpn_convs_)
        self.interpolate1 = P.ResizeNearestNeighbor((48, 80))
        self.interpolate2 = P.ResizeNearestNeighbor((96, 160))
        self.interpolate3 = P.ResizeNearestNeighbor((192, 320))
        self.maxpool = P.MaxPool(ksize=1, strides=2, padding="same")
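
A hedged instantiation sketch for the neck above; the channel list, out_channels and num_outs below are typical Mask R-CNN values, not values taken from this snippet:

# Hypothetical configuration for illustration only.
neck = FeatPyramidNeck(in_channels=[256, 512, 1024, 2048],
                       out_channels=256,
                       num_outs=5)
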
Example No. 3
 def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
     super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode)
     self.max_pool = P.MaxPool(ksize=self.kernel_size,
                               strides=self.stride,
                               padding=self.pad_mode)
     self.max_pool_with_arg_max = P.MaxPoolWithArgmax(
         ksize=self.kernel_size, strides=self.stride, padding=self.pad_mode)
     self.is_tbe = context.get_context("device_target") == "Ascend"
Example No. 4
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 weights_update=False):
        super(ResNetFea, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "the lengths of layer_nums, in_channels and out_channels must all be 4!")

        bn_training = False
        self.conv1 = _conv(3,
                           64,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           pad_mode='pad')
        self.bn1 = _BatchNorm2dInit(64,
                                    affine=bn_training,
                                    use_batch_statistics=bn_training)
        self.relu = P.ReLU()
        self.maxpool = P.MaxPool(kernel_size=3, strides=2, pad_mode="SAME")
        self.weights_update = weights_update

        if not self.weights_update:
            self.conv1.weight.requires_grad = False

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=1,
                                       training=bn_training,
                                       weights_update=self.weights_update)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=2,
                                       training=bn_training,
                                       weights_update=True)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=2,
                                       training=bn_training,
                                       weights_update=True)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=2,
                                       training=bn_training,
                                       weights_update=True)
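
A hedged instantiation sketch; the block class name and the channel lists below are the usual ResNet-50 settings and are not taken from this snippet:

# Hypothetical configuration for illustration only; ResidualBlock stands in for
# whatever bottleneck block class this backbone is normally built with.
backbone = ResNetFea(block=ResidualBlock,
                     layer_nums=[3, 4, 6, 3],
                     in_channels=[64, 256, 512, 1024],
                     out_channels=[256, 512, 1024, 2048],
                     weights_update=False)
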
Example No. 5
 def __init__(self, w1, w2):
     super(Conv2dBpropInputInplace, self).__init__()
     self.conv2d_1 = P.Conv2DBackpropInput(out_channel=256, kernel_size=1)
     self.w1 = Parameter(initializer(w1, w1.shape), name='w1')
     self.conv2d_2 = P.Conv2DBackpropInput(out_channel=256, kernel_size=1)
     self.w2 = Parameter(initializer(w2, w2.shape), name='w2')
     self.add = P.TensorAdd()
     self.maxpool = P.MaxPool(kernel_size=3, strides=2, pad_mode='SAME')
     self.maxpool_grad = G.MaxPoolGrad(kernel_size=3, strides=2, pad_mode='SAME')
     self.shape = (32, 64, 56, 56)
Example No. 6
 def __init__(self,
              kernel_size=1,
              stride=1,
              pad_mode="valid",
              data_format="NCHW"):
     super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode,
                                     data_format)
     self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
                               strides=self.stride,
                               pad_mode=self.pad_mode,
                               data_format=self.format)
Example No. 7
 def __init__(self, num_classes=10, channel=3):
     super(AlexNet, self).__init__()
     self.conv1 = conv(channel, 96, 11, stride=4)
     self.conv2 = conv(96, 256, 5, pad_mode="same")
     self.conv3 = conv(256, 384, 3, pad_mode="same")
     self.conv4 = conv(384, 384, 3, pad_mode="same")
     self.conv5 = conv(384, 256, 3, pad_mode="same")
     self.relu = nn.ReLU()
     self.max_pool2d = P.MaxPool(ksize=3, strides=2)
     self.flatten = nn.Flatten()
     self.fc1 = fc_with_initialize(6*6*256, 4096)
     self.fc2 = fc_with_initialize(4096, 4096)
     self.fc3 = fc_with_initialize(4096, num_classes)
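
The construct method is not shown here; assuming the standard AlexNet pooling placement (after conv1, conv2 and conv5), 'valid' padding on conv1, and a 227x227 input, the spatial sizes work out as follows, which is what makes the 6*6*256 flatten size of fc1 consistent:

# Shape sketch (illustrative; pooling is ksize=3, strides=2 with the default 'valid' pad):
# 227 -> conv1 (11x11, stride 4) -> 55 -> max_pool2d -> 27
#  27 -> conv2 ('same')          -> 27 -> max_pool2d -> 13
#  13 -> conv3/4/5 ('same')      -> 13 -> max_pool2d -> 6    => 6*6*256 features into fc1
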
Example No. 8
 def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
     super(MaxPool1d, self).__init__(kernel_size, stride, pad_mode)
     validator.check_value_type('kernel_size', kernel_size, [int], self.cls_name)
     validator.check_value_type('stride', stride, [int], self.cls_name)
     self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name)
     validator.check_int(kernel_size, 1, Rel.GE, "kernel_size", self.cls_name)
     validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
     self.kernel_size = (1, kernel_size)
     self.stride = (1, stride)
     self.max_pool = P.MaxPool(ksize=self.kernel_size,
                               strides=self.stride,
                               padding=self.pad_mode)
     self.shape = F.shape
     self.reduce_mean = P.ReduceMean(keep_dims=True)
     self.expand = P.ExpandDims()
     self.squeeze = P.Squeeze(2)
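
The construct method is not included in this snippet; a plausible sketch of how the pieces above combine to emulate 1-D pooling with the 2-D operator:

 def construct(self, x):
     # (N, C, L) -> (N, C, 1, L): insert a dummy height axis for the 2-D pool.
     x = self.expand(x, 2)
     # Pool with kernel (1, kernel_size) and stride (1, stride) along the length axis.
     x = self.max_pool(x)
     # (N, C, 1, L') -> (N, C, L'): drop the dummy axis again.
     x = self.squeeze(x)
     return x
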
Example No. 9
    def __init__(self, kernel_size=1, stride=1, pad_mode="VALID", padding=0):
        max_pool = P.MaxPool(ksize=kernel_size,
                             strides=stride,
                             padding=pad_mode)
        self.is_autodiff_backend = False
        if self.is_autodiff_backend:

            # At present, the pad mode of max pool is not unified, so this is a temporary workaround.
            pad_mode = validator.check_string('pad_mode', pad_mode.lower(),
                                              ['valid', 'same'])

            max_pool = P.MaxPoolWithArgmax(window=kernel_size,
                                           stride=stride,
                                           pad_mode=pad_mode,
                                           pad=padding)
        super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, padding,
                                        max_pool)
Example No. 10
 def __init__(self):
     super(Net, self).__init__()
     self.maxpool = P.MaxPool(pad_mode="SAME", window=3, stride=2)
Example No. 11
 def __init__(self):
     super(Net, self).__init__()
     self.maxpool = P.MaxPool(padding="SAME", ksize=3, strides=2)
Example No. 12
     'skip': ['backward']}),
 ('Elu', {
     'block': P.Elu(),
     'desc_inputs': [[2, 3, 4]],
     'desc_bprop': [[2, 3, 4]]}),
 ('EluGrad', {
     'block': G.EluGrad(),
     'desc_inputs': [[2, 3, 4], [2, 3, 4]],
     'desc_bprop': [[2, 3, 4]],
     'skip': ['backward']}),
 ('Sigmoid', {
     'block': P.Sigmoid(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('MaxPool', {
     'block': P.MaxPool(ksize=(2, 2), strides=(2, 2), padding="VALID"),
     'desc_inputs': [[100, 3, 28, 28]],
     'desc_bprop': [[100, 3, 14, 14]]}),
 ('MaxPoolGrad', {
     'block': G.MaxPoolGrad(ksize=(2, 2), strides=(2, 2), padding="VALID"),
     'desc_inputs': [[3, 4, 6, 6], [3, 4, 3, 3], [3, 4, 3, 3]],
     'desc_bprop': [[3, 4, 6, 6]],
     'skip': ['backward']}),
 ('AvgPool', {
     'block': P.AvgPool(ksize=(2, 2), strides=(2, 2), padding="VALID"),
     'desc_inputs': [[100, 3, 28, 28]],
     'desc_bprop': [[100, 3, 14, 14]]}),
 ('AvgPoolGrad', {
     'block': G.AvgPoolGrad(ksize=(2, 2), strides=(2, 2), padding="VALID"),
     'desc_const': [(3, 4, 6, 6)],
     'const_first': True,
Example No. 13
 def __init__(self):
     super(Net, self).__init__()
     self.maxpool = P.MaxPool(pad_mode="SAME", kernel_size=3, strides=2)
Example No. 14
        'desc_inputs': [Tensor(np.ones([1, 1, 32]).astype(np.float32))],
        'skip': ['backward']
    }),
    # kernel size is invalid (very large)
    ('MaxPoolWithArgmax3', {
        'block': (P.MaxPoolWithArgmax(ksize=50), {
            'exception': ValueError,
            'error_keywords': ['MaxPoolWithArgmax']
        }),
        'desc_inputs': [Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))],
        'skip': ['backward']
    }),

    # input is scalar
    ('MaxPool0', {
        'block': (P.MaxPool(), {
            'exception': TypeError,
            'error_keywords': ['MaxPool']
        }),
        'desc_inputs': [5.0],
        'skip': ['backward']
    }),
    # rank of x is not 4
    ('MaxPool1', {
        'block': (P.MaxPool(), {
            'exception': ValueError,
            'error_keywords': ['MaxPool']
        }),
        'desc_inputs': [Tensor(np.ones([1, 1, 32]).astype(np.float32))],
        'skip': ['backward']
    }),