Example 1
    def __init__(self, num_classes=10):
        super(SqueezeNet, self).__init__()

        self.features = nn.SequentialCell([
            nn.Conv2d(3,
                      96,
                      kernel_size=7,
                      stride=2,
                      pad_mode='valid',
                      has_bias=True),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            Fire(96, 16, 64, 64),
            Fire(128, 16, 64, 64),
            Fire(128, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2),
            Fire(256, 32, 128, 128),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            nn.MaxPool2d(kernel_size=3, stride=2),
            Fire(512, 64, 256, 256),
        ])

        # Final convolution is initialized differently from the rest
        self.final_conv = nn.Conv2d(512,
                                    num_classes,
                                    kernel_size=1,
                                    has_bias=True)
        self.dropout = nn.Dropout(keep_prob=0.5)
        self.relu = nn.ReLU()
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.custom_init_weight()
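
Only the classifier head's forward pass is missing from this snippet. A minimal construct sketch, assuming the standard SqueezeNet head (dropout, 1x1 convolution, global average pooling):

    # a minimal sketch of the forward pass, assuming the standard SqueezeNet head
    def construct(self, x):
        x = self.features(x)
        x = self.dropout(x)
        x = self.relu(self.final_conv(x))
        x = self.mean(x, (2, 3))  # global average pooling over H and W
        return self.flatten(x)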
Example 2
    def __init__(self, in_str):
        super(LeNet5, self).__init__()

        (a1, a2, a3, a4, a5, a6, a7, a8,
         a9, a10, a11, a12, a13, a14, a15) = map(int, in_str.strip().split())

        self.conv1 = nn.Conv2d(a1, a2, a3, pad_mode="valid")
        self.conv2 = nn.Conv2d(a4, a5, a6, pad_mode="valid")
        self.fc1 = nn.Dense(a7 * a8 * a9, a10)
        self.fc2 = nn.Dense(a11, a12)
        self.fc3 = nn.Dense(a13, a14)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=a15)
        self.flatten = nn.Flatten()
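
This variant reads all fifteen layer hyperparameters from one whitespace-separated string. As a hypothetical usage sketch, the classic LeNet-5 configuration (1-channel input, two 5x5 convolutions, 16 * 5 * 5 = 400 flattened features, 2x2 pooling) packs into:

# hypothetical usage: conv1 = (1, 6, 5), conv2 = (6, 16, 5),
# fc1 = 16*5*5 -> 120, fc2 = 120 -> 84, fc3 = 84 -> 10, pool kernel = 2
net = LeNet5("1 6 5 6 16 5 16 5 5 120 120 84 84 10 2")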
Example 3
    def __init__(self, num_classes=10):
        super(SqueezeNet_Residual, self).__init__()

        self.conv1 = nn.Conv2d(3,
                               96,
                               kernel_size=7,
                               stride=2,
                               pad_mode='valid',
                               has_bias=True)
        self.fire2 = Fire(96, 16, 64, 64)
        self.fire3 = Fire(128, 16, 64, 64)
        self.fire4 = Fire(128, 32, 128, 128)
        self.fire5 = Fire(256, 32, 128, 128)
        self.fire6 = Fire(256, 48, 192, 192)
        self.fire7 = Fire(384, 48, 192, 192)
        self.fire8 = Fire(384, 64, 256, 256)
        self.fire9 = Fire(512, 64, 256, 256)
        # Final convolution is initialized differently from the rest
        self.conv10 = nn.Conv2d(512, num_classes, kernel_size=1, has_bias=True)

        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2)
        self.add = P.Add()
        self.dropout = nn.Dropout(keep_prob=0.5)
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.custom_init_weight()
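
The residual variant threads P.Add around pairs of Fire modules. A minimal construct sketch, assuming the simple-bypass SqueezeNet wiring (identity shortcuts are only possible where input and output channel counts match, i.e. around fire3, fire5, fire7 and fire9):

    # a minimal sketch of the forward pass, assuming simple-bypass SqueezeNet
    def construct(self, x):
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.fire2(x)
        x = self.add(self.fire3(x), x)     # 128 == 128: identity shortcut
        x = self.max_pool2d(self.fire4(x))
        x = self.add(self.fire5(x), x)     # 256 == 256
        x = self.fire6(x)
        x = self.add(self.fire7(x), x)     # 384 == 384
        x = self.max_pool2d(self.fire8(x))
        x = self.add(self.fire9(x), x)     # 512 == 512
        x = self.relu(self.conv10(self.dropout(x)))
        x = self.mean(x, (2, 3))           # global average pooling
        return self.flatten(x)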
Example 4
 def __init__(self, num_class=10):  # 10 classes in total; the images have 1 channel
     super(Forward_fashion, self).__init__()
     self.num_class = num_class
     self.flatten = nn.Flatten()
     self.fc1 = nn.Dense(cfg.channel * cfg.image_height * cfg.image_width, 128)
     self.relu = nn.ReLU()
     self.fc2 = nn.Dense(128, self.num_class)
Example 5
 def __init__(self, num_classes):
     super(Encoder, self).__init__()
     self.fc1 = nn.Dense(1024 + num_classes, 400)
     self.relu = nn.ReLU()
     self.flatten = nn.Flatten()
     self.concat = P.Concat(axis=1)
     self.one_hot = nn.OneHot(depth=num_classes)
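
The Dense layer expects 1024 + num_classes input features because the flattened image and the one-hot label are concatenated along axis 1. A minimal construct sketch, assuming x flattens to 1024 features and y is a batch of integer labels:

 # a minimal sketch of the forward pass (assumption: x flattens to 1024 features)
 def construct(self, x, y):
     x = self.flatten(x)            # (N, 1024)
     y = self.one_hot(y)            # (N, num_classes)
     x = self.concat((x, y))        # (N, 1024 + num_classes)
     return self.relu(self.fc1(x))  # (N, 400)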
Example 6
 def __init__(self, num_class=10):  # 10 classes in total; the images have 1 channel
     super(ForwardFashionRegularization, self).__init__()
     self.num_class = num_class
     self.conv1 = nn.Conv2d(1,
                            32,
                            kernel_size=3,
                            stride=1,
                            padding=0,
                            has_bias=False,
                            pad_mode="valid")
     self.conv2 = nn.Conv2d(32,
                            64,
                            kernel_size=3,
                            stride=1,
                            padding=0,
                            has_bias=False,
                            pad_mode="valid")
     self.conv3 = nn.Conv2d(64,
                            128,
                            kernel_size=3,
                            stride=1,
                            padding=0,
                            has_bias=False,
                            pad_mode="valid")
     self.maxpool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.relu = nn.ReLU()
     self.dropout = nn.Dropout()
     self.flatten = nn.Flatten()
     self.fc1 = nn.Dense(3200, 128)
     self.bn = nn.BatchNorm1d(128)
     self.fc2 = nn.Dense(128, self.num_class)
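
The fc1 input size of 3200 corresponds to 128 feature maps of size 5x5. A hypothetical shape trace for a 28x28 Fashion-MNIST input (one layer order that yields exactly 3200; the actual construct may differ):

# conv1 (3x3, valid): 28 -> 26    conv2 (3x3, valid): 26 -> 24
# maxpool (2, 2):     24 -> 12    conv3 (3x3, valid): 12 -> 10
# maxpool (2, 2):     10 -> 5     flatten: 128 * 5 * 5 = 3200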
Example 7
    def __init__(self, block, num_classes=100):
        super(ResNet9, self).__init__()

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)

        self.layer1 = self.MakeLayer(block,
                                     1,
                                     in_channels=64,
                                     out_channels=256,
                                     stride=1)
        self.layer2 = self.MakeLayer(block,
                                     1,
                                     in_channels=256,
                                     out_channels=512,
                                     stride=2)
        self.layer3 = self.MakeLayer(block,
                                     1,
                                     in_channels=512,
                                     out_channels=1024,
                                     stride=2)
        self.layer4 = self.MakeLayer(block,
                                     1,
                                     in_channels=1024,
                                     out_channels=2048,
                                     stride=2)

        self.avgpool = nn.AvgPool2d(7, 1)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(512 * block.expansion, num_classes)
Example 8
 def __init__(self):
     super(Net, self).__init__()
     self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal', pad_mode='valid')
     self.bn = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(64 * 222 * 222, 3)  # pad_mode='valid': a 224x224 input shrinks to 64 x 222 x 222
Example 9
 def __init__(self):
     super(Net, self).__init__()
     self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal')
     self.bn = nn.BatchNorm2d(64)
     self.fc = nn.Dense(64, 10)
     self.relu = nn.ReLU()
     self.flatten = nn.Flatten()
Example 10
 def __init__(self, num_class=10):
     super(LeNet5, self).__init__()
     self.cast = P.Cast()
     self.flatten = nn.Flatten()
     self.embedding = nn.EmbeddingLookup(16, 4)
     self.relu = nn.ReLU()
     self.fc = fc_with_initialize(12, num_class)
Example 11
 def __init__(self,
              base,
              num_classes=1000,
              batch_norm=False,
              batch_size=1,
              args=None,
              phase="train",
              include_top=True):
     super(Vgg, self).__init__()
     _ = batch_size
     self.layers = _make_layer(base, args, batch_norm=batch_norm)
     self.include_top = include_top
     self.flatten = nn.Flatten()
     dropout_ratio = 0.5
     if not args.has_dropout or phase == "test":
         dropout_ratio = 1.0
     self.classifier = nn.SequentialCell([
         nn.Dense(512 * 7 * 7, 4096),
         nn.ReLU(),
         nn.Dropout(dropout_ratio),
         nn.Dense(4096, 4096),
         nn.ReLU(),
         nn.Dropout(dropout_ratio),
         nn.Dense(4096, num_classes)
     ])
     if args.initialize_mode == "KaimingNormal":
         default_recurisive_init(self)
         self.custom_init_weight()
Example 12
    def __init__(self, batchnorm, dropout):
        super(CosineNet, self).__init__()
        layers = []
        if batchnorm:
            layers.append(nn.BatchNorm2d(INPUT_DIM))

        # initialize hidden layers
        for l_n in range(N_LAYERS):
            in_channels = HIDDEN_DIM if l_n > 0 else INPUT_DIM
            # Use a 1x1 Conv2d instead of Dense; it coordinates better with the BatchNorm2d operator.
            conv = nn.Conv2d(in_channels,
                             HIDDEN_DIM,
                             kernel_size=1,
                             pad_mode='valid',
                             has_bias=True,
                             weight_init=Normal(0.01))
            layers.append(conv)
            if batchnorm:
                layers.append(nn.BatchNorm2d(HIDDEN_DIM))
            if dropout:
                layers.append(nn.Dropout(DROPOUT_RATE))
            layers.append(ACTIVATION())
        self.layers = nn.SequentialCell(layers)

        # initialize output layers
        self.flatten = nn.Flatten()  # convert a 4-dim tensor (N,C,H,W) to a 2-dim tensor (N, C*H*W)
        self.fc = nn.Dense(HIDDEN_DIM,
                           OUTPUT_DIM,
                           weight_init=Normal(0.1),
                           bias_init='zeros')
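
Because every hidden activation keeps the (N, C, 1, 1) layout, the 1x1 convolutions act exactly like Dense layers while staying compatible with BatchNorm2d. A hypothetical shape trace, assuming scalar samples arrive reshaped to (N, INPUT_DIM, 1, 1):

# each 1x1 conv: (N, C_in, 1, 1) -> (N, HIDDEN_DIM, 1, 1)
# flatten:       (N, HIDDEN_DIM, 1, 1) -> (N, HIDDEN_DIM)
# fc:            (N, HIDDEN_DIM) -> (N, OUTPUT_DIM)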
Example 13
    def __init__(self, num_classes):
        super(GoogleNet, self).__init__()
        self.conv1 = Conv2dBlock(3, 64, kernel_size=7, stride=2, padding=0)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.conv2 = Conv2dBlock(64, 64, kernel_size=1)
        self.conv3 = Conv2dBlock(64, 192, kernel_size=3, padding=0)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.block3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.block3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.block4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.block4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.block4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.block4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.block4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")

        self.block5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.block5b = Inception(832, 384, 192, 384, 48, 128, 128)

        self.mean = P.ReduceMean(keep_dims=True)
        self.dropout = nn.Dropout(keep_prob=0.8)
        self.flatten = nn.Flatten()
        self.classifier = nn.Dense(1024,
                                   num_classes,
                                   weight_init=weight_variable(),
                                   bias_init=weight_variable())
Example 14
    def __init__(self, num_class=10, channel=1):
        super(LeNet5, self).__init__()
        self.num_class = num_class

        self.conv1 = nn.Conv2dBnFoldQuant(channel,
                                          6,
                                          5,
                                          pad_mode='valid',
                                          per_channel=True,
                                          quant_delay=900)
        self.conv2 = nn.Conv2dBnFoldQuant(6,
                                          16,
                                          5,
                                          pad_mode='valid',
                                          per_channel=True,
                                          quant_delay=900)
        self.fc1 = nn.DenseQuant(16 * 5 * 5,
                                 120,
                                 per_channel=True,
                                 quant_delay=900)
        self.fc2 = nn.DenseQuant(120, 84, per_channel=True, quant_delay=900)
        self.fc3 = nn.DenseQuant(84,
                                 self.num_class,
                                 per_channel=True,
                                 quant_delay=900)

        self.relu = nn.ActQuant(nn.ReLU(), per_channel=False, quant_delay=900)
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
Example 15
 def __init__(self,
              num_classes=10,
              channel=3,
              phase='train',
              include_top=True):
     super(AlexNet, self).__init__()
     self.conv1 = conv(channel,
                       64,
                       11,
                       stride=4,
                       pad_mode="same",
                       has_bias=True)
     self.conv2 = conv(64, 192, 5, pad_mode="same", has_bias=True)
     self.conv3 = conv(192, 384, 3, pad_mode="same", has_bias=True)
     self.conv4 = conv(384, 256, 3, pad_mode="same", has_bias=True)
     self.conv5 = conv(256, 256, 3, pad_mode="same", has_bias=True)
     self.relu = P.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    pad_mode='valid')
     self.include_top = include_top
     if self.include_top:
         dropout_ratio = 0.65
         if phase == 'test':
             dropout_ratio = 1.0
         self.flatten = nn.Flatten()
         self.fc1 = fc_with_initialize(6 * 6 * 256, 4096)
         self.fc2 = fc_with_initialize(4096, 4096)
         self.fc3 = fc_with_initialize(4096, num_classes)
         self.dropout = nn.Dropout(dropout_ratio)
Example 16
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes,
                 use_se=False):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "the lengths of layer_nums, in_channels and out_channels must all be 4!"
            )
        self.use_se = use_se
        self.se_block = False
        if self.use_se:
            self.se_block = True

        if self.use_se:
            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
            self.bn1_0 = _bn(32)
            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
            self.bn1_1 = _bn(32)
            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
        else:
            self.conv1 = _conv7x7(3, 64, stride=2)
        self.bn1 = _bn(64)
        self.relu = P.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       use_se=self.use_se)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       use_se=self.use_se)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       use_se=self.use_se,
                                       se_block=self.se_block)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       use_se=self.use_se,
                                       se_block=self.se_block)

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se)
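
A hypothetical instantiation in the style of the MindSpore model zoo's ResNet-50 builder (the exact block class and channel lists here are assumptions):

# hypothetical usage (assumption: ResidualBlock is the bottleneck block
# used by the model zoo's resnet50 builder)
net = ResNet(ResidualBlock,
             [3, 4, 6, 3],            # blocks per stage
             [64, 256, 512, 1024],    # per-stage input channels
             [256, 512, 1024, 2048],  # per-stage output channels
             [1, 2, 2, 2],            # per-stage strides
             num_classes=10)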
Example 17
    def __init__(self, block, num_classes=100, batch_size=32):
        super(ResNet, self).__init__()
        self.batch_size = batch_size
        self.num_classes = num_classes

        self.conv1 = conv7x7(3, 64, stride=2, padding=0)

        self.bn1 = bn_with_initialize(64)
        self.relu = P.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME")

        self.layer1 = MakeLayer0(block,
                                 in_channels=64,
                                 out_channels=256,
                                 stride=1)
        self.layer2 = MakeLayer1(block,
                                 in_channels=256,
                                 out_channels=512,
                                 stride=2)
        self.layer3 = MakeLayer2(block,
                                 in_channels=512,
                                 out_channels=1024,
                                 stride=2)
        self.layer4 = MakeLayer3(block,
                                 in_channels=1024,
                                 out_channels=2048,
                                 stride=2)

        self.pool = P.ReduceMean(keep_dims=True)
        self.fc = fc_with_initialize(512 * block.expansion, num_classes)
        self.flatten = nn.Flatten()
Example 18
    def __init__(self, block, layer_nums, in_channels, out_channels, strides,
                 num_classes, damping, loss_scale, frequency):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "the lengths of layer_nums, in_channels and out_channels must all be 4!"
            )

        self.conv1 = _conv7x7(3,
                              64,
                              stride=2,
                              damping=damping,
                              loss_scale=loss_scale,
                              frequency=frequency)
        self.bn1 = _bn(64)
        self.relu = P.ReLU()
        self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency)

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3],
                             num_classes,
                             damping=damping,
                             loss_scale=loss_scale,
                             frequency=frequency)
Example 19
 def __init__(self):
     super(Discriminator, self).__init__()
     self.fc1 = nn.Dense(1024, 400)
     self.fc2 = nn.Dense(400, 720)
     self.fc3 = nn.Dense(720, 1024)
     self.relu = nn.ReLU()
     self.sigmoid = nn.Sigmoid()
     self.flatten = nn.Flatten()
Example 20
 def __init__(self):
     super(LossNet, self).__init__()
     self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal', pad_mode='valid')
     self.bn = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(64 * 222 * 222, 3)  # pad_mode='valid': a 224x224 input shrinks to 64 x 222 x 222
     self.loss = nn.SoftmaxCrossEntropyWithLogits()
Example 21
 def __init__(self, num_classes=10):
     super(DefinedNet, self).__init__()
     self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, weight_init="zeros")
     self.bn1 = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=2, strides=2)
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(int(56*56*64), num_classes)
Example 22
 def __init__(self, num_classes=10):
     super(Net, self).__init__()
     self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, weight_init="zeros")
     self.bn1 = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(int(224 * 224 * 64 / 16), num_classes)
Example 23
 def __init__(self, axis, flag_boottom, flag_top):
     super(Menet, self).__init__()
     self.squeeze = P.Squeeze(axis)
     self.expanddims = P.ExpandDims()
     self.flatten = nn.Flatten()
     self.neg = P.Neg()
     self.axis = axis
     self.flag_boottom = flag_boottom
     self.flag_top = flag_top
Example 24
 def __init__(self):
     super(Net, self).__init__()
     # the constant tensor was created but never used; pass it as the weight init
     weight = Tensor(np.ones([64, 3, 7, 7]).astype(np.float32) * 0.01)
     self.conv = nn.Conv2d(3, 64, (7, 7), pad_mode="same", stride=2, weight_init=weight)
     self.relu = nn.ReLU()
     self.bn = nn.BatchNorm2d(64)
     self.mean = P.ReduceMean(keep_dims=True)
     self.flatten = nn.Flatten()
     self.dense = nn.Dense(64, 12)
Example 25
 def __init__(self, num_classes=10):
     super(ConvNet, self).__init__()
     self.conv1 = nn.Conv2d(3, ConvNet.output_ch, kernel_size=7, stride=2, pad_mode='pad', padding=3)
     self.bn1 = nn.BatchNorm2d(ConvNet.output_ch)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(
         int(ConvNet.image_h * ConvNet.image_w * ConvNet.output_ch / (4 * 4)),
         num_classes)
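
The Dense input size follows from the two stride-2 stages, which together shrink each spatial dimension by a factor of 4. A worked shape trace, assuming ConvNet.image_h == ConvNet.image_w == 224:

# conv1 (7x7, stride 2, padding 3): 224 -> (224 + 2*3 - 7) // 2 + 1 = 112
# maxpool (3x3, stride 2, 'same'):  112 -> ceil(112 / 2) = 56
# flatten: output_ch * 56 * 56 == 224 * 224 * output_ch / (4 * 4)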
Example 26
 def __init__(self, num_class=10, num_channel=1):
     super(LeNet5, self).__init__()
     self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
     self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
     self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
     self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
     self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.flatten = nn.Flatten()
Example 27
def test_time_distributed_flatten_no_reshape_axis():
    inputs = np.random.randint(0, 10, [3, 4, 5])
    flatten = nn.Flatten()
    output_expect = flatten(Tensor(inputs, mindspore.float32)).asnumpy()
    inputs = inputs.reshape([3, 1, 4, 5]).repeat(6, axis=1)
    time_distributed = TestTimeDistributed(flatten, time_axis=1)
    output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
    for i in range(output.shape[1]):
        assert np.all(output[:, i, :] == output_expect)
    print("Flatten op with no reshape axis wrapped successful")
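
nn.Flatten keeps the leading batch axis and merges all remaining axes, which is what the test compares against time step by time step. A minimal standalone check:

import numpy as np
import mindspore
from mindspore import Tensor, nn

flatten = nn.Flatten()
x = Tensor(np.ones((3, 4, 5)), mindspore.float32)
print(flatten(x).shape)  # (3, 20): batch axis kept, trailing axes merged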
Example 28
 def __init__(self):
     super(LeNet5, self).__init__()
     self.conv1 = conv(1, 6, 5)
     self.conv2 = conv(6, 16, 5)
     self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
     self.fc2 = fc_with_initialize(120, 84)
     self.fc3 = fc_with_initialize(84, 10)
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.flatten = nn.Flatten()
Example 29
 def __init__(self):
     super(LeNet5, self).__init__()
     self.conv1 = nn.Conv2d(1, 6, 5, stride=1, pad_mode='valid')
     self.conv2 = nn.Conv2d(6, 16, 5, stride=1, pad_mode='valid')
     self.relu = nn.ReLU()
     self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
     self.flatten = nn.Flatten()
     self.fc1 = nn.Dense(400, 120)
     self.fc2 = nn.Dense(120, 84)
     self.fc3 = nn.Dense(84, 10)
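
Example 29 is the most self-contained LeNet-5 variant; only the imports and the forward pass are missing. A minimal runnable sketch, assuming a 1x32x32 input (so the second pooling leaves 16 * 5 * 5 = 400 features, matching fc1) and a conventional wiring of the declared layers:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

class LeNet5(nn.Cell):
    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, stride=1, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, 5, stride=1, pad_mode='valid')
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        self.fc1 = nn.Dense(400, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, 10)

    def construct(self, x):
        x = self.pool(self.relu(self.conv1(x)))  # 1x32x32 -> 6x28x28 -> 6x14x14
        x = self.pool(self.relu(self.conv2(x)))  # 6x14x14 -> 16x10x10 -> 16x5x5
        x = self.flatten(x)                      # (N, 400)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)

net = LeNet5()
logits = net(Tensor(np.ones((1, 1, 32, 32), np.float32)))
print(logits.shape)  # (1, 10)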
Example 30
 def __init__(self, num_class=10):
     super(LeNet5, self).__init__()
     self.cast = P.Cast()
     self.flatten = nn.Flatten()
     self.embedding_table = Parameter(initializer("normal", (16, 4),
                                                  mstype.float32),
                                      name="embedding_table")
     self.embedding = nn.EmbeddingLookup()
     self.relu = nn.ReLU()
     self.fc = fc_with_initialize(12, num_class)