Example #1
    def __init__(self, num_class=10):
        super(LeNet5, self).__init__()
        self.num_class = num_class
        # combined.Conv2d / combined.Dense bundle the layer with an optional
        # batchnorm and activation in a single module
        self.conv1 = combined.Conv2d(
            1, 6, kernel_size=5, batchnorm=True, activation='relu6')
        self.conv2 = combined.Conv2d(6, 16, kernel_size=5, activation='relu')
        self.fc1 = combined.Dense(16 * 5 * 5, 120, activation='relu')
        self.fc2 = combined.Dense(120, 84, activation='relu')
        self.fc3 = combined.Dense(84, self.num_class)
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
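The fragment above only defines the layers; the forward pass is not shown. A minimal construct method (MindSpore's forward pass), assuming the usual LeNet-5 data flow of conv/pool, conv/pool, then three dense layers, could look like this sketch:

    def construct(self, x):
        # conv1 (with batchnorm + relu6) followed by 2x2 max pooling
        x = self.max_pool2d(self.conv1(x))
        # conv2 followed by 2x2 max pooling -> 16 feature maps of 5x5
        x = self.max_pool2d(self.conv2(x))
        # flatten to a vector and apply the three dense layers
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x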
Example #2
    def __init__(self, num_class=1000, input_size=224, width_mul=1.):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # each row: t (expansion factor), c (output channels), n (repeats), s (stride)
        inverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        if width_mul > 1.0:
            last_channel = make_divisible(last_channel * width_mul)
        self.last_channel = last_channel
        features = [_conv_bn(3, input_channel, 3, 2)]

        for t, c, n, s in inverted_residual_setting:
            out_channel = make_divisible(c * width_mul) if t > 1 else c
            for i in range(n):
                if i == 0:
                    features.append(block(input_channel, out_channel, s, t))
                else:
                    features.append(block(input_channel, out_channel, 1, t))
                input_channel = out_channel

        features.append(_conv_bn(input_channel, self.last_channel, 1))

        self.features = nn.SequentialCell(features)
        self.mean = P.ReduceMean(keep_dims=False)
        self.classifier = combined.Dense(self.last_channel, num_class)
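Again only the constructor is shown. Given the attributes defined above, the forward pass presumably runs the feature extractor, averages over the spatial axes with ReduceMean, and applies the classifier; a minimal sketch of such a construct method, assuming NCHW input:

    def construct(self, x):
        # inverted-residual feature extractor
        x = self.features(x)
        # global average pooling over the spatial dimensions (H, W)
        x = self.mean(x, (2, 3))
        # final fully connected classifier head
        x = self.classifier(x)
        return x

The helpers make_divisible and _conv_bn are not part of the snippet either. For reference, a common make_divisible definition (an assumption based on the reference MobileNet implementation, not necessarily the exact helper used here) rounds channel counts to a multiple of 8:

def make_divisible(value, divisor=8, min_value=None):
    # Round `value` to the nearest multiple of `divisor`,
    # never dropping more than 10% below the original value.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value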