Example #1
 def __init__(self):
     super(LeNet5, self).__init__()
     self.batch_size = 32
     self.conv1 = conv(1, 6, 5)
     self.conv2 = conv(6, 16, 5)
     self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
     self.fc2 = fc_with_initialize(120, 84)
     self.fc3 = fc_with_initialize(84, 10)
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.reshape = P.Reshape()
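
Only the layer definitions survive in this snippet; a minimal sketch of the matching construct method, assuming the classic LeNet-5 wiring (the layer order is an assumption, not part of the original source):

 def construct(self, x):
     # two conv -> relu -> max-pool stages
     x = self.max_pool2d(self.relu(self.conv1(x)))
     x = self.max_pool2d(self.relu(self.conv2(x)))
     # flatten to (batch_size, 16 * 5 * 5) before the dense head
     x = self.reshape(x, (self.batch_size, -1))
     x = self.relu(self.fc1(x))
     x = self.relu(self.fc2(x))
     return self.fc3(x)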
Example #2
 def __init__(self):
     super().__init__()
     self.relu = nn.ReLU()
     self.mul = P.Mul()
     self.add = P.Add()
     self.sub = P.Sub()
     self.assign = P.Assign()
     param_a = np.full((1, ), 5, dtype=np.float32)
     self.param_a = Parameter(Tensor(param_a), name='a')
     param_b = np.full((1, ), 2, dtype=np.float32)
     self.param_b = Parameter(Tensor(param_b), name='b')
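
These stateful operators are typically exercised in a construct that writes back into the parameters; the body below is a hypothetical sketch, not the original network:

 def construct(self, x):
     # update param_a in place: a = a * b (Assign is a side-effecting op)
     self.assign(self.param_a, self.mul(self.param_a, self.param_b))
     out = self.add(x, self.param_a)
     out = self.sub(out, self.param_b)
     return self.relu(out)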
Example #3
 def __init__(self, cin, cout):
     super(DeformConv, self).__init__()
     self.actf = nn.SequentialCell(
         [nn.BatchNorm2d(cout, momentum=BN_MOMENTUM),
          nn.ReLU()])
     self.conv = DCN(cin,
                     cout,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     modulation=True)
Example #4
 def __init__(self, in_channels, out_channels, use_batch_statistics=True):
     super(ASPPPooling, self).__init__()
     self.conv = nn.SequentialCell([
         nn.Conv2d(in_channels,
                   out_channels,
                   kernel_size=1,
                   weight_init='xavier_uniform'),
         nn.BatchNorm2d(out_channels,
                        use_batch_statistics=use_batch_statistics),
         nn.ReLU()
     ])
     self.shape = P.Shape()
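
P.Shape() hints that the pooled feature map is resized back to the input's spatial size. A plausible construct, modeled on the usual DeepLab-style image-pooling branch (the choice of resize operator is an assumption):

 def construct(self, x):
     size = self.shape(x)
     # global average pooling down to 1x1, then 1x1 conv + BN + ReLU
     out = nn.AvgPool2d(size[2])(x)
     out = self.conv(out)
     # upsample back to the original spatial resolution
     out = P.ResizeNearestNeighbor((size[2], size[3]), True)(out)
     return out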
Example #5
 def __init__(self, matmul_0_weight_shape, add_1_bias_shape):
     """init function"""
     super(Module15, self).__init__()
     self.matmul_0 = nn.MatMul()
     self.matmul_0_w = Parameter(Tensor(
         np.random.uniform(0, 1, matmul_0_weight_shape).astype(np.float32)),
                                 name=None)
     self.add_1 = P.Add()
     self.add_1_bias = Parameter(Tensor(
         np.random.uniform(0, 1, add_1_bias_shape).astype(np.float32)),
                                 name=None)
     self.relu_2 = nn.ReLU()
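
The names suggest a plain linear layer followed by ReLU (Example #6 below is the same pattern under clearer names). A likely construct body, offered as a sketch:

 def construct(self, x):
     # x @ W, plus bias, then ReLU
     out = self.matmul_0(x, self.matmul_0_w)
     out = self.add_1(out, self.add_1_bias)
     return self.relu_2(out)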
Example #6
 def __init__(self, linear_weight_shape, linear_bias_shape):
     """init function"""
     super(Linear, self).__init__()
     self.matmul = nn.MatMul()
     self.matmul_w = Parameter(Tensor(
         np.random.uniform(0, 1, linear_weight_shape).astype(np.float32)),
                               name=None)
     self.add = P.Add()
     self.add_bias = Parameter(Tensor(
         np.random.uniform(0, 1, linear_bias_shape).astype(np.float32)),
                               name=None)
     self.relu = nn.ReLU()
Example #7
 def __init__(self):
     super(Net, self).__init__()
     self.conv = nn.Conv2d(3,
                           64,
                           3,
                           has_bias=False,
                           weight_init='normal',
                           pad_mode='valid')
     self.bn = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(64 * 222 * 222, 3)  # padding=0
Example #8
 def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
              expand3x3_planes):
     super(Fire, self).__init__()
     self.inplanes = inplanes
     self.squeeze = nn.Conv2d(inplanes,
                              squeeze_planes,
                              kernel_size=1,
                              has_bias=True)
     self.squeeze_activation = nn.ReLU()
     self.expand1x1 = nn.Conv2d(squeeze_planes,
                                expand1x1_planes,
                                kernel_size=1,
                                has_bias=True)
     self.expand1x1_activation = nn.ReLU()
     self.expand3x3 = nn.Conv2d(squeeze_planes,
                                expand3x3_planes,
                                kernel_size=3,
                                pad_mode='same',
                                has_bias=True)
     self.expand3x3_activation = nn.ReLU()
     self.concat = P.Concat(axis=1)
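
This is the SqueezeNet Fire module: squeeze the channels, then expand through parallel 1x1 and 3x3 branches. A sketch of the corresponding construct, following that standard topology:

 def construct(self, x):
     x = self.squeeze_activation(self.squeeze(x))
     # parallel expand branches, concatenated along the channel axis
     return self.concat((self.expand1x1_activation(self.expand1x1(x)),
                         self.expand3x3_activation(self.expand3x3(x))))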
Example #9
 def __init__(self, base, num_classes=1000, batch_norm=False, batch_size=1, args=None, phase="train",
              include_top=True):
     super(Vgg, self).__init__()
     _ = batch_size
     self.layers = _make_layer(base, args, batch_norm=batch_norm)
     self.include_top = include_top
     self.flatten = nn.Flatten()
     dropout_ratio = 0.5
     if not args.has_dropout or phase == "test":
         dropout_ratio = 1.0
     self.classifier = nn.SequentialCell([
         nn.Dense(512 * 7 * 7, 4096),
         nn.ReLU(),
         nn.Dropout(dropout_ratio),
         nn.Dense(4096, 4096),
         nn.ReLU(),
         nn.Dropout(dropout_ratio),
         nn.Dense(4096, num_classes)])
     if args.initialize_mode == "KaimingNormal":
         default_recurisive_init(self)
         self.custom_init_weight()
Example #10
 def _make_layer(self, cfg, batch_norm=False):
     layers = []
     in_channels = 3
     for v in cfg:
         if v == 'M':
             layers += [
                 nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')
             ]
         else:
             conv2d = Conv2d(in_channels=in_channels,
                             out_channels=v,
                             kernel_size=3,
                             stride=1,
                             pad_mode='same',
                             has_bias=True)
             if batch_norm:
                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
             else:
                 layers += [conv2d, nn.ReLU()]
             in_channels = v
     return nn.SequentialCell(layers)
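
A hypothetical call from the model's __init__, using the VGG-11 configuration as an illustration (the cfg value is an assumption, not taken from the source):

 # 'M' entries become 2x2 max-pool layers; integers are 3x3 conv widths
 vgg11_cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
 self.features = self._make_layer(vgg11_cfg, batch_norm=True)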
Example #11
 def __init__(self, num_input_features, num_output_features, avgpool=False):
     super(_Transition, self).__init__()
     if avgpool:
         poollayer = nn.AvgPool2d(kernel_size=2, stride=2)
     else:
         poollayer = nn.MaxPool2d(kernel_size=2, stride=2)
     self.features = nn.SequentialCell(
         OrderedDict([('norm', nn.BatchNorm2d(num_input_features)),
                      ('relu', nn.ReLU()),
                      ('conv',
                       conv1x1(num_input_features, num_output_features)),
                      ('pool', poollayer)]))
Example #12
 def __init__(self):
     super(Net, self).__init__()
     self.weight = Parameter(Tensor(
         np.ones([64, 10]).astype(np.float32)),
                             name="weight")
     self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)),
                           name="bias")
     self.fc = P.MatMul()
     self.fc2 = nn.Dense(10, 10)
     self.biasAdd = P.BiasAdd()
     self.relu = nn.ReLU()
     self.cast = P.Cast()
Example #13
 def __init__(self, num_class=10):
     super(LeNet5, self).__init__()
     self.num_class = num_class
     self.batch_size = 32
     self.conv1 = conv(1, 6, 5)
     self.conv2 = conv(6, 16, 5)
     self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
     self.fc2 = fc_with_initialize(120, 84)
     self.fc3 = fc_with_initialize(84, self.num_class)
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.flatten = nn.Flatten()
Example #14
 def __init__(self, num_class=15, num_channel=1, include_top=True):
     super(LeNet5, self).__init__()
     self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
     self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.include_top = include_top
     if self.include_top:
         self.flatten = nn.Flatten()
         self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
         self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
         self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
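
include_top gates the dense head, so the construct presumably returns raw features when it is disabled. A sketch under that assumption:

 def construct(self, x):
     x = self.max_pool2d(self.relu(self.conv1(x)))
     x = self.max_pool2d(self.relu(self.conv2(x)))
     if not self.include_top:
         # backbone mode: hand back the convolutional features
         return x
     x = self.flatten(x)
     x = self.relu(self.fc1(x))
     x = self.relu(self.fc2(x))
     return self.fc3(x)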
Example #15
    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1,
                 damping=0.03,
                 loss_scale=1,
                 frequency=278):
        super(ResidualBlock, self).__init__()

        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel,
                              channel,
                              stride=1,
                              damping=damping,
                              loss_scale=loss_scale,
                              frequency=frequency)
        self.bn1 = _bn(channel)

        self.conv2 = _conv3x3(channel,
                              channel,
                              stride=stride,
                              damping=damping,
                              loss_scale=loss_scale,
                              frequency=frequency)
        self.bn2 = _bn(channel)

        self.conv3 = _conv1x1(channel,
                              out_channel,
                              stride=1,
                              damping=damping,
                              loss_scale=loss_scale,
                              frequency=frequency)
        self.bn3 = _bn_last(out_channel)

        self.relu = nn.ReLU()

        self.down_sample = False

        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None

        if self.down_sample:
            self.down_sample_layer = nn.SequentialCell([
                _conv1x1(in_channel,
                         out_channel,
                         stride,
                         damping=damping,
                         loss_scale=loss_scale,
                         frequency=frequency),
                _bn(out_channel)
            ])
        self.add = P.TensorAdd()
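
The attributes imply the standard ResNet bottleneck wiring; a sketch of the construct (the ordering is an assumption based on that pattern):

    def construct(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # project the shortcut whenever stride or channel count changes
        if self.down_sample:
            identity = self.down_sample_layer(identity)
        return self.relu(self.add(out, identity))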
Example #16
 def __init__(self, num_class=10):
     super(BNNLeNet5, self).__init__()
     self.num_class = num_class
     self.conv1 = bnn_layers.ConvReparam(1, 6, 5, stride=1, padding=0, has_bias=False, pad_mode="valid")
     self.conv2 = conv(6, 16, 5)
     self.fc1 = bnn_layers.DenseReparam(16 * 5 * 5, 120)
     self.fc2 = fc_with_initialize(120, 84)
     self.fc3 = fc_with_initialize(84, self.num_class)
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
     self.flatten = nn.Flatten()
     self.reshape = P.Reshape()
Example #17
 def __init__(self, in_channels, out_channels, bilinear=True):
     super().__init__()
     self.concat = F.Concat(axis=1)
     self.factor = 56.0 / 64.0
     self.center_crop = CentralCrop(central_fraction=self.factor)
     self.print_fn = F.Print()
     self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
     self.up = nn.Conv2dTranspose(in_channels,
                                  in_channels // 2,
                                  kernel_size=2,
                                  stride=2)
     self.relu = nn.ReLU()
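
This is a U-Net decoder block: upsample, crop the encoder skip connection to match, concatenate, and convolve. A sketch of the likely construct (the concat order is an assumption):

 def construct(self, x1, x2):
     x1 = self.up(x1)
     # crop the skip feature map so spatial sizes line up after upsampling
     x2 = self.center_crop(x2)
     x = self.concat((x2, x1))
     return self.conv(x)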
Example #18
 def __init__(self, act_func):
     super(Activation, self).__init__()
     if act_func == 'relu':
         self.act = nn.ReLU()
     elif act_func == 'relu6':
         self.act = nn.ReLU6()
     elif act_func in ('hsigmoid', 'hard_sigmoid'):
         self.act = MyHSigmoid()  # nn.HSigmoid()
     elif act_func in ('hswish', 'hard_swish'):
         self.act = nn.HSwish()
     else:
         raise NotImplementedError
Example #19
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 use_batch_statistics=False,
                 use_batch_to_stob_and_btos=False):
        super(BottleneckV1, self).__init__()
        expansion = 4
        mid_channels = out_channels // expansion
        self.conv_bn1 = _conv_bn_relu(in_channels,
                                      mid_channels,
                                      ksize=1,
                                      stride=1,
                                      use_batch_statistics=use_batch_statistics)
        self.conv_bn2 = _conv_bn_relu(mid_channels,
                                      mid_channels,
                                      ksize=3,
                                      stride=stride,
                                      padding=1,
                                      dilation=1,
                                      use_batch_statistics=use_batch_statistics)
        if use_batch_to_stob_and_btos:
            self.conv_bn2 = _stob_conv_btos_bn_relu(mid_channels,
                                                    mid_channels,
                                                    ksize=3,
                                                    stride=stride,
                                                    padding=0,
                                                    dilation=1,
                                                    space_to_batch_block_shape=2,
                                                    batch_to_space_block_shape=2,
                                                    paddings=[[2, 3], [2, 3]],
                                                    crops=[[0, 1], [0, 1]],
                                                    pad_mode="valid",
                                                    use_batch_statistics=use_batch_statistics)

        self.conv3 = nn.Conv2d(mid_channels,
                               out_channels,
                               kernel_size=1,
                               stride=1)
        self.bn3 = nn.BatchNorm2d(out_channels, use_batch_statistics=use_batch_statistics)
        if in_channels != out_channels:
            conv = nn.Conv2d(in_channels,
                             out_channels,
                             kernel_size=1,
                             stride=stride)
            bn = nn.BatchNorm2d(out_channels, use_batch_statistics=use_batch_statistics)
            self.downsample = nn.SequentialCell([conv, bn])
        else:
            self.downsample = Subsample(stride)
        self.add = P.TensorAdd()
        self.relu = nn.ReLU()
        self.Reshape = P.Reshape()
Example #20
    def __init__(self, num_class=10, channel=1):
        super(LeNet5, self).__init__()
        self.num_class = num_class

        self.conv1 = nn.Conv2d(channel, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Dense(16 * 5 * 5, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, self.num_class)

        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
Example #21
def test_model_build_abnormal_string():
    """ test_model_build_abnormal_string """
    net = nn.ReLU()
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(net)
    err = False
    try:
        model.predict('aaa')
    except ValueError as e:
        log.error("Found ValueError: %r", e)
        err = True
    finally:
        assert err
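
The same check reads more directly with pytest.raises, assuming pytest is importable in this test module:

def test_model_build_abnormal_string_pytest():
    """ equivalent assertion via pytest.raises """
    net = nn.ReLU()
    context.set_context(mode=context.GRAPH_MODE)
    model = Model(net)
    with pytest.raises(ValueError):
        model.predict('aaa')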
Example #22
 def __init__(self, num_classes=10):
     super(DefinedNet, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=0,
                            weight_init="zeros")
     self.bn1 = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=2, strides=2)
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(int(56 * 56 * 64), num_classes)
Example #23
def Act(type='default'):
    if type in ['default', 'def']:
        return Act(DEFAULTS['activation'])
    if type == 'relu':
        return nn.ReLU()
    elif type == 'sigmoid':
        return nn.Sigmoid()
    elif type == 'hswish':
        return nn.HSwish()
    elif type == 'leaky_relu':
        return nn.LeakyReLU(alpha=DEFAULTS['leaky_relu']['alpha'])
    else:
        raise ValueError("Unsupported activation type: %s" % type)
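
Hypothetical usage of the factory (variable names chosen for illustration):

# resolve activations by name; 'default' falls back to DEFAULTS['activation']
act = Act('leaky_relu')
head_act = Act('hswish')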
Example #24
    def __init__(self, num_class=10, num_channel=3, count=0):
        super(myNN, self).__init__()
        self.conv1 = nn.Conv2d(num_channel, 16, 5, pad_mode='valid')
        self.conv2 = nn.Conv2d(16, 32, 5)

        self.fc1 = nn.Dense(3872, 128, weight_init=Normal(0.02))
        self.fc2 = nn.Dense(128, 64, weight_init=Normal(0.02))
        self.fc3 = nn.Dense(64, num_class, weight_init=Normal(0.02))

        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        self.dropout = nn.Dropout(keep_prob=0.9)
Example #25
 def __init__(self, in_channel, out_channel):
     super(TestUserDefinedBpropNet, self).__init__()
     self.relu = nn.ReLU()
     self.conv = nn.Conv2d(in_channels=in_channel,
                           out_channels=out_channel,
                           kernel_size=2,
                           stride=1,
                           has_bias=False,
                           weight_init='ones',
                           pad_mode='same')
     self.crop = CropAndResizeNet((10, 10))
     self.boxes = Tensor(np.ones((128, 4)).astype(np.float32))
     self.box_indices = Tensor(np.ones((128, )).astype(np.int32))
Example #26
 def __init__(self, num_classes=10):
     super(Net, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=0,
                            weight_init="zeros")
     self.bn1 = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(int(224 * 224 * 64 / 16), num_classes)
Example #27
 def __init__(self, in_channels, out_channels, mid_channels=None):
     super().__init__()
     init_value_0 = TruncatedNormal(0.06)
     init_value_1 = TruncatedNormal(0.06)
     if not mid_channels:
         mid_channels = out_channels
     self.double_conv = nn.SequentialCell([
         nn.Conv2d(in_channels,
                   mid_channels,
                   kernel_size=3,
                   has_bias=True,
                   weight_init=init_value_0,
                   pad_mode="valid"),
         nn.ReLU(),
         nn.Conv2d(mid_channels,
                   out_channels,
                   kernel_size=3,
                   has_bias=True,
                   weight_init=init_value_1,
                   pad_mode="valid"),
         nn.ReLU()
     ])
Example #28
 def __init__(self, num_classes=10, channel=3):
     super(AlexNet, self).__init__()
     self.conv1 = conv(channel, 96, 11, stride=4)
     self.conv2 = conv(96, 256, 5, pad_mode="same")
     self.conv3 = conv(256, 384, 3, pad_mode="same")
     self.conv4 = conv(384, 384, 3, pad_mode="same")
     self.conv5 = conv(384, 256, 3, pad_mode="same")
     self.relu = nn.ReLU()
     self.max_pool2d = P.MaxPool(ksize=3, strides=2)
     self.flatten = nn.Flatten()
     self.fc1 = fc_with_initialize(6*6*256, 4096)
     self.fc2 = fc_with_initialize(4096, 4096)
     self.fc3 = fc_with_initialize(4096, num_classes)
Example #29
 def __init__(self):
     super(LossNet, self).__init__()
     self.conv = nn.Conv2d(3,
                           64,
                           3,
                           has_bias=False,
                           weight_init='normal',
                           pad_mode='valid')
     self.bn = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(64 * 222 * 222, 3)  # padding=0
     self.loss = nn.SoftmaxCrossEntropyWithLogits()
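
The cell bundles the network with its loss, so its construct plausibly takes both data and labels and returns the scalar loss; a sketch under that assumption:

 def construct(self, data, label):
     out = self.relu(self.bn(self.conv(data)))
     logits = self.fc(self.flatten(out))
     # fusing the forward pass with the loss lets the cell be trained directly
     return self.loss(logits, label)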
Example #30
    def __init__(self,
                 growth_rate,
                 block_config,
                 num_init_features,
                 bn_size=4,
                 drop_rate=0):
        super(Densenet, self).__init__()

        layers = OrderedDict()
        layers['conv0'] = conv7x7(3, num_init_features, stride=2, padding=3)
        layers['norm0'] = nn.BatchNorm2d(num_init_features)
        layers['relu0'] = nn.ReLU()
        layers['pool0'] = nn.MaxPool2d(kernel_size=3,
                                       stride=2,
                                       pad_mode='same')

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            layers['denseblock%d' % (i + 1)] = block
            num_features = num_features + num_layers * growth_rate

            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                layers['transition%d' % (i + 1)] = trans
                num_features = num_features // 2

        # Final batch norm
        layers['norm5'] = nn.BatchNorm2d(num_features)
        layers['relu5'] = nn.ReLU()

        self.features = nn.SequentialCell(layers)
        self.out_channels = num_features
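
Since every stage is registered in the SequentialCell, the forward pass reduces to a single call; a sketch:

    def construct(self, x):
        # the OrderedDict above compiles into one sequential backbone
        return self.features(x)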