def __init__(self):
     super(LeNet5, self).__init__()
     self.conv1 = nn.Conv2d(1, 6, 5)
     self.conv2 = nn.Conv2d(6, 16, 5)
     self.fc1 = nn.Dense(16 * 5 * 5, 120)
     self.fc2 = nn.Dense(120, 84)
     self.fc3 = nn.Dense(84, 10)
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2)
     self.flatten = P.Flatten()
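
The snippet stops at __init__. A minimal sketch of the construct method these layers imply, following the classic LeNet-5 ordering (conv/relu/pool twice, then flatten and three dense layers); the forward pass is not part of the snippet, so treat this as an assumption:

def construct(self, x):
    # Two conv/relu/pool stages, then flatten into the 16 * 5 * 5
    # vector fc1 expects (assumes 32x32 single-channel input).
    x = self.max_pool2d(self.relu(self.conv1(x)))
    x = self.max_pool2d(self.relu(self.conv2(x)))
    x = self.flatten(x)
    x = self.relu(self.fc1(x))
    x = self.relu(self.fc2(x))
    return self.fc3(x)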
Example #2
 def __init__(self):
     super(NetMissConstruct, self).__init__()
     self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
     self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
     self.fc1 = nn.Dense(16 * 5 * 5, 120)
     self.fc2 = nn.Dense(120, 84)
     self.fc3 = nn.Dense(84, 10)
     self.relu = nn.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=2)
     self.flatten = P.Flatten()
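
As its name suggests, NetMissConstruct defines its layers but no construct method, so calling it should fail rather than run a forward pass. A hypothetical usage sketch (the exact exception depends on the MindSpore version, so this is an assumption):

# Hypothetical: MindSpore's base Cell raises when construct is not
# overridden, so this Cell cannot be executed.
net = NetMissConstruct()
try:
    net(Tensor(np.ones([1, 1, 32, 32], np.float32)))
except Exception as e:
    print("Cell without construct fails:", e)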
Example #3
 def __init__(self):
     super(Conv2dNativeNet, self).__init__()
     self.conv = P.DepthwiseConv2dNative(channel_multiplier=3, kernel_size=(3, 3))
     self.flatten = P.Flatten()
     channel_multipliers = 1
     in_channels = 3
     kernel_size = (3, 3)
     self.weight = Parameter(initializer(
         Tensor(np.ones([channel_multipliers, in_channels, *kernel_size], dtype=np.float32)),
         [channel_multipliers, in_channels, *kernel_size]), name='weight')
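
P.DepthwiseConv2dNative is a raw operator, so the weight is passed explicitly at call time rather than held by an nn layer. A sketch of the implied construct under that assumption (note the snippet declares channel_multiplier=3 on the op but builds the weight with channel_multipliers = 1, which looks inconsistent):

 def construct(self, x):
     # Weight shape: [channel_multiplier, in_channels, *kernel_size].
     return self.flatten(self.conv(x, self.weight))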
Example #4
 def __init__(self):
     super(BlockNet, self).__init__()
     self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, pad_mode="pad", padding=3)
     self.bn1 = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
     self.block_down_sample = ResidualBlock(
         64, 256, stride=1, down_sample=True
     )
     self.flatten = P.Flatten()
     self.weight = Parameter(Tensor(np.ones([1024, 10]).astype(np.float32)), name="weight")
     self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)), name="bias")
     self.fc = P.MatMul()
     self.biasAdd = P.BiasAdd()
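
A minimal sketch of the forward pass these members suggest: the stem (conv/bn/relu/maxpool), the residual block, then a flattened MatMul + BiasAdd classifier. Not part of the snippet; an assumption from the layer names:

 def construct(self, x):
     x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
     x = self.block_down_sample(x)
     x = self.flatten(x)
     # fc is a raw MatMul, so weight and bias are applied manually.
     return self.biasAdd(self.fc(x, self.weight), self.bias)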
Example #5
 def __init__(self, config):
     super(TransformerTrainingLoss, self).__init__(auto_prefix=False)
     self.vocab_size = config.vocab_size
     self.onehot = P.OneHot()
     self.on_value = Tensor(float(1 - config.label_smoothing), mstype.float32)
     self.off_value = Tensor(config.label_smoothing / float(self.vocab_size - 1), mstype.float32)
     self.reduce_sum = P.ReduceSum()
     self.reduce_mean = P.ReduceMean()
     self.reshape = P.Reshape()
     self.last_idx = (-1,)
     self.flatten = P.Flatten()
     self.neg = P.Neg()
     self.cast = P.Cast()
     self.flat_shape = (config.batch_size * config.seq_length,)
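
These operators implement label-smoothed cross-entropy: on_value/off_value smooth the one-hot targets, and the loss is averaged over non-padding tokens. A sketch of the implied construct (an assumption; it presumes prediction_scores are log-probabilities, and the same logic would apply to LabelSmoothedCrossEntropyCriterion in Example #7 below):

 def construct(self, prediction_scores, label_ids, label_weights):
     label_ids = self.reshape(label_ids, self.flat_shape)
     label_weights = self.cast(self.reshape(label_weights, self.flat_shape), mstype.float32)
     # Smoothed targets: 1 - label_smoothing on the true class,
     # label_smoothing / (vocab_size - 1) elsewhere.
     one_hot_labels = self.onehot(label_ids, self.vocab_size, self.on_value, self.off_value)
     per_example_loss = self.neg(self.reduce_sum(prediction_scores * one_hot_labels, self.last_idx))
     # Average over non-padding tokens only.
     numerator = self.reduce_sum(label_weights * per_example_loss, ())
     denominator = self.reduce_sum(label_weights, ()) + 1e-5
     return numerator / denominator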
Example #6
 def __init__(self):
     super(BatchnormNet, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            4,
                            kernel_size=8,
                            stride=2,
                            pad_mode="pad",
                            padding=3)
     self.bn1 = nn.BatchNorm2d(4)
     self.flatten = P.Flatten()
     self.weight = Parameter(Tensor(np.ones([64, 10], np.float32)),
                             name="weight")
     self.bias = Parameter(Tensor(np.ones([10], np.float32)), name="bias")
     self.fc = P.MatMul()
     self.biasAdd = P.BiasAdd()
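
A sketch of the forward pass these members imply (conv, batch norm, flatten, then a manual MatMul + BiasAdd); the construct method itself is not shown, so this is an assumption:

 def construct(self, x):
     x = self.bn1(self.conv1(x))
     x = self.flatten(x)
     return self.biasAdd(self.fc(x, self.weight), self.bias)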
Example #7
 def __init__(self, config):
     super(LabelSmoothedCrossEntropyCriterion, self).__init__()
     self.vocab_size = config.vocab_size
     self.onehot = P.OneHot()
     self.on_value = Tensor(float(1 - config.label_smoothing), mstype.float32)
     self.off_value = Tensor(config.label_smoothing / float(self.vocab_size - 1), mstype.float32)
     self.reduce_sum = P.ReduceSum()
     self.reduce_mean = P.ReduceMean()
     self.reshape = P.Reshape()
     self.last_idx = (-1,)
     self.flatten = P.Flatten()
     self.neg = P.Neg()
     self.cast = P.Cast()
     self.flat_shape = (config.batch_size * config.seq_length,)
     self.get_shape = P.Shape()
Example #8
    def __init__(self, model_cfgs, num_classes=1000, multiplier=1., final_drop=0., round_nearest=8):
        super(GhostNet, self).__init__()
        self.cfgs = model_cfgs['cfg']
        self.inplanes = 16
        first_conv_in_channel = 3
        first_conv_out_channel = _make_divisible(multiplier * self.inplanes)

        self.conv_stem = nn.Conv2d(in_channels=first_conv_in_channel,
                                   out_channels=first_conv_out_channel,
                                   kernel_size=3, padding=1, stride=2,
                                   has_bias=False, pad_mode='pad')
        self.bn1 = nn.BatchNorm2d(first_conv_out_channel)
        self.act1 = Activation('relu')

        self.blocks = []
        for layer_cfg in self.cfgs:
            self.blocks.append(self._make_layer(kernel_size=layer_cfg[0],
                                                exp_ch=_make_divisible(
                                                    multiplier * layer_cfg[1]),
                                                out_channel=_make_divisible(
                                                    multiplier * layer_cfg[2]),
                                                use_se=layer_cfg[3],
                                                act_func=layer_cfg[4],
                                                stride=layer_cfg[5]))
        output_channel = _make_divisible(
            multiplier * model_cfgs["cls_ch_squeeze"])
        self.blocks.append(ConvUnit(_make_divisible(multiplier * self.cfgs[-1][2]), output_channel,
                                    kernel_size=1, stride=1, padding=0, num_groups=1, use_act=True))
        self.blocks = nn.SequentialCell(self.blocks)

        self.global_pool = GlobalAvgPooling(keep_dims=True)
        self.conv_head = nn.Conv2d(in_channels=output_channel,
                                   out_channels=model_cfgs['cls_ch_expand'],
                                   kernel_size=1, padding=0, stride=1,
                                   has_bias=True, pad_mode='pad')
        self.act2 = Activation('relu')
        self.squeeze = P.Flatten()
        self.final_drop = final_drop
        if self.final_drop > 0:
            self.dropout = nn.Dropout(self.final_drop)

        self.classifier = nn.Dense(
            model_cfgs['cls_ch_expand'], num_classes, has_bias=True)

        self._initialize_weights()
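
A minimal sketch of GhostNet's implied forward pass from these members (stem, blocks, global pool, conv head, flatten, optional dropout, classifier); an assumption, not the snippet's own code:

    def construct(self, x):
        x = self.act1(self.bn1(self.conv_stem(x)))
        x = self.blocks(x)
        x = self.global_pool(x)
        x = self.act2(self.conv_head(x))
        x = self.squeeze(x)
        if self.final_drop > 0:
            x = self.dropout(x)
        return self.classifier(x)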
Example #9
    def __init__(self, input_channels, output_channels, num_classes, pool_size):
        super(FpnCls, self).__init__()
        representation_size = input_channels * pool_size * pool_size
        shape_0 = (output_channels, representation_size)
        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=mstype.float32)
        shape_1 = (output_channels, output_channels)
        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=mstype.float32)
        self.shared_fc_0 = DenseNoTranpose(representation_size, output_channels, weights_0).to_float(mstype.float16)
        self.shared_fc_1 = DenseNoTranpose(output_channels, output_channels, weights_1).to_float(mstype.float16)

        cls_weight = initializer('Normal', shape=[num_classes, output_channels][::-1],
                                 dtype=mstype.float32)
        reg_weight = initializer('Normal', shape=[num_classes * 4, output_channels][::-1],
                                 dtype=mstype.float32)
        self.cls_scores = DenseNoTranpose(output_channels, num_classes, cls_weight).to_float(mstype.float16)
        self.reg_scores = DenseNoTranpose(output_channels, num_classes * 4, reg_weight).to_float(mstype.float16)

        self.relu = P.ReLU()
        self.flatten = P.Flatten()
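
A sketch of the implied head: flatten the pooled RoI features, pass them through the two shared FC layers with ReLU, then branch into classification scores and box-regression deltas. An assumption based on the member names:

    def construct(self, x):
        x = self.flatten(x)
        x = self.relu(self.shared_fc_0(x))
        x = self.relu(self.shared_fc_1(x))
        return self.cls_scores(x), self.reg_scores(x)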
Example #10
    def __init__(self, block, num_classes=100):
        super(ResNet50, self).__init__()

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='valid')

        self.layer1 = self.MakeLayer(
            block, 3, in_channels=64, out_channels=256, stride=1)
        self.layer2 = self.MakeLayer(
            block, 4, in_channels=256, out_channels=512, stride=2)
        self.layer3 = self.MakeLayer(
            block, 6, in_channels=512, out_channels=1024, stride=2)
        self.layer4 = self.MakeLayer(
            block, 3, in_channels=1024, out_channels=2048, stride=2)

        self.avgpool = nn.AvgPool2d(7, 1)
        self.flatten = P.Flatten()
        self.fc = nn.Dense(512 * block.expansion, num_classes)
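
A minimal sketch of the standard ResNet forward pass these layers imply (an assumption; the snippet omits both construct and MakeLayer):

    def construct(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = self.flatten(x)
        return self.fc(x)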
Example #11
 def __init__(self):
     super(FlattenNet, self).__init__()
     self.flatten = P.Flatten()
Example #12
 def __init__(self):
     super(Conv2dWithBiasNet, self).__init__()
     self.conv = nn.Conv2d(3, 10, 1, has_bias=True, bias_init='zeros')  # has_bias=True so bias_init takes effect
     self.flatten = P.Flatten()
Example #13

class TopKNet(nn.Cell):
    def __init__(self, net, k):
        super(TopKNet, self).__init__()
        self.net = net
        self.k = k

    def construct(self, x):
        return self.net(x, self.k)
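
TopKNet just bakes a fixed k into whatever operator it wraps. A hypothetical usage sketch with P.TopK (illustrative values, not from the snippet):

# Hypothetical: wrap P.TopK so the Cell carries k itself.
topk = TopKNet(P.TopK(sorted=True), 3)
values, indices = topk(Tensor(np.array([1., 5., 3., 4., 2.], np.float32)))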


raise_set = [
    # input is scalar
    ('Flatten0', {
        'block': (P.Flatten(), {
            'exception': TypeError,
            'error_keywords': ['Flatten']
        }),
        'desc_inputs': [5.0],
        'skip': ['backward']
    }),
    # dim of input is zero
    ('Flatten1', {
        'block': (P.Flatten(), {
            'exception': ValueError,
            'error_keywords': ['Flatten']
        }),
        'desc_inputs': [F.scalar_to_tensor(5.0)],
        'skip': ['backward']
    }),
]
Example #14
    def __init__(self,
                 config,
                 representation_size,
                 batch_size,
                 num_classes,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(0.1, 0.1, 0.2, 0.2)):
        super(Rcnn, self).__init__()
        cfg = config
        self.rcnn_loss_cls_weight = Tensor(
            np.array(cfg.rcnn_loss_cls_weight).astype(np.float16))
        self.rcnn_loss_reg_weight = Tensor(
            np.array(cfg.rcnn_loss_reg_weight).astype(np.float16))
        self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
        self.target_means = target_means
        self.target_stds = target_stds
        self.num_classes = num_classes
        self.in_channels = cfg.rcnn_in_channels
        self.train_batch_size = batch_size
        self.test_batch_size = cfg.test_batch_size
        self.use_ambigous_sample = cfg.use_ambigous_sample

        shape_0 = (self.rcnn_fc_out_channels, representation_size)
        weights_0 = initializer("XavierUniform",
                                shape=shape_0[::-1],
                                dtype=mstype.float16).to_tensor()
        shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
        weights_1 = initializer("XavierUniform",
                                shape=shape_1[::-1],
                                dtype=mstype.float16).to_tensor()
        self.shared_fc_0 = DenseNoTranpose(representation_size,
                                           self.rcnn_fc_out_channels,
                                           weights_0)
        self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels,
                                           self.rcnn_fc_out_channels,
                                           weights_1)

        cls_weight = initializer(
            'Normal',
            shape=[num_classes, self.rcnn_fc_out_channels][::-1],
            dtype=mstype.float16).to_tensor()
        reg_weight = initializer(
            'Normal',
            shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
            dtype=mstype.float16).to_tensor()
        self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels,
                                          num_classes, cls_weight)
        self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels,
                                          num_classes * 4, reg_weight)

        self.flatten = P.Flatten()
        self.relu = P.ReLU()
        self.logicaland = P.LogicalAnd()
        self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0)
        self.reshape = P.Reshape()
        self.onehot = P.OneHot()
        self.greater = P.Greater()
        self.equal = P.Equal()
        self.cast = P.Cast()
        self.sum_loss = P.ReduceSum()
        self.tile = P.Tile()
        self.expandims = P.ExpandDims()

        self.gather = P.GatherNd()
        self.argmax = P.ArgMaxWithValue(axis=1)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.value = Tensor(1.0, mstype.float16)

        self.num_bboxes = (cfg.num_expected_pos_stage2 +
                           cfg.num_expected_neg_stage2) * batch_size
        if self.use_ambigous_sample:
            self.num_bboxes = (cfg.num_expected_pos_stage2 +
                               cfg.num_expected_amb_stage2 +
                               cfg.num_expected_neg_stage2) * batch_size

        rmv_first = np.ones((self.num_bboxes, self.num_classes))
        rmv_first[:, 0] = np.zeros((self.num_bboxes, ))
        self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))

        self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size

        range_max = np.arange(self.num_bboxes_test).astype(np.int32)
        self.range_max = Tensor(range_max)
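
The feature path of this head mirrors FpnCls in Example #9: flatten, two shared FC layers with ReLU, then class scores and box deltas. A sketch of that path only, written as a hypothetical helper (the real construct also computes the weighted classification and SmoothL1 box losses with the operators above, which is omitted here):

    def extract_scores(self, x):
        # Hypothetical helper: shared-FC feature path only.
        x = self.flatten(x)
        x = self.relu(self.shared_fc_0(x))
        x = self.relu(self.shared_fc_1(x))
        return self.cls_scores(x), self.reg_scores(x)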
Example #15
 def __init__(self):
     super(NetForFlatten0D, self).__init__()
     self.flatten = P.Flatten()
Example #16
     'desc_const': [(3, 4, 6, 6)],
     'const_first': True,
     'desc_inputs': [[3, 4, 6, 6]],
     'desc_bprop': [[3, 4, 6, 6]],
     'skip': ['backward']}),
 ('MaxPoolWithArgmax', {
     'block': P.MaxPoolWithArgmax(ksize=2, strides=2),
     'desc_inputs': [[128, 32, 32, 64]],
     'desc_bprop': [[128, 32, 8, 16], [128, 32, 8, 16]]}),
 ('SoftmaxCrossEntropyWithLogits', {
     'block': P.SoftmaxCrossEntropyWithLogits(),
     'desc_inputs': [[1, 10], [1, 10]],
     'desc_bprop': [[1], [1, 10]],
     'skip': ['backward_exec']}),
 ('Flatten', {
     'block': P.Flatten(),
     'desc_inputs': [[128, 32, 32, 64]],
     'desc_bprop': [[128 * 32 * 8 * 16]]}),
 ('LogSoftmax', {
     'block': P.LogSoftmax(),
     'desc_inputs': [[64, 2]],
     'desc_bprop': [[160, 30522]]}),
 ('LogSoftmaxGrad', {
     'block': G.LogSoftmaxGrad(),
     'desc_inputs': [[16, 1234], [16, 1234]],
     'desc_bprop': [[64, 2]],
     'skip': ['backward']}),
 ('LayerNorm', {
     'block': P.LayerNorm(),
     'desc_inputs': [[2, 16], [16], [16]],
     'desc_bprop': [[2, 16], [2, 16], [2, 16]]}),
Example #17
 def __init__(self, num_classes=10, dropout_keep_prob=0.8):
     super(Logits, self).__init__()
     self.avg_pool = nn.AvgPool2d(8, pad_mode='valid')
     self.dropout = nn.Dropout(keep_prob=dropout_keep_prob)
     self.flatten = P.Flatten()
     self.fc = nn.Dense(2048, num_classes)
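
A sketch of the implied classifier head (average-pool, dropout, flatten, dense); an assumption, since the snippet shows only __init__:

 def construct(self, x):
     x = self.avg_pool(x)
     x = self.dropout(x)
     x = self.flatten(x)
     return self.fc(x)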
Example #18
 def __init__(self):
     super(NetLastFlatten, self).__init__()
     self.flatten = P.Flatten()
     self.relu = P.ReLU()
Example #19
 def __init__(self):
     super(NetAllFlatten, self).__init__()
     self.flatten = P.Flatten()
Example #20
 def __init__(self, symbol, loop_count=(1, 3)):
     super().__init__()
     self.symbol = symbol
     self.loop_count = loop_count
     self.fla = P.Flatten()
     self.relu = ReLU()