Example no. 1
 def __init__(self, mul_weight, num_layers, strategy1=None, strategy2=None):
     super().__init__()
     self.network = Net(mul_weight, num_layers, strategy1, strategy2)
     self.relu = P.ReLU()
Example no. 2
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)

# model structure configurations for TinyNets; values are
# (resolution multiplier, channel multiplier, depth multiplier)
# code is inspired by and partially adapted from
# https://github.com/rwightman/gen-efficientnet-pytorch

TINYNET_CFG = {
    "a": (0.86, 1.0, 1.2),
    "b": (0.84, 0.75, 1.1),
    "c": (0.825, 0.54, 0.85),
    "d": (0.68, 0.54, 0.695),
    "e": (0.475, 0.51, 0.60)
}
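
The tuples above plug into an EfficientNet-style compound-scaling rule. A minimal sketch of how one variant's multipliers might be consumed (the base values and rounding below are assumptions for illustration, not the model's actual stem configuration):

def scaled_dims(variant, base_resolution=224, base_channels=32, base_depth=4):
    # hypothetical helper: apply the (resolution, channel, depth) multipliers
    r_mult, c_mult, d_mult = TINYNET_CFG[variant]
    resolution = int(base_resolution * r_mult)
    channels = int(base_channels * c_mult)
    depth = max(1, round(base_depth * d_mult))
    return resolution, channels, depth

print(scaled_dims("a"))  # (192, 32, 5) for TinyNet-A under these assumptions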

relu = P.ReLU()
sigmoid = P.Sigmoid()


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv_stem',
        'classifier': 'classifier',
        # the source snippet breaks off here; closed with the upstream
        # helper's **kwargs merge
        **kwargs,
    }
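
In the upstream timm-style code this helper feeds a table of per-variant defaults; a minimal usage sketch (the variant names and override values are illustrative):

default_cfgs = {
    'tinynet_a': _cfg(url=''),        # placeholder URL, as in the source
    'tinynet_b': _cfg(crop_pct=0.9),  # any default can be overridden via **kwargs
}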
Example no. 3
 def __init__(self, strategy0=None, strategy1=None):
     super(AddRelu, self).__init__()
     self.add = P.TensorAdd().shard(strategy=strategy0)
     self.relu = P.ReLU().shard(strategy=strategy1)
Example no. 4
 def __init__(self, funcs):
     super(OneInputBprop, self).__init__()
     self.op = P.ReLU()
     self.funcs = funcs
Example no. 5
 def __init__(self):
     super(AddReluNet, self).__init__()
     self.add = P.Add()
     self.relu = P.ReLU()
     self.relu_grad = G.ReluGrad()
Example no. 6
    def __init__(self, block, layer_nums, in_channels, out_channels, strides,
                 num_classes, damping, loss_scale, frequency, batch_size):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "the length of layer_num, in_channels, out_channels list must be 4!"
            )

        self.conv1 = _conv7x7(3,
                              64,
                              stride=2,
                              damping=damping,
                              loss_scale=loss_scale,
                              frequency=frequency,
                              batch_size=batch_size)
        self.bn1 = _bn(64)
        self.relu = P.ReLU()
        # self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency,
                                       batch_size=batch_size)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency,
                                       batch_size=batch_size)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency,
                                       batch_size=batch_size)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       damping=damping,
                                       loss_scale=loss_scale,
                                       frequency=frequency,
                                       batch_size=batch_size)

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3],
                             num_classes,
                             damping=damping,
                             loss_scale=loss_scale,
                             frequency=frequency,
                             batch_size=batch_size)
Example no. 7
 def __init__(self):
     super().__init__()
     self.op = P.ReLU()
Example no. 8
 def __init__(self):
     super(Net, self).__init__()
     self.relu = P.ReLU(strategy=None)
Example no. 9
 def __init__(self, strategy1, strategy2, weight):
     super().__init__()
     self.weight = Parameter(weight, "w1")
     self.matmul = P.MatMul(transpose_a=False,
                            transpose_b=True).shard(strategy1)
     self.relu = P.ReLU().shard(strategy2)
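
For context, each shard strategy is a tuple with one inner tuple per operator input, giving how many slices each dimension of that input is split into across devices. A hypothetical instantiation assuming 2 devices (the shapes and strategy values are illustrative, not from the source):

import numpy as np
from mindspore import Tensor

# split the rows of MatMul's first input across 2 devices, replicate the
# second (transposed) input, and keep ReLU on the same row split so no
# redistribution is inserted between the two ops
strategy1 = ((2, 1), (1, 1))  # one inner tuple per MatMul input
strategy2 = ((2, 1),)         # ReLU has a single input
weight = Tensor(np.ones((64, 32)).astype(np.float32))
net = Net(strategy1, strategy2, weight)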
Example no. 10
 def __init__(self):
     super(Net, self).__init__()
     self.add = P.TensorAdd()
     self.sub = P.Sub()
     self.relu = P.ReLU()
     self.depend = P.Depend()
Example no. 11
 def __init__(self):
     super(Ms_Cell_Change_Shape, self).__init__()
     self.relu = P.ReLU()
Example no. 12
 def construct(self, x):
     self.gather(self.damping, self.cov_step, 0)
     out = P.ReLU()(x)
     out = self.getG(out)
     return out
Example no. 13
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 down_sample=False,
                 momentum=0.1,
                 training=False,
                 weights_update=False):
        super(ResidualBlockUsing, self).__init__()

        self.affine = weights_update

        out_chls = out_channels // self.expansion
        self.conv1 = _conv(in_channels,
                           out_chls,
                           kernel_size=1,
                           stride=1,
                           padding=0)
        self.bn1 = _BatchNorm2dInit(out_chls,
                                    momentum=momentum,
                                    affine=self.affine,
                                    use_batch_statistics=training)

        self.conv2 = _conv(out_chls,
                           out_chls,
                           kernel_size=3,
                           stride=stride,
                           padding=1)
        self.bn2 = _BatchNorm2dInit(out_chls,
                                    momentum=momentum,
                                    affine=self.affine,
                                    use_batch_statistics=training)

        self.conv3 = _conv(out_chls,
                           out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)
        self.bn3 = _BatchNorm2dInit(out_channels,
                                    momentum=momentum,
                                    affine=self.affine,
                                    use_batch_statistics=training)

        if training:
            self.bn1 = self.bn1.set_train()
            self.bn2 = self.bn2.set_train()
            self.bn3 = self.bn3.set_train()

        if not weights_update:
            self.conv1.weight.requires_grad = False
            self.conv2.weight.requires_grad = False
            self.conv3.weight.requires_grad = False

        self.relu = P.ReLU()
        self.downsample = down_sample
        if self.downsample:
            self.conv_down_sample = _conv(in_channels,
                                          out_channels,
                                          kernel_size=1,
                                          stride=stride,
                                          padding=0)
            self.bn_down_sample = _BatchNorm2dInit(
                out_channels,
                momentum=momentum,
                affine=self.affine,
                use_batch_statistics=training)
            if training:
                self.bn_down_sample = self.bn_down_sample.set_train()
            if not weights_update:
                self.conv_down_sample.weight.requires_grad = False
        self.add = P.Add()
Example no. 14
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes,
                 use_se=False,
                 res_base=False):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "the length of layer_num, in_channels, out_channels list must be 4!"
            )
        self.use_se = use_se
        self.res_base = res_base
        self.se_block = False
        if self.use_se:
            self.se_block = True

        if self.use_se:
            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
            self.bn1_0 = _bn(32)
            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
            self.bn1_1 = _bn(32)
            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
        else:
            self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base)
        self.bn1 = _bn(64, self.res_base)
        self.relu = P.ReLU()

        if self.res_base:
            self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)))
            self.maxpool = nn.MaxPool2d(kernel_size=3,
                                        stride=2,
                                        pad_mode="valid")
        else:
            self.maxpool = nn.MaxPool2d(kernel_size=3,
                                        stride=2,
                                        pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       use_se=self.use_se)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       use_se=self.use_se)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       use_se=self.use_se,
                                       se_block=self.se_block)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       use_se=self.use_se,
                                       se_block=self.se_block)

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se)
Example no. 15
 def __init__(self):
     super().__init__()
     self.reshape = P.Reshape()
     self.relu1 = P.ReLU().shard(((2, 1), ))
     self.relu2 = P.ReLU().shard(((1, 1, 4), ))
Example no. 16
 def __init__(self):
     super(Ms_Cell, self).__init__()
     self.relu = P.ReLU()
Example no. 17
 def __init__(self):
     super().__init__()
     self.reshape = P.Reshape()
     self.relu = P.ReLU()
Example no. 18
 def softmax_relu_pass():
     x = Any()
     pattern = Call(P.Softmax(), [x])
     target = Call(P.ReLU(), [x])
     return pattern, target
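
This pattern/target pair describes a graph substitution: every matched Softmax(x) call is rewritten into ReLU(x). A sketch of how such a pass is activated, assuming the experimental Python-pass registry targeted by the MindSpore tests these snippets come from (the import paths belong to that version-specific module and may differ elsewhere):

from mindspore.graph_utils.python_pass import register_pass, unregister_pass
from mindspore.graph_utils.graph_pattern import Any, Call
from mindspore.ops import operations as P

@register_pass(run_only_once=True)
def softmax_relu_pass():
    x = Any()
    pattern = Call(P.Softmax(), [x])  # what to match
    target = Call(P.ReLU(), [x])      # what to emit instead
    return pattern, target

# the pass stays active for subsequent compilations until removed
unregister_pass(softmax_relu_pass)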
Example no. 19
 def __init__(self):
     super(GradInBprop_1, self).__init__()
     self.relu = P.ReLU()
Example no. 20
 def softmax_relu_pass():
     x = Any()
     sigmoid_softmax_pattern = Prim([P.Sigmoid(), P.Softmax()])
     pattern = Call(sigmoid_softmax_pattern, [x])
     target = Call(P.ReLU(), [x])
     return pattern, target
Example no. 21
 def __init__(self):
     super(Net, self).__init__()
     self.relu = P.ReLU()
Example no. 22
 ('GeluGrad', {
     'block': G.GeluGrad(),
     'desc_inputs': [[2, 2], [2, 2], [2, 2]],
     'desc_bprop': [[2, 2]],
     'skip': ['backward']}),
 ('Tanh', {
     'block': P.Tanh(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('TanhGrad', {
     'block': G.TanhGrad(),
     'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]],
     'skip': ['backward']}),
 ('ReLU', {
     'block': P.ReLU(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('ReLU6', {
     'block': P.ReLU6(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('ReLUV2', {
     'block': P.ReLUV2(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4], [1, 3, 4, 4]]}),
 ('ReLUGrad', {
     'block': G.ReluGrad(),
     'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
     'skip': ['backward']}),
 ('Elu', {
     # source snippet truncated here; entry completed to match the pattern
     # of the surrounding cases (the shapes are assumptions)
     'block': P.Elu(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
Example no. 23
 def __init__(self):
     super(Net, self).__init__()
     self.flatten = P.ReLU()  # nn.Flatten()
Example no. 24
    def __init__(self,
                 config,
                 representation_size,
                 batch_size,
                 num_classes,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(0.1, 0.1, 0.2, 0.2)
                 ):
        super(Rcnn, self).__init__()
        cfg = config
        self.dtype = np.float32
        self.ms_type = mstype.float32
        self.rcnn_loss_cls_weight = Tensor(np.array(cfg.rcnn_loss_cls_weight).astype(self.dtype))
        self.rcnn_loss_reg_weight = Tensor(np.array(cfg.rcnn_loss_reg_weight).astype(self.dtype))
        self.rcnn_fc_out_channels = cfg.rcnn_fc_out_channels
        self.target_means = target_means
        self.target_stds = target_stds
        self.num_classes = num_classes
        self.in_channels = cfg.rcnn_in_channels
        self.train_batch_size = batch_size
        self.test_batch_size = cfg.test_batch_size

        shape_0 = (self.rcnn_fc_out_channels, representation_size)
        weights_0 = initializer("XavierUniform", shape=shape_0[::-1], dtype=self.ms_type).to_tensor()
        shape_1 = (self.rcnn_fc_out_channels, self.rcnn_fc_out_channels)
        weights_1 = initializer("XavierUniform", shape=shape_1[::-1], dtype=self.ms_type).to_tensor()
        self.shared_fc_0 = DenseNoTranpose(representation_size, self.rcnn_fc_out_channels, weights_0)
        self.shared_fc_1 = DenseNoTranpose(self.rcnn_fc_out_channels, self.rcnn_fc_out_channels, weights_1)

        cls_weight = initializer('Normal', shape=[num_classes, self.rcnn_fc_out_channels][::-1],
                                 dtype=self.ms_type).to_tensor()
        reg_weight = initializer('Normal', shape=[num_classes * 4, self.rcnn_fc_out_channels][::-1],
                                 dtype=self.ms_type).to_tensor()
        self.cls_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes, cls_weight)
        self.reg_scores = DenseNoTranpose(self.rcnn_fc_out_channels, num_classes * 4, reg_weight)

        self.flatten = P.Flatten()
        self.relu = P.ReLU()
        self.logicaland = P.LogicalAnd()
        self.loss_cls = P.SoftmaxCrossEntropyWithLogits()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0)
        self.reshape = P.Reshape()
        self.onehot = P.OneHot()
        self.greater = P.Greater()
        self.cast = P.Cast()
        self.sum_loss = P.ReduceSum()
        self.tile = P.Tile()
        self.expandims = P.ExpandDims()

        self.gather = P.GatherNd()
        self.argmax = P.ArgMaxWithValue(axis=1)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.value = Tensor(1.0, self.ms_type)

        self.num_bboxes = (cfg.num_expected_pos_stage2 + cfg.num_expected_neg_stage2) * batch_size

        rmv_first = np.ones((self.num_bboxes, self.num_classes))
        rmv_first[:, 0] = np.zeros((self.num_bboxes,))
        self.rmv_first_tensor = Tensor(rmv_first.astype(self.dtype))

        self.num_bboxes_test = cfg.rpn_max_num * cfg.test_batch_size

        range_max = np.arange(self.num_bboxes_test).astype(np.int32)
        self.range_max = Tensor(range_max)
Example no. 25
 def __init__(self, kernel, bias, in_channel, num_class):
     super().__init__()
     self.relu = P.ReLU()
     self.mean = P.ReduceMean(keep_dims=False)
     self.dense = Dense(in_channel, num_class, kernel, bias)
Example no. 26
 def __init__(self):
     super(Net, self).__init__()
     self.mul = P.Mul()
     self.relu = P.ReLU()
     self.wd = Parameter(Tensor(np.ones([8, 8, 8, 8]).astype(np.float32)), name="wide")
     self.wt = Parameter(Tensor(np.ones([8, 8, 8, 8]).astype(np.float32)), name="l")
Example no. 27
 def __init__(self, strategy1, strategy2, strategy3):
     super().__init__()
     self.matmul1 = P.MatMul().set_strategy(strategy1)
     self.matmul2 = P.MatMul().set_strategy(strategy2)
     self.activation = P.ReLU().set_strategy(strategy3)
Example no. 28
 def __init__(self, input_ch, out_ch):
     super(Net, self).__init__()
     self.dense = nn.Dense(input_ch, out_ch)
     self.relu = P.ReLU()
Example no. 29
 def __init__(self, strategy1, strategy2, strategy3):
     super().__init__()
     self.mul1 = P.Mul().set_strategy(strategy1)
     self.arg_min_with_value = P.ArgMinWithValue(keep_dims=True, axis=-1).set_strategy(strategy2)
     self.relu = P.ReLU().set_strategy(strategy3)
Example no. 30
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 pad_mode,
                 padding=0,
                 eps=1e-5,
                 momentum=0.9,
                 weight_init=None,
                 beta_init=None,
                 gamma_init=None,
                 mean_init=None,
                 var_init=None,
                 group=1,
                 quant_delay=0,
                 freeze_bn=100000,
                 fake=True,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(Conv2dBatchNormQuant, self).__init__()
        self.stride = stride
        self.conv = P.Conv2D(out_channel=out_channels,
                             kernel_size=kernel_size,
                             mode=1,
                             pad_mode=pad_mode,
                             pad=padding,
                             stride=stride,
                             dilation=1,
                             group=group)
        self.fake = fake
        self.freeze_bn = freeze_bn
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)

        if weight_init is None:
            weight_init = initializer(
                'normal', [out_channels, in_channels // group, *kernel_size])
        self.weight = Parameter(weight_init, name='weight')
        if gamma_init is None:
            gamma_init = initializer('ones', [out_channels])
        self.gamma = Parameter(gamma_init, name='gamma')
        if beta_init is None:
            beta_init = initializer('zeros', [out_channels])
        self.beta = Parameter(beta_init, name='beta')
        if mean_init is None:
            mean_init = initializer('zeros', [out_channels])
        self.moving_mean = Parameter(mean_init,
                                     name='moving_mean',
                                     requires_grad=False)
        if var_init is None:
            var_init = initializer('ones', [out_channels])
        self.moving_variance = Parameter(var_init,
                                         name='moving_variance',
                                         requires_grad=False)

        self.step = Parameter(initializer('normal', [1], dtype=mstype.int32),
                              name='step',
                              requires_grad=False)

        self.fake_quant_weight = nn.FakeQuantWithMinMax(
            min_init=-6,
            max_init=6,
            ema=False,
            num_bits=num_bits,
            quant_delay=quant_delay,
            per_channel=per_channel,
            channel_size=out_channels,
            symmetric=symmetric,
            narrow_range=narrow_range)

        self.batchnorm_fold_train = P.BatchNormFold(epsilon=eps,
                                                    momentum=momentum,
                                                    is_training=True,
                                                    freeze_bn=freeze_bn)
        self.batchnorm_fold_infer = P.BatchNormFold(epsilon=eps,
                                                    momentum=momentum,
                                                    is_training=False,
                                                    freeze_bn=freeze_bn)
        self.correct_mul = P.CorrectionMul()
        self.relu = P.ReLU()
        self.batchnorm_fold2 = P.BatchNormFold2(freeze_bn=freeze_bn)
        self.batchnorm_fold2_infer = P.BatchNormFold2(freeze_bn=0)
        self.one = Tensor(1, mstype.int32)
        self.assignadd = P.AssignAdd()