Example #1
 def __init__(self, channel=1, w=0.25):
     super().__init__()
     self.norm = P.L2Normalize(axis=1)
     self.prelu = P.PReLU()
     self.w = Parameter(initializer(w, [channel]), name='w')
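The snippet shows only the constructor. A minimal self-contained completion, for reference (the imports, the construct method, and the usage line are our assumptions, not part of the original source):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P

class Net(nn.Cell):
    def __init__(self, channel=1, w=0.25):
        super().__init__()
        self.norm = P.L2Normalize(axis=1)  # unit L2 norm along the channel axis
        self.prelu = P.PReLU()             # functional PReLU: called as prelu(x, weight)
        self.w = Parameter(initializer(w, [channel]), name='w')

    def construct(self, x):
        x = self.norm(x)
        return self.prelu(x, self.w)

out = Net()(Tensor(np.random.rand(2, 1).astype(np.float32)))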
Example #2
    def __init__(self,
                 embbeding_size=128,
                 classnum=270762,
                 s=32,
                 a=1.0,
                 m=0.3,
                 b=0.2):
        super(CombineMarginFCFp16, self).__init__()
        weight_shape = [classnum, embbeding_size]
        weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
        self.weight = Parameter(weight_init, name='weight')

        self.m = m
        self.s = s
        self.a = a
        self.b = b
        self.m_const = Tensor(self.m, dtype=mstype.float16)
        self.a_const = Tensor(self.a, dtype=mstype.float16)
        self.b_const = Tensor(self.b, dtype=mstype.float16)
        self.s_const = Tensor(self.s, dtype=mstype.float16)
        self.m_const_zero = Tensor(0, dtype=mstype.float16)
        self.a_const_one = Tensor(1, dtype=mstype.float16)
        self.normalize = P.L2Normalize(axis=1)
        self.fc = P.MatMul(transpose_b=True)

        self.onehot = P.OneHot()
        self.transpose = P.Transpose()
        self.acos = P.ACos()
        self.cos = P.Cos()
        self.cast = P.Cast()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
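Most of the margin-FC examples here (#2, #9, #16) share one trick: after L2-normalizing both the feature rows and the class-weight rows, the MatMul produces cos(theta) logits. A hedged, stand-alone demonstration of just that core (this is not the original construct, which is omitted from the snippet):

import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)  # run primitives eagerly

normalize = P.L2Normalize(axis=1)
fc = P.MatMul(transpose_b=True)

x = Tensor(np.random.rand(4, 128).astype(np.float32))   # features (batch, emb)
w = Tensor(np.random.rand(10, 128).astype(np.float32))  # class weights
cosine = fc(normalize(x), normalize(w))                 # (4, 10), entries in [-1, 1]

The m, a, b constants then perturb the target-class entry of this table (cos(a*theta + m) - b, recovered via ACos/Cos) before the s scale is applied; the float16 constants indicate this variant runs the head in half precision.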
Example #3
 def __init__(self, channel=1, w=0.25, strategy1=None, strategy2=None):
     super().__init__()
     self.norm = P.L2Normalize().set_strategy(strategy1)
     self.prelu = P.PReLU().set_strategy(strategy2)
     self.w = Parameter(initializer(w, [channel]), name='w')
Example #4
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 low_dims,
                 training_mode=True,
                 use_MLP=False):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")

        self.use_MLP = use_MLP
        self.training_mode = training_mode
        self.concat = P.Concat()
        self.split = P.Split(0, 3)
        self.l2norm = P.L2Normalize(axis=1)

        self.conv1 = _conv3x3(3, 64, stride=1)
        self.bn1 = _bn(64, training_mode)
        self.relu = nn.ReLU()

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(block.expansion * 512, low_dims)
        self.mlp_layer1 = _fc(block.expansion * 512, block.expansion * 512)
        self.mlp_layer2 = _fc(block.expansion * 512, low_dims)
Example #5
 def __init__(self,
              in_channels,
              out_channels,
              weight_init='normal',
              bias_init='zeros',
              has_bias=True):
     super(CommonHeadLastFN, self).__init__()
     weight_shape = [out_channels, in_channels]
     self.weight = Parameter(initializer(weight_init, weight_shape),
                             requires_grad=True,
                             name='weight')
     self.x_norm = P.L2Normalize(axis=1)
     self.w_norm = P.L2Normalize(axis=1)
     self.fc = P.MatMul(transpose_a=False, transpose_b=True)
     self.multiplier = Parameter(Tensor(np.ones([1]), mstype.float32),
                                 requires_grad=True,
                                 name='multiplier')
     self.has_bias = has_bias
     if self.has_bias:
         bias_shape = [out_channels]
         self.bias_add = P.BiasAdd()
         self.bias = Parameter(initializer(bias_init, bias_shape),
                               requires_grad=True,
                               name='bias')
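The forward pass is not included in this snippet; a plausible construct for a normalized fully connected head with a learnable scale would be (a sketch only, matching the attributes defined above):

 def construct(self, x):
     # Cosine similarity between unit-norm features and unit-norm class
     # weights, rescaled by the learnable multiplier.
     x = self.x_norm(x)
     w = self.w_norm(self.weight)
     output = self.multiplier * self.fc(x, w)
     if self.has_bias:
         output = self.bias_add(output, self.bias)
     return output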
Example #6
 def __init__(self, in_channel, out_channel, axis, input_shape, mul_size,
              test_size, prelu_size, transpose_b, matmul_size, num_class):
     super().__init__()
     mul_np = np.full(mul_size, 0.5, dtype=np.float32)
     self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
     bias_np = np.full((12, ), 7.1, dtype=np.float32)
     self.bias = Parameter(Tensor(bias_np), name="bias")
     prelu_np = np.full(prelu_size, 0.8, dtype=np.float32)
     self.prelu_weight = Parameter(Tensor(prelu_np), name="prelu_weight")
     matmul_np = np.full(matmul_size, 1.1, dtype=np.float32)
     self.matmul_weight = Parameter(Tensor(matmul_np), name="matmul_weight")
     self.mul = P.Mul()
     self.conv = Conv2d(in_channels=in_channel,
                        out_channels=out_channel,
                        kernel_size=5,
                        has_bias=True,
                        weight_init='ones',
                        bias_init='ones',
                        pad_mode='valid')
     self.scalar = 0.5
     self.parameter = Parameter(initializer(0.5,
                                            test_size,
                                            dtype=mstype.float32),
                                name='parameter')
     self.tensor = Tensor(np.full(test_size, 0.05, dtype=np.float32))
     self.softmax = Softmax(axis=axis)
     self.relu = ReLU()
     self.relu.relu.add_prim_attr("primitive_target", "CPU")
     self.reshape = P.Reshape()
     self.input_shape = input_shape
     self.equal = P.Equal()
     self.cast = P.Cast()
     self.concat = P.Concat(axis=1)
     self.reduce_sum = P.ReduceSum()
     self.bias_add = P.BiasAdd()
     self.cos = P.Cos()
     self.prelu = P.PReLU()
     self.matmul = P.MatMul(transpose_b=transpose_b)
     self.l2norm = P.L2Normalize(axis=(1 - axis))
     self.tensoradd = P.TensorAdd()
     self.strided_slice = P.StridedSlice()
     self.dense = Dense(in_channels=6,
                        out_channels=num_class,
                        weight_init='ones',
                        bias_init='ones',
                        has_bias=True)
Example #7
    def __init__(self, num_layers=36, feature_dim=128, shape=(96, 64)):
        super(SphereNet_float32, self).__init__()
        assert num_layers in [12, 20, 36, 64], 'SphereNet num_layers should be 12, 20, 36 or 64'
        if num_layers == 12:
            layers = [1, 1, 1, 1]
            filter_list = [3, 16, 32, 64, 128]
            fc_size = 128 * 6 * 4
        elif num_layers == 20:
            layers = [1, 2, 4, 1]
            filter_list = [3, 64, 128, 256, 512]
            fc_size = 512 * 6 * 4
        elif num_layers == 36:
            layers = [2, 4, 4, 2]
            filter_list = [3, 32, 64, 128, 256]
            fc_size = 256 * 6 * 4
        elif num_layers == 64:
            layers = [3, 7, 16, 3]
            filter_list = [3, 64, 128, 256, 512]
            fc_size = 512 * 6 * 4
        else:
            raise ValueError('sphere' + str(num_layers) + " IS NOT SUPPORTED! (sphere12, sphere20, sphere36 or sphere64)")
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        block = BaseBlock

        self.layer1 = MakeLayer(block, filter_list[0], filter_list[1], layers[0], stride=2)
        self.layer2 = MakeLayer(block, filter_list[1], filter_list[2], layers[1], stride=2)
        self.layer3 = MakeLayer(block, filter_list[2], filter_list[3], layers[2], stride=2)
        self.layer4 = MakeLayer(block, filter_list[3], filter_list[4], layers[3], stride=2)

        self.fc = fc_with_initialize(fc_size, feature_dim)
        self.last_bn = nn.BatchNorm1d(feature_dim, momentum=0.9).add_flags_recursive(fp32=True)
        self.last_bn_sub = nn.BatchNorm2d(feature_dim, momentum=0.9).add_flags_recursive(fp32=True)
        self.cast = P.Cast()
        self.l2norm = P.L2Normalize(axis=1)

        for _, cell in self.cells_and_names():
            if isinstance(cell, (nn.Conv2d, nn.Dense)):
                if cell.bias is not None:
                    cell.weight.set_data(initializer(me_init.ReidKaimingUniform(a=math.sqrt(5), mode='fan_out'),
                                                     cell.weight.shape))
                    cell.bias.set_data(initializer('zeros', cell.bias.shape))
                else:
                    cell.weight.set_data(initializer(me_init.ReidXavierUniform(), cell.weight.shape))
        self.device_target = context.get_context('device_target')
Example #8
 def __init__(self,
              temperature=0.07,
              contrast_mode='all',
              base_temperature=0.07):
     super(SupConLoss, self).__init__()
     self.temperature = temperature
     self.contrast_mode = contrast_mode
     self.base_temperature = base_temperature
     self.normalize = P.L2Normalize(axis=2)
     self.eye = P.Eye()
     self.unbind = P.Unstack(axis=1)
     self.cat = P.Concat(axis=0)
     self.matmul = P.MatMul()
     self.div = P.Div()
     self.transpose = P.Transpose()
     self.maxes = P.ArgMaxWithValue(axis=1, keep_dims=True)
     self.tile = P.Tile()
     self.scatter = P.ScatterNd()
     self.oneslike = P.OnesLike()
     self.exp = P.Exp()
     self.sum = P.ReduceSum(keep_dims=True)
     self.log = P.Log()
     self.reshape = P.Reshape()
     self.mean = P.ReduceMean()
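These operators assemble into a supervised-contrastive (SupCon-style) loss. The construct is omitted, but its similarity core reduces to the lines below (a simplified sketch: transpose_b stands in for the snippet's explicit Transpose and Div, and the Eye/ScatterNd mask building and log-sum-exp terms are left out):

import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

normalize = P.L2Normalize(axis=2)
unbind = P.Unstack(axis=1)
cat = P.Concat(axis=0)
matmul = P.MatMul(transpose_b=True)

features = Tensor(np.random.rand(4, 2, 16).astype(np.float32))  # (batch, n_views, dim)
features = normalize(features)              # unit norm along the feature dim
contrast = cat(unbind(features))            # (8, 16): every view of every sample
logits = matmul(contrast, contrast) / 0.07  # pairwise cosine / temperature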
Example #9
 def __init__(self, args):
     super(CombineMarginFC, self).__init__()
     weight_shape = [args.num_classes, args.emb_size]
     weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
     self.weight = Parameter(weight_init, name='weight')
     self.m = args.margin_m
     self.s = args.margin_s
     self.a = args.margin_a
     self.b = args.margin_b
     self.m_const = Tensor(self.m, dtype=mstype.float16)
     self.a_const = Tensor(self.a, dtype=mstype.float16)
     self.b_const = Tensor(self.b, dtype=mstype.float16)
     self.s_const = Tensor(self.s, dtype=mstype.float16)
     self.m_const_zero = Tensor(0, dtype=mstype.float16)
     self.a_const_one = Tensor(1, dtype=mstype.float16)
     self.normalize = P.L2Normalize(axis=1)
     self.fc = P.MatMul(transpose_b=True)
     self.onehot = P.OneHot()
     self.transpose = P.Transpose()
     self.acos = P.ACos()
     self.cos = P.Cos()
     self.cast = P.Cast()
     self.on_value = Tensor(1.0, mstype.float32)
     self.off_value = Tensor(0.0, mstype.float32)
Example #10
 def __init__(self):
     super(L2NormalizeNet, self).__init__()
     self.l2_normalize = P.L2Normalize()
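Note that P.L2Normalize() defaults to axis=0 (and epsilon=1e-4), so on a 2-D input it normalizes columns, not rows. A runnable completion of this minimal net (the construct method and usage lines are assumed):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

class L2NormalizeNet(nn.Cell):
    def __init__(self):
        super(L2NormalizeNet, self).__init__()
        self.l2_normalize = P.L2Normalize()  # default axis=0, epsilon=1e-4

    def construct(self, x):
        return self.l2_normalize(x)

x = Tensor(np.random.rand(3, 5).astype(np.float32))
print(L2NormalizeNet()(x).asnumpy())  # each column now has unit L2 norm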
Example #11
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32.0,
                 has_bias=True,
                 activation=None):
        super(Dense_ThorNoBN, self).__init__()
        self.batch_size = batch_size
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
                    weight_init.shape()[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A_inv = Parameter(Tensor(
            np.zeros([128, 128, 16, 16]).astype(np.float16)),
                                      name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(
            np.zeros([63, 63, 16, 16]).astype(np.float16)),
                                      name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16))

        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  name="cov_step",
                                  requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.vector_matmul = P.CusBatchMatMul()
        self.pad = P.Pad(((0, 24), (0, 24)))
        self.pad1 = P.Pad(((0, 8), (0, 8)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.assignadd = P.AssignAdd()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32),
                                   name="A_inv_max",
                                   requires_grad=False)
        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32),
                                   name="G_inv_max",
                                   requires_grad=False)
        self.fused_abs_max1 = P.CusFusedAbsMax1([1000, 1000])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.dampingA = Tensor(np.identity(2048), mstype.float32)
        self.dampingG = Tensor(np.identity(1024), mstype.float32)
        self.add = P.TensorAdd()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.fake_G_inv_max = Tensor(np.zeros([1]).astype(np.float32))

        self.norm = P.L2Normalize(axis=1)
        self.multiplier = Parameter(Tensor(np.ones([1]), dtype=mstype.float16),
                                    requires_grad=True,
                                    name='multiplier')
Example #12
 def __init__(self, axis=0, epsilon=1e-4):
     super(Net, self).__init__()
     self.norm = P.L2Normalize(axis=axis, epsilon=epsilon)
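Per the operator's documented formula, output = x / sqrt(max(sum(x^2, axis), epsilon)), so epsilon only matters for near-zero slices. A quick NumPy cross-check of that formula (the check itself is ours, not part of the original test):

import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)
x = np.random.rand(4, 8).astype(np.float32)
out = P.L2Normalize(axis=1, epsilon=1e-4)(Tensor(x)).asnumpy()
ref = x / np.sqrt(np.maximum(np.sum(x * x, axis=1, keepdims=True), 1e-4))
print(np.allclose(out, ref, rtol=1e-3))  # expected: True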
Example #13
 def __init__(self, axis=0, epsilon=1e-4, strategy0=None, strategy1=None):
     super(L2normalize, self).__init__()
     self.add = P.TensorAdd(strategy=strategy0)
     self.l2norm = P.L2Normalize(axis, epsilon, strategy1)
Example #14
 def __init__(self, strategy1, strategy2, strategy3):
     super().__init__()
     self.norm1 = P.L2Normalize(axis=0).shard(strategy1)
     self.norm2 = P.L2Normalize(axis=0).shard(strategy1)
     self.mul1 = P.Mul().shard(strategy2)
     self.mul2 = P.Mul().shard(strategy3)
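shard is the newer name for the set_strategy API used in examples #13 and #16; both take one strategy tuple per operator input. A hypothetical test harness for this net (device count and strategies are illustrative assumptions; the axis being normalized, here axis 0, must not be split):

from mindspore import context

context.set_auto_parallel_context(parallel_mode="semi_auto_parallel",
                                  device_num=8)
strategy1 = ((1, 8),)          # L2Normalize: one input; keep axis 0 whole
strategy2 = ((1, 8), (1, 8))   # Mul: two inputs
strategy3 = ((1, 8), (1, 8))
net = Net(strategy1, strategy2, strategy3)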
Example #15
 def __init__(self):
     super().__init__()
     self.norm1 = P.L2Normalize()
     self.norm2 = P.L2Normalize()
     self.mul1 = P.Mul()
     self.mul2 = P.Mul()
Example #16
    def __init__(self, args, strategy):
        super(SemiAutoOneHotNet, self).__init__()
        self.a = args.a
        self.b = args.b
        self.c = args.c
        self.d = args.d
        self.e = args.e
        self.cast = P.Cast()
        self.cast.set_strategy(strategy=strategy.twod_strategy)
        self.cast1 = P.Cast()
        self.cast1.set_strategy(strategy=strategy.twod_strategy)
        self.cast2 = P.Cast()
        self.cast2.set_strategy(strategy=strategy.twod_strategy)
        self.cast3 = P.Cast()
        self.cast3.set_strategy(strategy=strategy.scalar_strategy)
        self.cast4 = P.Cast()
        self.cast4.set_strategy(strategy=strategy.scalar_strategy)
        self.a_const = Tensor(self.a, dtype=mstype.float32)
        self.b_const = Tensor(self.b, dtype=mstype.float32)
        self.c_const = Tensor(self.c, dtype=mstype.float32)
        self.d_const = Tensor(self.d, dtype=mstype.float32)
        self.e_const = Tensor(self.e, dtype=mstype.float32)
        self.m_const_zero = Tensor(0, dtype=mstype.float32)
        self.a_const_one = Tensor(1, dtype=mstype.float32)
        self.onehot = P.OneHot()
        self.onehot.set_strategy(strategy=strategy.onehot_strategy)
        self.exp = P.Exp()
        self.exp.set_strategy(strategy=strategy.twod_strategy)
        self.exp2 = P.Exp()
        self.exp2.set_strategy(strategy=strategy.twod_strategy)
        self.exp3 = P.Exp()
        self.exp3.set_strategy(strategy=strategy.twod_strategy)
        self.mul_const = P.Mul()
        self.mul_const.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const2 = P.TensorAdd()
        self.mul_const2.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const3 = P.Sub()
        self.mul_const3.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul_const4 = P.Sub()
        self.mul_const4.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const5 = P.Mul()
        self.mul_const5.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul = P.Mul()
        self.mul.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul2 = P.Mul()
        self.mul2.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul3 = P.TensorAdd()
        self.mul3.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul4 = P.Sub()
        self.mul4.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul5 = P.RealDiv()
        self.mul5.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul6 = P.Mul()
        self.mul6.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul7 = P.Mul()
        self.mul7.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul8 = P.RealDiv()
        self.mul8.set_strategy(strategy=strategy.scalar_scalar_strategy)
        self.mul9 = P.TensorAdd()
        self.mul9.set_strategy(strategy=strategy.twod_scalar_strategy)

        self.reduce_max = P.ReduceMax(keep_dims=True)
        self.reduce_max.set_strategy(strategy=strategy.twod_strategy)

        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reduce_sum.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_2 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_2.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_3 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_3.set_strategy(strategy=strategy.oned_strategy)

        self.reshape = P.Reshape()
        self.log = P.Log()
        self.log.set_strategy(strategy=strategy.twod_strategy)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.normalize = P.L2Normalize(axis=1)
        self.normalize.set_strategy(strategy=strategy.twod_strategy_m)
        self.normalize2 = P.L2Normalize(axis=1)
        self.normalize2.set_strategy(strategy=strategy.twod_strategy_m)
        self.fc = P.MatMul(transpose_b=True)
        self.fc.set_strategy(strategy=strategy.twodbc_twod_strategy)
        weight_shape = [args.num_classes, args.emb_size]
        weight_np = np.zeros(weight_shape, np.float32)
        self.weight = Parameter(Tensor(weight_np),
                                name='model_parallel_weight')
Example #17
 ('Sigmoid', {
     'block': P.Sigmoid(),
     'desc_inputs': [[4, 128, 1024]],
     'desc_bprop': [[4, 128, 1024]]}),
 ('Softmax', {
     'block': P.Softmax(),
     'desc_inputs': [[1, 16]],
     'desc_bprop': [[1, 16]],
     'skip': ['backward']}),  # check backward error
 ('Softmax', {
     'block': P.Softmax(axis=(0, 1)),
     'desc_inputs': [[1, 16]],
     'desc_bprop': [[1, 16]],
     'skip': ['backward']}),
 ('L2Normalize', {
     'block': P.L2Normalize(),
     'desc_inputs': [[4, 128, 1024]],
     'desc_bprop': [[4, 128, 1024]]}),
 ('ReLU', {
     'block': P.ReLU(),
     'desc_inputs': [[64, 64, 112, 112]],
     'desc_bprop': [[64, 64, 112, 112]]}),
 ('SeqConvBnRelu', {
     'block': SeqConvBnRelu(3, 64),
     'desc_inputs': [[64, 3, 112, 112]],
     'desc_bprop': [[64, 64, 112, 112]]}),
 ('PReluCell', {
     'block': nn.PReLU(1, [np.float32(0.25)]),
     'desc_inputs': [[128, 64, 112, 112]],
     'desc_bprop': [[128, 64, 112, 112]]}),
 ('PRelu', {