def __init__(self):
    super(DiGamma, self).__init__()
    # constants for the Lanczos approximation of log-gamma (g = 7, eight coefficients)
    self.k_lanczos_gamma = 7
    self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
    self.k_lanczos_coefficients = [676.520368121885098567009190444019,
                                   -1259.13921672240287047156078755283,
                                   771.3234287776530788486528258894,
                                   -176.61502916214059906584551354,
                                   12.507343278686904814458936853,
                                   -0.13857109526572011689554707,
                                   9.984369578019570859563e-6,
                                   1.50563273514931155834e-7]
    self.nan = np.nan
    self.pi = np.pi
    self.lanczos_gamma_plus_one_half = self.k_lanczos_gamma + 0.5
    self.log_lanczos_gamma_plus_one_half = np.log(self.lanczos_gamma_plus_one_half)
    # operations
    self.log1p = P.Log1p()
    self.abs = P.Abs()
    self.shape = P.Shape()
    self.dtype = P.DType()
    self.fill = P.Fill()
    self.floor = P.Floor()
    self.equal = P.Equal()
    self.less = P.Less()
    self.select = P.Select()
    self.sin = P.Sin()
    self.cos = P.Cos()
    self.logicaland = P.LogicalAnd()
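# The constants above are the standard g = 7, eight-term Lanczos series for
# log-gamma; digamma is its derivative, and the Sin/Cos/pi ops point at the
# reflection formula psi(x) = psi(1 - x) - pi*cot(pi*x) for arguments below 0.5.
# A minimal NumPy sketch of the series part only (digamma_lanczos is a
# hypothetical name, not the cell's actual construct()):
import numpy as np

def digamma_lanczos(x, g=7.0):
    # psi(x) = d/dx log Gamma(x), with
    # log Gamma(x) ~ 0.5*log(2*pi) + (x - 0.5)*log(t) - t + log(A(x)),
    # where t = x + g - 0.5 and A(x) = c0 + sum_k c_k / (x + k - 1)
    c0 = 0.99999999999980993227684700473478
    coeffs = [676.520368121885098567009190444019,
              -1259.13921672240287047156078755283,
              771.3234287776530788486528258894,
              -176.61502916214059906584551354,
              12.507343278686904814458936853,
              -0.13857109526572011689554707,
              9.984369578019570859563e-6,
              1.50563273514931155834e-7]
    a, da = c0, 0.0
    for k, c in enumerate(coeffs, start=1):
        a += c / (x + k - 1.0)
        da -= c / (x + k - 1.0) ** 2
    t = x + g - 0.5
    # differentiating term by term: A'/A + log(t) + (x - 0.5)/t - 1
    return np.log(t) + (x - 0.5) / t - 1.0 + da / a

# e.g. digamma_lanczos(1.0) ~ -0.5772156649, minus the Euler-Mascheroni constant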
def test_cos():
    x_np = np.random.rand(2, 3, 4, 4).astype(np.float32)
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    output_ms = P.Cos()(Tensor(x_np))
    output_np = np.cos(x_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
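# The test above only checks the forward pass against np.cos. A matching
# backward check could verify d/dx cos(x) = -sin(x) via GradOperation. A sketch
# under that assumption (NetCosFwd, GradNet and test_cos_grad are hypothetical;
# GradOperation wrapping a Cell is the canonical pattern):
import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor, context
from mindspore.ops import composite as C

class NetCosFwd(nn.Cell):
    def __init__(self):
        super(NetCosFwd, self).__init__()
        self.cos = P.Cos()

    def construct(self, x):
        return self.cos(x)

class GradNet(nn.Cell):
    def __init__(self, net):
        super(GradNet, self).__init__()
        self.net = net
        self.grad = C.GradOperation()

    def construct(self, x):
        # gradient w.r.t. the first input, with a default all-ones sens vector
        return self.grad(self.net)(x)

def test_cos_grad():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    x_np = np.random.rand(2, 3, 4, 4).astype(np.float32)
    dx = GradNet(NetCosFwd())(Tensor(x_np))
    assert np.allclose(dx.asnumpy(), -np.sin(x_np), atol=1e-5)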
def __init__(self, embbeding_size=128, classnum=270762, s=32, a=1.0, m=0.3, b=0.2):
    super(CombineMarginFCFp16, self).__init__()
    weight_shape = [classnum, embbeding_size]
    weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
    self.weight = Parameter(weight_init, name='weight')
    self.m = m
    self.s = s
    self.a = a
    self.b = b
    self.m_const = Tensor(self.m, dtype=mstype.float16)
    self.a_const = Tensor(self.a, dtype=mstype.float16)
    self.b_const = Tensor(self.b, dtype=mstype.float16)
    self.s_const = Tensor(self.s, dtype=mstype.float16)
    self.m_const_zero = Tensor(0, dtype=mstype.float16)
    self.a_const_one = Tensor(1, dtype=mstype.float16)
    self.normalize = P.L2Normalize(axis=1)
    self.fc = P.MatMul(transpose_b=True)
    self.onehot = P.OneHot()
    self.transpose = P.Transpose()
    self.acos = P.ACos()
    self.cos = P.Cos()
    self.cast = P.Cast()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
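# The L2Normalize/MatMul/ACos/Cos/OneHot combination suggests a combined-margin
# softmax head in the ArcFace/CosFace family: normalized features against
# normalized weights yield cos(theta), and the target-class logit becomes
# s * (cos(a*theta + m) - b). A hedged NumPy sketch of that transform
# (combined_margin is hypothetical; the class's actual construct() is not shown):
import numpy as np

def combined_margin(cos_theta, labels, s=32.0, a=1.0, m=0.3, b=0.2):
    # cos_theta: [batch, classnum], from L2-normalized features x weights
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    onehot = np.eye(cos_theta.shape[1])[labels]
    margin_cos = np.cos(a * theta + m) - b        # margin applied on the angle
    logits = np.where(onehot > 0, margin_cos, cos_theta)
    return s * logits                             # scale before softmax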
def __init__(self, weight_angle=10):
    super(LossFunc, self).__init__()
    self.split = P.Split(1, 5)
    self.min = P.Minimum()
    self.log = P.Log()
    self.cos = P.Cos()
    self.mean = P.ReduceMean()
    # self.flatten = P.Flatten()
    self.sum = P.ReduceSum()
    self.weight_angle = weight_angle
    self.max = P.Maximum()
    self.print = P.Print()
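# Split(1, 5) together with Minimum/Log/Cos is characteristic of an EAST-style
# text-detection geometry loss: four distance channels feed an IoU term and the
# fifth (angle) channel a 1 - cos(delta_theta) term weighted by weight_angle.
# A hedged NumPy sketch of that geometry part only (names are illustrative; the
# Maximum/Print ops hint at further terms, e.g. a classification loss, not shown):
import numpy as np

def east_geo_loss(gt_geo, pred_geo, weight_angle=10):
    # each geo map: [batch, 5, H, W] = (d_top, d_bottom, d_left, d_right, angle)
    d1_gt, d2_gt, d3_gt, d4_gt, ang_gt = np.split(gt_geo, 5, axis=1)
    d1_p, d2_p, d3_p, d4_p, ang_p = np.split(pred_geo, 5, axis=1)
    area_gt = (d1_gt + d2_gt) * (d3_gt + d4_gt)
    area_p = (d1_p + d2_p) * (d3_p + d4_p)
    w_inter = np.minimum(d3_gt, d3_p) + np.minimum(d4_gt, d4_p)
    h_inter = np.minimum(d1_gt, d1_p) + np.minimum(d2_gt, d2_p)
    inter = w_inter * h_inter
    union = area_gt + area_p - inter
    iou_loss = -np.log((inter + 1.0) / (union + 1.0))   # smoothed -log IoU
    angle_loss = 1.0 - np.cos(ang_p - ang_gt)           # rotation mismatch
    return np.mean(iou_loss + weight_angle * angle_loss)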
def __init__(self, in_channel, out_channel, axis, input_shape, mul_size,
             test_size, prelu_size, transpose_b, matmul_size, num_class):
    super().__init__()
    mul_np = np.full(mul_size, 0.5, dtype=np.float32)
    self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
    bias_np = np.full((12,), 7.1, dtype=np.float32)
    self.bias = Parameter(Tensor(bias_np), name="bias")
    prelu_np = np.full(prelu_size, 0.8, dtype=np.float32)
    self.prelu_weight = Parameter(Tensor(prelu_np), name="prelu_weight")
    matmul_np = np.full(matmul_size, 1.1, dtype=np.float32)
    self.matmul_weight = Parameter(Tensor(matmul_np), name="matmul_weight")
    self.mul = P.Mul()
    self.conv = Conv2d(in_channels=in_channel, out_channels=out_channel,
                       kernel_size=5, has_bias=True, weight_init='ones',
                       bias_init='ones', pad_mode='valid')
    self.scalar = 0.5
    self.parameter = Parameter(initializer(0.5, test_size, dtype=mstype.float32),
                               name='parameter')
    self.tensor = Tensor(np.full(test_size, 0.05, dtype=np.float32))
    self.softmax = Softmax(axis=axis)
    self.relu = ReLU()
    self.relu.relu.add_prim_attr("primitive_target", "CPU")
    self.reshape = P.Reshape()
    self.input_shape = input_shape
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.concat = P.Concat(axis=1)
    self.reduce_sum = P.ReduceSum()
    self.bias_add = P.BiasAdd()
    self.cos = P.Cos()
    self.prelu = P.PReLU()
    self.matmul = P.MatMul(transpose_b=transpose_b)
    self.l2norm = P.L2Normalize(axis=(1 - axis))
    self.tensoradd = P.TensorAdd()
    self.strided_slice = P.StridedSlice()
    self.dense = Dense(in_channels=6, out_channels=num_class,
                       weight_init='ones', bias_init='ones', has_bias=True)
def __init__(self, args):
    super(CombineMarginFC, self).__init__()
    weight_shape = [args.num_classes, args.emb_size]
    weight_init = initializer(me_init.ReidXavierUniform(), weight_shape)
    self.weight = Parameter(weight_init, name='weight')
    self.m = args.margin_m
    self.s = args.margin_s
    self.a = args.margin_a
    self.b = args.margin_b
    self.m_const = Tensor(self.m, dtype=mstype.float16)
    self.a_const = Tensor(self.a, dtype=mstype.float16)
    self.b_const = Tensor(self.b, dtype=mstype.float16)
    self.s_const = Tensor(self.s, dtype=mstype.float16)
    self.m_const_zero = Tensor(0, dtype=mstype.float16)
    self.a_const_one = Tensor(1, dtype=mstype.float16)
    self.normalize = P.L2Normalize(axis=1)
    self.fc = P.MatMul(transpose_b=True)
    self.onehot = P.OneHot()
    self.transpose = P.Transpose()
    self.acos = P.ACos()
    self.cos = P.Cos()
    self.cast = P.Cast()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.0, mstype.float32)
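# This variant reads its margins from a parsed-args object rather than keyword
# defaults. A hedged construction sketch: SimpleNamespace stands in for the
# argparse result, and the field values mirror the Fp16 variant's defaults
# (assumptions, not values taken from this file):
from types import SimpleNamespace

args = SimpleNamespace(num_classes=270762, emb_size=128,          # assumed
                       margin_m=0.3, margin_s=32.0,               # assumed
                       margin_a=1.0, margin_b=0.2)                # assumed
head = CombineMarginFC(args)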
    'desc_inputs': [[64, 64, 112, 112]],
    'desc_bprop': [[64, 64, 112, 112]]}),
('SeqConvBnRelu', {
    'block': SeqConvBnRelu(3, 64),
    'desc_inputs': [[64, 3, 112, 112]],
    'desc_bprop': [[64, 64, 112, 112]]}),
('PReluCell', {
    'block': nn.PReLU(1, [np.float32(0.25)]),
    'desc_inputs': [[128, 64, 112, 112]],
    'desc_bprop': [[128, 64, 112, 112]]}),
('PRelu', {
    'block': P.PReLU(),
    'desc_inputs': [[128, 64, 112, 112], [64,]],
    'desc_bprop': [[128, 64, 112, 112]]}),
('Cos', {
    'block': P.Cos(),
    'desc_inputs': [[8, 16]],
    'desc_bprop': [[8, 16]]}),
('ACos', {
    'block': P.ACos(),
    'desc_inputs': [[8, 16]],
    'desc_bprop': [[8, 16]]}),
('Exp', {
    'block': P.Exp(),
    'desc_inputs': [[256, 8]],
    'desc_bprop': [[256, 8]]}),
('Pow', {
    'block': P.Pow(),  # a scalar input made the plugin segfault
    'desc_const': [2.0],
    'desc_inputs': [[1, 512]],
    'desc_bprop': [[1, 512]]}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
    'skip': ['backward']}),
# dims is not 1
('NPUClearFloatStatus2', {
    'block': (P.NPUClearFloatStatus(), {'exception': ValueError, 'error_keywords': ['NPUClearFloatStatus']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']}),
# shape[0] is not 8
('NPUClearFloatStatus3', {
    'block': (P.NPUClearFloatStatus(), {'exception': ValueError, 'error_keywords': ['NPUClearFloatStatus']}),
    'desc_inputs': [Tensor(np.ones([3]).astype(np.float32))],
    'skip': ['backward']}),
# input is not a tensor
('Cos0', {
    'block': (P.Cos(), {'exception': TypeError, 'error_keywords': ['Cos']}),
    'desc_inputs': [5.0],
    'skip': ['backward']}),
# input is Tensor(bool)
('Cos1', {
    'block': (P.Cos(), {'exception': TypeError, 'error_keywords': ['Cos']}),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))],
    'skip': ['backward']}),
# input is not a tensor
('ACos0', {
    'block': (P.ACos(), {'exception': TypeError, 'error_keywords': ['ACos']}),
    'desc_inputs': [5.0],
    'skip': ['backward']}),
# input is Tensor(bool)
('ACos1', {
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],
    'skip': ['backward']
}),
# shape[0] is not 8
('NPUClearFloatStatus3', {
    'block': (P.NPUClearFloatStatus(), {
        'exception': ValueError,
        'error_keywords': ['NPUClearFloatStatus']
    }),
    'desc_inputs': [Tensor(np.ones([3]).astype(np.float32))],
    'skip': ['backward']
}),
# input is not a tensor
('Cos0', {
    'block': (P.Cos(), {
        'exception': TypeError,
        'error_keywords': ['Cos']
    }),
    'desc_inputs': [5.0],
    'skip': ['backward']
}),
# input is Tensor(bool)
('Cos1', {
    'block': (P.Cos(), {
        'exception': TypeError,
        'error_keywords': ['Cos']
    }),
    'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))],
    'skip': ['backward']
}),
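# The Cos0/Cos1 descriptors above encode two rejection paths: a non-tensor
# input and a bool tensor, both expected to raise TypeError mentioning 'Cos'.
# A direct-check sketch of the same cases, assuming PyNative execution surfaces
# the validator's TypeError the way the harness expects:
import numpy as np
import pytest
import mindspore.ops.operations as P
from mindspore import Tensor

def test_cos_invalid_inputs():
    with pytest.raises(TypeError):
        P.Cos()(5.0)                                      # not a tensor
    with pytest.raises(TypeError):
        P.Cos()(Tensor(np.ones([3, 4]).astype(np.bool_)))  # bool dtype rejected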
def __init__(self):
    super(NetCos, self).__init__()
    self.cos = P.Cos()
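# The fragment above only registers the primitive; a single-op wrapper Cell
# normally pairs it with a construct() that applies it. A minimal hedged
# completion plus usage (the construct body is the standard test-wrapper
# pattern, assumed rather than taken from this file):
import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor

class NetCos(nn.Cell):
    def __init__(self):
        super(NetCos, self).__init__()
        self.cos = P.Cos()

    def construct(self, x):
        # elementwise cosine, same shape and dtype as the input
        return self.cos(x)

net = NetCos()
x = Tensor(np.array([0.0, np.pi / 3], dtype=np.float32))
print(net(x))  # ~ [1.0, 0.5]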