def __init__(self):
    """Set up the Lanczos-approximation constants and primitive ops for digamma.

    All values are stored as attributes so the (not shown here) construct
    method can reuse them without re-allocating per call.
    """
    super().__init__()
    # Lanczos approximation parameters: g = 7 with an 8-term coefficient series.
    self.k_lanczos_gamma = 7
    self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
    self.k_lanczos_coefficients = [
        676.520368121885098567009190444019,
        -1259.13921672240287047156078755283,
        771.3234287776530788486528258894,
        -176.61502916214059906584551354,
        12.507343278686904814458936853,
        -0.13857109526572011689554707,
        9.984369578019570859563e-6,
        1.50563273514931155834e-7,
    ]
    self.nan = np.nan
    self.pi = np.pi
    # Precompute g + 1/2 and its log once, since both are loop invariants.
    self.lanczos_gamma_plus_one_half = self.k_lanczos_gamma + 0.5
    self.log_lanczos_gamma_plus_one_half = np.log(self.lanczos_gamma_plus_one_half)
    # Primitive operations instantiated once and reused.
    self.log1p = P.Log1p()
    self.abs = P.Abs()
    self.shape = P.Shape()
    self.dtype = P.DType()
    self.fill = P.Fill()
    self.floor = P.Floor()
    self.equal = P.Equal()
    self.less = P.Less()
    self.select = P.Select()
    self.sin = P.Sin()
    self.cos = P.Cos()
    self.logicaland = P.LogicalAnd()
def test_sin():
    """Check that the Sin primitive on GPU (PyNative mode) matches numpy.sin."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    data = np.random.rand(2, 3, 4, 4).astype(np.float32)
    expected = np.sin(data)
    actual = P.Sin()(Tensor(data))
    assert np.allclose(actual.asnumpy(), expected)
def __init__(self):
    """Declare the primitives/layers this network applies in its forward pass."""
    super().__init__()
    self.sin = P.Sin()
    self.sqrt = P.Sqrt()
    self.relu = nn.ReLU()
'desc_bprop': [Tensor(np.array([-2, -1, 0, 1, 2]).astype(np.float16))]}), ('Floor', { 'block': P.Floor(), 'desc_inputs': [[2, 512, 56, 56]], 'desc_bprop': [[2, 512, 56, 56]], 'skip': ['backward']}), ('ACos', { 'block': P.ACos(), 'desc_inputs': [[2, 3]], 'desc_bprop': [[2, 3]]}), ('Acosh', { 'block': P.Acosh(), 'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16))], 'skip': ['backward']}), ('Sin', { 'block': P.Sin(), 'desc_inputs': [[2, 3]], 'desc_bprop': [[2, 3]]}), ('Reciprocal', { 'block': P.Reciprocal(), 'desc_inputs': [[2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), ('Minimum_0', { 'block': P.Minimum(), 'desc_inputs': [[2, 3, 3, 5], [3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), ('Maximum', { 'block': P.Maximum(), 'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), ('Maximum_0', {
def __init__(self, strategy1, strategy2):
    """Build the net, sharding both MatMuls with strategy1 and Sin with strategy2."""
    super().__init__()
    self.matmul = P.MatMul().set_strategy(strategy1)
    self.matmul2 = P.MatMul().set_strategy(strategy1)
    self.sin = P.Sin().set_strategy(strategy2)
'skip': ['backward']}), # input is not tensor ('ACos0', { 'block': (P.ACos(), {'exception': TypeError, 'error_keywords': ['ACos']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is Tensor(bool) ('ACos1', { 'block': (P.ACos(), {'exception': TypeError, 'error_keywords': ['ACos']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], 'skip': ['backward']}), # input is not tensor ('Sin0', { 'block': (P.Sin(), {'exception': TypeError, 'error_keywords': ['Sin']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is Tensor(bool) ('Sin1', { 'block': (P.Sin(), {'exception': TypeError, 'error_keywords': ['Sin']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], 'skip': ['backward']}), # input is not tensor ('NMSWithMask0', { 'block': (P.NMSWithMask(), {'exception': TypeError, 'error_keywords': ['NMSWithMask']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is not Tensor(float16) or Tensor(float32) ('NMSWithMask1', {
'desc_inputs': [5.0], 'skip': ['backward'] }), # input is Tensor(bool) ('ACos1', { 'block': (P.ACos(), { 'exception': TypeError, 'error_keywords': ['ACos'] }), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], 'skip': ['backward'] }), # input is not tensor ('Sin0', { 'block': (P.Sin(), { 'exception': TypeError, 'error_keywords': ['Sin'] }), 'desc_inputs': [5.0], 'skip': ['backward'] }), # input is Tensor(bool) ('Sin1', { 'block': (P.Sin(), { 'exception': TypeError, 'error_keywords': ['Sin'] }), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], 'skip': ['backward'] }),
def __init__(self):
    """Store the Sin primitive as an attribute for later use."""
    super().__init__()
    self.sin = P.Sin()