def test_nobroadcast_fp16():
    """Check GPU broadcast-free fp16 binary ops against their NumPy equivalents.

    Runs each MindSpore elementwise primitive on two identically-shaped
    (10, 20) float16 tensors and asserts the result matches NumPy, then
    checks the DivNoNan zero-divisor contract (result is 0, not inf/nan).
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    np.random.seed(42)
    x1_np = np.random.rand(10, 20).astype(np.float16)
    x2_np = np.random.rand(10, 20).astype(np.float16)

    # (MindSpore primitive, NumPy reference) pairs; every pair must agree.
    op_pairs = [
        (P.Minimum(), np.minimum),
        (P.Maximum(), np.maximum),
        (P.Greater(), np.greater),
        (P.Less(), np.less),
        (P.Pow(), np.power),
        (P.RealDiv(), np.true_divide),
        (P.Mul(), np.multiply),
        (P.Sub(), np.subtract),
        (P.DivNoNan(), np.true_divide),
        (P.Mod(), np.fmod),
        (P.FloorMod(), np.mod),
    ]
    for ms_op, np_op in op_pairs:
        output_ms = ms_op(Tensor(x1_np), Tensor(x2_np))
        expected = np_op(x1_np, x2_np)
        assert np.allclose(output_ms.asnumpy(), expected)

    # DivNoNan must return 0 (not inf/nan) wherever the divisor is 0.
    x2_np_zero = np.zeros_like(x2_np)
    output_ms = P.DivNoNan()(Tensor(x1_np), Tensor(x2_np_zero))
    assert np.allclose(output_ms.asnumpy(), x2_np_zero)
# NOTE(review): interior of an op unit-test config list — the enclosing list
# and the entry this first line closes both open before this view.
# Each entry is (name, {'block': primitive, 'desc_inputs': ..., ...});
# presumably consumed by the shared test-exec pipeline — confirm against caller.
'skip': ['backward']}),
('RealDiv', {
    'block': P.RealDiv(),
    'desc_inputs': [[4], Tensor(np.ones(4).astype(np.float32))],
    'desc_bprop': [[4]]}),
('RealDiv_1', {
    'block': P.RealDiv(),
    'desc_inputs': [[512, 1024], [512, 1024]],
    'desc_bprop': [[512, 1024]]}),
# FloorDiv/FloorMod: forward-only fp16 checks (backward skipped).
('FloorDiv', {
    'block': P.FloorDiv(),
    'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
                    Tensor(np.random.rand(4).astype(np.float16))],
    'skip': ['backward']}),
('FloorMod', {
    'block': P.FloorMod(),
    'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
                    Tensor(np.random.rand(4).astype(np.float16))],
    'skip': ['backward']}),
('identity', {
    'block': ops.functional.identity,
    'desc_inputs': [[2, 2]],
    'skip': ['backward']}),
# MatMul with both transpose flag combinations; bprop shape is the product.
('MatMul_1', {
    'block': P.MatMul(transpose_a=False, transpose_b=False),
    'desc_inputs': [[1024, 160], [160, 1024]],
    'desc_bprop': [[1024, 1024]]}),
('MatMul_2', {
    'block': P.MatMul(transpose_a=True, transpose_b=True),
    'desc_inputs': [[160, 1024], [1024, 160]],
    'desc_bprop': [[1024, 1024]]}),
# input two tensors, their shapes do not match ('FloorDiv2', { 'block': (P.FloorDiv(), {'exception': ValueError, 'error_keywords': ['FloorDiv']}), 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # input x is Tensor(int32), not Tensor(float) ('Floor1', { 'block': (P.Floor(), {'exception': TypeError, 'error_keywords': ['Floor']}), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))], 'skip': ['backward']}), # one input is scalar, and another is Tensor(float32) ('FloorMod0', { 'block': (P.FloorMod(), {'exception': TypeError, 'error_keywords': ['FloorMod']}), 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # input two tensors, but element types are not same ('FloorMod1', { 'block': (P.FloorMod(), {'exception': TypeError, 'error_keywords': ['FloorMod']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # input two tensors, their shapes do not match ('FFloorMod2', { 'block': (P.FloorMod(), {'exception': ValueError, 'error_keywords': ['FloorMod']}), 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # input x is Tensor(int32), not Tensor(float) ('Acosh1', {
'skip': ['backward'] }), # input x is Tensor(int32), not Tensor(float) ('Floor1', { 'block': (P.Floor(), { 'exception': TypeError, 'error_keywords': ['Floor'] }), 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))], 'skip': ['backward'] }), # input two tensors, their shapes do not match ('FFloorMod2', { 'block': (P.FloorMod(), { 'exception': ValueError, 'error_keywords': ['FloorMod'] }), 'desc_inputs': [ Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32)) ], 'skip': ['backward'] }), # input x is Tensor(int32), not Tensor(float) ('Acosh1', { 'block': (P.Acosh(), { 'exception': TypeError, 'error_keywords': ['Acosh']
def __init__(self, strategy1, strategy2):
    """Build a two-op sub-network: a sharded MatMul feeding a sharded FloorMod.

    Args:
        strategy1: shard (parallel-split) strategy applied to the MatMul
            primitive via ``Primitive.shard``.
        strategy2: shard strategy applied to the FloorMod primitive.
    """
    # NOTE(review): enclosing class is outside this view — presumably a
    # mindspore Cell used in an auto-parallel test; confirm against the file.
    super().__init__()
    self.matmul = P.MatMul().shard(strategy1)
    self.floormod = P.FloorMod().shard(strategy2)
# NOTE(review): interior of an op unit-test config list — the first lines are
# the body of an entry whose name opens before this view, and the final
# 'MatMul_1' entry is cut off at the bottom; both edges left untouched.
'block': P.RealDiv(),
'desc_inputs': [[512, 1024], [512, 1024]],
'desc_bprop': [[512, 1024]]
}),
# FloorDiv/FloorMod: forward-only fp16 checks (backward skipped).
('FloorDiv', {
    'block': P.FloorDiv(),
    'desc_inputs': [
        Tensor(np.random.rand(4).astype(np.float16)),
        Tensor(np.random.rand(4).astype(np.float16))
    ],
    'skip': ['backward']
}),
('FloorMod', {
    'block': P.FloorMod(),
    'desc_inputs': [
        Tensor(np.random.rand(4).astype(np.float16)),
        Tensor(np.random.rand(4).astype(np.float16))
    ],
    'skip': ['backward']
}),
('identity', {
    'block': ops.functional.identity,
    'desc_inputs': [[2, 2]],
    'skip': ['backward']
}),
('MatMul_1', {
    'block': P.MatMul(transpose_a=False, transpose_b=False),
    'desc_inputs': [[1024, 160], [160, 1024]],
    'desc_bprop': [[1024, 1024]]