def setUp(self):
    """Configure a 'transfer_dtype' op test casting BF16 input to FP32.

    The input is random small integers stored as uint16 (the raw storage
    format PaddlePaddle uses for bfloat16); the expected output is the
    same payload decoded to float32 via convert_uint16_to_float.
    """
    # np.random.randint already returns an ndarray, so the original
    # np.array(...) wrapper was a redundant copy and has been dropped.
    ipt = np.random.randint(10, size=[10, 10]).astype('uint16')
    self.inputs = {'X': ipt}
    self.outputs = {'Out': convert_uint16_to_float(ipt)}
    self.attrs = {
        'out_dtype': int(core.VarDesc.VarType.FP32),
        'in_dtype': int(core.VarDesc.VarType.BF16)
    }
    self.op_type = 'transfer_dtype'
def verify_output(self, outs):
    """Assert the op output's histogram matches the expected probabilities.

    BF16 results arrive as raw uint16 and are decoded to float before the
    histogram comparison; other dtypes are used as-is.
    """
    out_arr = np.array(outs[0])
    # Decode bfloat16 (stored as uint16) to float32 before binning.
    if out_arr.dtype == np.uint16:
        out_arr = convert_uint16_to_float(out_arr)
    hist, prob = self.output_hist(out_arr)
    self.assertTrue(
        np.allclose(hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
def setUp(self):
    """Configure a 'cast' op test casting BF16 input to FP32.

    The input is random small integers stored as uint16 (bfloat16 raw
    storage); the expected output is that payload decoded to float32 via
    convert_uint16_to_float. Gradient checking is disabled for this case.
    """
    # np.random.randint already returns an ndarray, so the original
    # np.array(...) wrapper was a redundant copy and has been dropped.
    ipt = np.random.randint(10, size=[10, 10]).astype('uint16')
    self.inputs = {'X': ipt}
    self.outputs = {'Out': convert_uint16_to_float(ipt)}
    self.attrs = {
        'in_dtype': int(core.VarDesc.VarType.BF16),
        'out_dtype': int(core.VarDesc.VarType.FP32)
    }
    self.op_type = 'cast'
    # Skip the gradient check for this dtype pair.
    self.__class__.no_need_check_grad = True
def check_with_place(self, place):
    """Run uniform_random (BF16 output) into a SelectedRows variable and
    verify the result's shape and distribution on the given place.

    The decoded float output's histogram is compared against the expected
    per-bin probabilities with a 0.01 absolute tolerance.
    """
    scope = core.Scope()
    out = scope.var("X").get_selected_rows()
    # Fixed seeds keep the draw deterministic across runs.
    paddle.seed(10)
    uniform_op = Operator(
        "uniform_random",
        Out="X",
        shape=[1000, 784],
        min=-5.0,
        max=10.0,
        seed=10,
        dtype=int(core.VarDesc.VarType.BF16))
    uniform_op.run(scope, place)
    tensor = out.get_tensor()
    self.assertEqual(tensor.shape(), [1000, 784])
    # BF16 values are stored as uint16; decode before histogramming.
    decoded = convert_uint16_to_float(np.array(tensor))
    hist, prob = output_hist(decoded)
    self.assertTrue(
        np.allclose(hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
def test_check_output(self):
    """Run uniform_random on CPU with its shape supplied via a ShapeTensor
    input and verify the BF16 result's shape and distribution.

    Mirrors the direct-attribute variant but exercises the ShapeTensor
    code path: [1000, 784] is passed through an int64 tensor in scope.
    """
    place = core.CPUPlace()
    scope = core.Scope()
    out = scope.var("X").get_selected_rows()
    # Provide the output shape as a tensor input rather than an attribute.
    shape_tensor = scope.var("Shape").get_tensor()
    shape_tensor.set(np.array([1000, 784]).astype("int64"), place)
    # Fixed seeds keep the draw deterministic across runs.
    paddle.seed(10)
    uniform_op = Operator(
        "uniform_random",
        ShapeTensor="Shape",
        Out="X",
        min=-5.0,
        max=10.0,
        seed=10,
        dtype=int(core.VarDesc.VarType.BF16))
    uniform_op.run(scope, place)
    tensor = out.get_tensor()
    self.assertEqual(tensor.shape(), [1000, 784])
    # BF16 values are stored as uint16; decode before histogramming.
    decoded = convert_uint16_to_float(np.array(tensor))
    hist, prob = output_hist(decoded)
    self.assertTrue(
        np.allclose(hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))