def setUp(self):
    self.init_kernel_type()
    self.config()
    self.op_type = "matmul_v2"
    if self.is_bfloat16_op():
        x = np.random.random(self.x_shape).astype(np.float32)
        y = np.random.random(self.y_shape).astype(np.float32)
    else:
        x = np.random.random(self.x_shape).astype(self.dtype)
        y = np.random.random(self.y_shape).astype(self.dtype)
    # -0.1 ~ 0.1
    x = -0.1 + 0.2 * x
    y = -0.1 + 0.2 * y
    result = reference_matmul(x, y, self.trans_x, self.trans_y)
    if self.is_bfloat16_op():
        result = result.astype(np.float32)
        self.inputs = {
            'X': convert_float_to_uint16(x),
            'Y': convert_float_to_uint16(y),
        }
        self.inputs_fp32 = {'X': x, 'Y': y}
    else:
        result = result.astype(self.dtype)
        self.inputs = {'X': x, 'Y': y}
    self.attrs = {'trans_x': self.trans_x, 'trans_y': self.trans_y}
    self.outputs = {'Out': result}
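# The reference_matmul helper above is defined elsewhere in the test file.
# A rough, hypothetical sketch of its behavior for inputs with at least two
# dimensions (the real helper also special-cases 1-D vectors): swap the last
# two axes when a transpose flag is set, then defer to NumPy's matmul.
def reference_matmul_sketch(x, y, trans_x=False, trans_y=False):
    # Swapping the trailing axes mimics the op's trans_x / trans_y attrs.
    if trans_x:
        x = np.swapaxes(x, -1, -2)
    if trans_y:
        y = np.swapaxes(y, -1, -2)
    return np.matmul(x, y)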
def setUp(self): self.op_type = "scale" self.python_api = paddle.scale self.dtype = np.uint16 self.attrs = {'scale': -2.3} x = np.random.random((10, 10)).astype(np.float32) out = x * np.float32(self.attrs['scale']) self.inputs = {'X': convert_float_to_uint16(x)} self.outputs = {'Out': convert_float_to_uint16(out)}
def setUp(self): self.op_type = "squeeze" self.dtype = np.uint16 self.init_test_case() x = np.random.random(self.ori_shape).astype("float32") out = x.reshape(self.new_shape) self.inputs = {"X": convert_float_to_uint16(x)} self.init_attrs() self.outputs = {"Out": convert_float_to_uint16(out)}
def setUp(self): self.op_type = "slice" self.config() self.inputs = {'Input': convert_float_to_uint16(self.input)} self.outputs = {'Out': convert_float_to_uint16(self.out)} self.attrs = { 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, 'infer_flags': self.infer_flags }
def setUp(self): self.op_type = "dropout" self.dtype = np.uint16 x = np.random.random((32, 64)).astype("float32") self.inputs = {'X': convert_float_to_uint16(x)} self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': convert_float_to_uint16(np.zeros((32, 64)).astype('float32')), 'Mask': np.zeros((32, 64)).astype('uint8') }
def setUp(self):
    self._set_op_type()
    self.python_api = paddle.unbind
    self.dtype = self.get_dtype()
    self.axis = 0
    self.num = 3
    x = np.arange(12).reshape(3, 2, 2).astype(self.dtype)
    self.out = np.split(x, self.num, self.axis)
    self.inputs = {'X': convert_float_to_uint16(x)}
    self.attrs = {'axis': self.axis}
    self.outputs = {
        'Out': [('out%d' % i, convert_float_to_uint16(self.out[i]))
                for i in range(len(self.out))]
    }
def setUp(self): self.op_type = "elementwise_sub" self.dtype = np.uint16 x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32) y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32) out = x - y self.inputs = { 'X': convert_float_to_uint16(x), 'Y': convert_float_to_uint16(y) } self.outputs = {'Out': convert_float_to_uint16(out)}
def setUp(self):
    np.random.seed(100)
    self.python_api = paddle.sum
    self.op_type = "reduce_sum"
    self.dtype = np.uint16
    self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
    self.attrs = {'dim': [0, 1, 2]}
    self.out = self.x.sum(axis=tuple(self.attrs['dim']))
    self.gradient = self.calc_gradient()
    self.inputs = {'X': convert_float_to_uint16(self.x)}
    self.outputs = {'Out': convert_float_to_uint16(self.out)}
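# calc_gradient is provided by the test class. Since this case reduces over
# every axis, the analytic gradient of sum(x) w.r.t. each element is 1, so a
# sketch consistent with this setup (an assumption, not the verbatim helper):
def calc_gradient(self):
    # d(sum(x)) / dx_i = 1 for every element when reducing over all dims.
    return np.ones(self.x.shape, dtype=np.float32)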
def setUp(self):
    self.init_data()
    self.op_type = "reshape2"
    self.dtype = np.uint16
    x = np.random.random(self.ori_shape).astype("float32")
    out = x.reshape(self.infered_shape)
    self.inputs = {"X": convert_float_to_uint16(x)}
    self.attrs = {"shape": self.new_shape}
    self.outputs = {
        "Out": convert_float_to_uint16(out),
        'XShape': convert_float_to_uint16(
            np.random.random(self.ori_shape).astype("float32"))
    }
def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum self.dtype = np.uint16 # If x and y have the same value, the max() is not differentiable. # So we generate test data by the following method # to avoid them being too close to each other. x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32) sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32) y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float32) self.inputs = { 'X': convert_float_to_uint16(x), 'Y': convert_float_to_uint16(y) } self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
def setUp(self): self.op_type = "elementwise_div" self.python_api = paddle.divide self.dtype = np.uint16 x = np.random.uniform(0.1, 1, [12, 13]).astype(np.float32) y = np.random.uniform(0.1, 1, [12, 13]).astype(np.float32) out = np.divide(x, y) self.inputs = { 'X': convert_float_to_uint16(x), 'Y': convert_float_to_uint16(y) } self.outputs = {'Out': convert_float_to_uint16(out)}
def setUp(self):
    self.init_op_type()
    self.initTestCase()
    self.dtype = np.uint16
    x = np.random.random(self.shape).astype("float32")
    self.inputs = {'X': convert_float_to_uint16(x)}
    self.attrs = {
        'axis': list(self.axis),
        'use_mkldnn': self.use_mkldnn,
    }
    self.outputs = {
        'XShape': convert_float_to_uint16(
            np.random.random(self.shape).astype("float32")),
        'Out': self.inputs['X'].transpose(self.axis)
    }
def setUp(self):
    self._set_op_type()
    self.dtype = self.get_dtype()
    axis = 1
    if self.dtype == np.uint16:
        x = np.random.random((4, 5, 6)).astype(np.float32)
        out = np.split(x, [2, 3], axis)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {
            'Out': [('out%d' % i, convert_float_to_uint16(out[i]))
                    for i in range(len(out))]
        }
    else:
        x = np.random.random((4, 5, 6)).astype(self.dtype)
        out = np.split(x, [2, 3], axis)
        self.inputs = {'X': x}
        self.outputs = {
            'Out': [('out%d' % i, out[i]) for i in range(len(out))]
        }
    self.attrs = {'axis': axis, 'sections': [2, 1, 2]}
def setUp(self): self.op_type = "p_norm" self.python_api = p_norm_python_api self.init_test_case() self.x = (np.random.random(self.shape) + 0.5).astype(np.float32) self.norm = p_norm(self.x, self.axis, self.porder, self.keepdim, self.asvector) self.gradient = self.calc_gradient() self.inputs = {'X': convert_float_to_uint16(self.x)} self.attrs = { 'epsilon': self.epsilon, 'axis': self.axis, 'keepdim': self.keepdim, 'porder': float(self.porder), 'asvector': self.asvector } self.outputs = {'Out': convert_float_to_uint16(self.norm)}
def setUp(self):
    ipt = np.random.random(size=[10, 10]).astype('float32')
    self.inputs = {'X': ipt}
    self.outputs = {'Out': convert_float_to_uint16(ipt)}
    self.attrs = {
        'out_dtype': int(core.VarDesc.VarType.BF16),
        'in_dtype': int(core.VarDesc.VarType.FP32)
    }
    self.op_type = 'transfer_dtype'
def setUp(self):
    ipt = np.random.random(size=[10, 10]).astype('float32')
    self.inputs = {'X': ipt}
    self.outputs = {'Out': convert_float_to_uint16(ipt)}
    self.attrs = {
        'in_dtype': int(core.VarDesc.VarType.FP32),
        'out_dtype': int(core.VarDesc.VarType.BF16)
    }
    self.op_type = 'cast'
    self.__class__.no_need_check_grad = True
def setUp(self):
    self.initDefaultParameters()
    self.initParameters()
    self.op_type = 'stack'
    self.python_api = paddle.stack
    self.x = []
    for i in range(self.num_inputs):
        self.x.append(
            np.random.random(size=self.input_dim).astype(np.float32))
    out = np.stack(self.x, axis=self.axis)
    tmp = []
    x_names = self.get_x_names()
    for i in range(self.num_inputs):
        tmp.append((x_names[i], convert_float_to_uint16(self.x[i])))
    self.inputs = {'X': tmp}
    self.outputs = {'Y': convert_float_to_uint16(out)}
    self.attrs = {'axis': self.axis}
def setUp(self): self.op_type = "fill_any_like" self.dtype = np.uint16 self.value = 0.0 self.inputs = {'X': np.random.random((219, 232)).astype(np.float32)} self.attrs = {'value': self.value, 'dtype': core.VarDesc.VarType.BF16} self.outputs = { 'Out': convert_float_to_uint16(self.value * np.ones_like(self.inputs["X"])) }
def setUp(self): '''Test fill_constant op with specified value ''' self.op_type = "fill_constant" self.dtype = np.uint16 self.inputs = {} self.attrs = { 'shape': [123, 92], 'value': 3.8, 'dtype': core.VarDesc.VarType.BF16 } self.outputs = {'Out': convert_float_to_uint16(np.full((123, 92), 3.8))}
def set_data(self): self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")} self.attrs = {'value': self.value, 'dtype': self.index} self.outputs = {'Out': np.full(self.shape, self.value)} if self.index == 22: self.outputs = { 'Out': np.full( self.shape, convert_float_to_uint16( np.array([self.value]).astype("float32"))) }
def setUp(self): self.op_type = "softmax" self.use_cudnn = self.init_cudnn() self.use_mkldnn = False self.dtype = np.uint16 self.shape = [10, 10] self.axis = -1 np.random.seed(0) x = np.random.uniform(0.1, 1, self.shape).astype(np.float32) out = np.apply_along_axis(stable_softmax, self.axis, x) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x)) } self.outputs = {'Out': convert_float_to_uint16(out)} self.attrs = { 'axis': self.axis, 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn }
def setUp(self): '''Test fill_constant op with specified value ''' self.op_type = "fill_constant" self.init_data() self.inputs = { "ShapeTensor": np.array(self.shape).astype("int32"), 'ValueTensor': convert_float_to_uint16(np.array([self.value]).astype("float32")) } self.attrs = {'value': self.value, 'dtype': core.VarDesc.VarType.BF16} self.outputs = {'Out': np.full(self.shape, self.value)}
def setUp(self): self.op_type = "gather" self.python_api = paddle.gather self.dtype = np.uint16 self.config() xnp = np.random.random(self.x_shape).astype(np.float32) axis_np = np.array(self.axis).astype(self.axis_type) index_np = np.array(self.index).astype(self.index_type) self.inputs = { 'X': convert_float_to_uint16(xnp), 'Index': index_np, 'Axis': axis_np } out = gather_numpy(self.inputs['X'], index_np, axis_np[0]) self.outputs = {'Out': out}
def set_data(self):
    shape_tensor_list = []
    for index, ele in enumerate(self.shape):
        shape_tensor_list.append(
            ("x" + str(index), np.ones((1)).astype('int32') * ele))
    self.inputs = {"ShapeTensorList": shape_tensor_list}
    self.attrs = {
        'shape': self.infer_shape,
        'dtype': self.index,
        'value': self.value
    }
    self.outputs = {'Out': np.full(self.shape, self.value)}
    if self.index == 22:
        # As above, dtype index 22 is VarDesc.VarType.BF16.
        self.outputs = {
            'Out': np.full(
                self.shape,
                convert_float_to_uint16(
                    np.array([self.value]).astype("float32")))
        }
def _random(shape):
    if self.dtype == "bfloat16":
        data = np.random.random(shape).astype("float32")
        return convert_float_to_uint16(data)
    else:
        return np.random.random(shape).astype(self.dtype)
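# Every case above funnels data through convert_float_to_uint16 from Paddle's
# OpTest helpers. bfloat16 is the upper 16 bits of an IEEE-754 float32, so a
# minimal sketch of the conversion (an illustration, not Paddle's exact
# implementation, which also handles NHWC layouts) is a view-and-shift:
import numpy as np

def convert_float_to_uint16_sketch(float_array):
    # Reinterpret fp32 bits as uint32, drop the low 16 mantissa bits, and
    # keep the high half as the uint16 carrier for bf16.
    bits = np.asarray(float_array, dtype=np.float32).view(np.uint32)
    return np.right_shift(bits, 16).astype(np.uint16)

# Example: 1.0 in fp32 is 0x3F800000, so its bf16 carrier is 0x3F80 (16256).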