def set_inputs(self, x, y):
     self.inputs = {
         'X': convert_float_to_uint16(x),
         'Y': convert_float_to_uint16(y)
     }
     self.x_fp32 = x
     self.y_fp32 = y
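These snippets are drawn from PaddlePaddle's bfloat16 (oneDNN) operator tests. convert_float_to_uint16 maps a float32 ndarray to its bfloat16 bit pattern stored as uint16, so the kernels under test receive bf16 data while the Python reference math stays in float32. A rough sketch of that mapping is below; the helper name is made up for illustration, and Paddle's real utility also accepts a data_format argument (used in the conv2d_transpose example further down) and packs elements individually.

import numpy as np

def float32_to_bf16_bits(x):
    # Hedged sketch: reinterpret the float32 bits as uint32 and keep the high
    # 16 bits, which is the bfloat16 pattern obtained by truncation.
    x = np.ascontiguousarray(x, dtype=np.float32)
    return (x.view(np.uint32) >> 16).astype(np.uint16)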
Example #2
 def init_test_data(self):
     self.x0 = convert_float_to_uint16(
         np.random.random(self.x0_shape).astype(np.float32))
     self.x1 = convert_float_to_uint16(
         np.random.random(self.x1_shape).astype(np.float32))
     self.x2 = convert_float_to_uint16(
         np.random.random(self.x2_shape).astype(np.float32))
Example #3
    def setUp(self):
        self.input_type = np.uint16
        self.dtype = np.uint16
        self.mkldnn_data_type = "bfloat16"
        self.init_op_type()
        self.init_test_case()

        input = np.random.random(self.input_size).astype(np.float32)
        filter = np.random.random(self.filter_size).astype(np.float32)

        self.attrs = {
            'strides': self.stride,
            'paddings': self.pad,
            'padding_algorithm': self.padding_algorithm,
            'groups': self.groups,
            'dilations': self.dilations,
            'is_test': self.is_test,
            'use_mkldnn': self.use_mkldnn,
            'mkldnn_data_type': self.mkldnn_data_type,
            'force_fp32_output': self.force_fp32_output,
            'data_format': self.data_format,
            'fuse_activation': self.fuse_activation,
            'fuse_alpha': self.fuse_alpha,
            'fuse_beta': self.fuse_beta
        }
        if self.output_size is not None:
            self.attrs['output_size'] = self.output_size

        if len(self.output_padding) > 0:
            self.attrs['output_padding'] = self.output_padding

        output = conv2dtranspose_forward_naive(input, filter,
                                               self.attrs).astype(np.float32)

        if self.input_type is not np.float32:
            input = convert_float_to_uint16(input)

        self.inputs = {
            'Input': input.view(self.input_type),
            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
        }

        if self.fuse_bias and self.bias_size is not None:
            bias = np.random.random(self.bias_size).astype(np.float32)
            output = conv2d_bias_naive(output, bias)
            output = output.astype(np.float32)
            self.attrs['fuse_bias'] = self.fuse_bias
            self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias)

        if self.fuse_activation == "relu":
            output = np.maximum(output, 0).astype(np.float32)
        output = output.astype(np.float32)

        if not self.force_fp32_output:
            output = convert_float_to_uint16(output, self.attrs['data_format'])

        self.outputs = {'Output': output}
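The conv2d_transpose setUp above is the most involved of these examples: it builds the oneDNN attrs, computes a float32 naive reference, and converts it to bf16 unless force_fp32_output is set. The output check that usually accompanies such a setUp is pinned to the CPU, because the bf16 oneDNN kernels only run there; a minimal sketch (the real test may pass extra tolerance or check_dygraph arguments):

import paddle.fluid.core as core

def test_check_output(self):
    # bf16 oneDNN kernels are CPU-only, so compare against the CPU place.
    self.check_output_with_place(core.CPUPlace())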
Example #4
    def setUp(self):
        self.op_type = "gelu"
        self.dtype = np.uint16

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
        out = convert_float_to_uint16(gelu(x, True))

        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True, "approximate": True}
Example #5
 def setUp(self):
     self.op_type = "scale"
     self.scale = -2.3
     self.x_fp32 = np.random.random((10, 10)).astype(np.float32)
     self.x_bf16 = convert_float_to_uint16(self.x_fp32)
     self.scale_tensor = np.array([self.scale]).astype(np.float32)
     self.inputs = {
         'X': self.x_bf16,
         'ScaleTensor': convert_float_to_uint16(self.scale_tensor)
     }
     self.attrs = {'use_mkldnn': True}
     self.outputs = {'Out': self.x_fp32 * self.scale}
Example #6
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.x_bf16 = convert_float_to_uint16(self.x)
        self.y_bf16 = convert_float_to_uint16(self.y)
        self.inputs = {'X': self.x_bf16, 'Y': self.y_bf16}
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
Example #7
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.use_mkldnn = True
        self.mkldnn_data_type = "bfloat16"
        self.axis = -1

        self.generate_data()
        self.x_bf16 = convert_float_to_uint16(self.x)
        self.y_bf16 = convert_float_to_uint16(self.y)
        self.inputs = {'X': self.x_bf16, 'Y': self.y_bf16}
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
Example #8
 def setUp(self):
     self.op_type = "sum"
     self.init_kernel_type()
     x0 = np.random.random((3, 40)).astype(np.float32)
     x1 = np.random.random((3, 40)).astype(np.float32)
     x2 = np.random.random((3, 40)).astype(np.float32)
     y = x0 + x1 + x2
     self.inputs = {
         "X": [("x0", convert_float_to_uint16(x0)),
               ("x1", convert_float_to_uint16(x1)),
               ("x2", convert_float_to_uint16(x2))]
     }
     self.outputs = {'Out': convert_float_to_uint16(y)}
Example #9
    def setUp(self):
        super(TestPoolBf16MklDNNOpGrad, self).setUp()
        self.attrs['mkldnn_data_type'] = "bfloat16"
        self.x_fp32 = np.random.random(self.shape).astype(np.float32)

        output = self.pool2D_forward_naive(self.x_fp32, self.ksize,
                                           self.strides, self.paddings,
                                           self.global_pool, self.ceil_mode,
                                           self.exclusive, self.adaptive,
                                           "float32").astype(np.float32)

        self.inputs = {'X': convert_float_to_uint16(self.x_fp32)}
        self.outputs = {'Out': convert_float_to_uint16(output)}
Example #10
    def setUp(self):
        TestPool2D_Op_Mixin.setUp(self)
        self.dtype = np.uint16

        input = np.random.random(self.shape).astype(np.float32)
        output = (self.pool2D_forward_naive(input, self.ksize, self.strides,
                                            self.paddings, self.global_pool,
                                            self.ceil_mode, self.exclusive,
                                            self.adaptive,
                                            "float32")).astype(np.float32)

        self.inputs = {'X': convert_float_to_uint16(input)}
        self.outputs = {'Out': convert_float_to_uint16(output)}
Example #11
    def setUp(self):
        self.op_type = 'log_softmax'
        self.python_api = F.log_softmax
        self.dtype = np.uint16
        self.shape = [2, 3, 4, 5]
        self.axis = -1

        x = np.random.uniform(0.1, 1., self.shape).astype(np.float32)
        out = np.apply_along_axis(ref_log_softmax, self.axis, x)
        self.x_grad = ref_log_softmax_grad(x, self.axis)

        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': convert_float_to_uint16(out)}
        self.attrs = {'axis': self.axis}
Example #12
    def setUp(self):
        self.op_type = "softmax"
        self.use_mkldnn = True
        self.dtype = np.uint16
        self.init_kernel_type()
        self.shape = self.get_x_shape()
        self.axis = self.get_axis()

        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = convert_float_to_uint16(
            np.apply_along_axis(stable_softmax, self.axis, x))

        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': out}
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
Example #13
    def create_dense_param_var(self, scope, place, height, width):
        param_tensor = scope.var('Param').get_tensor()
        param_array = np.random.random((height, width)).astype('float32')
        param_array_bf16 = convert_float_to_uint16(param_array)
        param_tensor.set(param_array_bf16, place)

        return param_tensor, param_array
Example #14
 def setUp(self):
     self.op_type = "scale"
     self.scale = 1.2
     self.x_fp32 = np.random.random((9, 13)).astype(np.float32)
     self.x_bf16 = convert_float_to_uint16(self.x_fp32)
     self.scale_tensor = np.array([self.scale]).astype(np.float32)
     self.inputs = {
         'X': self.x_bf16,
         'ScaleTensor': convert_float_to_uint16(self.scale_tensor)
     }
     self.attrs = {
         'bias': -1.1,
         'bias_after_scale': False,
         'use_mkldnn': True
     }
     self.outputs = {'Out': (self.x_fp32 + self.attrs['bias']) * self.scale}
Example #15
 def test_check_grad(self):
     self.calculate_grads()
     self.check_grad_with_place(
         core.CPUPlace(), ["X", "Y"],
         "Out",
         user_defined_grads=[self.dx, self.dy],
         user_defined_grad_outputs=[convert_float_to_uint16(self.dout)])
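Gradient checks for bf16 kernels cannot rely on numeric differentiation, so tests like the one above pass user_defined_grads computed analytically in float32 and convert only the upstream gradient to bf16. As an illustration only, a calculate_grads for an elementwise-multiply-style op might look like the sketch below; self.x, self.y and self.out follow the naming of the surrounding examples and are assumptions here, and real tests additionally reduce the gradients when broadcasting is involved.

def calculate_grads(self):
    # Hedged sketch, not Paddle's actual helper: take the fp32 forward result
    # as the upstream gradient and apply the product rule in float32.
    self.dout = self.out          # surrogate dL/dOut, kept in float32
    self.dx = self.dout * self.y  # d(x*y)/dx = y
    self.dy = self.dout * self.x  # d(x*y)/dy = x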
Example #16
    def setUp(self):
        self.op_type = "matmul"
        self.use_mkldnn = True
        self.dtype = np.uint16
        self.mkldnn_data_type = "bfloat16"
        self.force_fp32_output = False
        self.generate_data()
        self.set_attributes()

        if not self.force_fp32_output:
            self.out = convert_float_to_uint16(self.out)
        self.outputs = {'Out': self.out}

        self.x = convert_float_to_uint16(self.x)
        self.y = convert_float_to_uint16(self.y)
        self.inputs = {'X': self.x, 'Y': self.y}
Example #17
    def create_dense_lr_var(self, scope, place):
        lr_tensor = scope.var('LearningRate').get_tensor()
        lr_value = np.random.uniform()
        lr_array = np.full((1), lr_value, np.float32)
        lr_array_bf16 = convert_float_to_uint16(lr_array)
        lr_tensor.set(lr_array_bf16, place)

        return lr_tensor, lr_value
Example #18
 def test_check_grad(self):
     self.calculate_grads()
     self.check_grad_with_place(
         core.CPUPlace(), ["X"],
         "Out",
         check_dygraph=False,
         user_defined_grads=[self.dx],
         user_defined_grad_outputs=[convert_float_to_uint16(self.out)])
Example #19
 def test_check_grad_ingore_y(self):
     self.calculate_grads()
     self.check_grad_with_place(
         core.CPUPlace(), ['X'],
         'Out',
         set('Y'),
         user_defined_grads=[self.dx],
         user_defined_grad_outputs=[convert_float_to_uint16(self.dout)])
Example #20
 def setUp(self):
     self.init_test()
     self.ids = np.random.randint(low=0, high=15,
                                  size=self.ids_shape).astype("int64")
     self.flat_ids = self.ids.flatten()
     self.w_fp32 = np.random.random((15, 32)).astype("float32")
     self.w_bf16 = convert_float_to_uint16(self.w_fp32)
     self.scope = core.Scope()
     self.place = core.CPUPlace()
Example #21
    def setUp(self):
        self.dtype = np.uint16
        self.init_data()
        self.config()
        self.out = self.op_forward(self.x)

        self.inputs = {'X': convert_float_to_uint16(self.x)}
        self.outputs = {'Out': self.out}
        self.set_attrs()
Example #22
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.multiply(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X':
            OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.x)),
            'Y':
            OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.y))
        }
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
Example #23
    def setUp(self):
        self.op_type = 'sgd'
        self.dtype = np.uint16
        self.use_mkldnn = True
        self.conf()
        w = np.random.random((self.h, self.w)).astype('float32')
        w_bf16 = convert_float_to_uint16(w)
        g = np.random.random((self.h, self.w)).astype('float32')
        g_bf16 = convert_float_to_uint16(g)
        lr = np.array([0.1]).astype('float32')
        lr_bf16 = convert_float_to_uint16(lr)

        self.inputs = {
            'Param': w_bf16,
            'Grad': g_bf16,
            'LearningRate': lr_bf16
        }
        self.outputs = {'ParamOut': w - lr * g}
        self.attrs = {'use_mkldnn': self.use_mkldnn}
Example #24
 def setUp(self):
     self.op_type = "scale"
     self.x_fp32 = np.random.random((10, 10)).astype(np.float32)
     self.x_bf16 = convert_float_to_uint16(self.x_fp32)
     self.scale = -2.3
     self.inputs = {'X': self.x_bf16}
     self.attrs = {'scale': self.scale, 'use_mkldnn': True, 'bias': 0.4}
     self.use_mkldnn = True
     self.outputs = {
         'Out': (self.x_fp32 * self.attrs['scale']) + self.attrs['bias']
     }
Example #25
    def create_sparse_grad_var(self, scope, place, height, rows, row_numel):
        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        grad_array = np.random.random((len(rows), row_numel)).astype('float32')
        np_array_bf16 = convert_float_to_uint16(grad_array)

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array_bf16, place)

        return grad_tensor, grad_array
Example #26
    def test_check_grad(self):
        dout = self.conv_output_float
        x = self.inputs_fp32['Input']
        w = self.inputs_fp32['Filter']

        dx, dweights = conv_backward(dout, x, w, self.conv2d_param)

        self.check_grad_with_place(
            core.CPUPlace(), ["Input", "Filter"],
            "Output",
            user_defined_grads=[dx, dweights],
            user_defined_grad_outputs=[convert_float_to_uint16(dout)])
Example #27
    def create_sparse_param_var(self, scope, place, height, rows, row_numel):
        param_selected_rows = scope.var('Param').get_selected_rows()
        param_selected_rows.set_height(height)
        param_selected_rows.set_rows(rows)
        param_selected_rows.sync_index()
        param_array = np.random.random(
            (len(rows), row_numel)).astype('float32')
        np_array_bf16 = convert_float_to_uint16(param_array)

        param_tensor = param_selected_rows.get_tensor()
        param_tensor.set(np_array_bf16, place)

        return param_tensor, param_array
Example #28
    def setUp(self):
        self.op_type = "sum"
        self.use_mkldnn = True
        self.mkldnn_data_type = "bfloat16"

        # float32 inputs to be used for reference
        x0 = np.random.random((25, 8)).astype('float32')
        x1 = np.random.random((25, 8)).astype('float32')
        x2 = np.random.random((25, 8)).astype('float32')

        # actual input (bf16) to bf16 sum op
        x0_bf16 = convert_float_to_uint16(x0)
        x1_bf16 = convert_float_to_uint16(x1)
        x2_bf16 = convert_float_to_uint16(x2)

        self.inputs = {
            "X": [("x0", x0_bf16), ("x1", x1_bf16), ("x2", x2_bf16)]
        }

        y = x0 + x1 + x2
        self.outputs = {'Out': convert_float_to_uint16(y)}
        self.attrs = {'use_mkldnn': self.use_mkldnn}
Example #29
    def setUp(self):
        self.op_type = "fc"
        self.use_mkldnn = True
        self.mkldnn_data_type = "bfloat16"
        self.force_fp32_output = False
        self.generate_data()

        self.output = fully_connected_naive(self.matrix.input,
                                            self.matrix.weights, self.bias)
        if not self.force_fp32_output:
            self.output = convert_float_to_uint16(self.output)

        self.inputs = {
            'Input': convert_float_to_uint16(self.matrix.input),
            'W': self.matrix.weights,
            'Bias': self.bias
        }

        self.attrs = {
            'use_mkldnn': self.use_mkldnn,
            'force_fp32_output': self.force_fp32_output
        }

        self.outputs = {'Out': self.output}
Example #30
    def setUp(self):
        self.init_test()
        self.dtype = np.uint16

        table = np.random.random((17, 31)).astype("float32")
        self.ids = np.random.randint(0, 17, self.ids_shape).astype("int64")
        self.flat_ids = self.ids.flatten()

        self.w_bf16 = convert_float_to_uint16(table)
        self.out_bf16 = _lookup(self.w_bf16, self.ids, self.flat_ids,
                                self.op_type)
        self.out_fp32 = _lookup(table, self.ids, self.flat_ids, self.op_type)
        self.w_grad_fp32 = _get_grad(table, self.ids, self.flat_ids,
                                     self.op_type)

        self.inputs = {'W': self.w_bf16, 'Ids': self.ids}
        self.outputs = {'Out': self.out_fp32}
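When inspecting bf16 results produced by these tests, the inverse mapping is handy (Paddle's test utilities provide a convert_uint16_to_float counterpart for this purpose). A minimal sketch, assuming the plain-truncation layout shown earlier:

import numpy as np

def bf16_bits_to_float32(u):
    # Widen the bf16 bit pattern back to float32; the low 16 bits become zero.
    u = np.ascontiguousarray(u, dtype=np.uint16)
    return (u.astype(np.uint32) << 16).view(np.float32)

# Truncation to bf16 loses at most about 2**-7 of relative precision, which is
# why these tests compare against float32 references with loose tolerances.
x = np.random.random((3, 4)).astype(np.float32)
x_bf16 = (x.view(np.uint32) >> 16).astype(np.uint16)
assert np.allclose(bf16_bits_to_float32(x_bf16), x, rtol=1e-2)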