    def setUp(self):
        # `typename`, `callback` and `op_type` are free variables bound by an
        # enclosing test-class factory (see the sketch below).
        self.set_mlu()
        self.place = paddle.MLUPlace(0)
        x = np.random.random(size=(10, 7)).astype(typename)
        y = np.random.random(size=(10, 7)).astype(typename)
        out = callback(x, y)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.op_type = op_type
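This setUp reads `typename`, `callback`, and `op_type` from an enclosing scope, so it only works inside a test-class factory. A minimal sketch of such a factory, assuming elementwise comparison ops; the names `create_test_class` and `Cls`, the import path, and the sample registrations are illustrative, not taken from the original:

import numpy as np
import paddle
from op_test import OpTest  # import path varies by Paddle version

def create_test_class(op_type, typename, callback):
    class Cls(OpTest):
        def set_mlu(self):
            self.__class__.use_mlu = True

        def setUp(self):
            self.set_mlu()
            self.place = paddle.MLUPlace(0)
            x = np.random.random(size=(10, 7)).astype(typename)
            y = np.random.random(size=(10, 7)).astype(typename)
            out = callback(x, y)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {'Out': out}
            self.op_type = op_type

        def test_output(self):
            self.check_output_with_place(self.place)

    cls_name = "TestCase_{}_{}".format(op_type, typename)
    Cls.__name__ = cls_name
    globals()[cls_name] = Cls

# Example registrations for two comparison ops:
create_test_class('less_than', 'float32', lambda a, b: a < b)
create_test_class('equal', 'int32', lambda a, b: a == b)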
    def setUp(self):
        self.set_mlu()
        self.op_type = "leaky_relu"
        self.place = paddle.MLUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)

        self.set_inputs()
        self.set_attrs()
        self.set_outputs()
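`set_inputs`, `set_attrs`, and `set_outputs` are overridable hooks that are not part of this excerpt. A minimal sketch of what they might provide for `leaky_relu` (the shape and the `alpha` value are assumptions, not from the original):

    def set_inputs(self):
        # Random input covering both the positive and the negative branch.
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': x}

    def set_attrs(self):
        self.attrs = {'alpha': 0.02}  # negative-slope coefficient

    def set_outputs(self):
        x = self.inputs['X']
        alpha = self.attrs['alpha']
        # leaky_relu(x) = x for x >= 0, alpha * x otherwise
        self.outputs = {'Out': np.where(x >= 0, x, alpha * x)}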
Example #3
    def _test(self, run_mlu=True):
        # `fluid` is the legacy `paddle.fluid` module and SEED is a
        # module-level constant, both set up at the top of the test file.
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        a_np = np.random.random(size=(32, 32)).astype('float32')
        b_np = np.random.random(size=(32, 32)).astype('float32')
        label_np = np.random.randint(2, size=(32, 1)).astype('int64')

        with paddle.static.program_guard(main_prog, startup_prog):
            a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
            b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
            label = paddle.static.data(name="label",
                                       shape=[32, 1],
                                       dtype='int64')

            c = paddle.multiply(a, b)

            fc_1 = fluid.layers.fc(input=c, size=128)
            fc_1_gelu = fluid.layers.gelu(fc_1)
            prediction = fluid.layers.fc(input=fc_1_gelu,
                                         size=2,
                                         act='softmax')

            cost = fluid.layers.cross_entropy(input=prediction, label=label)
            loss = fluid.layers.reduce_mean(cost)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)

        if run_mlu:
            place = paddle.MLUPlace(0)
        else:
            place = paddle.CPUPlace()

        exe = paddle.static.Executor(place)
        exe.run(startup_prog)

        print("Start run on {}".format(place))
        for epoch in range(100):

            pred_res, loss_res = exe.run(main_prog,
                                         feed={
                                             "a": a_np,
                                             "b": b_np,
                                             "label": label_np
                                         },
                                         fetch_list=[prediction, loss])
            if epoch % 10 == 0:
                print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
                    epoch, pred_res[0], loss_res))

        return pred_res, loss_res
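In Paddle's MLU tests a helper like this is usually driven by a method that runs the same program on CPU and on MLU and checks that both agree; a sketch of that pattern (method name and tolerances assumed):

    def test_mlu(self):
        cpu_pred, cpu_loss = self._test(run_mlu=False)
        mlu_pred, mlu_loss = self._test(run_mlu=True)

        # Both devices start from the same seed, so results should match
        # up to floating-point tolerance.
        self.assertTrue(np.allclose(mlu_pred, cpu_pred, atol=1e-3))
        self.assertTrue(np.allclose(mlu_loss, cpu_loss, atol=1e-3))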
    def setUp(self):
        self.op_type = "gather"
        self.place = paddle.MLUPlace(0)
        self.__class__.use_mlu = True
        self.python_api = paddle.gather
        self.config()
        xnp = np.random.random(self.x_shape).astype(self.x_type)
        self.inputs = {
            'X': xnp,
            'Index': np.array(self.index).astype(self.index_type)
        }
        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
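`config` supplies the shape, dtype, and index consumed above; a plausible minimal version (all values are assumptions, following the usual pattern of Paddle's gather tests):

    def config(self):
        self.x_shape = (10, 20)
        self.x_type = "float32"
        self.index = [1, 3, 5]      # rows gathered along axis 0
        self.index_type = "int32"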
    def setUp(self):
        self.set_mlu()
        self.place = paddle.MLUPlace(0)
        self.op_type = "mean"
        self.init_dtype()

        x = np.random.random([3, 200]).astype(self.dtype)
        self.inputs = {'X': x}

        self.attrs = {}
        np_out = np.mean(x)
        self.outputs = {'Out': np_out}
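`init_dtype` is another hook that subclasses override to vary the dtype under test; in the base case it can be as simple as (a sketch, assuming float32):

    def init_dtype(self):
        self.dtype = np.float32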
Example #6
    def setUp(self):
        self.set_mlu()
        self.op_type = "relu"
        self.place = paddle.MLUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.array([0.1, -0.1, -1.0]).astype(self.dtype)
        out = np.array([0.1, 0.0, 0.0]).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {}
        self.outputs = {'Out': out}
Example #7
    def setUp(self):
        self.set_mlu()
        self.op_type = "gelu"
        self.place = paddle.MLUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.uniform(1, 2, [3, 4]).astype(self.dtype)
        out = np_gelu(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {}
        self.outputs = {'Out': out}
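`np_gelu` is a NumPy reference for GELU that is not shown in this excerpt. The exact, erf-based formulation would look like this (a sketch assuming SciPy is available):

from scipy.special import erf

def np_gelu(x):
    # Exact GELU: 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))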
Example #8
    def setUp(self):
        self.set_mlu()
        self.init_dtype()
        self.op_type = "sum"
        self.place = paddle.MLUPlace(0)

        x0 = np.random.random((3, 3)).astype(self.dtype)

        self.inputs = {'X': [("x0", x0)]}
        y = x0
        self.outputs = {'Out': y}

        self.attrs = {'use_mkldnn': False}
Example #9
    def test_out2(self):
        with fluid.dygraph.guard(paddle.MLUPlace(0)):
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = fluid.dygraph.to_variable(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=[1, 2, 3], axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            # np.split takes split points, so sections [1, 2, 3] map to (1, 3).
            ex_x0, ex_x1, ex_x2 = np.split(input_1, (1, 3), axis=1)
        self.assertTrue(np.allclose(ex_x0, x0_out))
        self.assertTrue(np.allclose(ex_x1, x1_out))
        self.assertTrue(np.allclose(ex_x2, x2_out))
Example #10
    def setUp(self):
        self.op_type = "slice"
        self.__class__.use_mlu = True
        self.place = paddle.MLUPlace(0)
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }
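As with gather, `config` defines the slice under test; a representative sketch (values assumed, mirroring Paddle's stock slice tests):

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.axes = [0, 1, 2]
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.infer_flags = [1, 1, 1]
        # Expected output computed with plain NumPy slicing.
        self.out = self.input[1:3, 0:3, 2:4, :]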
    def test_static_api(self):
        paddle.enable_static()
        np_x = np.random.rand(2, 3, 4, 4).astype('float32')

        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(name="x",
                                   shape=[2, 3, 4, 4],
                                   dtype='float32')
            out = self.execute_api(x, start_axis=-2, stop_axis=-1)

        exe = paddle.static.Executor(place=paddle.MLUPlace(0))
        fetch_out = exe.run(main_prog, feed={"x": np_x}, fetch_list=[out])
        self.assertTrue((2, 3, 16) == fetch_out[0].shape)
Example #12
    def setUp(self):
        self.set_mlu()
        self.op_type = "relu6"
        self.place = paddle.MLUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        # Keep values away from 0, where relu6 has a kink and numeric
        # gradients are unreliable.
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}
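`ref_relu6` is the NumPy reference that clamps to [0, threshold]; a sketch:

def ref_relu6(x, threshold=6.0):
    # relu6(x) = min(max(x, 0), threshold)
    return np.minimum(np.maximum(x, 0), threshold).astype(x.dtype)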
    def setUp(self):
        self.set_mlu()
        self.op_type = "flatten_contiguous_range"
        self.place = paddle.MLUPlace(0)

        self.start_axis = 0
        self.stop_axis = -1
        self.dtype = np.float64
        self.init_test_case()
        self.inputs = {"X": np.random.random(self.in_shape).astype(self.dtype)}
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
            "XShape": np.random.random(self.in_shape).astype("float32")
        }
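`init_test_case` and `init_attrs` fill in the shape and the op attributes; a base-case sketch (the input shape is an assumption):

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        # start_axis=0, stop_axis=-1 flattens everything into one dimension.
        self.new_shape = (120,)

    def init_attrs(self):
        self.attrs = {
            'start_axis': self.start_axis,
            'stop_axis': self.stop_axis
        }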
    def test_static_out(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(name='x', shape=[2, 3, 4], dtype='float32')
            x_trans1 = paddle.transpose(x, perm=[1, 0, 2])
            x_trans2 = paddle.transpose(x, perm=(2, 1, 0))
            place = paddle.MLUPlace(0)
            exe = paddle.static.Executor(place)
            x_np = np.random.random([2, 3, 4]).astype("float32")
            result1, result2 = exe.run(feed={"x": x_np},
                                       fetch_list=[x_trans1, x_trans2])
            expected_result1 = np.transpose(x_np, [1, 0, 2])
            expected_result2 = np.transpose(x_np, (2, 1, 0))

            np.testing.assert_array_equal(result1, expected_result1)
            np.testing.assert_array_equal(result2, expected_result2)
Example #15
    def setUp(self):
        self.op_type = "softmax"
        self.place = paddle.MLUPlace(0)
        self.dtype = np.float32
        self.init_kernel_type()
        self.shape = self.get_x_shape()
        self.axis = self.get_axis()

        np.random.seed(0)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.apply_along_axis(stable_softmax, self.axis, x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {
            'axis': self.axis,
        }
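`stable_softmax` is the usual max-shifted NumPy reference; a sketch:

def stable_softmax(x):
    # Subtract the max before exponentiating for numerical stability.
    shiftx = x - np.max(x)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)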
Example #16
    def setUp(self):
        self.op_type = "slice"
        self.__class__.use_mlu = True
        self.place = paddle.MLUPlace(0)
        self.config()
        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int32")
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts' is omitted on purpose: it is fed via StartsTensor.
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }
Example #17
    def setUp(self):
        self.set_mlu()
        self.init_dtype()
        self.op_type = "sum"
        self.place = paddle.MLUPlace(0)

        x0 = np.random.random((3, 3)).astype(self.dtype)
        x1 = np.random.random((3, 3)).astype(self.dtype)
        x2 = np.random.random((3, 3)).astype(self.dtype)
        x3 = np.random.random((3, 3)).astype(self.dtype)
        self.inputs = {'X': [("x0", x0), ("x1", x1), ("x2", x2), ("x3", x3)]}
        # Simply computing `y = x0 + x1 + x2 + x3` in the input dtype would be
        # an unreliable reference: numpy's fp16 addition loses precision, so
        # `x0 + x1 + x2 + x3` can differ from `x3 + x2 + x1 + x0` when the
        # dtype is fp16. The inputs are therefore accumulated in fp32 and the
        # result cast back to the target dtype.
        y = (x0.astype(np.float32) + x1.astype(np.float32) +
             x2.astype(np.float32) + x3.astype(np.float32)).astype(self.dtype)
        self.outputs = {'Out': y}

        self.attrs = {'use_mkldnn': False}
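The precision issue described in the comment is easy to reproduce in isolation; a minimal standalone demonstration (illustrative, not part of the test):

import numpy as np

xs = [np.random.random((3, 3)).astype(np.float16) for _ in range(4)]
forward = xs[0] + xs[1] + xs[2] + xs[3]
backward = xs[3] + xs[2] + xs[1] + xs[0]

# fp16 rounds after every addition, so the two summation orders can
# disagree; this can print False for some random draws.
print(np.array_equal(forward, backward))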
Example #18
    def setUp(self):
        self.op_type = "slice"
        self.__class__.use_mlu = True
        self.place = paddle.MLUPlace(0)
        self.config()

        # Wrap each end value in a one-element int32 tensor for EndsTensorList.
        ends_tensor = []
        for index, ele in enumerate(self.ends):
            ends_tensor.append(
                ("y" + str(index), np.ones((1)).astype('int32') * ele))

        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(
                self.starts, dtype="int32"),
            'EndsTensorList': ends_tensor
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts' is omitted on purpose: it is fed via StartsTensor.
            'ends': self.ends_infer,
            'infer_flags': self.infer_flags
        }
    def test_Negative():
        # Nested helper: `x` comes from the enclosing test method's scope,
        # hence no `self` parameter.
        paddle.disable_static(paddle.MLUPlace(0))
        img = paddle.to_tensor(x)
        out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
        return out.numpy().shape
Example #20
    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.MLUPlace(0)
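Setting `use_mlu` on the class is how these tests signal Paddle's OpTest machinery to exercise the MLU device. A sketch of how a test class wires the helper in (class name and data are illustrative, reusing the relu example above):

class TestReluMLU(OpTest):
    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.MLUPlace(0)

    def setUp(self):
        self.set_mlu()
        self.op_type = "relu"
        x = np.array([0.1, -0.1, -1.0]).astype('float32')
        self.inputs = {'X': x}
        self.outputs = {'Out': np.maximum(x, 0)}

    def test_check_output(self):
        self.check_output_with_place(self.place)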
Example #21
    def setUp(self):
        self.place = paddle.MLUPlace(0)
        self.x_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32')
        self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np)
        self.executed_api()