Example #1
            output_np = bce_loss(input_np, label_np)

            self.inputs = {'X': input_np, 'Label': label_np}
            self.outputs = {'Out': output_np}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ['X'], 'Out')

        def init_test_case(self):
            self.shape = [10, 10]

    class TestBceLossOpCase1(TestBceLossOp):
        def init_test_case(self):
            self.shape = [2, 3, 4, 5]

    class TestBceLossOpCase2(TestBceLossOp):
        def init_test_case(self):
            self.shape = [2, 3, 20]


support_types = get_xpu_op_support_types('bce_loss')
for stype in support_types:
    create_test_class(globals(), XPUTestBceLossOp, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
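
These snippets are all cut off at the top; below is a minimal sketch of the shared scaffolding they assume, plus a hypothetical numpy reference for the bce_loss helper called in setUp (the import paths and the helper body are assumptions reconstructed from the visible calls, not shown in the original):

import unittest

import numpy as np
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (create_test_class,
                                     get_xpu_op_support_types,
                                     XPUOpTestWrapper)

# Hypothetical numpy reference for the bce_loss helper used above.
def bce_loss(input, label):
    return -1.0 * (label * np.log(input) +
                   (1.0 - label) * np.log(1.0 - input))
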
Example #2
                'Y':
                np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(self.dtype),
            }

            self.attrs = {'axis': 2}

            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivBroadcast(unittest.TestCase):
        def test_shape_with_batch_sizes(self):
            with fluid.program_guard(fluid.Program()):
                x_var = fluid.data(
                    name='x', dtype='float32', shape=[None, 3, None, None])
                scalar = 2.
                out = scalar / x_var
                exe = fluid.Executor(fluid.XPUPlace(0))
                x = np.random.uniform(0.1, 0.6,
                                      (1, 3, 32, 32)).astype('float32')
                out_result, = exe.run(feed={'x': x}, fetch_list=[out])
                self.assertEqual((out_result == (2 / x)).all(), True)


support_types = get_xpu_op_support_types('elementwise_div')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseDivOp, stype)

if __name__ == '__main__':
    unittest.main()
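
For context, a sketch of what the legacy elementwise axis attribute computes; this assumes old-style broadcasting where Y's dimensions align with X starting at the given axis (the X input is cut off above, so the shapes here are illustrative):

import numpy as np

x = np.random.uniform(0.1, 1, (2, 3, 10, 12))
y = np.random.uniform(0.1, 1, (10, 12))
# axis=2 aligns y with x's dims starting at index 2, i.e. y acts as (1, 1, 10, 12)
out = x / y.reshape((1, 1, 10, 12))
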
Example #3
    class TestWithStride_AsyPadding(TestConv2DOp_v2):
        def init_test_case(self):
            self.stride = [2, 2]
            self.input_size = [2, 3, 6, 6]  # NCHW
            assert np.mod(self.input_size[1], self.groups) == 0
            f_c = self.input_size[1] // self.groups
            self.filter_size = [6, f_c, 3, 3]

        def init_paddings(self):
            self.pad = [1, 1, 1, 1]
            self.padding_algorithm = "EXPLICIT"


support_types = get_xpu_op_support_types('conv2d')
for stype in support_types:
    create_test_class(globals(), XPUTestConv2DOp, stype)
    create_test_class(globals(), XPUTestConv2DOp_v2, stype)

#---------- test SAME VALID -----------
#create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
#create_test_padding_SAME_class(TestWithPad_AsyPadding)
#create_test_padding_SAME_class(TestWithStride_AsyPadding)

#create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
#create_test_padding_VALID_class(TestWithPad_AsyPadding)
#create_test_padding_VALID_class(TestWithStride_AsyPadding)

# ------------ test channel last ---------
#create_test_channel_last_class(TestConv2DOp_AsyPadding)
#create_test_channel_last_class(TestWithPad_AsyPadding)
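
A quick check of the spatial output size implied by TestWithStride_AsyPadding (EXPLICIT padding [1, 1, 1, 1], stride 2, 3x3 filter on a 6x6 input):

# standard conv arithmetic: H_out = (H + pad_before + pad_after - k) // stride + 1
h, pad, k, stride = 6, (1, 1), 3, 2
print((h + pad[0] + pad[1] - k) // stride + 1)  # 3 -> output is NCHW [2, 6, 3, 3]
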
Example #4
        def initParameters(self):
            self.axis = 3

    class TestStackOp7(TestStackOp):
        def initParameters(self):
            self.num_inputs = 4
            self.input_dim = (5, 6, 7)
            self.axis = 0
            self.dtype = np.int64

        def test_check_grad(self):
            pass

    class TestStackOp8(TestStackOp):
        def initParameters(self):
            self.num_inputs = 4
            self.input_dim = (5, 6, 7)
            self.axis = 0
            self.dtype = np.int32

        def test_check_grad(self):
            pass


support_types = get_xpu_op_support_types('stack')
for stype in support_types:
    create_test_class(globals(), XPUTestStackOp, stype)

if __name__ == "__main__":
    unittest.main()
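
The stack cases above mirror numpy.stack; TestStackOp7's parameters, for instance, give:

import numpy as np

xs = [np.random.randint(0, 10, (5, 6, 7)) for _ in range(4)]  # num_inputs = 4
print(np.stack(xs, axis=0).shape)  # (4, 5, 6, 7)
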
Example #5
            place = paddle.XPUPlace(0)
            paddle.enable_static()
            self.check_grad_with_place(place, ['X', 'Y'],
                                       'Out',
                                       max_relative_error=0.1)

        def test_check_grad_ignore_x(self):
            place = paddle.XPUPlace(0)
            paddle.enable_static()
            self.check_grad_with_place(place, ['Y'],
                                       'Out',
                                       max_relative_error=0.1,
                                       no_grad_set=set("X"))

        def test_check_grad_ignore_y(self):
            place = paddle.XPUPlace(0)
            paddle.enable_static()
            self.check_grad_with_place(place, ['X'],
                                       'Out',
                                       max_relative_error=0.1,
                                       no_grad_set=set('Y'))


support_types = get_xpu_op_support_types('mul')
for stype in support_types:
    create_test_class(globals(), XPUTestMulOp, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
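
For reference, a hedged numpy sketch of the mul op whose gradients are checked above, assuming the default x_num_col_dims=1 and y_num_col_dims=1 (each input is flattened to 2-D before a matrix multiply):

import numpy as np

x = np.random.rand(2, 3, 4)     # flattened to (2, 12)
y = np.random.rand(12, 5)
out = x.reshape(2, -1).dot(y)   # shape (2, 5)
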
Example #6
        def init_test_input(self):
            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype),
                "Shape": np.array(self.new_shape, dtype="int32")
            }

        def init_attrs(self):
            self.attrs = {"use_xpu": True}

    class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
        def init_data(self):
            self.ori_shape = (5, 20)
            self.new_shape = (5, -1, 10)
            self.infered_shape = (5, -1, 10)
            self.shape = (5, -1, -1)

    class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
        def init_data(self):
            self.ori_shape = (10, 2, 6)
            self.new_shape = (10, 0, 3, -1)
            self.infered_shape = (10, 2, 3, -1)
            self.shape = (10, 0, 3, -1)


support_types = get_xpu_op_support_types("reshape2")
for stype in support_types:
    create_test_class(globals(), XPUTestReshapeOp, stype)

if __name__ == "__main__":
    unittest.main()
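
The new_shape values follow reshape2's attribute semantics: 0 copies the corresponding input dimension and -1 is inferred from the remaining elements. A worked example for TestReshapeOpDimInfer2_attr_OnlyShape:

import numpy as np

x = np.zeros((10, 2, 6))
# target (10, 0, 3, -1): the 0 copies dim 1 (= 2), then -1 is inferred as 2
print(x.reshape((10, 2, 3, -1)).shape)  # (10, 2, 3, 2)
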
Example #7
        def init_data(self):
            self.ori_shape = [100]
            self.repeat_times = [2]

        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestTileOpRank2_tensor(TestTileOpRank1_tensor):
        def init_data(self):
            self.ori_shape = [12, 14]
            self.repeat_times = [2, 3]


support_types = get_xpu_op_support_types('tile')
for stype in support_types:
    create_test_class(globals(), XPUTestTileOpRank1, stype)
    create_test_class(globals(), XPUTestTileOpRank1_tensor_attr, stype)
    create_test_class(globals(), XPUTestTileOpRank1_tensor, stype)
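
The repeat_times values mirror numpy.tile; TestTileOpRank2_tensor, for example:

import numpy as np

print(np.tile(np.ones((12, 14)), (2, 3)).shape)  # (24, 42)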


# Test python API
class TestTileAPI(unittest.TestCase):
    def test_api(self):
        with fluid.dygraph.guard(paddle.XPUPlace(0)):
            np_x = np.random.random([12, 14]).astype("float32")
            x = paddle.to_tensor(np_x)

            positive_2 = np.array([2]).astype("int32")
            positive_2 = paddle.to_tensor(positive_2)

            repeat_times = np.array([2, 3]).astype("int32")
Example #8
        def init_test_case(self):
            self.interp_method = 'bilinear'
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.scale = 0.
            self.out_size = np.array([66, 40]).astype("int32")
            self.align_corners = True
            self.shape_by_1Dtensor = True

    # scale is a 1-D tensor
    class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor):
        def init_test_case(self):
            self.interp_method = 'bilinear'
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.scale = 2.0
            self.out_size = None
            self.align_corners = True
            self.scale_by_1Dtensor = True


support_types = get_xpu_op_support_types('bilinear_interp_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestBilinearInterpV2Op, stype)

if __name__ == "__main__":
    unittest.main()
Example #9
        def set_attrs(self):
            self.num_layers = 3
            self.is_bidirec = False

    class TestRNNOp5(TestRNNOp):
        def set_attrs(self):
            self.num_layers = 2
            self.is_bidirec = True

    class TestRNNOp6(TestRNNOp):
        def set_attrs(self):
            self.num_layers = 2
            self.is_bidirec = True
            self.sequence_length = None

    class TestRNNOp7(TestRNNOp):
        def set_attrs(self):
            self.num_layers = 3
            self.is_bidirec = True


support_types = get_xpu_op_support_types('rnn')
for stype in support_types:
    create_test_class(globals(),
                      XPUTestRNNOp,
                      stype,
                      ignore_device_version=[core.XPUVersion.XPU1])

if __name__ == '__main__':
    unittest.main()
Example #10
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp10(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp11(TestTopkOp):
        def init_args(self):
            self.k = 5
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp12(TestTopkOp):
        def init_args(self):
            self.k = 1
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)


support_types = get_xpu_op_support_types('top_k_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestTopKV2Op, stype)

if __name__ == "__main__":
    unittest.main()
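
A hedged numpy reference for what these top_k_v2 cases check (largest=True along the given axis):

import numpy as np

x = np.random.rand(10, 10, 5)
k, axis = 3, 1
idx = np.argsort(-x, axis=axis)[:, :k, :]    # indices of the k largest
val = np.take_along_axis(x, idx, axis=axis)  # the k largest values, descending
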
Example #11
                    user_defined_grad_outputs=user_defined_grad_outputs)

    # Correct: There are negative axes.
    class TestSqueeze2Op1(TestSqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (1, 20, 1, 5)
            self.axes = (0, -2)
            self.new_shape = (20, 5)

    # Correct: No axes input.
    class TestSqueeze2Op2(TestSqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (1, 20, 1, 5)
            self.axes = ()
            self.new_shape = (20, 5)

    # Correct: Just part of axes be squeezed.
    class TestSqueeze2Op3(TestSqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (6, 1, 5, 1, 4, 1)
            self.axes = (1, -1)
            self.new_shape = (6, 5, 1, 4)


support_types = get_xpu_op_support_types("squeeze2")
for stype in support_types:
    create_test_class(globals(), XPUTestSqueeze2Op, stype)

if __name__ == "__main__":
    unittest.main()
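
These cases match numpy.squeeze; TestSqueeze2Op3, for example:

import numpy as np

x = np.zeros((6, 1, 5, 1, 4, 1))
print(np.squeeze(x, axis=(1, -1)).shape)  # (6, 5, 1, 4)
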
Example #12
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])

            for i in range(np_array.size):
                self.assertLess(abs(actual[i] - np_array[i]), 1e-5)

    def test_sparse_adam(self):
        # the sparse update path is exercised only on XPU2 devices
        if core.get_xpu_device_version(0) == core.XPUVersion.XPU2:
            self.check_with_place(paddle.XPUPlace(0), False)


support_types = get_xpu_op_support_types('adam')
for stype in support_types:
    create_test_class(globals(), XPUTestAdamOp, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
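
For reference, a minimal sketch of the dense Adam update the outputs above are compared against; this is the textbook rule, with illustrative names rather than Paddle's:

import numpy as np

def adam_step(param, grad, m, v, lr, t, beta1=0.9, beta2=0.999, eps=1e-8):
    # first/second moment updates, then a bias-corrected step
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
    return param - lr_t * m / (np.sqrt(v) + eps), m, v
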
Example #13
            self.axis = 0

    class TestArgMaxCase9(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (2, )
            self.axis = 0

    class TestArgMaxCase10(XPUBaseTestCase):
        def initTestCase(self):
            self.dims = (3, )
            self.axis = 0


support_types = get_xpu_op_support_types('arg_max')
for stype in support_types:
    create_test_class(globals(), XPUTestArgMax, stype)


class TestArgMaxAPI(unittest.TestCase):
    def initTestCase(self):
        self.dims = (3, 4, 5)
        self.dtype = 'float32'
        self.axis = 0

    def setUp(self):
        self.initTestCase()
        self.__class__.use_xpu = True
        self.place = [paddle.XPUPlace(0)]

    def test_dygraph_api(self):
        def run(place):
Example #14
        def test_check_grad_ignore_x(self):
            self.check_grad_with_place(self.place, ['Y'],
                                       'Out',
                                       no_grad_set=set(['residual']))

        def test_check_grad_ignore_y(self):
            self.check_grad_with_place(self.place, ['X'],
                                       'Out',
                                       no_grad_set=set(['residual']))

    class TestHuberLossOp1(TestHuberLossOp):
        def set_shape(self):
            return (640, )

    class TestHuberLossOp2(TestHuberLossOp):
        def set_shape(self):
            return (10, 10)

    class TestHuberLossOp3(TestHuberLossOp):
        def set_shape(self):
            return (10, 10, 1)


support_types = get_xpu_op_support_types('huber_loss')
for stype in support_types:
    create_test_class(globals(), XPUTestHuberLossOp, stype)

if __name__ == '__main__':
    unittest.main()
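
A hedged numpy reference for the Huber loss being checked, assuming residual = label - input and threshold delta:

import numpy as np

def huber_loss(residual, delta):
    small = np.abs(residual) <= delta
    return np.where(small,
                    0.5 * residual * residual,
                    delta * (np.abs(residual) - 0.5 * delta))
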
Example #15
            self.y_shape = [4, 1]
            self.low = -100
            self.high = 100

    class XPUTestBitwiseAndCase3(XPUTestBitwiseAndBase):
        def init_case(self):
            self.dtype = np.int32
            self.x_shape = [2, 3, 4, 5]
            self.y_shape = [2, 3, 4, 5]
            self.low = 0
            self.high = 100


support_types = get_xpu_op_support_types('bitwise_and')
for stype in support_types:
    create_test_class(globals(), XPUTestBitwiseAnd, stype)
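
The bitwise cases reduce to numpy's element-wise integer operators, e.g.:

import numpy as np

x = np.random.randint(0, 100, (2, 3, 4, 5), dtype=np.int32)
y = np.random.randint(0, 100, (2, 3, 4, 5), dtype=np.int32)
out = np.bitwise_and(x, y)  # reference output for the XPU kernel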


################## TEST OP: BitwiseOr ##################
class XPUTestBitwiseOr(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'bitwise_or'

    class XPUTestBitwiseOrBase(XPUOpTest):
        def setUp(self):
            self.place = paddle.XPUPlace(0)
            self.init_case()
            self.set_case()

        def set_case(self):
            self.op_type = 'bitwise_or'
Example #16
                    self.assertTrue(
                        np.allclose(
                            input.gradient(),
                            self.cal_grad_upscale_train(mask.numpy(), prob)))

        def test_backward_upscale_train_2(self):
            for place in self.places:
                with fluid.dygraph.guard(place):

                    prob = 0.3
                    input = paddle.uniform([40, 40], dtype=self.in_type)
                    input.stop_gradient = False
                    out, mask = core.ops.dropout(input, 'dropout_prob', prob,
                                                 "dropout_implementation",
                                                 "upscale_in_train")
                    out.backward()

                    self.assertTrue(
                        np.allclose(
                            input.gradient(),
                            self.cal_grad_upscale_train(mask.numpy(), prob)))


support_types = get_xpu_op_support_types('dropout')
for stype in support_types:
    create_test_class(globals(), XPUTestDropoutOp, stype)

if __name__ == '__main__':
    unittest.main()
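
A sketch of the cal_grad_upscale_train helper referenced above (an assumption based on the upscale_in_train rule: kept positions propagate 1/(1 - p)):

import numpy as np

def cal_grad_upscale_train(mask, prob):
    # gradient of upscale_in_train dropout w.r.t. the input
    return mask.astype(np.float32) / (1 - prob)
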
Example #17
                'Condition':
                np.array([[[True, False], [False, True]],
                          [[False, True], [True, False]],
                          [[False, False], [False, True]]]).astype(self.dtype),
            }

            self.outputs = {
                'Out': np.array(
                    [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],
                    dtype='int64')
            }


support_types = get_xpu_op_support_types('where_index')
for stype in support_types:
    create_test_class(globals(), XPUTestWhereIndexOp, stype)
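
The expected 'Out' above is exactly what numpy.argwhere produces:

import numpy as np

cond = np.array([[[True, False], [False, True]],
                 [[False, True], [True, False]],
                 [[False, False], [False, True]]])
print(np.argwhere(cond))  # [[0 0 0] [0 1 1] [1 0 1] [1 1 0] [2 1 1]]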


class TestWhereOpError(unittest.TestCase):
    def test_api(self):
        with program_guard(Program(), Program()):
            cond = fluid.layers.data(name='cond', shape=[4], dtype='bool')
            result = fluid.layers.where(cond)

            exe = fluid.Executor(paddle.XPUPlace(0))
            exe.run(fluid.default_startup_program())
            cond_i = np.array([True, False, False, False]).astype("bool")
            out = exe.run(fluid.default_main_program(), feed={'cond': cond_i})


class TestWhereRaiseError(unittest.TestCase):
Example #18
    class XPUTestExp(TestActivationOPBase):
        def set_case(self):
            self.op_type = 'exp'
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.exp(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('exp')
for stype in support_types:
    create_test_class(globals(), XPUTestExpOP, stype)


class XPUTestSigmoidOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'sigmoid'
        self.use_dynamic_create_class = False

    class XPUTestSigmoid(TestActivationOPBase):
        def set_case(self):
            self.op_type = "sigmoid"
            self.dtype = self.in_type
            self.init_config()
            out = 1 / (1 + np.exp(-self.x))

            self.attrs = {'use_xpu': True}
Example #19
                    'Out': self.inputs['X'].sum(axis=self.axis,
                                                keepdims=self.attrs['keep_dim'])
                }

        def init_case(self):
            self.shape = (5, 6, 10)
            self.axis = (0, )
            self.reduce_all = False
            self.keep_dim = False

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            pass

    class XPUTestReduceSumCase1(XPUTestReduceSumBase):
        def init_case(self):
            self.shape = (5, 6, 10)
            self.axis = (0, )
            self.reduce_all = False
            self.keep_dim = True


support_types = get_xpu_op_support_types('reduce_sum')
for stype in support_types:
    create_test_class(globals(), XPUTestReduceSumOp, stype)

if __name__ == '__main__':
    unittest.main()
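
The keep_dim flag maps onto numpy's keepdims; for the (5, 6, 10) cases above:

import numpy as np

x = np.ones((5, 6, 10))
print(x.sum(axis=0).shape)                 # (6, 10)    keep_dim=False
print(x.sum(axis=0, keepdims=True).shape)  # (1, 6, 10) keep_dim=True
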
Example #20
            self.trans_y = False

    class TestMatMulOp17(TestMatMulV2Op):
        """
        case 17 : to check the gradient for special case
        """
        def config(self):
            self.x_shape = (2, 1, 100)
            self.y_shape = (100)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp18(TestMatMulV2Op):
        """
        case 18 : for ppyoloe model
        """
        def config(self):
            self.x_shape = (8, 111, 4, 17)
            self.y_shape = (17)
            self.trans_x = False
            self.trans_y = False


support_types = get_xpu_op_support_types('matmul_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestMatmulV2Op, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
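
Cases 17 and 18 exercise matmul's 1-D operand rule: a 1-D y contracts against x's last axis and the result drops that axis:

import numpy as np

x = np.ones((2, 1, 100))
y = np.ones(100)
print(np.matmul(x, y).shape)  # (2, 1)
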
Example #21
            self.attrs = {
                "numeric_stable_mode": self.numeric_stable_mode,
                "soft_label": self.soft_label,
            }
            if self.ignore_index >= 0:
                self.attrs['ignore_index'] = self.ignore_index
            if self.axis != -1:
                self.attrs['axis'] = self.axis

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                paddle.enable_static()
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place, atol=1e-2)

        def test_check_grad(self):
            if paddle.is_compiled_with_xpu():
                paddle.enable_static()
                place = paddle.XPUPlace(0)
                self.check_grad_with_place(place, ["Logits"],
                                           "Loss",
                                           max_relative_error=0.2)


support_types = get_xpu_op_support_types('softmax_with_cross_entropy')
for stype in support_types:
    create_test_class(globals(), XPUTestSoftmaxWithCrossEntropyOp, stype)

if __name__ == "__main__":
    unittest.main()
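
A hedged numpy reference for the loss checked above, assuming hard integer labels and the default axis of -1:

import numpy as np

def softmax_with_cross_entropy(logits, labels):
    # numerically stable log-softmax, then the label's negative log-likelihood
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    return -np.take_along_axis(log_softmax, labels[..., None], axis=-1)
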
Example #22
        def set_inputs(self):
            self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
            self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
            self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
            self.axis = 1

        def test_check_grad(self):
            pass

    @skip_check_grad_ci(
        reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
    )
    class TestConcatOp4(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
            self.axis = 0

        def test_check_grad(self):
            pass


support_types = get_xpu_op_support_types('concat')
for stype in support_types:
    create_test_class(globals(), XPUTestConcatOp, stype)

if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()
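
TestConcatOp4 deliberately includes a zero-size input; concatenation along axis 0 stays well defined:

import numpy as np

a = np.random.random((2, 3, 4, 5))
b = np.random.random((0, 3, 4, 5))
print(np.concatenate([a, a, b], axis=0).shape)  # (4, 3, 4, 5)
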
Example #23
        out_1 = self._executed_api(images, min=0.2, max=0.8)
        images = paddle.to_tensor(data, dtype='float32')
        out_2 = self._executed_api(images, min=0.2, max=0.9)
        images = paddle.to_tensor(data, dtype='float32')
        out_3 = self._executed_api(images, min=v_min, max=v_max)

        self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
        self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
        self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8)))

    def test_errors(self):
        paddle.enable_static()
        x1 = fluid.data(name='x1', shape=[1], dtype="int16")
        x2 = fluid.data(name='x2', shape=[1], dtype="int8")
        self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8)
        self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8)
        paddle.disable_static()


class TestInplaceClipAPI(TestClipAPI):
    def _executed_api(self, x, min=None, max=None):
        return x.clip_(min, max)


support_types = get_xpu_op_support_types('clip')
for stype in support_types:
    create_test_class(globals(), XPUTestClipOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #24
                                             dtype="int32")

            out_1 = paddle.expand(x, shape=[12, 14])
            out_2 = paddle.expand(x, shape=[positive_2, 14])
            out_3 = paddle.expand(x, shape=expand_shape)

            g0 = fluid.backward.calc_gradient(out_2, x)

            exe = fluid.Executor(place=paddle.XPUPlace(0))
            res_1, res_2, res_3 = exe.run(
                fluid.default_main_program(),
                feed={
                    "x": input,
                    "expand_shape": np.array([12, 14]).astype("int32"),
                },
                fetch_list=[out_1, out_2, out_3])

            assert np.array_equal(res_1, np.tile(input, (1, 1)))
            assert np.array_equal(res_2, np.tile(input, (1, 1)))
            assert np.array_equal(res_3, np.tile(input, (1, 1)))


support_types = get_xpu_op_support_types('expand_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestExpandV2Op, stype)

if __name__ == "__main__":
    unittest.main()
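
Note that all three assertions compare against np.tile(input, (1, 1)), which equals the input itself: x is already [12, 14], so each expansion to [12, 14] is the identity.
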
Example #25
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place, check_dygraph=False)

    class TestRangeOpCase0(TestRangeOp):
        def init_config(self):
            self.case = (0, 5, 1)

    class TestRangeOpCase1(TestRangeOp):
        def init_config(self):
            self.case = (0, 5, 2)

    class TestRangeOpCase2(TestRangeOp):
        def init_config(self):
            self.case = (10, 1, -2)

    class TestRangeOpCase3(TestRangeOp):
        def init_config(self):
            self.case = (-1, -10, -2)

    class TestRangeOpCase4(TestRangeOp):
        def init_config(self):
            self.case = (10, -10, -11)


support_types = get_xpu_op_support_types("range")
for stype in support_types:
    create_test_class(globals(), XPUTestRangeOp, stype)

if __name__ == "__main__":
    unittest.main()
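
Each case tuple is (start, end, step) with numpy.arange semantics, so even the oversized negative step is valid:

import numpy as np

print(np.arange(10, -10, -11))  # [10 -1], matching case (10, -10, -11)
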
Example #26
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([[1, 1], [2, 1]]).astype("int32")
            self.output = self.xnp[tuple(self.inp.T)]

    class XPUTestGatherNdOpWithSameIndexAsX2(XPUTestGatherNdBase):
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([[1, 1], [2, 1]]).astype("int64")
            self.output = self.xnp[tuple(self.inp.T)]

    class XPUTestGatherNdOpIndex1(XPUTestGatherNdBase):
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([1, 2]).astype("int32")
            self.output = self.xnp[tuple(self.inp.T)]

    class XPUTestGatherNdOpIndex2(XPUTestGatherNdBase):
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([1, 2]).astype("int64")
            self.output = self.xnp[tuple(self.inp.T)]


support_types = get_xpu_op_support_types('gather_nd')
for stype in support_types:
    create_test_class(globals(), XPUTestGatherNd, stype)

if __name__ == "__main__":
    unittest.main()
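
The indexing above, xnp[tuple(inp.T)], is plain numpy fancy indexing; with a concrete array:

import numpy as np

xnp = np.arange(100).reshape(10, 10)
inp = np.array([[1, 1], [2, 1]])
print(xnp[tuple(inp.T)])  # [11 21] -> elements xnp[1, 1] and xnp[2, 1]
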
Example #27
            linear_2 = paddle.nn.Linear(5, 3)
            adam = paddle.optimizer.AdamW(
                learning_rate=paddle.optimizer.lr.PiecewiseDecay(
                    boundaries=[3, 6], values=[0.1, 0.2, 0.3]),
                parameters=[{
                    'params': linear_1.parameters(),
                    'learning_rate': 0.1,
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                }],
                apply_decay_param_fun=lambda name: True,
                weight_decay=0.01)

            for _ in range(2):
                out = linear_1(a)
                out = linear_2(out)
                out.backward()
                adam.step()
                adam.clear_gradients()


support_types = get_xpu_op_support_types('adamw')
for stype in support_types:
    create_test_class(globals(), XPUTestAdamwOp1, stype)
    create_test_class(globals(), XPUTestAdamwOp2, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
Example #28
        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestFillAnyLikeOp2(TestFillAnyLikeOp):
        def set_value(self):
            self.value = -0.0

    class TestFillAnyLikeOp3(TestFillAnyLikeOp):
        def set_value(self):
            self.value = 1.0

    class TestFillAnyLikeOp4(TestFillAnyLikeOp):
        def set_value(self):
            self.value = 1e-9

    class TestFillAnyLikeOp5(TestFillAnyLikeOp):
        def set_value(self):
            if self.dtype == "float16":
                self.value = 0.05
            else:
                self.value = 5.0


support_types = get_xpu_op_support_types('fill_any_like')
for stype in support_types:
    create_test_class(globals(), XPUTestFillAnyLikeOp, stype)

if __name__ == "__main__":
    unittest.main()
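
fill_any_like behaves like numpy.full_like: the output copies x's shape and dtype, filled with the given value:

import numpy as np

x = np.zeros((3, 4), dtype="float32")
print(np.full_like(x, 1.0))
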
Example #29
            self.out_h = 12
            self.out_w = 12
            self.out_size = [8, 12]

    # out_size is a 1-D tensor
    class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor):
        def init_test_case(self):
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.out_size = np.array([66, 40]).astype("int32")
            self.shape_by_1Dtensor = True

    # scale is a 1-D tensor
    class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor):
        def init_test_case(self):
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.scale = 2.0
            self.out_size = None
            self.scale_by_1Dtensor = True


support_types = get_xpu_op_support_types('nearest_interp_v2')
for stype in support_types:
    create_test_class(globals(), XPUNearestInterpOpWrapper, stype)

if __name__ == "__main__":
    unittest.main()
Example #30
class TestScaleInplaceApiStatic(TestScaleApiStatic):
    def _executed_api(self, x, scale=1.0, bias=0.0):
        return x.scale_(scale, bias)


class TestScaleApiDygraph(unittest.TestCase):
    def _executed_api(self, x, scale=1.0, bias=0.0):
        return paddle.scale(x, scale, bias)

    def test_api(self):
        paddle.disable_static()
        input = np.random.random([2, 25]).astype("float32")
        x = paddle.to_tensor(input)
        out = self._executed_api(x, scale=2.0, bias=3.0)
        self.assertEqual(np.array_equal(out.numpy(), input * 2.0 + 3.0), True)
        paddle.enable_static()


class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
    def _executed_api(self, x, scale=1.0, bias=0.0):
        return x.scale_(scale, bias)


support_types = get_xpu_op_support_types('scale')
for stype in support_types:
    create_test_class(globals(), XPUTestScaleOp, stype)

if __name__ == "__main__":
    unittest.main()