        def init_test_case(self):
            self.interp_method = 'bilinear'
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.scale = 0.
            self.out_size = np.array([66, 40]).astype("int32")
            self.align_corners = True
            self.shape_by_1Dtensor = True

    # scale is a 1-D tensor
    class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor):
        def init_test_case(self):
            self.interp_method = 'bilinear'
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.scale = 2.0
            self.out_size = None
            self.align_corners = True
            self.scale_by_1Dtensor = True


support_types = get_xpu_op_support_types('bilinear_interp_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestBilinearInterpV2Op, stype)

if __name__ == "__main__":
    unittest.main()
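Every example below ends with the same registration idiom: query the dtypes an XPU kernel supports, then stamp out one concrete test class per dtype. As a rough illustration, here is a minimal sketch of what such a helper could look like; `create_test_class_sketch` is hypothetical and NOT Paddle's real implementation, which also handles grad toggles and device-version filters.

import unittest

import numpy as np


def create_test_class_sketch(scope, wrapper_cls, dtype_name):
    # Re-register every inner TestCase of the wrapper under a dtype-suffixed
    # name, binding `in_type` so set_case()/init methods can pick it up.
    for name, attr in vars(wrapper_cls).items():
        if isinstance(attr, type) and issubclass(attr, unittest.TestCase):
            new_name = "%s_%s" % (name, dtype_name)
            scope[new_name] = type(new_name, (attr, ),
                                   {"in_type": np.dtype(dtype_name)})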
Example #2
                'Y':
                np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(self.dtype),
            }

            self.attrs = {'axis': 2}

            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivBroadcast(unittest.TestCase):
        def test_shape_with_batch_sizes(self):
            with fluid.program_guard(fluid.Program()):
                x_var = fluid.data(
                    name='x', dtype='float32', shape=[None, 3, None, None])
                one = 2.  # note: despite the name, the scalar is 2.0; it broadcasts over x_var
                out = one / x_var
                exe = fluid.Executor(fluid.XPUPlace(0))
                x = np.random.uniform(0.1, 0.6,
                                      (1, 3, 32, 32)).astype('float32')
                out_result, = exe.run(feed={'x': x}, fetch_list=[out])
                self.assertEqual((out_result == (2 / x)).all(), True)


support_types = get_xpu_op_support_types('elementwise_div')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseDivOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #3
            self.padding_algorithm = "EXPLICIT"

    class TestWithStride_AsyPadding(TestConv2DOp_v2):
        def init_test_case(self):
            self.stride = [2, 2]
            self.input_size = [2, 3, 6, 6]  # NCHW
            assert np.mod(self.input_size[1], self.groups) == 0
            f_c = self.input_size[1] // self.groups
            self.filter_size = [6, f_c, 3, 3]

        def init_paddings(self):
            self.pad = [1, 1, 1, 1]
            self.padding_algorithm = "EXPLICIT"


support_types = get_xpu_op_support_types('conv2d')
for stype in support_types:
    create_test_class(globals(), XPUTestConv2DOp, stype)
    create_test_class(globals(), XPUTestConv2DOp_v2, stype)

#---------- test SAME VALID -----------
#create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
#create_test_padding_SAME_class(TestWithPad_AsyPadding)
#create_test_padding_SAME_class(TestWithStride_AsyPadding)

#create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
#create_test_padding_VALID_class(TestWithPad_AsyPadding)
#create_test_padding_VALID_class(TestWithStride_AsyPadding)

# ------------ test channel last ---------
#create_test_channel_last_class(TestConv2DOp_AsyPadding)
Example #4
        def initParameters(self):
            self.axis = 3

    class TestStackOp7(TestStackOp):
        def initParameters(self):
            self.num_inputs = 4
            self.input_dim = (5, 6, 7)
            self.axis = 0
            self.dtype = np.int64

        def test_check_grad(self):
            pass

    class TestStackOp8(TestStackOp):
        def initParameters(self):
            self.num_inputs = 4
            self.input_dim = (5, 6, 7)
            self.axis = 0
            self.dtype = np.int32

        def test_check_grad(self):
            pass


support_types = get_xpu_op_support_types('stack')
for stype in support_types:
    create_test_class(globals(), XPUTestStackOp, stype)

if __name__ == "__main__":
    unittest.main()
Example #5
            place = paddle.XPUPlace(0)
            paddle.enable_static()
            self.check_grad_with_place(place, ['X', 'Y'],
                                       'Out',
                                       max_relative_error=0.1)

        def test_check_grad_ignore_x(self):
            place = paddle.XPUPlace(0)
            paddle.enable_static()
            self.check_grad_with_place(place, ['Y'],
                                       'Out',
                                       max_relative_error=0.1,
                                       no_grad_set=set("X"))

        def test_check_grad_ignore_y(self):
            place = paddle.XPUPlace(0)
            paddle.enable_static()
            self.check_grad_with_place(place, ['X'],
                                       'Out',
                                       max_relative_error=0.1,
                                       no_grad_set=set('Y'))
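In the three gradient checks above, `no_grad_set` excludes the named input from the check: `test_check_grad_ignore_x` verifies only the gradient w.r.t. Y, `test_check_grad_ignore_y` only the gradient w.r.t. X, while the first check covers both inputs at once.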


support_types = get_xpu_op_support_types('mul')
for stype in support_types:
    create_test_class(globals(), XPUTestMulOp, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
Example #6
            }

    class TestElementwiseMulOpError(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                # the inputs of elementwise_mul must be Variables; raw LoDTensors raise TypeError
                x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                             [[1, 1, 1, 1]], fluid.XPUPlace(0))
                y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                             [[1, 1, 1, 1]], fluid.XPUPlace(0))
                self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1,
                                  y1)

                # uint8 is not a supported input dtype for elementwise_mul, so this raises TypeError
                x2 = fluid.layers.data(name='x2',
                                       shape=[3, 4, 5, 6],
                                       dtype="uint8")
                y2 = fluid.layers.data(name='y2',
                                       shape=[3, 4, 5, 6],
                                       dtype="uint8")
                self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2,
                                  y2)


support_types = get_xpu_op_support_types('elementwise_mul')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseMulOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #7
            self.Xshape = (3, 3, 3)
            self.Yshape = (3, 3, 3)

    class TestBmmOp2(TestBmmOp):
        def set_shape(self):
            self.Xshape = (128, 3, 16)
            self.Yshape = (128, 16, 3)

    class TestBmmOp3(TestBmmOp):
        def set_shape(self):
            self.Xshape = (2048, 16, 27)
            self.Yshape = (2048, 27, 16)

    class TestBmmOp4(TestBmmOp):
        def set_shape(self):
            self.Xshape = (2, 27, 27)
            self.Yshape = (2, 27, 27)

    class TestBmmOp5(TestBmmOp):
        def set_shape(self):
            self.Xshape = (2, 1, 1)
            self.Yshape = (2, 1, 1)


support_types = get_xpu_op_support_types('bmm')
for stype in support_types:
    create_test_class(globals(), XPUTestBmmOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #8
            self.trans_y = False

    class TestMatMulOp17(TestMatMulV2Op):
        """
        case 17: checks the gradient for a special case
        """
        def config(self):
            self.x_shape = (2, 1, 100)
            self.y_shape = (100, )
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp18(TestMatMulV2Op):
        """
        case 18: for the ppyoloe model
        """
        def config(self):
            self.x_shape = (8, 111, 4, 17)
            self.y_shape = (17, )
            self.trans_x = False
            self.trans_y = False
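Note the trailing commas in `(100, )` and `(17, )` above: without a comma Python reads the parentheses as plain grouping, so the shape would be a bare int instead of a 1-tuple describing a vector operand.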


support_types = get_xpu_op_support_types('matmul_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestMatmulV2Op, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
Example #9
        def set_inputs(self):
            self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
            self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
            self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
            self.axis = 1

        def test_check_grad(self):
            pass

    @skip_check_grad_ci(
        reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
    )
    class TestConcatOp4(TestConcatOp):
        def set_inputs(self):
            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
            self.axis = 0

        def test_check_grad(self):
            pass


support_types = get_xpu_op_support_types('concat')
for stype in support_types:
    create_test_class(globals(), XPUTestConcatOp, stype)

if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()
Example #10
    class TestXPUWhereOp2(TestXPUWhereOp):
        def init_data(self):
            self.x = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
            self.y = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype)
            self.cond = np.ones((60, 2)).astype("bool")

    class TestXPUWhereOp3(TestXPUWhereOp):
        def init_data(self):
            self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
            self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype)
            self.cond = np.random.randint(2, size=(20, 2, 4)).astype(bool)


support_types = get_xpu_op_support_types('where')
for stype in support_types:
    create_test_class(globals(), XPUTestWhereOp, stype)


class TestXPUWhereAPI(unittest.TestCase):
    def setUp(self):
        self.__class__.use_xpu = True
        self.place = paddle.XPUPlace(0)
        self.init_data()

    def init_data(self):
        self.shape = [10, 15]
        self.cond = np.array(np.random.randint(2, size=self.shape), dtype=bool)
        self.x = np.random.uniform(-2, 3, self.shape).astype(np.float32)
        self.y = np.random.uniform(-2, 3, self.shape).astype(np.float32)
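All of the where tests above reduce to NumPy's np.where semantics: take the element from x where the condition is True, else from y. A quick self-contained check in plain NumPy (nothing Paddle-specific):

import numpy as np

cond = np.array([True, False, True])
x = np.array([1.0, 2.0, 3.0])
y = np.array([10.0, 20.0, 30.0])
# out[i] = x[i] if cond[i] else y[i]
assert (np.where(cond, x, y) == np.array([1.0, 20.0, 3.0])).all()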
Example #11
        self.op_name = 'exp'
        self.use_dynamic_create_class = False

    class XPUTestExp(TestActivationOPBase):
        def set_case(self):
            self.op_type = 'exp'
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.exp(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('exp')
for stype in support_types:
    create_test_class(globals(), XPUTestExpOP, stype)


class XPUTestSigmoidOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'sigmoid'
        self.use_dynamic_create_class = False

    class XPUTestSigmoid(TestActivationOPBase):
        def set_case(self):
            self.op_type = "sigmoid"
            self.dtype = self.in_type
            self.init_config()
            out = 1 / (1 + np.exp(-self.x))
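The reference output above computes sigmoid directly as 1 / (1 + exp(-x)). That form can overflow in exp(-x) for large negative inputs at low precision; a numerically stable NumPy variant (a general idiom, not part of the test base class) splits on the sign:

import numpy as np


def stable_sigmoid(x):
    # Mathematically identical to 1 / (1 + np.exp(-x)), but exp() is only
    # ever evaluated on non-positive values, so it cannot overflow.
    out = np.empty_like(x, dtype=np.float64)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    expx = np.exp(x[~pos])
    out[~pos] = expx / (1.0 + expx)
    return out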
Example #12
                    self.assertTrue(
                        np.allclose(
                            input.gradient(),
                            self.cal_grad_upscale_train(mask.numpy(), prob)))

        def test_backward_upscale_train_2(self):
            for place in self.places:
                with fluid.dygraph.guard(place):

                    prob = 0.3
                    input = paddle.uniform([40, 40], dtype=self.in_type)
                    input.stop_gradient = False
                    out, mask = core.ops.dropout(input, 'dropout_prob', prob,
                                                 "dropout_implementation",
                                                 "upscale_in_train")
                    out.backward()

                    self.assertTrue(
                        np.allclose(
                            input.gradient(),
                            self.cal_grad_upscale_train(mask.numpy(), prob)))


support_types = get_xpu_op_support_types('dropout')
for stype in support_types:
    create_test_class(globals(), XPUTestDropoutOp, stype)

if __name__ == '__main__':
    unittest.main()
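For reference, in "upscale_in_train" mode dropout computes out = x * mask / (1 - p), so with a gradient of ones flowing back the input gradient is mask / (1 - p). A sketch of what the `cal_grad_upscale_train` helper called above plausibly returns (the real helper lives in the truncated base class):

import numpy as np


def cal_grad_upscale_train(mask, prob):
    # Inverted dropout: kept elements are scaled by 1 / (1 - prob) in the
    # forward pass, so their gradient carries the same factor; dropped
    # elements get zero gradient.
    return mask.astype(np.float32) / (1.0 - prob)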
Example #13
        def set_attrs(self):
            self.num_layers = 3
            self.is_bidirec = False

    class TestRNNOp5(TestRNNOp):
        def set_attrs(self):
            self.num_layers = 2
            self.is_bidirec = True

    class TestRNNOp6(TestRNNOp):
        def set_attrs(self):
            self.num_layers = 2
            self.is_bidirec = True
            self.sequence_length = None

    class TestRNNOp7(TestRNNOp):
        def set_attrs(self):
            self.num_layers = 3
            self.is_bidirec = True


support_types = get_xpu_op_support_types('rnn')
for stype in support_types:
    # NB: `ignore_deivce_version` (sic) matches the kwarg's spelling in the
    # XPU test helper's signature.
    create_test_class(globals(),
                      XPUTestRNNOp,
                      stype,
                      ignore_deivce_version=[core.XPUVersion.XPU1])

if __name__ == '__main__':
    unittest.main()
Example #14
                    }

                x = fluid.data(name="x", shape=[3], dtype='float32')
                y = fluid.data(name="y", shape=[3], dtype='float32')
                z = paddle.add(x, y)

                place = fluid.XPUPlace(0)
                exe = fluid.Executor(place)
                z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
                z_expected = np.array([3., 8., 6.])
                self.assertEqual((z_value == z_expected).all(), True)

        def test_dygraph(self):
            with fluid.dygraph.guard():
                np_x = np.array([2, 3, 4]).astype('float32')
                np_y = np.array([1, 5, 2]).astype('float32')
                x = fluid.dygraph.to_variable(np_x)
                y = fluid.dygraph.to_variable(np_y)
                z = paddle.add(x, y)
                np_z = z.numpy()
                z_expected = np.array([3., 8., 6.])
                self.assertEqual((np_z == z_expected).all(), True)


support_types = get_xpu_op_support_types('elementwise_add')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseAddOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #15
        def init_test_input(self):
            self.inputs = {
                "X": np.random.random(self.ori_shape).astype(self.dtype),
                "Shape": np.array(self.new_shape, dtype="int32")
            }

        def init_attrs(self):
            self.attrs = {"use_xpu": True}

    class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
        def init_data(self):
            self.ori_shape = (5, 20)
            self.new_shape = (5, -1, 10)
            self.infered_shape = (5, -1, 10)
            self.shape = (5, -1, -1)

    class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape):
        def init_data(self):
            self.ori_shape = (10, 2, 6)
            self.new_shape = (10, 0, 3, -1)
            self.infered_shape = (10, 2, 3, -1)
            self.shape = (10, 0, 3, -1)


support_types = get_xpu_op_support_types("reshape2")
for stype in support_types:
    create_test_class(globals(), XPUTestReshapeOp, stype)

if __name__ == "__main__":
    unittest.main()
Example #16
                                             dtype="int32")

            out_1 = paddle.expand(x, shape=[12, 14])
            out_2 = paddle.expand(x, shape=[positive_2, 14])
            out_3 = paddle.expand(x, shape=expand_shape)

            g0 = fluid.backward.calc_gradient(out_2, x)

            exe = fluid.Executor(place=paddle.XPUPlace(0))
            res_1, res_2, res_3 = exe.run(fluid.default_main_program(),
                                          feed={
                                              "x":
                                              input,
                                              "expand_shape":
                                              np.array([12,
                                                        14]).astype("int32")
                                          },
                                          fetch_list=[out_1, out_2, out_3])

            # `input` (built in the truncated setup above) presumably has shape
            # (12, 14), so each expand is an identity op and np.tile(input, (1, 1))
            # reproduces it unchanged.
            assert np.array_equal(res_1, np.tile(input, (1, 1)))
            assert np.array_equal(res_2, np.tile(input, (1, 1)))
            assert np.array_equal(res_3, np.tile(input, (1, 1)))


support_types = get_xpu_op_support_types('expand_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestExpandV2Op, stype)

if __name__ == "__main__":
    unittest.main()
Example #17
                    'Out': self.inputs['X'].sum(axis=self.axis,
                                                keepdims=self.attrs['keep_dim'])
                }

        def init_case(self):
            self.shape = (5, 6, 10)
            self.axis = (0, )
            self.reduce_all = False
            self.keep_dim = False

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            pass

    class XPUTestReduceSumCase1(XPUTestReduceSumBase):
        def init_case(self):
            self.shape = (5, 6, 10)
            self.axis = (0, )
            self.reduce_all = False
            self.keep_dim = True


support_types = get_xpu_op_support_types('reduce_sum')
for stype in support_types:
    create_test_class(globals(), XPUTestReduceSumOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #18
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([[1, 1], [2, 1]]).astype("int32")
            self.output = self.xnp[tuple(self.inp.T)]

    class XPUTestGatherNdOpWithSameIndexAsX2(XPUTestGatherNdBase):
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([[1, 1], [2, 1]]).astype("int64")
            self.output = self.xnp[tuple(self.inp.T)]

    class XPUTestGatherNdOpIndex1(XPUTestGatherNdBase):
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([1, 2]).astype("int32")
            self.output = self.xnp[tuple(self.inp.T)]

    class XPUTestGatherNdOpIndex2(XPUTestGatherNdBase):
        def init_data(self):
            self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type)
            self.inp = np.array([1, 2]).astype("int64")
            self.output = self.xnp[tuple(self.inp.T)]


support_types = get_xpu_op_support_types('gather_nd')
for stype in support_types:
    create_test_class(globals(), XPUTestGatherNd, stype)

if __name__ == "__main__":
    unittest.main()
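All four cases above build the expected result as `self.xnp[tuple(self.inp.T)]`: transposing the index array and wrapping it in a tuple turns rows of index tuples into NumPy advanced indexing, which is exactly what gather_nd computes. A plain NumPy illustration:

import numpy as np

x = np.arange(12).reshape(3, 4)
idx = np.array([[0, 1], [2, 3]])  # two index tuples: (0, 1) and (2, 3)
gathered = x[tuple(idx.T)]        # == [x[0, 1], x[2, 3]]
assert (gathered == np.array([1, 11])).all()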
Example #19
            self.outputs = {'Out': output}

        def init_data(self):
            self.ori_shape = [100]
            self.repeat_times = [2]

        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestTileOpRank2_tensor(TestTileOpRank1_tensor):
        def init_data(self):
            self.ori_shape = [12, 14]
            self.repeat_times = [2, 3]


support_types = get_xpu_op_support_types('tile')
for stype in support_types:
    create_test_class(globals(), XPUTestTileOpRank1, stype)
    create_test_class(globals(), XPUTestTileOpRank1_tensor_attr, stype)
    create_test_class(globals(), XPUTestTileOpRank1_tensor, stype)


# Test python API
class TestTileAPI(unittest.TestCase):
    def test_api(self):
        with fluid.dygraph.guard(paddle.XPUPlace(0)):
            np_x = np.random.random([12, 14]).astype("float32")
            x = paddle.to_tensor(np_x)

            positive_2 = np.array([2]).astype("int32")
            positive_2 = paddle.to_tensor(positive_2)
Example #20
        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestFillAnyLikeOp2(TestFillAnyLikeOp):
        def set_value(self):
            self.value = -0.0

    class TestFillAnyLikeOp3(TestFillAnyLikeOp):
        def set_value(self):
            self.value = 1.0

    class TestFillAnyLikeOp4(TestFillAnyLikeOp):
        def set_value(self):
            self.value = 1e-9

    class TestFillAnyLikeOp5(TestFillAnyLikeOp):
        def set_value(self):
            if self.dtype == "float16":
                self.value = 0.05
            else:
                self.value = 5.0


support_types = get_xpu_op_support_types('fill_any_like')
for stype in support_types:
    create_test_class(globals(), XPUTestFillAnyLikeOp, stype)

if __name__ == "__main__":
    unittest.main()
Example #21
            self.attrs = {
                "numeric_stable_mode": self.numeric_stable_mode,
                "soft_label": self.soft_label,
            }
            if self.ignore_index >= 0:
                self.attrs['ignore_index'] = self.ignore_index
            if self.axis != -1:
                self.attrs['axis'] = self.axis

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                paddle.enable_static()
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place, atol=1e-2)

        def test_check_grad(self):
            if paddle.is_compiled_with_xpu():
                paddle.enable_static()
                place = paddle.XPUPlace(0)
                self.check_grad_with_place(place, ["Logits"],
                                           "Loss",
                                           max_relative_error=0.2)


support_types = get_xpu_op_support_types('softmax_with_cross_entropy')
for stype in support_types:
    create_test_class(globals(), XPUTestSoftmaxWithCrossEntropyOp, stype)

if __name__ == "__main__":
    unittest.main()
Example #22
            self.dtype = np.int32
            self.x_shape = [2, 3, 4, 5]
            self.y_shape = [4, 1]
            self.low = -100
            self.high = 100

    class XPUTestBitwiseAndCase3(XPUTestBitwiseAndBase):
        def init_case(self):
            self.dtype = np.int32
            self.x_shape = [2, 3, 4, 5]
            self.y_shape = [2, 3, 4, 5]
            self.low = 0
            self.high = 100


support_types = get_xpu_op_support_types('bitwise_and')
for stype in support_types:
    create_test_class(globals(), XPUTestBitwiseAnd, stype)


################## TEST OP: BitwiseOr ##################
class XPUTestBitwiseOr(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'bitwise_or'

    class XPUTestBitwiseOrBase(XPUOpTest):
        def setUp(self):
            self.place = paddle.XPUPlace(0)
            self.init_case()
            self.set_case()
Example #23
        out_1 = self._executed_api(images, min=0.2, max=0.8)
        images = paddle.to_tensor(data, dtype='float32')
        out_2 = self._executed_api(images, min=0.2, max=0.9)
        images = paddle.to_tensor(data, dtype='float32')
        out_3 = self._executed_api(images, min=v_min, max=v_max)

        self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
        self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
        self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8)))

    def test_errors(self):
        paddle.enable_static()
        x1 = fluid.data(name='x1', shape=[1], dtype="int16")
        x2 = fluid.data(name='x2', shape=[1], dtype="int8")
        self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8)
        self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8)
        paddle.disable_static()


class TestInplaceClipAPI(TestClipAPI):
    def _executed_api(self, x, min=None, max=None):
        return x.clip_(min, max)
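TestInplaceClipAPI overrides only `_executed_api` to call the in-place `clip_`, so every test inherited from TestClipAPI is re-run against the in-place path without duplicating any test bodies.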


support_types = get_xpu_op_support_types('clip')
for stype in support_types:
    create_test_class(globals(), XPUTestClipOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #24
        def init_data(self):
            self.inputs = {
                'Condition':
                np.array([[[True, False], [False, True]],
                          [[False, True], [True, False]],
                          [[False, False], [False, True]]]).astype(self.dtype),
            }

            self.outputs = {
                'Out': np.array(
                    [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],
                    dtype='int64')
            }
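The expected output above is simply the coordinate list of the True entries, which is what NumPy's np.argwhere produces. A minimal check:

import numpy as np

cond = np.array([[True, False], [False, True]])
# argwhere returns one [row, col] pair per True element, in row-major order
assert (np.argwhere(cond) == np.array([[0, 0], [1, 1]])).all()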


support_types = get_xpu_op_support_types('where_index')
for stype in support_types:
    create_test_class(globals(), XPUTestWhereIndexOp, stype)


class TestWhereOpError(unittest.TestCase):
    def test_api(self):
        with program_guard(Program(), Program()):
            cond = fluid.layers.data(name='cond', shape=[4], dtype='bool')
            result = fluid.layers.where(cond)

            exe = fluid.Executor(paddle.XPUPlace(0))
            exe.run(fluid.default_startup_program())
            cond_i = np.array([True, False, False, False]).astype("bool")
            out = exe.run(fluid.default_main_program(), feed={'cond': cond_i})
Example #25
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place, check_dygraph=False)

    class TestRangeOpCase0(TestRangeOp):
        def init_config(self):
            self.case = (0, 5, 1)

    class TestRangeOpCase1(TestRangeOp):
        def init_config(self):
            self.case = (0, 5, 2)

    class TestRangeOpCase2(TestRangeOp):
        def init_config(self):
            self.case = (10, 1, -2)

    class TestRangeOpCase3(TestRangeOp):
        def init_config(self):
            self.case = (-1, -10, -2)

    class TestRangeOpCase4(TestRangeOp):
        def init_config(self):
            self.case = (10, -10, -11)


support_types = get_xpu_op_support_types("range")
for stype in support_types:
    create_test_class(globals(), XPUTestRangeOp, stype)

if __name__ == "__main__":
    unittest.main()
Example #26
            }

    class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X':
                np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}

    class TestElementwisePowOpInt(OpTest):
        def setUp(self):
            self.op_type = "elementwise_pow"
            self.inputs = {
                'X': np.asarray([1, 3, 6]),
                'Y': np.asarray([1, 1, 1])
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}

        def test_check_output(self):
            self.check_output()


support_types = get_xpu_op_support_types('elementwise_pow')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwisePowOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #27
            linear_2 = paddle.nn.Linear(5, 3)
            adam = paddle.optimizer.AdamW(
                learning_rate=paddle.optimizer.lr.PiecewiseDecay(
                    boundaries=[3, 6], values=[0.1, 0.2, 0.3]),
                parameters=[{
                    'params': linear_1.parameters(),
                    'learning_rate': 0.1,
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                }],
                apply_decay_param_fun=lambda name: True,
                weight_decay=0.01)

            for _ in range(2):
                out = linear_1(a)
                out = linear_2(out)
                out.backward()
                adam.step()
                adam.clear_gradients()
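The two parameter groups above exercise per-group overrides: the first supplies its own `learning_rate` entry for linear_1's parameters and the second its own `weight_decay` for linear_2's, while any field a group omits falls back to the optimizer-level defaults (the PiecewiseDecay schedule and weight_decay=0.01).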


support_types = get_xpu_op_support_types('adamw')
for stype in support_types:
    create_test_class(globals(), XPUTestAdamwOp1, stype)
    create_test_class(globals(), XPUTestAdamwOp2, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
Example #28
                np.random.randint(0, 2,
                                  (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
            }
            self.outputs = {
                'Out': self.inputs['X'].all(axis=self.attrs['dim'])
            }

    class XPUTestReduceAllCase3(XPUTestReduceAllBase):
        def set_case(self):
            self.op_type = 'reduce_all'
            self.attrs = {
                'use_xpu': True,
                'keep_dim': True,
                'dim': [1]
                # 'reduce_all': True,
            }
            self.inputs = {
                'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")
            }
            self.outputs = {
                'Out': np.expand_dims(self.inputs['X'].all(axis=1), axis=1)
            }


support_types = get_xpu_op_support_types('reduce_all')
for stype in support_types:
    create_test_class(globals(), XPUTestReduceAllOp, stype)

if __name__ == '__main__':
    unittest.main()
Example #29
            self.out_h = 12
            self.out_w = 12
            self.out_size = [8, 12]

    # out_size is a 1-D tensor
    class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor):
        def init_test_case(self):
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.out_size = np.array([66, 40]).astype("int32")
            self.shape_by_1Dtensor = True

    # scale is a 1-D tensor
    class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor):
        def init_test_case(self):
            self.input_shape = [3, 2, 32, 16]
            self.out_h = 64
            self.out_w = 32
            self.scale = 2.0
            self.out_size = None
            self.scale_by_1Dtensor = True


support_types = get_xpu_op_support_types('nearest_interp_v2')
for stype in support_types:
    create_test_class(globals(), XPUNearestInterpOpWrapper, stype)

if __name__ == "__main__":
    unittest.main()
Example #30
                    user_defined_grad_outputs=user_defined_grad_outputs)

    # Correct: There is a negative (minus) axis.
    class TestSqueeze2Op1(TestSqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (1, 20, 1, 5)
            self.axes = (0, -2)
            self.new_shape = (20, 5)

    # Correct: No axes input.
    class TestSqueeze2Op2(TestSqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (1, 20, 1, 5)
            self.axes = ()
            self.new_shape = (20, 5)

    # Correct: Just part of the axes are squeezed.
    class TestSqueeze2Op3(TestSqueeze2Op):
        def init_test_case(self):
            self.ori_shape = (6, 1, 5, 1, 4, 1)
            self.axes = (1, -1)
            self.new_shape = (6, 5, 1, 4)


support_types = get_xpu_op_support_types("squeeze2")
for stype in support_types:
    create_test_class(globals(), XPUTestSqueeze2Op, stype)

if __name__ == "__main__":
    unittest.main()