コード例 #1
0
 def func_exception(self):
     """Check that running custom_relu with an unsupported int32 input
     raises an OSError whose message names the unimplemented kernel
     function and the custom-op source file.

     Uses ``assertRaises`` as a context manager instead of a manual
     caught-exception flag, so a missing exception fails with a clear
     message and ``assertIn`` reports the full mismatch on failure.
     """
     # CPU path: "relu_cpu_forward" has no int32 instantiation.
     x = np.random.uniform(-1, 1, [4, 8]).astype('int32')
     with self.assertRaises(OSError) as ctx:
         custom_relu_dynamic(custom_module.custom_relu, 'cpu', 'int32', x)
     msg = str(ctx.exception)
     self.assertIn(
         "function \"relu_cpu_forward\" is not implemented for data type `int32`",
         msg)
     # The reported source path uses the platform's native separator.
     if IS_WINDOWS:
         self.assertIn(
             r"python\paddle\fluid\tests\custom_op\custom_relu_op.cc", msg)
     else:
         self.assertIn(
             "python/paddle/fluid/tests/custom_op/custom_relu_op.cc", msg)

     # MAC-CI don't support GPU
     if IS_MAC:
         return
     # GPU path: "relu_cuda_forward_kernel" has no int32 instantiation.
     x = np.random.uniform(-1, 1, [4, 8]).astype('int32')
     with self.assertRaises(OSError) as ctx:
         custom_relu_dynamic(custom_module.custom_relu, 'gpu', 'int32', x)
     msg = str(ctx.exception)
     self.assertIn(
         "function \"relu_cuda_forward_kernel\" is not implemented for data type `int32`",
         msg)
     self.assertIn(
         "python/paddle/fluid/tests/custom_op/custom_relu_op.cu", msg)
コード例 #2
0
 def test_dynamic(self):
     """Compare the custom relu op against the paddle reference in
     dynamic-graph mode for every (device, dtype) pair, asserting that
     both the forward output and the input gradient match exactly.
     """
     for device in self.devices:
         for dtype in self.dtypes:
             # float16 relu is not exercised on CPU.
             if device == 'cpu' and dtype == 'float16':
                 continue
             # One shared random input per (device, dtype) so the custom
             # and reference paths see identical data.
             np_x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
             for custom_op in self.custom_ops:
                 custom_out, custom_grad = custom_relu_dynamic(
                     custom_op, device, dtype, np_x)
                 ref_out, ref_grad = custom_relu_dynamic(
                     custom_op, device, dtype, np_x, False)
                 out_msg = "custom op out: {},\n paddle api out: {}".format(
                     custom_out, ref_out)
                 self.assertTrue(
                     np.array_equal(custom_out, ref_out), out_msg)
                 grad_msg = (
                     "custom op x grad: {},\n paddle api x grad: {}".format(
                         custom_grad, ref_grad))
                 self.assertTrue(
                     np.array_equal(custom_grad, ref_grad), grad_msg)