def test_check_output(self):
    places = []
    if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            places.append(place)

    for place in places:
        for data_format in ["NCHW", "NHWC"]:
            self.check_with_place(place, data_format, self.dtype,
                                  [2, 3, 4, 5])
            self.check_with_place(place, data_format, self.dtype, [2, 3])
Example #2
def test_check_grad_normal(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_grad_with_place(place, ['Input'],
                                   'Out',
                                   max_relative_error=0.006)
Example #3
def test_w_is_selected_rows(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        for inplace in [True, False]:
            self.check_with_place(place, inplace)
Example #4
def test_check_grad(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_grad(['x0'], 'Out', max_relative_error=0.15)
Example #5
def test_check_output(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_output_with_place(place, atol=2e-2)
Example #6
def test_check_grad_no_input(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place) and grad_check:
        self.check_grad_with_place(place, ['Filter'],
                                   'Output',
                                   no_grad_set=set(['Input']))
Example #7
def test_check_grad_ignore_uv(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_grad_with_place(place, ['X'], 'Out')
Example #8
def set_dtypes(self):
    self.dtypes = ['float64']
    if core.is_float16_supported(self.place):
        self.dtypes.append('float16')
Example #9
def test_check_grad(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_grad_with_place(place, ["X"],
                                   "Out",
                                   max_relative_error=0.05)
Example #10
def test_check_grad(self):
    place = core.CUDAPlace(0)
    support_fp16 = core.is_float16_supported(place)
    if support_fp16 and grad_check:
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=grad_atol)
Example #11
def test_check_output(self):
    place = core.CUDAPlace(0)
    support_fp16 = core.is_float16_supported(place)
    if support_fp16:
        self.check_output_with_place(place, atol=atol)
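Note: `grad_check`, `grad_atol`, and `atol` in Examples #6, #10, and #11 are not defined inside the methods themselves; in the PaddlePaddle test suite such FP16 cases are usually generated by a small class factory that supplies these values as closure variables. The sketch below is only illustrative of that pattern; the factory name, default tolerances, and the 'X'/'Out' variable names are assumptions, not Paddle's actual helpers.

import numpy as np

import paddle.fluid.core as core


def create_fp16_test_class(parent, atol=1e-3, grad_check=True, grad_atol=0.8):
    # Illustrative factory: stamps out an FP16 variant of an existing
    # OpTest class. `atol`, `grad_check`, and `grad_atol` become the free
    # variables that appear inside the generated test methods.
    class TestFp16Case(parent):
        def init_dtype(self):
            # Assumes the parent OpTest calls init_dtype() in setUp().
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place) and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    TestFp16Case.__name__ = parent.__name__ + "Fp16"
    globals()[TestFp16Case.__name__] = TestFp16Case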
Example #12
def test_checkout_grad(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=0.8)
Example #13
def test_check_output(self):
    if core.is_compiled_with_cuda():
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-3)
Example #14
def test_check_output(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_output_with_place(place, atol=0.002, check_eager=True)
Example #15
def test_check_output(self):
    paddle.enable_static()
    if core.is_compiled_with_cuda():
        place = fluid.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place)
Example #16
def test_check_output(self):
    if core.is_compiled_with_cuda():
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-3)
Example #17
def test_scale_selected_rows_inplace(self):
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_with_place(place, 'in', 'in')
Example #18
def test_check_output(self):
    if self.dtype == np.float16 and not core.is_float16_supported(
            self.place):
        return
    self.check_output_with_place(self.place, atol=self.atol)
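For context, each snippet above is a method of an OpTest subclass. Below is a minimal, self-contained sketch of such a test; the elementwise_add operator, the shapes, the tolerance, and the `op_test` import path are assumptions chosen for illustration, while the float16-support guard is the part the examples demonstrate.

import unittest

import numpy as np

import paddle.fluid.core as core
from op_test import OpTest  # import path depends on the Paddle test directory


class TestElementwiseAddFP16(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.float16
        x = np.random.random((4, 8)).astype(self.dtype)
        y = np.random.random((4, 8)).astype(self.dtype)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': x + y}

    def test_check_output(self):
        # Only run the FP16 check when a CUDA build is available and the
        # device supports float16 kernels; otherwise return without
        # checking anything, as in the examples above.
        if not core.is_compiled_with_cuda():
            return
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-3)


if __name__ == '__main__':
    unittest.main()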