def test_check_output(self):
    """Verify fp16 batch_norm output on CUDA for both layouts and input ranks.

    Only runs when the binary is CUDA-enabled, the batch_norm op supports
    GPU, and the device supports float16; otherwise the loop body is empty
    and the test is a no-op.
    """
    candidate_places = []
    if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
        gpu = core.CUDAPlace(0)
        if core.is_float16_supported(gpu):
            candidate_places.append(gpu)
    for run_place in candidate_places:
        for layout in ["NCHW", "NHWC"]:
            # Exercise both a 4-D (image-style) and a 2-D (flat) input shape.
            self.check_with_place(run_place, layout, self.dtype, [2, 3, 4, 5])
            self.check_with_place(run_place, layout, self.dtype, [2, 3])
def test_check_grad_normal(self):
    """Check the fp16 gradient w.r.t. 'Input' on CUDA.

    Skips silently when CUDA is not compiled in or the device lacks fp16
    support.
    """
    # Guard CUDAPlace(0) construction so CPU-only builds skip instead of
    # raising, matching the other CUDA-gated tests in this file.
    if not core.is_compiled_with_cuda():
        return
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_grad_with_place(
            place, ['Input'], 'Out', max_relative_error=0.006)
def test_w_is_selected_rows(self):
    """Run the selected-rows check on CUDA, both in-place and out-of-place.

    No-op when the device does not support float16.
    """
    cuda_place = core.CUDAPlace(0)
    if not core.is_float16_supported(cuda_place):
        return
    for use_inplace in (True, False):
        self.check_with_place(cuda_place, use_inplace)
def test_check_grad(self):
    """Gradient check for 'x0' -> 'Out' with a loose fp16 tolerance.

    No-op when the CUDA device does not support float16.
    """
    gpu = core.CUDAPlace(0)
    if not core.is_float16_supported(gpu):
        return
    self.check_grad(['x0'], 'Out', max_relative_error=0.15)
def test_check_output(self):
    """Check op output on CUDA with an fp16-appropriate tolerance (2e-2).

    Skips silently when CUDA is not compiled in or fp16 is unsupported.
    """
    # Guard CUDAPlace(0) construction so CPU-only builds skip instead of
    # raising, matching the other CUDA-gated tests in this file.
    if not core.is_compiled_with_cuda():
        return
    place = core.CUDAPlace(0)
    if core.is_float16_supported(place):
        self.check_output_with_place(place, atol=2e-2)
def test_check_grad_no_input(self):
    """Gradient check for 'Filter' with 'Input' excluded from the grad set.

    Runs only when the device supports float16 AND the module-level
    `grad_check` flag is set.
    """
    gpu = core.CUDAPlace(0)
    if not (core.is_float16_supported(gpu) and grad_check):
        return
    self.check_grad_with_place(
        gpu, ['Filter'], 'Output', no_grad_set={'Input'})
def test_check_grad_ignore_uv(self):
    """Gradient check for 'X' -> 'Out' on CUDA; no-op without fp16 support."""
    gpu = core.CUDAPlace(0)
    if not core.is_float16_supported(gpu):
        return
    self.check_grad_with_place(gpu, ['X'], 'Out')
def set_dtypes(self):
    """Populate self.dtypes: always float64, plus float16 when supported."""
    dtype_list = ['float64']
    if core.is_float16_supported(self.place):
        dtype_list.append('float16')
    self.dtypes = dtype_list
def test_check_grad(self):
    """Gradient check for "X" -> "Out" at 5% tolerance; no-op without fp16."""
    gpu = core.CUDAPlace(0)
    if not core.is_float16_supported(gpu):
        return
    self.check_grad_with_place(
        gpu, ["X"], "Out", max_relative_error=0.05)
def test_check_grad(self):
    """Gradient check for 'X' -> 'Out' at the module-level `grad_atol`.

    Runs only when fp16 is supported and the `grad_check` flag is set.
    """
    gpu = core.CUDAPlace(0)
    # Query fp16 support first (same order as other tests in this file),
    # then consult the grad_check flag.
    if not core.is_float16_supported(gpu):
        return
    if not grad_check:
        return
    self.check_grad_with_place(
        gpu, ['X'], 'Out', max_relative_error=grad_atol)
def test_check_output(self):
    """Output check on CUDA at the module-level `atol`; no-op without fp16."""
    gpu = core.CUDAPlace(0)
    if not core.is_float16_supported(gpu):
        return
    self.check_output_with_place(gpu, atol=atol)
def test_checkout_grad(self):
    """Gradient check for 'X' -> 'Out' with a very loose 0.8 fp16 tolerance.

    NOTE: the method name ('checkout') is kept as-is — renaming would change
    which tests the runner discovers.
    """
    gpu = core.CUDAPlace(0)
    if not core.is_float16_supported(gpu):
        return
    self.check_grad_with_place(
        gpu, ['X'], 'Out', max_relative_error=0.8)
def test_check_output(self):
    """Output check at atol=1e-3; no-op unless CUDA + fp16 are available."""
    if not core.is_compiled_with_cuda():
        return
    gpu = core.CUDAPlace(0)
    if core.is_float16_supported(gpu):
        self.check_output_with_place(gpu, atol=1e-3)
def test_check_output(self):
    """Output check (eager mode included) at atol=0.002; no-op without fp16."""
    gpu = core.CUDAPlace(0)
    if not core.is_float16_supported(gpu):
        return
    self.check_output_with_place(gpu, atol=0.002, check_eager=True)
def test_check_output(self):
    """Static-graph output check on CUDA; no-op unless CUDA + fp16 exist."""
    # Force static mode before building/running the program.
    paddle.enable_static()
    if not core.is_compiled_with_cuda():
        return
    gpu = fluid.CUDAPlace(0)
    if core.is_float16_supported(gpu):
        self.check_output_with_place(gpu)
def test_check_output(self):
    """Output check at atol=1e-3; runs only on fp16-capable CUDA builds."""
    if core.is_compiled_with_cuda():
        gpu = core.CUDAPlace(0)
        if core.is_float16_supported(gpu):
            self.check_output_with_place(gpu, atol=1e-3)
def test_scale_selected_rows_inplace(self):
    """In-place scale on selected rows ('in' -> 'in'); no-op without fp16."""
    gpu = core.CUDAPlace(0)
    if not core.is_float16_supported(gpu):
        return
    self.check_with_place(gpu, 'in', 'in')
def test_check_output(self):
    """Run the output check unless fp16 was requested but is unsupported."""
    fp16_unsupported = (self.dtype == np.float16
                        and not core.is_float16_supported(self.place))
    if not fp16_unsupported:
        self.check_output_with_place(self.place, atol=self.atol)