def func(self, place):
    # The shape of the input variable should be specified explicitly and must not include -1.
    shape = [2, 3, 4, 5]
    eps = 0.0001
    dtype = np.float64
    x = layers.data('x', shape, False, dtype)
    y = layers.data('y', shape, False, dtype)
    x.persistable = True
    y.persistable = True
    out = layers.elementwise_div(x, y, axis=0)
    x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    # Keep the divisor away from zero so the numerical gradient stays stable.
    y_arr[np.abs(y_arr) < 0.005] = 0.02
    gradient_checker.double_grad_check([x, y],
                                       out,
                                       x_init=[x_arr, y_arr],
                                       place=place,
                                       eps=eps,
                                       atol=1e-3)
    gradient_checker.double_grad_check_for_dygraph(self.divide_wrapper,
                                                   [x, y],
                                                   out,
                                                   x_init=[x_arr, y_arr],
                                                   place=place,
                                                   atol=1e-3)
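# A minimal sketch of the divide_wrapper referenced above, assuming
# double_grad_check_for_dygraph passes the inputs as a list of tensors and
# the wrapper simply forwards to paddle.divide; the actual method body is
# not shown in this section.
def divide_wrapper(self, x):
    return paddle.divide(x[0], x[1])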
def func(self, place): prog = fluid.Program() with fluid.program_guard(prog): np.random.seed() dtype = "float32" eps = 0.005 atol = 1e-4 x = layers.create_parameter(dtype=dtype, shape=self.shape, name='x') z = fluid.layers.batch_norm(input=x, data_layout=self.data_layout, use_global_stats=self.use_global_stats) x_arr = np.random.uniform(-1, 1, self.shape).astype(dtype) gradient_checker.double_grad_check([x], z, x_init=x_arr, atol=atol, place=place, eps=eps) gradient_checker.double_grad_check_for_dygraph( self.batch_norm_wrapper, [x], z, x_init=x_arr, atol=atol, place=place)
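# Hedged sketch of batch_norm_wrapper; assumes self.shape is NCHW (channel
# count at index 1) and that the wrapper re-applies batch norm in dygraph mode.
def batch_norm_wrapper(self, x):
    batch_norm = paddle.nn.BatchNorm2D(self.shape[1],
                                       data_format=self.data_layout)
    return batch_norm(x[0])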
def func(self, place):
    shape = [2, 4, 3, 3]
    eps = 0.005
    dtype = np.float64
    if core.is_compiled_with_rocm():
        dtype = np.float32
    x = layers.data('x', shape, False, dtype)
    y = layers.conv2d_transpose(x, 2, filter_size=1, groups=1, bias_attr=False)
    x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    w = fluid.default_main_program().global_block().all_parameters()
    w_arr = []
    for p in w:
        w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
    if core.is_compiled_with_rocm():
        # HIP will sometimes fail if no explicit atol is given.
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps,
                                           atol=1e-4)
    else:
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.conv_transpose_wrapper,
                                                   [x] + w,
                                                   y,
                                                   x_init=[x_arr] + w_arr,
                                                   place=place)
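# Hedged sketch of conv_transpose_wrapper; assumes the first entry is the
# input and the second the filter parameter created by conv2d_transpose.
def conv_transpose_wrapper(self, x):
    return paddle.nn.functional.conv2d_transpose(x[0], x[1], groups=1)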
def func(self, place):
    prog = fluid.Program()
    with fluid.program_guard(prog):
        np.random.seed()
        shape = [2, 3, 4, 5]
        dtype = "float32"
        eps = 0.005
        atol = 1e-4
        x = layers.create_parameter(dtype=dtype, shape=shape, name='x')
        z = paddle.nn.functional.instance_norm(x)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        # Check in static mode.
        gradient_checker.double_grad_check([x],
                                           z,
                                           x_init=x_arr,
                                           atol=atol,
                                           place=place,
                                           eps=eps)
        # Check in eager (dygraph) mode.
        gradient_checker.double_grad_check_for_dygraph(self.instance_norm_wrapper,
                                                       [x],
                                                       z,
                                                       x_init=x_arr,
                                                       atol=atol,
                                                       place=place)
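# Hedged sketch of instance_norm_wrapper; assumes it mirrors the functional
# call used in the static branch above.
def instance_norm_wrapper(self, x):
    return paddle.nn.functional.instance_norm(x[0])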
def func(self, place):
    x_shape = [2, 4, 3, 3]
    w_shape = [4, 1, 3, 3]
    eps = 0.005
    dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
    x = layers.data('x', x_shape, False, dtype)
    w = layers.data('w', w_shape, False, dtype)
    # Conditions for a depthwise conv:
    #   use_cudnn == False
    #   groups == filters
    #   num_filters % num_channels == 0
    y = paddle.nn.functional.conv2d(x, w, groups=4)
    x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
    w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype)
    gradient_checker.double_grad_check([x, w],
                                       y,
                                       x_init=[x_arr, w_arr],
                                       place=place,
                                       eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.depthwise_conv2d_wrapper,
                                                   [x, w],
                                                   y,
                                                   x_init=[x_arr, w_arr],
                                                   place=place)
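# Hedged sketch of depthwise_conv2d_wrapper; assumes the same groups=4 call
# that the static branch uses.
def depthwise_conv2d_wrapper(self, x):
    return paddle.nn.functional.conv2d(x[0], x[1], groups=4)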
def func(self, place):
    x_shape = [2, 4, 10]
    dtype = np.float64
    x = layers.data('x', x_shape, False, dtype)
    x.persistable = True
    out = paddle.clip(x, min=-1., max=1.)
    x_arr = np.random.uniform(-5., 5., x_shape).astype(dtype)
    gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
    gradient_checker.double_grad_check_for_dygraph(self.clip_wrapper,
                                                   [x],
                                                   out,
                                                   x_init=x_arr,
                                                   place=place)
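# Hedged sketch of clip_wrapper; assumes the same clipping range as above.
def clip_wrapper(self, x):
    return paddle.clip(x[0], min=-1., max=1.)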
def func(self, place):
    x_shape = [2, 3, 4, 5]
    pad = [1, 1, 1, 1]
    eps = 0.005
    dtype = np.float64
    x = layers.data('x', x_shape, False, dtype)
    x.persistable = True
    out = paddle.nn.functional.pad(x, pad)
    x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
    gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place, eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.pad_wrapper,
                                                   [x],
                                                   out,
                                                   x_init=x_arr,
                                                   place=place)
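# Hedged sketch of pad_wrapper; assumes the same [1, 1, 1, 1] padding.
def pad_wrapper(self, x):
    return paddle.nn.functional.pad(x[0], [1, 1, 1, 1])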
def func(self, place):
    shape = [2, 3, 7, 9]
    eps = 0.0001
    dtype = np.float64
    x = layers.data('x', shape, False, dtype)
    x.persistable = True
    y = layers.rsqrt(x)
    # rsqrt is only defined for positive inputs, so sample away from zero.
    x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)
    gradient_checker.double_grad_check([x], y, x_init=x_arr, place=place, eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.rsqrt_wrapper,
                                                   [x],
                                                   y,
                                                   x_init=x_arr,
                                                   place=place)
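# Hedged sketch of rsqrt_wrapper; assumes a direct paddle.rsqrt call.
def rsqrt_wrapper(self, x):
    return paddle.rsqrt(x[0])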
def func(self, place):
    x_shape = [3, 40]
    axes = [1, 2]
    eps = 0.005
    dtype = np.float64
    x = layers.data('x', x_shape, False, dtype)
    x.persistable = True
    out = paddle.unsqueeze(x, axes)
    x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
    gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place, eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.unsqueeze_wrapper,
                                                   [x],
                                                   out,
                                                   x_init=x_arr,
                                                   place=place)
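# Hedged sketch of unsqueeze_wrapper; assumes the same axes as above.
def unsqueeze_wrapper(self, x):
    return paddle.unsqueeze(x[0], [1, 2])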
def func(self, place):
    shape = [2, 3, 7, 9]
    eps = 0.0005
    dtype = np.float64
    x = layers.data('x', shape, False, dtype=dtype)
    x.persistable = True
    y = layers.sigmoid(x)
    x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    # Keep inputs away from zero so the numerical gradient is well conditioned.
    x_arr[np.abs(x_arr) < 0.005] = 0.002
    gradient_checker.double_grad_check([x], y, x_init=x_arr, place=place, eps=eps)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    gradient_checker.double_grad_check_for_dygraph(self.sigmoid_wrapper,
                                                   [x],
                                                   y,
                                                   x_init=x_arr,
                                                   place=place)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
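# Hedged sketch of sigmoid_wrapper; assumes the functional sigmoid.
def sigmoid_wrapper(self, x):
    return paddle.nn.functional.sigmoid(x[0])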
def func(self, place): input_NCHW = fluid.layers.data( name="input_NCHW", shape=[2, 3, 5, 5], append_batch_size=False, dtype="float32") input_NCHW.persistable = True y = layers.pool2d(input_NCHW, pool_size=[4, 4], pool_type="avg") y = paddle.nn.functional.avg_pool2d(input_NCHW, kernel_size=[4, 4]) x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32) gradient_checker.double_grad_check( [input_NCHW], y, x_init=x_arr, place=place, eps=0.05) gradient_checker.double_grad_check_for_dygraph( self.pool2d_wrapper, [input_NCHW], y, x_init=x_arr, place=place)
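# Hedged sketch of pool2d_wrapper for the NCHW case; assumes the same
# kernel size as the static-mode call above.
def pool2d_wrapper(self, x):
    return paddle.nn.functional.avg_pool2d(x[0], kernel_size=[4, 4])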
def func(self, place): input_NHWC = fluid.layers.data( name="input_NHWC", shape=[2, 5, 5, 3], append_batch_size=False, dtype="float32") input_NHWC.persistable = True y = paddle.nn.functional.avg_pool2d( input_NHWC, kernel_size=2, data_format="NHWC") x_arr = np.random.uniform(-1, 1, [2, 5, 5, 3]).astype(np.float32) gradient_checker.double_grad_check( [input_NHWC], y, x_init=x_arr, place=place, eps=0.05) gradient_checker.double_grad_check_for_dygraph( self.pool2d_wrapper, [input_NHWC], y, x_init=x_arr, place=place)
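# Hedged sketch of the NHWC pool2d_wrapper variant; assumes it mirrors the
# static-mode call above, including the data_format argument.
def pool2d_wrapper(self, x):
    return paddle.nn.functional.avg_pool2d(x[0],
                                           kernel_size=2,
                                           data_format="NHWC")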
def func(self, place):
    shape = [2, 3, 7, 9]
    eps = 0.005
    alpha = 0.2
    dtype = np.float64
    x = layers.data('x', shape, False, dtype)
    x.persistable = True
    y = layers.leaky_relu(x, alpha=alpha)
    x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    # Move inputs away from zero, where leaky_relu is not differentiable.
    x_arr[np.abs(x_arr) < 0.005] = 0.02
    gradient_checker.double_grad_check([x], y, x_init=x_arr, place=place, eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.leaky_relu_wrapper,
                                                   [x],
                                                   y,
                                                   x_init=x_arr,
                                                   place=place)
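# Hedged sketch of leaky_relu_wrapper; assumes the same alpha of 0.2.
def leaky_relu_wrapper(self, x):
    return paddle.nn.functional.leaky_relu(x[0], negative_slope=0.2)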
def func(self, place):
    # The shape of the input variable should be specified explicitly and must not include -1.
    shape = [2, 3, 7, 9]
    eps = 0.005
    dtype = np.float64
    x = layers.data('x', shape, False, dtype)
    x.persistable = True
    y = layers.square(x)
    x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    gradient_checker.double_grad_check([x], y, x_init=x_arr, place=place, eps=eps)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    gradient_checker.double_grad_check_for_dygraph(self.square_wrapper,
                                                   [x],
                                                   y,
                                                   x_init=x_arr,
                                                   place=place)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
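# Hedged sketch of square_wrapper; assumes a direct paddle.square call.
def square_wrapper(self, x):
    return paddle.square(x[0])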
def func(self, place):
    shape = [2, 4, 4, 4]
    eps = 1e-6
    alpha = 0.2
    dtype = np.float64
    SEED = 0
    x = layers.data('x', shape, False, dtype)
    x.persistable = True
    y = F.celu(x, alpha=alpha)
    # Seed the global RNG so the check is reproducible.
    np.random.seed(SEED)
    x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    gradient_checker.double_grad_check([x], y, x_init=x_arr, place=place, eps=eps)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
    gradient_checker.double_grad_check_for_dygraph(self.celu_wrapper,
                                                   [x],
                                                   y,
                                                   x_init=x_arr,
                                                   place=place)
    fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
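# Hedged sketch of celu_wrapper; assumes the same alpha of 0.2.
def celu_wrapper(self, x):
    return F.celu(x[0], alpha=0.2)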
def func(self, place):
    x_shape = [2, 3, 4, 5]
    dtype = np.float64
    x1 = layers.data('x1', x_shape, False, dtype)
    x2 = layers.data('x2', x_shape, False, dtype)
    x1.persistable = True
    x2.persistable = True
    out = paddle.concat([x1, x2], axis=0)
    x1_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
    x2_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
    gradient_checker.double_grad_check([x1, x2],
                                       out,
                                       x_init=[x1_arr, x2_arr],
                                       place=place)
    gradient_checker.double_grad_check_for_dygraph(self.concat_wrapper,
                                                   [x1, x2],
                                                   out,
                                                   x_init=[x1_arr, x2_arr],
                                                   place=place)
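# Hedged sketch of concat_wrapper; assumes concatenation along axis 0,
# matching the static-mode call above.
def concat_wrapper(self, x):
    return paddle.concat(x, axis=0)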
def func(self, place):
    x_shape = [2, 3, 8, 8, 8]
    w_shape = [6, 3, 3, 3, 3]
    eps = 0.005
    dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
    x = layers.data('x', x_shape, False, dtype)
    w = layers.data('w', w_shape, False, dtype)
    x.persistable = True
    w.persistable = True
    y = paddle.nn.functional.conv3d(x, w)
    x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
    w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype)
    gradient_checker.double_grad_check([x, w],
                                       y,
                                       x_init=[x_arr, w_arr],
                                       place=place,
                                       eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.conv3d_wrapper,
                                                   [x, w],
                                                   y,
                                                   x_init=[x_arr, w_arr],
                                                   place=place)
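# Hedged sketch of conv3d_wrapper; assumes input and filter are passed in order.
def conv3d_wrapper(self, x):
    return paddle.nn.functional.conv3d(x[0], x[1])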
def func(self, place):
    # The shape of the input variable should be specified explicitly and must not include -1.
    shape = [2, 3, 4, 5]
    eps = 0.005
    dtype = np.float64
    x = layers.data('x', shape, False, dtype)
    y = layers.data('y', shape, False, dtype)
    x.persistable = True
    y.persistable = True
    out = layers.elementwise_sub(x, y)
    x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
    gradient_checker.double_grad_check([x, y],
                                       out,
                                       x_init=[x_arr, y_arr],
                                       place=place,
                                       eps=eps)
    gradient_checker.double_grad_check_for_dygraph(self.subtract_wrapper,
                                                   [x, y],
                                                   out,
                                                   x_init=[x_arr, y_arr],
                                                   place=place)
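# Hedged sketch of subtract_wrapper; assumes a direct paddle.subtract call.
def subtract_wrapper(self, x):
    return paddle.subtract(x[0], x[1])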