def _assert_input_spec_layer_return(self, expect_layer, test_layer):
    input_x = paddle.uniform([8, 8], dtype='float32')
    input_y = paddle.uniform([8, 1], dtype='float64')
    expected_result = expect_layer(input_x, input_y)
    test_result = test_layer(input_x, input_y)
    np.testing.assert_allclose(expected_result[0].numpy(),
                               test_result[0].numpy())
    np.testing.assert_allclose(expected_result[1].numpy(),
                               test_result[1].numpy())
def test_alias(self):
    paddle.uniform([2, 3], min=-5.0, max=5.0)
    paddle.tensor.uniform([2, 3], min=-5.0, max=5.0)
    paddle.tensor.random.uniform([2, 3], min=-5.0, max=5.0)

    def test_uniform_random():
        paddle.tensor.random.uniform_random([2, 3], min=-5.0, max=5.0)

    self.assertRaises(AttributeError, test_uniform_random)
def get_params(img: Tensor, scale: List[float],
               ratio: List[float]) -> Tuple[int, int, int, int]:
    """Get parameters for ``crop`` for a random sized crop.

    Args:
        img (PIL Image or Tensor): Input image.
        scale (list): range of scale of the origin size cropped
        ratio (list): range of aspect ratio of the origin aspect ratio cropped

    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop`` for a random
            sized crop.
    """
    width, height = F._get_image_size(img)
    area = height * width

    log_ratio = paddle.log(paddle.to_tensor(ratio))
    for _ in range(10):
        target_area = area * paddle.uniform(
            shape=[1], min=scale[0], max=scale[1]).numpy().item()
        aspect_ratio = paddle.exp(
            paddle.uniform(shape=[1], min=log_ratio[0],
                           max=log_ratio[1])).numpy().item()

        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))

        if 0 < w <= width and 0 < h <= height:
            i = paddle.randint(0, height - h + 1, shape=(1, )).numpy().item()
            j = paddle.randint(0, width - w + 1, shape=(1, )).numpy().item()
            return i, j, h, w

    # Fallback to central crop
    in_ratio = float(width) / float(height)
    if in_ratio < min(ratio):
        w = width
        h = int(round(w / min(ratio)))
    elif in_ratio > max(ratio):
        h = height
        w = int(round(h * max(ratio)))
    else:  # whole image
        w = width
        h = height
    i = (height - h) // 2
    j = (width - w) // 2
    return i, j, h, w
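For context, a brief sketch of how the sampled parameters are typically consumed; it assumes a PIL image `img` and that `F` exposes a `crop(img, top, left, height, width)` helper, as `paddle.vision.transforms.functional` does:

# Usage sketch (assumptions noted above); the crop is usually resized afterwards.
i, j, h, w = get_params(img, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.])
patch = F.crop(img, i, j, h, w)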
def kaiming_uniform_(x, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Fills the input `Tensor` with values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - He, K. et al. (2015), using a
    uniform distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        x: an n-dimensional `paddle.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing
            ``'fan_in'`` preserves the magnitude of the variance of the
            weights in the forward pass. Choosing ``'fan_out'`` preserves
            the magnitudes in the backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'``
            (default).
    """
    fan = _calculate_correct_fan(x, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    # Calculate uniform bounds from standard deviation
    bound = math.sqrt(3.0) * std
    temp_value = paddle.uniform(x.shape, min=-bound, max=bound)
    x.set_value(temp_value)
    return x
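A minimal usage sketch, assuming `_calculate_correct_fan` reports fan_in = 256 for the weight of a `Linear(256, 128)`; the layer itself is illustrative:

# Illustrative usage of the initializer above.
linear = paddle.nn.Linear(256, 128)
kaiming_uniform_(linear.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
# gain = sqrt(2 / (1 + 0**2)) = sqrt(2), std = sqrt(2) / sqrt(256) ~ 0.0884,
# so bound = sqrt(3) * std ~ 0.153 and weights fall in (-0.153, 0.153).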
def test_forward_pool2d():
    class Pool2D1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return nn.functional.avg_pool2d(inputs,
                                            kernel_size=2,
                                            stride=2,
                                            padding=0)

    class Pool2D2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return nn.functional.adaptive_avg_pool2d(inputs,
                                                     output_size=[3, 3])

    class Pool2D3(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return nn.functional.avg_pool2d(
                inputs,
                kernel_size=3,
                stride=1,
                padding=[1, 1],
                exclusive=False,
                divisor_override=2.5,
            )

    input_shapes = [[1, 2, 8, 8], [1, 3, 10, 10]]
    for input_shape in input_shapes:
        input_data = paddle.uniform(shape=input_shape,
                                    dtype="float32",
                                    min=-1,
                                    max=1)
        verify_model(Pool2D1(), input_data=input_data)
        verify_model(Pool2D2(), input_data=input_data)
        verify_model(Pool2D3(), input_data=input_data)
def variance_scaling_init_(tensor, scale=1, mode="fan_avg",
                           distribution="uniform"):
    stop_gradient = tensor.stop_gradient
    tensor.stop_gradient = True
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)

    if mode == "fan_in":
        scale /= fan_in
    elif mode == "fan_out":
        scale /= fan_out
    else:  # "fan_avg"
        scale /= (fan_in + fan_out) / 2

    if distribution == "normal":
        std = math.sqrt(scale)
        tensor[:] = paddle.normal(0, std,
                                  shape=tensor.shape).astype(tensor.dtype)
    else:  # "uniform"
        bound = math.sqrt(3 * scale)
        tensor[:] = paddle.uniform(tensor.shape,
                                   dtype=tensor.dtype,
                                   min=-bound,
                                   max=bound)
    tensor.stop_gradient = stop_gradient
    return tensor
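With `mode="fan_avg"` and a uniform distribution, the bound sqrt(3 * 2 / (fan_in + fan_out)) equals the Glorot/Xavier uniform bound sqrt(6 / (fan_in + fan_out)). A usage sketch, assuming the standard fan computation for a Conv2D weight (the layer is illustrative):

# Illustrative usage: weight shape [16, 3, 3, 3] gives fan_in=27, fan_out=144.
conv = paddle.nn.Conv2D(3, 16, kernel_size=3)
variance_scaling_init_(conv.weight, scale=1, mode="fan_avg",
                       distribution="uniform")
# uniform bound = sqrt(6 / (27 + 144)) ~ 0.187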
def test_forward_reduce():
    class Reduce(nn.Layer):
        def __init__(self, op_name, axis=None, keepdim=False):
            super(Reduce, self).__init__()
            self.op_name = op_name
            self.axis = axis
            self.keepdim = keepdim

        @paddle.jit.to_static
        def forward(self, inputs):
            result = getattr(paddle, self.op_name)(inputs,
                                                   axis=self.axis,
                                                   keepdim=self.keepdim)
            result = result.astype("float32")
            return result

    input_shapes = [[1, 2, 2, 5, 5], [2, 3, 4], [4, 20], [2, 3, 30, 30]]
    for input_shape in input_shapes:
        input_data = paddle.uniform(min=-3, max=3,
                                    shape=input_shape,
                                    dtype="float32")
        verify_model(Reduce("all"), input_data=input_data.astype("bool"))
        verify_model(Reduce("any", 1), input_data=input_data.astype("bool"))
        verify_model(Reduce("max", 0, True), input_data=input_data)
        verify_model(Reduce("min", 1, True), input_data=input_data)
        verify_model(Reduce("prod", 0), input_data=input_data)
        verify_model(Reduce("sum", 0, True), input_data=input_data)
        verify_model(Reduce("mean", -1, True), input_data=input_data)
def test_tensor_method_clear_gradient_case2(self):
    input = paddle.uniform(self.input_shape)
    linear = paddle.nn.Linear(2, 3)
    out = linear(input)
    out.backward()
    # The default arg set_to_zero is True, so passing False means
    # really releasing the gradient buffer rather than zeroing it.
    linear.weight.clear_gradient(False)

    # before ._gradient_set_empty(False),
    # the return of ._is_gradient_set_empty() should be True
    if not fluid.framework.in_dygraph_mode():
        self.assertTrue(linear.weight._is_gradient_set_empty())
    else:
        self.assertIsNone(linear.weight.grad)

    # reset, because ClearGradient will call SetIsEmpty(True),
    # but this is not our expectation
    if not fluid.framework.in_dygraph_mode():
        linear.weight._gradient_set_empty(False)
        # after ._gradient_set_empty(False),
        # the return of ._is_gradient_set_empty() should be False
        self.assertFalse(linear.weight._is_gradient_set_empty())

    # expected result: after a real clear, no gradient value remains
    gradient_actual = linear.weight.grad
    self.assertTrue(gradient_actual is None
                    or np.prod(gradient_actual.shape) == 0)
def test_forward_pool2d():
    @paddle.jit.to_static
    def pool2d1(inputs):
        return nn.functional.avg_pool2d(inputs,
                                        kernel_size=2,
                                        stride=2,
                                        padding=0)

    @paddle.jit.to_static
    def pool2d2(inputs):
        return nn.functional.adaptive_avg_pool2d(inputs, output_size=[3, 3])

    @paddle.jit.to_static
    def pool2d3(inputs):
        return nn.functional.max_pool2d(inputs,
                                        kernel_size=2,
                                        stride=2,
                                        padding=0,
                                        return_mask=True)

    input_data = paddle.uniform(shape=[1, 2, 32, 32],
                                dtype="float32",
                                min=-1,
                                max=1)
    # Note: pool2d3 (max_pool2d with return_mask=True) is defined above but
    # not exercised here.
    verify_model(pool2d1, input_data=input_data)
    verify_model(pool2d2, input_data=input_data)
def setUp(self):
    self.x = paddle.uniform([2, 10, 20, 25], dtype='float32')

    def test_dim_range_error():
        self.x.mode(axis=5)

    self.assertRaises(ValueError, test_dim_range_error)
def forward(self, inputs):
    """forward"""
    x = paddle.uniform([3, 10])
    x = paddle.add(x, inputs)
    return x
def __getitem__(self, index):
    data = paddle.uniform(IMAGE_SIZE, dtype='float32')
    # Apply data augmentation to the sample inside `__getitem__`
    # data = self.transform(data.numpy())
    label = paddle.randint(0, CLASS_NUM - 1, dtype='int64')
    return data, label
def _no_grad_uniform_(tensor, a, b):
    with paddle.no_grad():
        tensor.set_value(
            paddle.uniform(shape=tensor.shape,
                           dtype=tensor.dtype,
                           min=a,
                           max=b))
    return tensor
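A minimal usage sketch: re-initializing an existing parameter in place without recording the assignment in autograd (the Linear layer is illustrative):

# Illustrative usage of the helper above.
w = paddle.nn.Linear(4, 8).weight
_no_grad_uniform_(w, -0.1, 0.1)  # overwritten in place, no gradient op recorded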
def __getitem__(self, index):
    """
    Step 3: implement the __getitem__ method, which defines how to fetch
    the sample at the given index and returns a single sample
    (training data and its corresponding label).
    """
    data = paddle.uniform(IMAGE_SIZE, dtype='float32')
    label = paddle.randint(0, CLASS_NUM - 1, dtype='int64')
    return data, label
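A sketch of how such a dataset is typically wrapped and consumed; the class name `RandomDataset` and the `IMAGE_SIZE` / `CLASS_NUM` values below are illustrative assumptions, not taken from the snippet above:

# Hypothetical wrapper and loader; names and sizes are illustrative.
IMAGE_SIZE = [784]
CLASS_NUM = 10

class RandomDataset(paddle.io.Dataset):
    def __init__(self, num_samples):
        self.num_samples = num_samples

    def __getitem__(self, index):
        data = paddle.uniform(IMAGE_SIZE, dtype='float32')
        label = paddle.randint(0, CLASS_NUM - 1, dtype='int64')
        return data, label

    def __len__(self):
        return self.num_samples

loader = paddle.io.DataLoader(RandomDataset(64), batch_size=16, shuffle=True)
for data, label in loader:
    break  # data: [16, 784], label: [16, 1]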
def test_depthwise_conv2d(self):
    # NHWC input with 4 channels; groups == in_channels makes the
    # convolution depthwise.
    x_var = paddle.uniform((2, 8, 8, 4), dtype='float32', min=-1., max=1.)
    conv = paddle.nn.Conv2D(in_channels=4,
                            out_channels=4,
                            kernel_size=(3, 3),
                            groups=4,
                            data_format='NHWC')
    y_var = conv(x_var)
def kaiming_normal_(tensor, op='linear', a=0, mode='fan_in',
                    nonlinearity='leaky_relu'):
    fan = _calculate_correct_fan(tensor, op, mode)
    gain = math.sqrt(2.0)
    std = gain / math.sqrt(fan)
    # Note: despite the name, this samples from a uniform distribution
    # whose bound is derived from the target standard deviation.
    bound = math.sqrt(3.0) * std
    with paddle.no_grad():
        return paddle.assign(
            paddle.uniform(tensor.shape, min=-bound, max=bound), tensor)
def test_case(self):
    paddle.disable_static(paddle.NPUPlace(0))
    x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
    conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2)
    print(conv)
    y_var = conv(x_var)
    y_np = y_var.numpy()
    self.assertIsNotNone(y_np)
    paddle.enable_static()
def test_forward_math_api():
    class MathAPI(nn.Layer):
        def __init__(self, api_name):
            super(MathAPI, self).__init__()
            for candidate in (paddle, paddle.nn.functional):
                self.func = getattr(candidate, api_name, None)
                if self.func:
                    break

        @paddle.jit.to_static
        def forward(self, inputs):
            return self.func(inputs)

    api_list = [
        "abs", "acos", "asin", "atan", "ceil", "cos", "cosh", "erf", "exp",
        "floor", "hardshrink", "hardtanh", "log", "log2", "log10",
        "reciprocal", "relu", "relu6", "round", "rsqrt", "selu", "sigmoid",
        "sign", "sin", "sinh", "softplus", "softsign", "sqrt", "square",
        "swish", "tan", "tanh",
    ]
    input_shapes = [[128], [2, 100], [10, 2, 5], [7, 3, 4, 1]]
    for input_shape in input_shapes:
        input_data = paddle.rand(input_shape, dtype="float32")
        for api_name in api_list:
            if api_name in [
                    "log", "log2", "log10", "reciprocal", "sqrt", "rsqrt"
            ]:
                # avoid illegal input, all elements should be positive
                input_data = paddle.uniform(input_shape, min=0.01, max=0.99)
            verify_model(MathAPI(api_name), input_data=input_data)
def test_backward_downscale_in_infer_eager(self):
    for place in self.places:
        with fluid.dygraph.guard(place):
            with _test_eager_guard():
                input = paddle.uniform([40, 40], dtype="float32")
                input.stop_gradient = False
                out, mask = _C_ops.final_state_dropout(
                    input, None, 0.5, False, "downgrade_in_infer", 0, False)
                out.backward()
                self.assertTrue(
                    np.array_equal(
                        input.gradient(),
                        self.cal_grad_downscale_in_infer(mask.numpy())))
def pem_reg_loss_func(self, pred_score, gt_iou_map, mask):
    gt_iou_map = paddle.multiply(gt_iou_map, mask)

    # Split proposals into high / medium / low IoU buckets.
    u_hmask = paddle.cast(x=gt_iou_map > 0.7, dtype=self.datatype)
    u_mmask = paddle.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)
    u_mmask = paddle.cast(x=u_mmask, dtype=self.datatype)
    u_lmask = paddle.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.)
    u_lmask = paddle.cast(x=u_lmask, dtype=self.datatype)
    u_lmask = paddle.multiply(u_lmask, mask)

    num_h = paddle.cast(paddle.sum(u_hmask), dtype=self.datatype)
    num_m = paddle.cast(paddle.sum(u_mmask), dtype=self.datatype)
    num_l = paddle.cast(paddle.sum(u_lmask), dtype=self.datatype)

    # Randomly subsample the medium bucket so that roughly num_h
    # of its entries survive.
    r_m = num_h / num_m
    u_smmask = paddle.uniform(
        shape=[gt_iou_map.shape[1], gt_iou_map.shape[2]],
        dtype=self.datatype,
        min=0.0,
        max=1.0)
    u_smmask = paddle.multiply(u_mmask, u_smmask)
    u_smmask = paddle.cast(x=(u_smmask > (1. - r_m)), dtype=self.datatype)

    # Same subsampling for the low bucket.
    r_l = num_h / num_l
    u_slmask = paddle.uniform(
        shape=[gt_iou_map.shape[1], gt_iou_map.shape[2]],
        dtype=self.datatype,
        min=0.0,
        max=1.0)
    u_slmask = paddle.multiply(u_lmask, u_slmask)
    u_slmask = paddle.cast(x=(u_slmask > (1. - r_l)), dtype=self.datatype)

    weights = u_hmask + u_smmask + u_slmask
    weights.stop_gradient = True

    loss = F.square_error_cost(pred_score, gt_iou_map)
    loss = paddle.multiply(loss, weights)
    loss = 0.5 * paddle.sum(loss) / paddle.sum(weights)
    return loss
def test_backward_downscale_in_infer(self):
    _enable_legacy_dygraph()
    for place in self.places:
        with fluid.dygraph.guard(place):
            input = paddle.uniform([40, 40], dtype="float32")
            input.stop_gradient = False
            out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
            out.backward()
            self.assertTrue(
                np.array_equal(
                    input.gradient(),
                    self.cal_grad_downscale_in_infer(mask.numpy())))
def test_tensor_method_clear_gradient_case1(self):
    input = paddle.uniform(self.input_shape)
    linear = paddle.nn.Linear(2, 3)
    out = linear(input)
    out.backward()
    linear.weight.clear_gradient()

    # actual result
    gradient_actual = linear.weight.grad
    # expected result
    gradient_expected = np.zeros([2, 3]).astype('float64')
    self.assertTrue(np.allclose(gradient_actual.numpy(), gradient_expected))
def compute_out_shape(padding_alg):
    import paddle
    import paddle.nn as nn

    x_var = paddle.uniform((batch_size, 48, 64, 64),
                           dtype='float32',
                           min=-1.,
                           max=1.)
    if padding_alg == "EXPLICIT":
        conv = nn.Conv2D(48, 48, (3, 3), strides, paddings, dilations, 1)
    else:
        conv = nn.Conv2D(48, 48, (3, 3), strides, padding_alg, dilations, 1)
    y_var = conv(x_var)
    return y_var.shape
def test_backward_upscale_train_eager(self):
    for place in self.places:
        with fluid.dygraph.guard(place):
            with _test_eager_guard():
                prob = 0.5
                input = paddle.uniform([40, 40], dtype="float32")
                input.stop_gradient = False
                out, mask = _C_ops.final_state_dropout(
                    input, None, prob, False, "upscale_in_train", 0, False)
                out.backward()
                self.assertTrue(
                    np.allclose(
                        input.gradient(),
                        self.cal_grad_upscale_train(mask.numpy(), prob)))
def forward(self, *items):
    """Forward network"""
    if self.training and self.p > 0:
        # Sample an independent keep-mask per input stream; a position
        # survives with probability (1 - p).
        masks = [
            paddle.uniform(shape=x.shape[:2], min=0, max=1) >= self.p
            for x in items
        ]
        masks = [paddle.cast(x, 'float32') for x in masks]
        # Rescale surviving positions so the expected sum across streams
        # is preserved even when some streams are dropped.
        total = paddle.add(*masks)
        scale = len(items) / paddle.maximum(total, paddle.ones_like(total))
        masks = [mask * scale for mask in masks]
        items = [
            item * paddle.unsqueeze(mask, axis=-1)
            for item, mask in zip(items, masks)
        ]
    return items
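A usage sketch of this shared-dropout pattern, which drops whole positions independently per input stream and rescales the survivors. The layer name `SharedDropout` and its constructor are assumptions for illustration; only the `forward` above is from the snippet:

# Hypothetical layer wrapping the forward above; names are illustrative.
drop = SharedDropout(p=0.33)             # assumed: an nn.Layer storing self.p
drop.train()                             # masks only apply in training mode
word_embed = paddle.randn([4, 20, 100])  # [batch, seq_len, dim]
tag_embed = paddle.randn([4, 20, 100])
word_embed, tag_embed = drop(word_embed, tag_embed)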
def test_backward_upscale_train(self):
    _enable_legacy_dygraph()
    for place in self.places:
        with fluid.dygraph.guard(place):
            prob = 0.5
            input = paddle.uniform([40, 40], dtype="float32")
            input.stop_gradient = False
            out, mask = core.ops.dropout(input, 'dropout_prob', prob,
                                         "dropout_implementation",
                                         "upscale_in_train")
            out.backward()
            self.assertTrue(
                np.allclose(input.gradient(),
                            self.cal_grad_upscale_train(mask.numpy(), prob)))
def test_forward_group_norm():
    class GroupNorm(nn.Layer):
        def __init__(self, channels, groups):
            super(GroupNorm, self).__init__()
            self.group_norm = paddle.nn.GroupNorm(num_channels=channels,
                                                  num_groups=groups)

        def forward(self, inputs):
            return self.group_norm(inputs)

    input_shapes = [[1, 4, 6, 6], [2, 2, 4, 7], [2, 8, 1, 1]]
    for input_shape in input_shapes:
        num_channels = input_shape[1]
        input_data = paddle.uniform(input_shape)
        verify_model(GroupNorm(num_channels, 1), input_data)
        verify_model(GroupNorm(num_channels, 2), input_data)
def glorot_uniform(t):
    """Initialize tensor `t` in place with Glorot (Xavier) uniform values."""
    if len(t.shape) == 2:  # Linear weight: (fan_in, fan_out)
        fan_in, fan_out = t.shape
    elif len(t.shape) == 3:  # Conv1D weight: (out_ch, in_ch, kernel)
        fan_in = t.shape[1] * t.shape[2]
        fan_out = t.shape[0] * t.shape[2]
    else:
        fan_in = np.prod(t.shape)
        fan_out = np.prod(t.shape)

    limit = np.sqrt(6.0 / (fan_in + fan_out))
    weight = paddle.uniform(shape=t.shape, min=-limit, max=limit)
    t.set_value(weight)
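A usage sketch with the bound worked out for a small linear weight (the layer is illustrative): for a [64, 32] weight, limit = sqrt(6 / (64 + 32)) = 0.25.

# Illustrative usage of the initializer above.
fc = paddle.nn.Linear(64, 32)
glorot_uniform(fc.weight)  # values now lie in (-0.25, 0.25)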
def test_mlp(sort_sum_gradient):
    fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
    input_size = 5
    paddle.seed(1)
    mlp1 = MLP(input_size=input_size)
    # generate the gradient of each step
    mlp2 = MLP(input_size=input_size)

    expected_weight1_grad = 0.
    expected_bias1_grad = 0.
    expected_weight2_grad = 0.
    expected_bias2_grad = 0.

    for batch_id in range(100):
        x = paddle.uniform([10, input_size])
        detach_x = x.detach()
        clear_loss = mlp2(detach_x)
        clear_loss.backward()
        expected_weight1_grad = (expected_weight1_grad +
                                 mlp2._linear1.weight.grad)
        expected_bias1_grad = expected_bias1_grad + mlp2._linear1.bias.grad
        expected_weight2_grad = (expected_weight2_grad +
                                 mlp2._linear2.weight.grad)
        expected_bias2_grad = expected_bias2_grad + mlp2._linear2.bias.grad

        loss = mlp1(x)
        loss.backward()

        self.assertTrue(np.array_equal(loss.grad, [1]))
        self.assertTrue(
            np.allclose(mlp1._linear1.weight.grad, expected_weight1_grad))
        self.assertTrue(
            np.allclose(mlp1._linear1.bias.grad, expected_bias1_grad))
        self.assertTrue(
            np.allclose(mlp1._linear2.weight.grad, expected_weight2_grad))
        self.assertTrue(
            np.allclose(mlp1._linear2.bias.grad, expected_bias2_grad))

        mlp2.clear_gradients()
        self.assertTrue(np.array_equal(clear_loss.grad, [1]))
        if ((batch_id + 1) % 10) == 0:
            mlp1.clear_gradients()
            expected_weight1_grad = 0.
            expected_bias1_grad = 0.
            expected_weight2_grad = 0.
            expected_bias2_grad = 0.
def run_program(self, enable_autotune):
    self.set_flags(enable_autotune)
    if enable_autotune:
        paddle.incubate.autotune.set_config(
            config={"kernel": {
                "enable": True,
                "tuning_range": [1, 2]
            }})
    else:
        paddle.incubate.autotune.set_config(
            config={"kernel": {
                "enable": False
            }})

    x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1., max=1.)
    net = SimpleNet()
    for i in range(3):
        train_dygraph(net, x_var)
        expected_res = self.get_expected_res(i, enable_autotune)
        self.check_status(expected_res)