def test_shift_forward_backward(seed, inshape, shifts, border_mode, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Input
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.shift, ref_shift, inputs,
                    func_args=[shifts, border_mode],
                    ctx=ctx, func_name=func_name, atol_f=1e-6, atol_b=1e-2)
def test_logical_binary_forward_backward(seed, fname, ctx, func_name):
    from nbla_test_utils import function_tester
    func = getattr(F, fname)
    ref_func = getattr(np, fname)
    rng = np.random.RandomState(seed)
    inputs = [rng.randint(0, 2, size=(2, 3, 4)) for _ in range(2)]
    function_tester(rng, func, ref_func, inputs, ctx=ctx,
                    backward=[False, False], func_name=func_name)
def test_logical_scalar_forward_backward(val, seed, fname, ctx, func_name):
    from nbla_test_utils import function_tester
    func = getattr(F, fname)
    ref_func = getattr(np, fname.replace('_scalar', ''))
    rng = np.random.RandomState(seed)
    inputs = [rng.randint(0, 2, size=(2, 3, 4))]
    function_tester(rng, func, ref_func, inputs, [val], ctx=ctx,
                    backward=[False], func_name=func_name)
def test_transpose_forward_backward(seed, inshape, axes, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Input
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.transpose, ref_transpose, inputs,
                    func_args=[axes], ctx=ctx, func_name=func_name,
                    atol_f=1e-6, atol_b=1e-2)
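# For reference, a minimal sketch of what the `ref_transpose` helper used
# above could look like (the actual helper is defined elsewhere in the
# test module):
def ref_transpose(x, axes):
    # Copy so the reference output never aliases the input buffer.
    return x.transpose(axes).copy()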
def test_huber_loss_forward_backward(seed, ctx, func_name, delta):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2 for _ in range(2)]
    function_tester(rng, F.huber_loss, ref_huber_loss, inputs,
                    func_args=[delta], atol_b=1e-2, ctx=ctx,
                    func_name=func_name)
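# A hedged sketch of the `ref_huber_loss` reference, assuming the common
# squared/linear form without a 1/2 factor: quadratic for |d| <= delta,
# linear beyond, and continuous at the joint.
def ref_huber_loss(x0, x1, delta):
    d = x0 - x1
    abs_d = np.abs(d)
    return np.where(abs_d <= delta, d ** 2, 2 * delta * abs_d - delta ** 2)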
def test_log_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [np.clip(np.abs(rng.randn(2, 3, 4).astype(np.float32)) * 1e10,
                      1e-6, 1e10)]
    function_tester(rng, F.log, np.log, inputs, atol_f=1e-6, atol_b=1e-2,
                    ctx=ctx, func_name=func_name)
def test_reshape_forward_backward(seed, inshape, outshape, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Input
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.reshape, ref_reshape, inputs,
                    func_args=[outshape], ctx=ctx, func_name=func_name)
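# A minimal sketch of the `ref_reshape` helper. The optional `inplace`
# flag mirrors the extra func_arg passed by another variant of this test
# further below and is simply ignored by the numpy reference:
def ref_reshape(x, shape, inplace=True):
    return x.reshape(shape).copy()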
def test_add_scalar_forward_backward(seed, val, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, F.add_scalar, lambda x, y: x + y, inputs,
                    func_args=[val], ctx=ctx, func_name=func_name)
def test_pow_scalar_forward_backward(seed, val, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Keep inputs positive and away from zero for a stable numerical gradient.
    inputs = [rng.rand(2, 3, 4).astype(np.float32) + 0.5]
    function_tester(rng, F.pow_scalar, lambda x, y: x ** y, inputs,
                    func_args=[val], atol_b=5e-2, ctx=ctx, func_name=func_name)
def test_stack_forward_backward(seed, axis, num_inputs, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    shape = [2, 3, 4]
    inputs = [rng.randn(*shape).astype(np.float32) for _ in range(num_inputs)]
    function_tester(rng, F.stack, ref_stack, inputs,
                    func_kwargs=dict(axis=axis), ctx=ctx,
                    func_name=func_name, atol_b=2e-3)
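# A minimal sketch of the `ref_stack` reference; it assumes function_tester
# forwards func_kwargs, so `axis` arrives as a keyword argument:
def ref_stack(*inputs, axis=0):
    return np.stack(inputs, axis=axis)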
def test_sign_forward_backward(seed, alpha, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, F.sign, ref_func_sign, inputs, func_args=[alpha],
                    ctx=ctx, func_name=func_name, atol_b=1e-2, dstep=1e-3,
                    ref_grad=ref_grad_sign)
def test_binary_cross_entropy_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.rand(2, 3, 4).astype(np.float32) for _ in range(2)]
    inputs[1] = np.round(inputs[1])
    function_tester(
        rng, F.binary_cross_entropy,
        lambda x, y: -(y * np.log(x) + (1 - y) * np.log(1 - x)),
        inputs, atol_b=5e-2, ctx=ctx, func_name=func_name)
def test_slice_forward_backward(seed, inshape, start, stop, step, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    x = rng.randn(*inshape).astype(np.float32)
    function_tester(rng, F.slice, ref_slice, [x],
                    func_args=[start, stop, step], ctx=ctx,
                    func_name=func_name, atol_f=1e-4, atol_b=1e-2)
def test_epsilon_insensitive_loss_forward_backward(seed, ctx, func_name, epsilon):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2 for _ in range(2)]
    function_tester(rng, F.epsilon_insensitive_loss,
                    ref_epsilon_insensitive_loss_forward, inputs,
                    func_args=[epsilon], atol_b=1e-2, ctx=ctx,
                    func_name=func_name)
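# A hedged sketch of `ref_epsilon_insensitive_loss_forward`, assuming the
# standard definition: zero inside the epsilon tube, |d| - epsilon outside.
def ref_epsilon_insensitive_loss_forward(x0, x1, epsilon):
    return np.maximum(np.abs(x0 - x1) - epsilon, 0)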
def test_abs_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]
    function_tester(rng, F.abs, np.abs, inputs, ctx=ctx, func_name=func_name)
def test_crelu_forward_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]
    function_tester(rng, F.crelu, ref_crelu, inputs, func_args=[axis],
                    ctx=ctx, func_name=func_name, atol_b=1e-2)
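# A sketch of the `ref_crelu` reference: CReLU concatenates the positive
# and negative parts of the input along the given axis.
def ref_crelu(x, axis=1):
    return np.concatenate([np.maximum(x, 0), np.maximum(-x, 0)], axis=axis)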
def test_logical_binary_compare_forward_backward(seed, fname, ctx, func_name):
    from nbla_test_utils import function_tester
    func = getattr(F, fname)
    opstr = opstrs[fname]
    ref_func = eval('lambda x, y: x {} y'.format(opstr))
    rng = np.random.RandomState(seed)
    inputs = [rng.randint(0, 2, size=(2, 3, 4)) for _ in range(2)]
    # Make some elements equal so equality-based comparisons are exercised.
    inputs[0][..., :2] = inputs[1][..., :2]
    function_tester(rng, func, ref_func, inputs, ctx=ctx,
                    backward=[False, False], func_name=func_name)
def test_logical_scalar_compare_forward_backward(val, seed, fname, ctx, func_name):
    from nbla_test_utils import function_tester
    opstr = opstrs[fname.replace('_scalar', '')]
    func = getattr(F, fname)
    ref_func = eval('lambda x, y: x {} y'.format(opstr))
    rng = np.random.RandomState(seed)
    inputs = [rng.randint(0, 2, size=(2, 3, 4))]
    # Make some elements equal to the scalar so equality cases are exercised.
    inputs[0][..., :2] = val
    function_tester(rng, func, ref_func, inputs, [val], ctx=ctx,
                    backward=[False], func_name=func_name)
def test_split_forward_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    shape = [2, 3, 4]
    x = rng.randn(*shape).astype(np.float32)
    inputs = [x]
    function_tester(rng, F.split, ref_split, inputs, func_args=[axis],
                    ctx=ctx, func_name=func_name, atol_b=1e-2)
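# A sketch of `ref_split`: one output per index along `axis`, with that
# axis squeezed away, matching F.split's multi-output behavior.
def ref_split(x, axis):
    return tuple(np.squeeze(s, axis)
                 for s in np.split(x, x.shape[axis], axis=axis))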
def test_r_div_scalar_forward_backward(seed, val, ctx, func_name):
    from nbla_test_utils import function_tester, cap_ignore_region
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 3,
            (-0.5, 0.5))]
    function_tester(rng, F.r_div_scalar, lambda x, y: y / x, inputs,
                    func_args=[val], dstep=1e-4, atol_b=1e-1,
                    ctx=ctx, func_name=func_name)
def test_prelu_forward_backward(seed, inshape, wshape, base_axis, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    x = rng.randn(*inshape).astype(np.float32)
    w = rng.randn(*wshape).astype(np.float32)
    inputs = [x, w]
    function_tester(rng, F.prelu, ref_prelu, inputs, func_args=[base_axis],
                    ctx=ctx, func_name=func_name, atol_b=1e-2)
def test_max_pooling_forward_backward(seed, inshape, kernel, stride, pad,
                                      ignore_border, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.max_pooling, ref_max_pooling, inputs=inputs,
                    func_args=[kernel, stride, ignore_border, pad],
                    ctx=ctx, func_name=func_name, atol_f=1e-6, atol_b=1e-2)
def test_embed_forward_backward(seed, shape_x, shape_w, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    n_class = shape_w[0]
    # Use the seeded rng (not np.random) so the test is reproducible.
    x = rng.randint(0, n_class - 1, shape_x)
    w = rng.randn(*shape_w).astype(np.float32)
    inputs = [x, w]
    function_tester(rng, F.embed, lambda x, w: w[x], inputs, ctx=ctx,
                    func_name=func_name, atol_b=1e-2, backward=[False, True])
def test_activation_forward_backward(act_name, seed, ctx, func_name):
    from nbla_test_utils import function_tester
    act = getattr(F, act_name)
    ref_func = eval('ref_func_' + act_name)
    ref_grad = eval('ref_grad_' + act_name)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, act, ref_func, inputs, atol_b=1e-2, dstep=1e-3,
                    ref_grad=ref_grad, ctx=ctx, func_name=func_name)
def test_kl_multinomial_forward_backward(seed, ctx, base_axis, shape, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    input0 = 1 + rng.rand(*shape).astype(np.float32)
    input1 = 1 + rng.rand(*shape).astype(np.float32)
    # Normalize so each input is a valid probability distribution.
    input0 = input0 / np.sum(input0, axis=1, keepdims=True)
    input1 = input1 / np.sum(input1, axis=1, keepdims=True)
    inputs = [input0, input1]
    function_tester(rng, F.kl_multinomial, ref_kl_multinomial, inputs,
                    func_args=[base_axis], atol_f=1e-6, atol_b=1e-2,
                    dstep=1e-4, ctx=ctx, func_name=func_name)
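# A hedged sketch of `ref_kl_multinomial`; the elementwise term is the
# standard KL divergence, but the output-shape convention (reducing the
# distribution axes with keepdims) is an assumption here:
def ref_kl_multinomial(p, q, base_axis=1):
    axes = tuple(range(base_axis, p.ndim))
    return np.sum(p * np.log(p / q), axis=axes, keepdims=True)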
def test_sigmoid_cross_entropy_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32),
              rng.rand(2, 3, 4).astype(np.float32)]
    inputs[1] = np.round(inputs[1])
    function_tester(
        rng, F.sigmoid_cross_entropy,
        lambda x, y: -(y * np.log(1 / (np.exp(-x) + 1)) +
                       (1 - y) * np.log(1 - 1 / (np.exp(-x) + 1))),
        inputs, atol_b=1e-2, ctx=ctx, func_name=func_name,
        backward=[True, False])
def test_linspace_forward(start, stop, num, ctx, func_name):
    from nbla_test_utils import function_tester
    function_tester(None, F.linspace, ref_linspace, inputs=[],
                    ctx=ctx, func_args=[start, stop, num],
                    func_name=func_name, backward=[],
                    disable_half_test=True)
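# A minimal sketch of `ref_linspace`; with no inputs, function_tester
# passes only func_args, so the reference takes the scalars directly:
def ref_linspace(start, stop, num):
    return np.linspace(start, stop, num).astype(np.float32)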
def test_atanh_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Keep inputs well inside (-1, 1), where atanh is defined.
    inputs = [np.clip(rng.randn(2, 3, 4).astype(np.float32) * 0.3, -0.9, 0.9)]
    function_tester(rng, F.atanh, np.arctanh, inputs, ctx=ctx,
                    func_name=func_name, atol_f=1e-3, atol_b=1e-2)
def test_logical_scalar_forward_backward(val, seed, fname, ctx, func_name):
    from nbla_test_utils import function_tester
    func = getattr(F, fname)
    ref_func = getattr(np, fname.replace('_scalar', ''))
    rng = np.random.RandomState(seed)
    inputs = [rng.randint(0, 2, size=(2, 3, 4)).astype(np.float32)]
    function_tester(rng, func, ref_func, inputs, [val], ctx=ctx,
                    backward=[False], func_name=func_name)
def test_absolute_error_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = []
    for _ in range(2):
        inputs.append(
            cap_ignore_region(
                rng.randn(2, 3).astype(np.float32) * 2,
                (-1e-3, 1e-3)))
    function_tester(rng, F.absolute_error, lambda x, y: np.abs(x - y),
                    inputs, ctx=ctx, func_name=func_name, atol_b=1e-2)
    # NOTE: with atol_b=1e-3, a numerical error occurs.
def test_pad_reflect_forward_backward(seed, ctx, func_name, inshape, pad_width):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [pad_width, "reflect"]
    function_tester(rng, F.pad, ref_pad_reflect, inputs, ctx=ctx, dstep=1e-1,
                    func_name=func_name, func_args=func_args)
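# A hedged sketch of `ref_pad_reflect`, assuming pad_width is a flat
# (before, after) list applied to the trailing axes, as in nnabla's pad:
def ref_pad_reflect(x, pad_width, mode):
    pairs = list(zip(pad_width[::2], pad_width[1::2]))
    pairs = [(0, 0)] * (x.ndim - len(pairs)) + pairs
    return np.pad(x, pairs, mode='reflect')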
def test_spectral_norm_forward_backward(seed, output_u, test, eps, itr,
                                        w_shape, dim, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    w = rng.randn(*w_shape).astype(np.float32)
    u = rng.randn(*(w_shape[dim],)).astype(np.float32)
    inputs = [w, u]
    backward = [False, False] if test else [True, False]
    ref_grad = ref_grad_spectral_norm if output_u \
        else ref_grad_spectral_norm_no_output_u
    function_tester(rng, F.spectral_norm, ref_spectral_norm, inputs,
                    func_args=[dim, itr, eps, test, output_u],
                    backward=backward, ref_grad=ref_grad, atol_accum=3e-2,
                    ctx=ctx, func_name=func_name)
def test_unpooling_forward_backward(seed, inshape, kernel, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.unpooling, ref_unpooling, inputs=inputs,
                    func_args=[kernel], ctx=ctx, func_name=func_name,
                    atol_f=1e-6, atol_b=1e-2)
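# A sketch of `ref_unpooling`: nearest-neighbor upsampling that repeats
# each element `k` times along each of the trailing (spatial) axes.
def ref_unpooling(x, kernel):
    y = x
    for i, k in enumerate(kernel):
        y = y.repeat(k, axis=x.ndim - len(kernel) + i)
    return y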
def test_relu_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]
    function_tester(rng, F.relu, ref_relu, inputs,
                    ctx=ctx, func_name=func_name)
def test_prune_forward_backward(rate, seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, F.prune, ref_func_prune, inputs, func_args=[rate],
                    ctx=ctx, func_name=func_name, ref_grad=ref_grad_prune)
def test_floor_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, F.floor, ref_floor, inputs, atol_b=1e-3,
                    backward=[True], ctx=ctx, func_name=func_name,
                    ref_grad=ref_grad_floor)
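# A hedged sketch of the floor references. The straight-through gradient
# and the flattened-return convention of ref_grad_* helpers are
# assumptions based on the patterns used by similar tests here:
def ref_floor(x):
    return np.floor(x)


def ref_grad_floor(x, dy, **kw):
    # Straight-through estimator: the incoming gradient passes unchanged.
    return dy.flatten()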
def test_tile_forward_backward(inshape, reps, seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.tile, np.tile, inputs, ctx=ctx,
                    func_name=func_name, func_args=[reps], atol_b=1e-2,
                    disable_half_test=False, backward=[False])
def test_relu6_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        np.clip(np.abs(rng.randn(2, 3, 4).astype(np.float32)) * 1e4,
                1e-2, 1e4)]
    function_tester(rng, F.relu6, ref_relu6, inputs,
                    ctx=ctx, func_name=func_name)
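# A minimal sketch of `ref_relu6`: clip activations to the range [0, 6].
def ref_relu6(x):
    return np.minimum(np.maximum(x, 0), 6)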
def test_softplus_forward_backward(seed, beta, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, F.softplus, ref_softplus, inputs, func_args=[beta],
                    atol_f=1e-2, atol_b=1e-2, ctx=ctx, func_name=func_name)
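# A hedged sketch of `ref_softplus`, assuming the beta-scaled form
# log(1 + exp(beta * x)) / beta (numerically naive, but adequate for the
# small test inputs above):
def ref_softplus(x, beta):
    return np.log(np.exp(beta * x) + 1) / beta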
def test_reshape_forward_backward(seed, inshape, outshape, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Input
    inputs = [rng.randn(*inshape).astype(np.float32)]
    inplace = False
    function_tester(rng, F.reshape, ref_reshape, inputs,
                    func_args=[outshape, inplace], ctx=ctx,
                    func_name=func_name)
def test_mul_n_forward_backward(num_inputs, seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    shape0 = [2, 3, 4]
    inputs = []
    for _ in range(num_inputs):
        inputs.append(rng.randn(*shape0).astype(np.float32))
    function_tester(rng, F.mul_n, ref_function, inputs, ctx=ctx,
                    func_name=func_name, atol_b=2e-3)
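# A sketch of the generic `ref_function` used here: mul_n is the
# elementwise product of all of its inputs.
def ref_function(*inputs, **params):
    y = inputs[0].copy()
    for x in inputs[1:]:
        y = y * x
    return y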
def core_test_convolution_forward_backward(inshape, kernel, outmaps, pad,
                                           stride, dilation, group,
                                           channel_last, with_bias, seed,
                                           ctx, func_name):
    from nbla_test_utils import function_tester
    if func_name == 'ConvolutionCuda':
        pytest.skip('CUDA Convolution N-D is only supported in CUDNN extension')
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('channel_last=True is only supported in CUDNN backend so far.')
    if channel_last and func_name.endswith('Cudnn') and (
            np.any(np.asarray(dilation) > 1) or group > 1):
        import nnabla_ext.cuda as nc
        major, minor, revision = map(int, nc.__cudnn_version__.split('.'))
        version = major * 1000 + minor * 100
        if version < 7200:
            pytest.skip(
                'channel_last dilated convolution does not work in CUDNN {}.'
                .format(version))
    base_axis = len(inshape) - len(kernel) - 1
    inmaps = inshape[base_axis]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    i = rng.randn(*inshape).astype(np.float32)
    kshape = (outmaps,) + (inmaps // group,) + kernel
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)
    k = rng.randn(*kshape).astype(np.float32)
    b = None
    if with_bias:
        b = rng.randn(outmaps).astype(np.float32)
    inputs = [i, k, b]
    atol_half = 1.0 if inmaps > 64 else 1e-1
    function_tester(
        rng, F.convolution, ref_convolution, inputs,
        func_args=[base_axis, pad, stride, dilation, group, channel_last],
        atol_f=1e-4, atol_b=1e-2, atol_accum=1e-5, dstep=1e-2,
        ctx=ctx, func_name=func_name, atol_half=atol_half)
def test_transpose_forward_backward(seed, inshape, axes, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    if np.prod(inshape) > 1000000:
        # Too large for the numerical gradient check; only verify the
        # forward output shape.
        with nn.context_scope(ctx):
            x = nn.Variable(inshape)
            y = F.transpose(x, axes)
            y.forward()
            assert y.d.shape == np.empty(inshape).transpose(axes).shape
    else:
        inputs = [rng.randn(*inshape).astype(np.float32)]
        function_tester(rng, F.transpose, ref_transpose, inputs,
                        func_args=[axes], ctx=ctx, func_name=func_name,
                        atol_f=1e-6, atol_b=1e-2)
def test_batch_normalization_forward_backward(seed, axis, decay_rate, eps,
                                              output_stat, batch_stat,
                                              ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = list(create_inputs(rng, axis))
    axes = [axis]
    if ctx.backend[0].split(':')[0] != 'cpu' and not batch_stat:
        pytest.skip(
            "cuda and cudnn implementations for batch_stat=False are not "
            "implemented yet")
    function_tester(
        rng, F.batch_normalization, ref_batch_normalization, inputs,
        func_args=[axes, decay_rate, eps, batch_stat, output_stat],
        backward=[True, True, True, False, False],
        ctx=ctx, func_name=func_name, dstep=1e-2, atol_b=1e-2)

    # Check if running mean and var works.
    vinputs = []
    for i in inputs:
        vinputs.append(nn.Variable(i.shape, True))
        vinputs[-1].d = i
    for i in range(5):
        inputs[0] = rng.randn(*inputs[0].shape)
        vinputs[0].d[...] = inputs[0]
        ref_y = ref_batch_normalization(
            *(inputs + [axes, decay_rate, eps, batch_stat, output_stat]))
        with nn.context_scope(ctx), nn.auto_forward():
            y = F.batch_normalization(
                *(vinputs + [axes, decay_rate, eps, batch_stat, output_stat]))
        assert np.allclose(vinputs[3].d, inputs[3], atol=1e-7)
        assert np.allclose(vinputs[4].d, inputs[4])

    # Check if global stat mode works.
    batch_stat = False
    if output_stat:
        return
    ref_y = ref_batch_normalization(
        *(inputs + [axes, decay_rate, eps, batch_stat, output_stat]))
    with nn.context_scope(ctx), nn.auto_forward():
        y = F.batch_normalization(
            *(vinputs + [axes, decay_rate, eps, batch_stat, output_stat]))
    assert np.allclose(ref_y, y.d, atol=1e-6)
def test_flip_forward_backward(seed, inshape, axes, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Input
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.flip, ref_flip, inputs, func_args=[axes],
                    ctx=ctx, func_name=func_name, atol_f=1e-6, atol_b=1e-2)
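# A sketch of `ref_flip`: reverse the array along each axis in `axes`.
def ref_flip(x, axes):
    y = x
    for axis in axes:
        y = np.flip(y, axis)
    return y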
def test_batch_det_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # input must be batched square matrix
    inputs = [np.clip(rng.randn(2, 3, 3).astype(np.float32), -0.9, 0.9)]
    function_tester(rng, F.batch_det, ref_det, inputs, ctx=ctx,
                    func_name=func_name, atol_b=2e-2, dstep=1e-4,
                    disable_half_test=True)
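# A minimal sketch of `ref_det`; numpy batches the determinant over all
# leading axes, matching the batched-square-matrix input above.
def ref_det(x):
    return np.linalg.det(x)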
def test_reduction_forward_backward(op, seed, inshape, axis, keepdims, ctx, func_name):
    from nbla_test_utils import function_tester
    func = getattr(F, op)
    ref_func = getattr(np, op)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, func, ref_func, inputs,
                    func_args=[axis], func_kwargs=dict(keepdims=keepdims),
                    ctx=ctx, func_name=func_name,
                    # The backward test on macOS doesn't pass with this tolerance.
                    # Does the Eigen library used in the CPU computation backend
                    # produce different results on different platforms?
                    # atol_b=3e-3,
                    atol_b=6e-3)
def test_bool_scatter_forward_backward(seed, ctx, func_name, gshape, mask_shape):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    gdata0 = rng.randn(*gshape).astype(np.float32)
    mask = rng.randint(0, 2, size=mask_shape)
    sdata = gdata0[mask.astype(bool)]
    inputs = [sdata, mask]
    backward = [True, False]
    function_tester(rng, F.bool_scatter, ref_bool_scatter, inputs,
                    ctx=ctx, func_name=func_name, func_args=[],
                    backward=backward)
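# A hedged sketch of `ref_bool_scatter`: scatter the packed values back
# to the masked positions and fill the rest with zeros.
def ref_bool_scatter(sdata, mask):
    out = np.zeros(mask.shape + sdata.shape[1:], dtype=sdata.dtype)
    out[mask.astype(bool)] = sdata
    return out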
def test_matrix_diag_forward_backward(seed, ctx, func_name, shape):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*shape).astype(np.float32) * 0.1]
    function_tester(rng, F.matrix_diag, ref_matrix_diag, inputs,
                    func_args=[], atol_b=1e-3, ctx=ctx, func_name=func_name)
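# A sketch of `ref_matrix_diag`: embed the last axis of x on the diagonal
# of a new trailing matrix dimension.
def ref_matrix_diag(x):
    n = x.shape[-1]
    y = np.zeros(x.shape + (n,), dtype=x.dtype)
    idx = np.arange(n)
    y[..., idx, idx] = x
    return y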
def test_sum_pooling_3d(seed, inshape, kernel, stride, pad, ignore_border,
                        channel_last, ctx, func_name):
    from nbla_test_utils import function_tester
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    function_tester(rng, F.sum_pooling, ref_sum_pooling, inputs=inputs,
                    func_args=func_args, func_name=func_name, ctx=ctx,
                    atol_f=1e-6, atol_b=1e-2)
def test_clip_by_norm_forward_backward(seed, ctx, func_name, clip_norm, axes):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4, 4).astype(np.float32) * 2]
    func_args = [clip_norm, axes]
    function_tester(rng, F.clip_grad_by_norm, ref_clip_grad_by_norm, inputs,
                    func_args=func_args, backward=[True], ctx=ctx,
                    func_name=func_name, ref_grad=ref_grad_clip_by_norm)
def test_logical_binary_forward_backward(seed, fname, ctx, func_name):
    from nbla_test_utils import function_tester
    func = getattr(F, fname)
    ref_func = getattr(np, fname)
    rng = np.random.RandomState(seed)
    inputs = [rng.randint(0, 2, size=(2, 3, 4)).astype(np.float32)
              for _ in range(2)]
    function_tester(rng, func, ref_func, inputs, ctx=ctx,
                    backward=[False, False], func_name=func_name)
def test_softmax_cross_entropy_forward_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import function_tester
    ishape = [2, 3, 4]
    rng = np.random.RandomState(seed)
    l_shape = list(ishape)
    l_shape[axis] = 1
    n_class = ishape[axis]
    inputs = [
        rng.randn(*ishape).astype(np.float32) * 2,
        rng.randint(0, n_class, size=l_shape).astype(int)]
    function_tester(rng, F.softmax_cross_entropy, ref_softmax_cross_entropy,
                    inputs, func_args=[axis], backward=[True, False],
                    ctx=ctx, func_name=func_name)
def test_reduction_forward_backward(op, seed, axis, keepdims, ctx, func_name):
    from nbla_test_utils import function_tester
    func = getattr(F, op)
    ref_func = getattr(np, op)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4, 5).astype(np.float32)]
    function_tester(rng, func, ref_func, inputs,
                    func_args=[axis], func_kwargs=dict(keepdims=keepdims),
                    ctx=ctx, func_name=func_name,
                    # The backward test on macOS doesn't pass with this tolerance.
                    # Does the Eigen library used in the CPU computation backend
                    # produce different results on different platforms?
                    # atol_b=3e-3,
                    atol_b=6e-3)
def test_affine_forward_backward(seed, base_axis, weight_shape, bias,
                                 ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Input
    inputs = [rng.randn(2, 3, 4).astype(np.float32)]
    # Weight
    inputs += [rng.randn(*weight_shape).astype(np.float32)]
    # Bias
    if bias:
        inputs += [rng.randn(*weight_shape[1:]).astype(np.float32)]
    else:
        inputs += [None]
    function_tester(rng, F.affine, ref_affine, inputs, func_args=[base_axis],
                    atol_b=1e-2, dstep=1e-3, ctx=ctx, func_name=func_name)
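# A hedged sketch of `ref_affine`: flatten the dimensions from base_axis
# onward, apply a matrix product, and add the (possibly None) bias. The
# exact reshape conventions here are assumptions:
def ref_affine(x, w, b, base_axis):
    xf = x.reshape(int(np.prod(x.shape[:base_axis])), -1)
    y = np.dot(xf, w.reshape(w.shape[0], -1))
    if b is not None:
        y += b.reshape(1, -1)
    return y.reshape(x.shape[:base_axis] + w.shape[1:])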