# NOTE(review): this chunk was flattened onto one physical line; the
# indentation below is reconstructed.  The final function_tester(...) call is
# cut off mid-argument list by the chunk boundary — its remaining arguments
# are outside this view.
def ref_grad_binary_tanh(x, dy, **kw):
    # Straight-through estimator: pass dy through where |x| < 1, zero
    # elsewhere (floor(min(|x|, 1)) is 0 iff |x| < 1, and 1 otherwise).
    # **kw absorbs extra keyword arguments from the test harness.
    return (dy * (1 - np.floor(np.minimum(np.abs(x), 1)))).flatten()


def ref_func_binary_sigmoid(x):
    # Binary sigmoid, value @x==0 is set to 0
    return np.round(np.clip((x + 1) / 2, 0, 1))


def ref_grad_binary_sigmoid(x, dy, **kw):
    # Same support as ref_grad_binary_tanh but halved, matching the 1/2
    # slope of (x + 1) / 2 used in the forward reference.
    return (dy * (1 - np.floor(np.minimum(np.abs(x), 1))) / 2).flatten()


@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("act_name, ctx, func_name", list_ctx_and_func_name(
    ['binary_tanh', 'binary_sigmoid']))
def test_activation_forward_backward(act_name, seed, ctx, func_name):
    act = getattr(F, act_name)
    # Look up the reference forward/grad helpers defined above by name.
    ref_func = eval('ref_func_' + act_name)
    ref_grad = eval('ref_grad_' + act_name)
    rng = np.random.RandomState(seed)
    # Scale by 2 so samples straddle the +/-1 saturation points.
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, act, ref_func, inputs, atol_b=1e-2, dstep=1e-3,
                    ref_grad=ref_grad, ctx=ctx,
                    # NOTE(review): truncated here by the chunk boundary.
# NOTE(review): this chunk is cut on both sides — the first `return` belongs
# to an input-generation helper whose `def` line (and the branch that
# computes `denom`) lies before this view, and the trailing `atol_list`
# literal is cut off mid-dict.  Indentation is reconstructed.
        return [rng.randn(*shapes[0]).astype(np.float32), denom]
    if fname == 'pow2':
        # Base drawn from [0.5, 1.5) — presumably to keep the pow base
        # positive and away from zero; TODO confirm against the full file.
        return [
            rng.rand(*shapes[0]).astype(np.float32) + 0.5,
            rng.randn(*shapes[1]).astype(np.float32)
        ]
    # Default: two independent inputs scaled by 2.
    return [rng.randn(*shapes[i]).astype(np.float32) * 2 for i in range(2)]


# -----------------------------------------------------------------------------
# Test body
# -----------------------------------------------------------------------------
@pytest.mark.parametrize("fname, ctx, func_name", list_ctx_and_func_name(
    ['sub2', 'mul2', 'div2', 'pow2']))
@pytest.mark.parametrize("seed", [314])
def test_transform_binary_inplace(seed, fname, ctx, func_name):
    # Checks that each binary transform supports in-place computation.
    from nbla_test_utils import inplace_function_test_helper
    x0 = nn.Variable([2, 3, 4], need_grad=True)
    x1 = nn.Variable([2, 3, 4], need_grad=True)
    func = getattr(F, fname)
    inplace_function_test_helper([x0, x1], func, ctx=ctx,
                                 rng=np.random.RandomState(seed))


# Tolerance pairs keyed by function name (unpacked elsewhere in this file as
# (atol_f, atol_b)).  NOTE(review): cut off here by the chunk boundary.
atol_list = {
    'add2': (1e-6, 4e-3),
    'sub2': (1e-6, 3e-3),
# (x_shape, y_shape, axis) triples driving the broadcast_to forward test.
# NOTE: entries such as (2) are plain ints, not 1-tuples; numpy's
# random_sample accepts either spelling for a 1-D shape.
PARAMS = [
    ((2, 3), (2), 0),
    ((2, 3), (3), 1),
    ((2, 3, 4), (2), 0),
    ((2, 3, 4), (3), 1),
    ((2, 3, 4), (4), 2),
    ((2, 3, 4), (2, 3), 0),
    ((2, 3, 4), (3, 4), 1),
    ((2, 3, 4, 5), (2), 0),
    ((2, 3, 4, 5), (3), 1),
    ((2, 3, 4, 5), (4), 2),
    ((2, 3, 4, 5), (5), 3),
    ((2, 3, 4, 5), (2, 3), 0),
    ((2, 3, 4, 5), (3, 4), 1),
    ((2, 3, 4, 5), (4, 5), 2),
    ((2, 3, 4, 5), (2, 3, 4), 0),
    ((2, 3, 4, 5), (3, 4, 5), 1),
    ((2, 3, 4, 5), (5), -1),
    ((2, 3, 4, 5), (4, 5), -1),
    ((2, 3, 4, 5), (3, 4, 5), -1),
    ((2, 3, 4, 5), (2, 3, 4, 5), -1),
    ((2, 3, 4, 5), (2, 3, 4, 5), -2),
]


@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name",
                         list_ctx_and_func_name(['broadcast_to']))
@pytest.mark.parametrize("xs, ys, axis", PARAMS)
def test_broadcast_to_forward(xs, ys, axis, seed, fname, ctx, func_name):
    """Forward-only comparison of broadcast_to against its numpy reference."""
    rand = np.random.RandomState(seed)
    target = getattr(F, fname)
    reference = eval('ref_' + fname)
    data = [rand.random_sample(xs), rand.random_sample(ys)]
    # Neither input needs a gradient: this is a forward-only test.
    function_tester(rand, target, reference, data, [axis],
                    backward=[False, False], ctx=ctx, func_name=func_name)
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import pdb  # NOTE(review): unused in this chunk — looks like a debug leftover.
from nbla_test_utils import (function_tester, list_ctx_and_func_name)


def ref_global_average_pooling(x):
    """Numpy reference: average each (H, W) plane of an NCHW batch.

    Returns an array of shape (N, C, 1, 1).
    """
    shape = x.shape
    flat = np.reshape(x, (shape[0], shape[1], shape[2] * shape[3]))
    return np.average(flat, 2)[:, :, np.newaxis, np.newaxis]


@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name",
                         list_ctx_and_func_name(['global_average_pooling']))
def test_global_average_pooling_forward_backward(seed, fname, ctx, func_name):
    """Forward/backward check of F.global_average_pooling vs the reference."""
    rand = np.random.RandomState(seed)
    reference = eval('ref_' + fname)
    target = getattr(F, fname)
    data = [rand.random_sample((2, 3, 4, 5))]
    function_tester(rand, target, reference, data, [],
                    ctx=ctx, func_name=func_name)
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.testing import assert_allclose
from nbla_test_utils import (function_tester, list_context,
                             list_ctx_and_func_name)


@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [None, 0, 1, 2, 3, (0, 2), (1, 2, 3)])
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("inshape", [(2, 3, 4, 5), (2, 1, 4, 5)])
@pytest.mark.parametrize("op, ctx, func_name", list_ctx_and_func_name(
    ['sum', 'mean', 'max', 'min', 'prod']))
def test_reduction_forward_backward(op, seed, inshape, axis, keepdims,
                                    ctx, func_name):
    # Each reduction is compared against the numpy function of the same name.
    func = getattr(F, op)
    ref_func = getattr(np, op)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(
        rng, func, ref_func, inputs,
        func_args=[axis], func_kwargs=dict(keepdims=keepdims),
        ctx=ctx, func_name=func_name,
        # NOTE(review): the call is cut off here by the chunk boundary; the
        # remaining keyword arguments are outside this view.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import (
    function_tester, list_ctx_and_func_name)


# ----------------------------------------------------------------------------
# Logical scalar
# ----------------------------------------------------------------------------
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name",
                         list_ctx_and_func_name(['logical_and_scalar',
                                                 'logical_or_scalar',
                                                 'logical_xor_scalar']))
@pytest.mark.parametrize("val", [False, True])
def test_logical_scalar_forward_backward(val, seed, fname, ctx, func_name):
    func = getattr(F, fname)
    # e.g. 'logical_and_scalar' -> np.logical_and as the reference.
    ref_func = getattr(np, fname.replace('_scalar', ''))
    rng = np.random.RandomState(seed)
    # 0/1 integer inputs; backward is disabled via backward=[False].
    # NOTE(review): other copies of this test cast to float32 — confirm the
    # integer dtype is intended here.
    inputs = [rng.randint(0, 2, size=(2, 3, 4))]
    function_tester(rng, func, ref_func, inputs, [val], ctx=ctx,
                    backward=[False], func_name=func_name)


# Operator-symbol lookup, presumably for comparison tests further down.
# NOTE(review): the dict literal is cut off here by the chunk boundary.
opstrs = {
    'greater': '>',
    'greater_equal': '>=',
    'less': '<',
    'equal': '==',
# NOTE(review): the enclosing `def get_combination(n):` header lies before
# this chunk; indentation below is reconstructed, and the final
# function_tester(...) call is cut off mid-argument list.
    if n == 0:
        return [(n, np.array([], dtype=np.bool))]
    # All 2**n on/off patterns over n axes, one row per combination.
    # NOTE(review): np.bool is deprecated (removed in NumPy 1.24+); plain
    # `bool` is the modern spelling.
    all_comb = np.vstack(map(lambda x: x.flatten(), np.meshgrid(
        *[[0, 1] for _ in range(n)]))).T.astype(np.bool)
    return [(n, comb) for comb in all_comb]


def get_combinations(*N):
    # Concatenate get_combination(n) results for every requested n.
    ret = []
    for n in N:
        ret.extend(get_combination(n))
    return ret


@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name",
                         list_ctx_and_func_name(['broadcast']))
@pytest.mark.parametrize("ndim, broadcast_dim", get_combinations(*range(0, 6)))
@pytest.mark.parametrize("align", [True, False])
def test_broadcast_forward_backward(align, ndim, broadcast_dim, seed, fname,
                                    ctx, func_name):
    func = getattr(F, fname)
    ref_func = eval('ref_' + fname)
    rng = np.random.RandomState(seed)
    # Random target shape; the axes selected by broadcast_dim are set to 1.
    shape = rng.randint(2, 5, size=(ndim,))
    inshape = shape.copy()
    inshape[broadcast_dim] = 1
    if ndim == 0:
        # Performing 0-dim array test too.
        inputs = [np.array(rng.randn()).astype("float32")]
    # NOTE(review): flattening lost the original nesting; this call may
    # belong inside the `if ndim == 0` branch — confirm against the full file.
    function_tester(rng, func, ref_func, inputs, [shape], ctx=ctx,
                    backward=[True], func_name=func_name,
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import (
    function_tester, list_ctx_and_func_name)


@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [None, 0, 1, 2, 3, (0, 2), (1, 2, 3)])
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("op, ctx, func_name", list_ctx_and_func_name(
    ['sum', 'mean', 'max', 'min', 'prod']))
def test_reduction_forward_backward(op, seed, axis, keepdims, ctx, func_name):
    """Check each reduction, forward and backward, against the numpy
    function of the same name over several axis/keepdims settings.
    """
    # `function_tester` is already imported at the top of this file; the
    # redundant function-local re-import was removed.
    func = getattr(F, op)
    ref_func = getattr(np, op)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4, 5).astype(np.float32)]
    function_tester(rng, func, ref_func, inputs,
                    func_args=[axis], func_kwargs=dict(keepdims=keepdims),
                    ctx=ctx, func_name=func_name,
                    # The backward test on macOS doesn't pass with this
                    # tolerance.  Does the Eigen library used in the CPU
                    # computation backend produce different results on
                    # different platforms?
                    # atol_b=3e-3,
                    atol_b=6e-3)
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import (function_tester, list_context,
                             list_ctx_and_func_name)


@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [None, 0, 1, 2, 3, (0, 2), (1, 2, 3)])
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("inshape", [(2, 3, 4, 5), (2, 1, 4, 5)])
@pytest.mark.parametrize("op, ctx, func_name", list_ctx_and_func_name(
    ['sum', 'mean', 'max', 'min', 'prod']))
def test_reduction_forward_backward(op, seed, inshape, axis, keepdims,
                                    ctx, func_name):
    """Compare each nnabla reduction with its numpy namesake."""
    rand = np.random.RandomState(seed)
    samples = [rand.randn(*inshape).astype(np.float32)]
    # The backward test on macOS doesn't pass with this tolerance.
    # Does Eigen library used in CPU computation backend produce
    # the different results on different platforms?
    # atol_b=3e-3,
    function_tester(rand, getattr(F, op), getattr(np, op), samples,
                    func_args=[axis], func_kwargs=dict(keepdims=keepdims),
                    ctx=ctx, func_name=func_name, atol_b=6e-3)
# NOTE(review): the enclosing `def get_combination(n):` header lies before
# this chunk; indentation below is reconstructed from context.
    if n == 0:
        return [(n, np.array([], dtype=np.bool))]
    # All 2**n on/off patterns over n axes, one row per combination.
    # NOTE(review): np.bool is deprecated (removed in NumPy 1.24+); plain
    # `bool` is the modern spelling.
    all_comb = np.vstack(map(lambda x: x.flatten(), np.meshgrid(
        *[[0, 1] for _ in range(n)]))).T.astype(np.bool)
    return [(n, comb) for comb in all_comb]


def get_combinations(*N):
    # Concatenate get_combination(n) results for every requested n.
    ret = []
    for n in N:
        ret.extend(get_combination(n))
    return ret


@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name",
                         list_ctx_and_func_name(['broadcast']))
@pytest.mark.parametrize("ndim, broadcast_dim", get_combinations(*range(0, 6)))
def test_broadcast_forward_backward(ndim, broadcast_dim, seed, fname, ctx,
                                    func_name):
    func = getattr(F, fname)
    ref_func = eval('ref_' + fname)
    rng = np.random.RandomState(seed)
    # Random target shape; the axes selected by broadcast_dim are set to 1.
    shape = rng.randint(2, 5, size=(ndim,))
    inshape = shape.copy()
    inshape[broadcast_dim] = 1
    if np.prod(inshape) == 1:
        # Performing 0-dim array test too.
        inputs = [np.array(rng.randn())]
    # NOTE(review): for shapes whose product is not 1, `inputs` is never
    # assigned above — a line may have been lost when this chunk was
    # flattened; confirm against the original file.
    function_tester(rng, func, ref_func, inputs, [shape], ctx=ctx,
                    backward=[True], func_name=func_name, atol_b=4e-3)
# Per-function tolerances, unpacked below as (atol_f, atol_b).
atol_list = {
    'add2': (1e-6, 4e-3),
    'sub2': (1e-6, 3e-3),
    'mul2': (1e-6, 2e-2),
    # div2/pow2 get looser tolerances — presumably because they are
    # numerically touchier; TODO confirm rationale.
    'div2': (1e-4, 1e-1),
    'pow2': (1e-4, 1e-1),
    'maximum2': (1e-6, 3e-3),
    'minimum2': (1e-6, 3e-3),
}


@pytest.mark.parametrize("fname, ctx, func_name",
                         list_ctx_and_func_name(['add2', 'sub2', 'mul2',
                                                 'div2', 'pow2', 'maximum2',
                                                 'minimum2']))
@pytest.mark.parametrize("seed", [313])
# Presumably (x0 broadcast axes, x1 broadcast axes); None = no broadcasting.
@pytest.mark.parametrize("broadcast_dims", [
    (None, None), (None, (0,)), ((1,), None),
    (None, (2,)), ((0, 2), None), ((0,), (2,))])
def test_transform_binary_forward_backward(fname, ctx, func_name,
                                           broadcast_dims, seed):
    from nbla_test_utils import function_tester
    atol_f, atol_b = atol_list[fname]
    func = getattr(F, fname)
    # NOTE(review): the function body is cut off here by the chunk boundary.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import (function_tester,
                            list_ctx_and_func_name)


# ----------------------------------------------------------------------------
# Logical scalar
# ----------------------------------------------------------------------------
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name", list_ctx_and_func_name([
    'logical_and_scalar', 'logical_or_scalar', 'logical_xor_scalar'
]))
@pytest.mark.parametrize("val", [False, True])
def test_logical_scalar_forward_backward(val, seed, fname, ctx, func_name):
    """Forward-only check of each logical-with-scalar function against the
    corresponding numpy op (e.g. logical_and_scalar vs np.logical_and).
    """
    target = getattr(F, fname)
    numpy_op = getattr(np, fname.replace('_scalar', ''))
    rand = np.random.RandomState(seed)
    samples = [rand.randint(0, 2, size=(2, 3, 4))]
    function_tester(rand, target, numpy_op, samples, [val],
                    ctx=ctx, backward=[False], func_name=func_name)
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import (
    function_tester, list_ctx_and_func_name)


# ----------------------------------------------------------------------------
# Logical scalar
# ----------------------------------------------------------------------------
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name",
                         list_ctx_and_func_name(['logical_and_scalar',
                                                 'logical_or_scalar',
                                                 'logical_xor_scalar']))
@pytest.mark.parametrize("val", [False, True])
def test_logical_scalar_forward_backward(val, seed, fname, ctx, func_name):
    func = getattr(F, fname)
    # e.g. 'logical_and_scalar' -> np.logical_and as the reference.
    ref_func = getattr(np, fname.replace('_scalar', ''))
    rng = np.random.RandomState(seed)
    # 0/1 inputs cast to float32; backward is disabled via backward=[False].
    inputs = [rng.randint(0, 2, size=(2, 3, 4)).astype(np.float32)]
    function_tester(rng, func, ref_func, inputs, [val], ctx=ctx,
                    backward=[False], func_name=func_name)


# Operator-symbol lookup, presumably for comparison tests further down.
# NOTE(review): the dict literal is cut off here by the chunk boundary.
opstrs = {
    'greater': '>',
    'greater_equal': '>=',
    'less': '<',
    'less_equal': '<=',
def ref_func_binary_tanh(x):
    """Reference forward for binary_tanh.

    np.round(0.5) rounds to 0, so the value at x == 0 maps to -1.
    """
    # Binary tanh, value @x==0 is set to -1
    return 2 * np.round(np.clip((x + 1) / 2, 0, 1)) - 1


def ref_grad_binary_tanh(x, dy, **kw):
    """Reference straight-through gradient for binary_tanh.

    Passes dy through where |x| < 1 and zero elsewhere.  `**kw` absorbs any
    extra keyword arguments the test harness may pass, consistent with the
    `**kw`-accepting ref_grad_* variants elsewhere in this file.
    """
    return (dy * (1 - np.floor(np.minimum(np.abs(x), 1)))).flatten()


def ref_func_binary_sigmoid(x):
    """Reference forward for binary_sigmoid.

    np.round(0.5) rounds to 0, so the value at x == 0 maps to 0.
    """
    # Binary sigmoid, value @x==0 is set to 0
    return np.round(np.clip((x + 1) / 2, 0, 1))


def ref_grad_binary_sigmoid(x, dy, **kw):
    """Reference straight-through gradient for binary_sigmoid: half the
    binary_tanh estimator, matching the 1/2 slope of (x + 1) / 2.
    `**kw` absorbs extra harness keyword arguments (see ref_grad_binary_tanh).
    """
    return (dy * (1 - np.floor(np.minimum(np.abs(x), 1))) / 2).flatten()


@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("act_name, ctx, func_name",
                         list_ctx_and_func_name(['binary_tanh',
                                                 'binary_sigmoid']))
def test_activation_forward_backward(act_name, seed, ctx, func_name):
    """Forward/backward check of the binary activations against the
    references above; the gradient is checked via the analytic ref_grad.
    """
    act = getattr(F, act_name)
    ref_func = eval('ref_func_' + act_name)
    ref_grad = eval('ref_grad_' + act_name)
    rng = np.random.RandomState(seed)
    # Scale by 2 so samples straddle the +/-1 saturation points.
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, act, ref_func, inputs, atol_b=1e-2, dstep=1e-3,
                    ref_grad=ref_grad, ctx=ctx, func_name=func_name)