Example #1
def value_tester(v_ref, v_act, rtol=1e-04, atol=1e-05):
    from nbla_test_utils import ArrayDiffStats

    v_ref.forward()
    v_act.forward()
    print(ArrayDiffStats(v_ref.d, v_act.d))
    assert_allclose(v_ref.d, v_act.d, rtol=rtol, atol=atol)
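
A hypothetical usage sketch of value_tester (the graph below and the exp/log pair are illustrative, not taken from the NNabla test suite): build a reference graph and a mathematically equivalent graph, then let the helper forward both and compare their outputs. It assumes assert_allclose is importable from nnabla.testing, as it is in the NNabla tests.

import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.testing import assert_allclose  # used inside value_tester

x = nn.Variable.from_numpy_array(np.random.randn(2, 3).astype(np.float32))
v_ref = F.sigmoid(x)                # reference graph
v_act = F.exp(F.log(F.sigmoid(x)))  # equivalent graph up to rounding error
value_tester(v_ref, v_act)          # forwards both and asserts closeness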
Example #2
def value_tester(v_ref,
                 v_act,
                 rtol=1e-04,
                 atol=1e-05,
                 clear_no_need_grad=False):
    from nbla_test_utils import ArrayDiffStats

    v_ref.forward(clear_no_need_grad)
    v_act.forward(clear_no_need_grad)
    print(ArrayDiffStats(v_ref.d, v_act.d))
    assert_allclose(v_ref.d, v_act.d, rtol=rtol, atol=atol)
Example #3
def test_nnp_graph(seed):

    rng = np.random.RandomState(seed)

    def unit(i, prefix):
        c1 = PF.convolution(i, 4, (3, 3), pad=(1, 1), name=prefix + '-c1')
        c2 = PF.convolution(F.relu(c1),
                            4, (3, 3),
                            pad=(1, 1),
                            name=prefix + '-c2')
        c = F.add2(c2, c1, inplace=True)
        return c

    x = nn.Variable([2, 3, 4, 4])
    c1 = unit(x, 'c1')
    c2 = unit(x, 'c2')
    y = PF.affine(c2, 5, name='fc')

    runtime_contents = {
        'networks': [{
            'name': 'graph',
            'batch_size': 2,
            'outputs': {
                'y': y
            },
            'names': {
                'x': x
            }
        }],
    }
    import tempfile
    tmpdir = tempfile.mkdtemp()
    import os
    nnp_file = os.path.join(tmpdir, 'tmp.nnp')
    try:
        from nnabla.utils.save import save
        save(nnp_file, runtime_contents)
        from nnabla.utils import nnp_graph
        nnp = nnp_graph.NnpLoader(nnp_file)
    finally:
        import shutil
        shutil.rmtree(tmpdir)
    graph = nnp.get_network('graph')
    x2 = graph.inputs['x']
    y2 = graph.outputs['y']

    d = rng.randn(*x.shape).astype(np.float32)
    x.d = d
    x2.d = d
    y.forward(clear_buffer=True)
    y2.forward(clear_buffer=True)
    from nbla_test_utils import ArrayDiffStats
    assert np.allclose(y.d, y2.d), str(ArrayDiffStats(y.d, y2.d))
Example #4
def test_grad_outputs(seed, ctx, auto_forward, type_grad_outputs):
    from nbla_test_utils import ArrayDiffStats

    # Settings
    nn.set_default_context(ctx)
    nn.set_auto_forward(auto_forward)
    b, c, h, w = 4, 3, 32, 32
    n_cls = 10
    rng = np.random.RandomState(seed)

    x = nn.Variable.from_numpy_array(rng.randn(b, c, h,
                                               w)).apply(need_grad=True)
    y = F.sigmoid(x)

    # Grad outputs
    if type_grad_outputs == int:
        g = rng.randint(-10, 10)
    elif type_grad_outputs == float:
        g = rng.randn()
    elif type_grad_outputs == np.ndarray:
        g = rng.randn(*y.shape)
    elif type_grad_outputs == nn.NdArray:
        g = nn.NdArray.from_numpy_array(rng.randn(*y.shape))

    # Zerograd, Forward, Backward on the forward graph
    inputs = [x]
    [inp.grad.fill(0) for inp in inputs]
    if not auto_forward:
        y.forward()
    y.backward(g)

    # Grad
    inputs = [x]
    outputs = [y]
    grad_outputs = [g]
    grads = nn.grad(outputs, inputs, grad_outputs)
    if not auto_forward:
        F.sink(*grads, one_input_grad=1).forward()

    # Check between results of var.backward and nn.grad
    for inp, grad in zip(inputs, grads):
        assert np.allclose(inp.g, grad.d,
                           atol=1e-6), str(ArrayDiffStats(inp.g, grad.d))
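
The pattern above can be reduced to a standalone sketch (assuming nnabla is installed; the sigmoid graph and the tolerance are illustrative): the gradient accumulated into x.grad by Variable.backward should match the Variable returned by nn.grad for the same grad_outputs.

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array(np.random.randn(2, 3)).apply(need_grad=True)
y = F.sigmoid(x)
g = np.random.randn(*y.shape)

x.grad.zero()
y.forward()
y.backward(g)                   # accumulates the gradient into x.grad

grads = nn.grad([y], [x], [g])  # graph that computes the same gradient
F.sink(*grads).forward()
assert np.allclose(x.g, grads[0].d, atol=1e-6)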
Example #5
def test_multiple_objectives(seed, ctx, auto_forward):
    from nbla_test_utils import ArrayDiffStats

    # Settings
    nn.set_default_context(ctx)
    nn.set_auto_forward(auto_forward)
    b, c, h, w = 4, 3, 32, 32
    n_cls = 10
    rng = np.random.RandomState(seed)

    # Objective 0
    x0 = nn.Variable.from_numpy_array(rng.randn(b, c, h,
                                                w)).apply(need_grad=True)
    y0 = F.sigmoid(x0)
    # Objective 1
    x1 = nn.Variable.from_numpy_array(rng.randn(b, c, h,
                                                w)).apply(need_grad=True)
    y1 = F.tanh(x1)

    # Zerograd, Forward, Backward on the forward graph
    g0 = nn.NdArray.from_numpy_array(rng.randn(*x0.shape))
    g1 = nn.NdArray.from_numpy_array(rng.randn(*x1.shape))
    z = y0 * nn.Variable(g0.shape).apply(data=g0) + y1 * \
        nn.Variable(g1.shape).apply(data=g1)
    inputs = [x0, x1]
    [inp.grad.fill(0) for inp in inputs]
    if not auto_forward:
        z.forward()
    z.backward()

    # Grad
    inputs = [x0, x1]
    outputs = [y0, y1]
    grad_outputs = [g0, g1]
    grads = nn.grad(outputs, inputs, grad_outputs)
    if not auto_forward:
        F.sink(*grads, one_input_grad=1).forward()

    # Check between results of var.backward and nn.grad
    for inp, grad in zip(inputs, grads):
        assert np.allclose(inp.g, grad.d,
                           atol=1e-6), str(ArrayDiffStats(inp.g, grad.d))
Example #6
def test_resnet_expansion(seed, ctx, auto_forward, flag_grad_outputs):
    from nbla_test_utils import ArrayDiffStats
    nn.clear_parameters()

    # Settings
    nn.set_default_context(ctx)
    nn.set_auto_forward(auto_forward)
    b, c, h, w = 4, 3, 32, 32
    n_cls = 10
    rng = np.random.RandomState(seed)

    # Network
    x = nn.Variable.from_numpy_array(rng.randn(b, c, h, w))
    y = nn.Variable.from_numpy_array(rng.randint(0, n_cls, b).reshape(b, 1))
    p = SmallResNet(x)
    loss = F.mean(F.softmax_cross_entropy(p, y))

    # Zerograd, Forward, Backward on the forward graph
    inputs = nn.get_parameters().values()
    [inp.grad.fill(0) for inp in inputs]
    grad = nn.NdArray.from_numpy_array(np.asarray(
        rng.randn())) if flag_grad_outputs else 1
    if not auto_forward:
        loss.forward()
    loss.backward(grad)

    # Grad
    grad_outputs = grad if flag_grad_outputs else None
    grads = nn.grad([loss], inputs, [grad_outputs])
    if not auto_forward:
        F.sink(*grads, one_input_grad=1).forward()

    # Check between results of var.backward and nn.grad
    backend = ctx.backend[0].split(":")[0]
    if backend == 'cuda':
        pytest.skip(
            'CUDA Convolution N-D is only supported in CUDNN extension')
    for inp, grad in zip(inputs, grads):
        assert np.allclose(inp.g, grad.d,
                           atol=1e-6), str(ArrayDiffStats(inp.g, grad.d))
Example #7
def test_min_max_quantize_forward_backward(seed, x_shape, q_shape, decay,
                                           x_min_max, ema, ste_fine_grained,
                                           eps, quantize, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, \
        function_tester
    rng = np.random.RandomState(seed)
    # Inputs
    x = rng.randn(*x_shape)
    qr_min = -0.5 * rng.rand(*q_shape)
    qr_max = +0.5 * rng.rand(*q_shape)
    ql_min = np.zeros(q_shape)
    ql_max = np.ones(q_shape) * 255
    inputs = [x, qr_min, qr_max, ql_min, ql_max]
    func_args = [decay, x_min_max, ema, ste_fine_grained, eps, quantize]

    # No quantization
    if not quantize:
        vinputs = [nn.Variable.from_numpy_array(xd) for xd in inputs]
        v = vinputs[0]
        with nn.context_scope(ctx):
            o = F.min_max_quantize(*(vinputs + func_args))
        np.allclose(o.d, v.d)
        return
    # x_min_max and ema
    # function_tester does not work in this combination
    # when comparing gradients between true_g = v.g - g_init (accum=True) and g (accum=False)
    # since forward changes the exponential moving averages
    if x_min_max and ema:
        from nbla_test_utils import ArrayDiffStats
        vinputs = [nn.Variable.from_numpy_array(xd) for xd in inputs]
        vinputs[0].need_grad = True
        with nn.context_scope(ctx):
            y = F.min_max_quantize(*(vinputs + func_args))
        # forward check
        y.forward()
        y_ref = ref_min_max_quantize(x, qr_min, qr_max, ql_min, ql_max, decay,
                                     x_min_max, ema, ste_fine_grained, eps,
                                     quantize)
        assert np.allclose(y_ref, y.d, atol=1e-5), ArrayDiffStats(y_ref, y.d)
        # backward check (accum=False)
        xv = vinputs[0]
        xv.grad.zero()
        dy = rng.randn(*y.shape)
        y.backward(dy)
        gx_ref = ref_grad_min_max_quantize(x, qr_min, qr_max, ql_min, ql_max,
                                           dy, decay, x_min_max, ema,
                                           ste_fine_grained, eps, quantize)
        ag = xv.g.copy()
        assert np.allclose(gx_ref, ag.flatten(),
                           atol=1e-5), ArrayDiffStats(gx_ref, ag.flatten())
        # backward check (accum=True)
        y.backward(dy)
        assert np.allclose(ag * 2.0, xv.g.copy(),
                           atol=1e-5), ArrayDiffStats(ag * 2.0, xv.g.copy())
        return
    # General tests
    backward = [True, False, False, False, False, False] if x_min_max or ema \
        else [True, True, True, False, False, False]
    function_tester(rng,
                    F.min_max_quantize,
                    ref_min_max_quantize,
                    inputs,
                    func_args=func_args,
                    atol_b=1e-3,
                    backward=backward,
                    ctx=ctx,
                    func_name=func_name,
                    disable_half_test=True,
                    ref_grad=ref_grad_min_max_quantize)