# Common imports assumed by these snippets; the originals are spread across
# several test modules of the ngraph repo, so exact import paths may differ.
import numpy as np
import pytest

import ngraph as ng
from ngraph.testing import ExecutorFactory, RandomTensorGenerator
from ngraph.frontends.neon import Dropout, Linear, UniformInit

# Module-level defaults assumed by several tests below (the originals define
# them per module). Fixtures and parametrized arguments (transformer_factory,
# C, M, N, input_placeholder, concatenate_variables, ng_func/np_func, nin,
# batch_size, keep, output_size, ...) and helpers such as is_flex_factory
# come from the original suite and are not reproduced here.
rng = RandomTensorGenerator(0, np.float32)
delta = 1e-3           # finite-difference step size (assumed value)
rtol = atol = 1e-2     # derivative-check tolerances (assumed values)


def test_exit_condition(transformer_factory):
    bsz = 16
    class_num = 10

    # Clip the maximum absolute value of tensor elements at 7.9.
    #
    # The tensor is filled using np.random.randn, which can in principle
    # return any value, but values above 5 are highly improbable. A limit of
    # 7.9 therefore almost never modifies the tested tensor, yet it prevents
    # sporadic failures when the test is run in a continuous-integration
    # environment. 7.9 is the approximate upper bound of the range [4, 8);
    # all numbers in this range can be expressed by flexpoint values with the
    # same dec (shared exponent).
    # Why not 15.9, the approximate limit of the range [8, 16)? Values above
    # 8, although highly improbable, would occasionally force a larger shared
    # exponent and thus reduce the accuracy of every number in the tensor,
    # since most values in a normal distribution are close to 0.
    # (A sketch of the assumed randn_abs_clip behavior follows this test.)

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    N, Y = ng.make_axis(bsz), ng.make_axis(class_num)
    axes = ng.make_axes([N, Y])
    y_val = rng.randn_abs_clip(axes, clip_max=clip_val)
    y = ng.constant(y_val, axes)

    likelihood = ng.log(ng.softmax(y, normalization_axes=y.axes[1]))

    with ExecutorFactory() as ex:
        comp = ex.executor(likelihood)

        val1 = comp()
        val2 = comp()
        ng.testing.assert_allclose(val1, val2, atol=0, rtol=0)
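

# rng.randn_abs_clip above comes from RandomTensorGenerator; this is a
# minimal sketch of its assumed behavior (absolute normal samples, clipped
# when clip_max > 0), for readers without the ngraph test utilities. The
# helper name is hypothetical:
def _randn_abs_clip_sketch(axes, clip_min=0, clip_max=0):
    # draw |N(0, 1)| samples with the shape given by the axes' lengths
    vals = np.absolute(np.random.randn(*axes.lengths))
    if clip_max > 0:
        vals = np.clip(vals, clip_min, clip_max)
    return vals
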
def test_unary_op(ng_func, np_func):
    H = ng.make_axis().named('H')
    W = ng.make_axis().named('W')

    tests = [
        {
            'tensor1': [[1, 2, 3, 4], [5, 6, 7, 8]],
            'tensor1_axes': (H, W),
            'axes_lengths': {H: 2, W: 4}
        }]

    for test in tests:
        # set up tensors
        for axis, length in test['axes_lengths'].items():
            axis.length = length

        tensor1 = ng.placeholder(test['tensor1_axes'])
        value1 = np.array(test['tensor1'], dtype=np.float32)

        _ng_func = ng_func(tensor1)

        with ExecutorFactory() as ex:
            _ng_computation = ex.executor(_ng_func, tensor1)
            _ng_val = _ng_computation(value1)
            _ng_ref = np_func(value1)
            assert np.allclose(_ng_val, _ng_ref, rtol=0, atol=2)
def test_stack():
    W = ng.make_axis(length=4)
    H = ng.make_axis(length=5)
    I = ng.make_axis(length=3)

    axes = ng.make_axes([W, H])

    rng = RandomTensorGenerator(0, np.float32)

    a_v = [rng.uniform(0, 1, axes) for _ in range(I.length)]

    for pos in range(len(axes) + 1):
        a = [ng.placeholder(axes, initial_value=p) for p in a_v]

        s = ng.stack(a, I, pos)

        with ExecutorFactory() as ex:
            num_funs = [
                ex.numeric_derivative(s, p, delta,
                                      *(q for q in a if q is not p))
                for p in a
            ]
            sym_funs = [
                ex.derivative(s, p, *(q for q in a if q is not p))
                for p in a
            ]

            for n_fun, s_fun, a_i in zip(num_funs, sym_funs, a_v):
                na_is = [na_i for na_i in a_v if na_i is not a_i]
                d_n = n_fun(a_i, *na_is)
                d_s = s_fun(a_i, *na_is)
                ng.testing.assert_allclose(d_n, d_s, rtol=rtol, atol=atol)
def test_flat_tensor_dot_tensor():
    """
    Ensure that a flattened argument axis is not unflattend in the result.

    """
    H = ng.make_axis(2)
    W = ng.make_axis(7)
    C = ng.make_axis(3)
    K = ng.make_axis(11)

    axes_a = ng.make_axes([H, W, C])
    a = ng.constant(np.ones(axes_a.lengths), axes=axes_a)
    flat_a = ng.flatten_at(a, 2)

    axes_b = ng.make_axes([C, K])
    b = ng.constant(np.ones(axes_b.lengths), axes=axes_b)

    result = ng.dot(b, flat_a)

    with ExecutorFactory() as factory:
        result_fun = factory.executor(result)
        result_val = result_fun()

    result_correct = np.ones_like(result_val) * C.length
    ng.testing.assert_allclose(result_val, result_correct)
def test_binary_op(ng_func, np_func):
    H = ng.make_axis().named('H')
    W = ng.make_axis().named('W')

    tests = [
        {
            'tensor1': [[1, 2, 3, 4], [5, 6, 7, 8]],
            'tensor1_axes': (H, W),
            'tensor2': [[10, 2, 3, 40], [15, 6, 9, 8]],
            'tensor2_axes': (H, W),
            'axes_lengths': {H: 2, W: 4}
        }]

    for test in tests:
        # set up tensors
        for axis, length in test['axes_lengths'].items():
            axis.length = length

        tensor1 = ng.placeholder(test['tensor1_axes'])
        value1 = np.array(test['tensor1'], dtype=np.float32)

        tensor2 = ng.placeholder(test['tensor2_axes'])
        value2 = np.array(test['tensor2'], dtype=np.float32)

        _ng_func = ng_func(tensor1, tensor2)

        with ExecutorFactory() as ex:

            _ng_computation = ex.executor(_ng_func, tensor1, tensor2)
            _ng_val = _ng_computation(value1, value2)
            _ng_ref = np_func(value1, value2)
            np.testing.assert_equal(_ng_val, _ng_ref)
def test_variable_init(C):
    w_init = np.random.rand(C.length)
    W = ng.variable(ng.make_axes([C]), initial_value=w_init)

    with ExecutorFactory() as ex:
        result = ex.executor(W)()
    ng.testing.assert_allclose(result, w_init)
def test_dropout_train(nin, batch_size, keep):

    # set inputs
    N = ng.make_axis(batch_size, name='N')
    F = ng.make_axis(nin, name='F')

    inp = ng.placeholder([F, N])
    layer = Dropout(keep=keep)
    fprop = layer(inp)

    # create data
    x = np.random.uniform(size=(nin, batch_size))

    # evaluate
    with ExecutorFactory() as ex:
        comp = ex.executor([fprop, layer.mask], inp)
        out, mask = comp(x)
        numpy_out = x * mask[:, None]
        ng.testing.assert_allclose(out, numpy_out, atol=atol, rtol=rtol)

        if keep < 1.0:
            out1, mask1 = out.copy(), mask.copy()
            out2, mask2 = comp(x)
            assert (out1 != out2).any()
            assert (mask1 != mask2).any()
def test_initial_value():
    # Test work-around for issue #1138
    w = [3, 4, 5]
    x = ng.constant(w)
    y = ng.variable([ng.make_axis(length=len(w))], initial_value=x)
    with ExecutorFactory() as ex:
        result = ex.executor(y)()
    ng.testing.assert_allclose(result, np.asarray(w, dtype=np.float32))
def test_fill_state():
    with ExecutorFactory() as ex:
        N = ng.make_axis(3, name='N')
        x = ng.fill([N], -1)
        f = ex.executor(x)
        x_val = f()
    assert np.allclose(-1, x_val)
def test_sign():
    x_np = np.array([-1.2, 2.3, 0.0, 1.2])
    N = ng.make_axis(len(x_np))
    x = ng.variable([N])
    y = ng.sign(x)
    y_np = np.sign(x_np)
    with ExecutorFactory() as ex:
        y_val = ex.executor(y, x)(x_np)
        assert np.allclose(y_val, y_np)
def test_modify_state():
    with ExecutorFactory() as ex:
        N = ng.make_axis(3, name='N')
        x_np = np.ones((N.length)) * 4
        x = ng.variable([N], initial_value=x_np).named('x')
        val = ng.sequential([ng.assign(x, x + x), x])
        f = ex.executor(val)
        x_val = f()
        assert np.allclose(x_np + x_np, x_val)
def test_sequential(N):
    x = ng.variable([N], initial_value=0)
    x0 = x + x
    x1 = x + x
    p = ng.sequential([x0, ng.assign(x, 2), x1, x0])
    with ExecutorFactory() as ex:
        x0_val, x1_val, p_val = ex.executor([x0, x1, p])()
    # Ops inside a sequential are evaluated once, in order: x0 is computed
    # before the assign (x == 0), x1 after it (x == 2), and the final x0
    # returns its already-computed value.
    assert x0_val == 0
    assert x1_val == 4
    assert p_val == 0
    def baseline_value(self, x):
        """
        Use the defined ngraph computation to evaluate the activation
        function on inputs x.
        """
        X = ng.placeholder([ng.make_axis(), ng.make_axis(name='N')])
        X.axes.set_shape(x.shape)
        with ExecutorFactory() as ex:
            activation_function = ex.executor(self.neon_activation(X), X)
            return activation_function(x)
def test_write_state():
    """
    This reads back a tensor set from an argument. No code is generated.
    """
    with ExecutorFactory() as ex:
        N = ng.make_axis(3, name='N')
        x_np = np.ones((N.length)) * 4
        x = ng.persistent_tensor([N]).named('x')
        f = ex.executor(x, x)
        x_val = f(x_np)
        assert np.allclose(x_np, x_val)
def test_read_state():
    """
    This just reads back a tensor. No code is generated.
    """
    with ExecutorFactory() as ex:
        N = ng.make_axis(3, name='N')
        x_np = np.ones((N.length)) * 4
        x = ng.variable([N], initial_value=x_np).named('x')
        f = ex.executor(x)
        x_val = f()
        assert np.allclose(x_np, x_val)
def test_scalar_broadcast():
    """
    Test broadcasting a scalar into a tensor
    """
    with ExecutorFactory() as ex:
        x_axes = ng.make_axes()
        broadcast_axes = ng.make_axes([ng.make_axis(2), ng.make_axis(3)])
        x = ng.constant(1., axes=x_axes)
        z = ng.broadcast(x, axes=broadcast_axes)
        z_comp = ex.executor(z)
        assert np.array_equal(z_comp(), np.ones(broadcast_axes.lengths))
def test_sequential_side(M):
    x1_np = 2
    x2_np = 3
    b_np = 1
    x_np = np.array([1, 2, 3], dtype=np.float32)

    x = ng.variable([M], initial_value=x_np)
    x1 = ng.persistent_tensor(axes=(), initial_value=x1_np)
    x2 = ng.persistent_tensor(axes=(), initial_value=x2_np)
    x1_vo = ng.value_of(x1)
    x2_vo = ng.value_of(x2)
    b = ng.persistent_tensor(axes=(), initial_value=b_np)

    y = ng.sequential([
        x1_vo, x2_vo,
        ng.assign(x1, ng.sum(x, out_axes=()) + x1 * b + (1 - b)),
        ng.assign(x2, ng.mean(x, out_axes=()) + x2 * b + (1 - b)),
        x * 2
    ])

    with ExecutorFactory() as ex:
        main_effect = ex.executor((y, x1_vo, x2_vo, x1, x2))
        current_values = ex.executor((x1, x2))

        # Run main path #1
        y_val, x1_init_val, x2_init_val, x1_final_val, x2_final_val = main_effect()
        y_np = x_np * 2

        assert np.allclose(y_val, y_np)
        assert np.allclose(x1_init_val, x1_np)
        assert np.allclose(x2_init_val, x2_np)
        x1_np = np.sum(x_np) + x1_np * b_np + (1 - b_np)
        x2_np = np.mean(x_np) + x2_np * b_np + (1 - b_np)
        assert np.allclose(x1_final_val, x1_np)
        assert np.allclose(x2_final_val, x2_np)

        x1_val, x2_val = current_values()
        assert np.allclose(x1_val, x1_np)
        assert np.allclose(x2_val, x2_np)

        # Run main path #2 (should be the same as before)
        y_val, x1_init_val, x2_init_val, x1_final_val, x2_final_val = main_effect()
        y_np = x_np * 2

        assert np.allclose(y_val, y_np)
        assert np.allclose(x1_init_val, x1_np)
        assert np.allclose(x2_init_val, x2_np)
        x1_np = np.sum(x_np) + x1_np * b_np + (1 - b_np)
        x2_np = np.mean(x_np) + x2_np * b_np + (1 - b_np)
        assert np.allclose(x1_final_val, x1_np)
        assert np.allclose(x2_final_val, x2_np)
def test_use_state():
    """
    Uses the value of a tensor in a computation.
    """
    with ExecutorFactory() as ex:
        N = ng.make_axis(3, name='N')
        x_np = np.ones((N.length)) * 4
        x = ng.variable([N], initial_value=x_np).named('x')
        xx = x + x
        f = ex.executor(xx)
        xx_val = f()
        assert np.allclose(x_np + x_np, xx_val)
    def baseline_derivative(self, x):
        X = ng.placeholder([ng.make_axis(), ng.make_axis(name='N')])
        X.axes.set_shape(x.shape)
        with ExecutorFactory() as ex:
            activation_derivative = ex.derivative(self.neon_activation(X), X)

            # ex.derivative returns the full Jacobian; for an elementwise
            # activation it is diagonal, so extract the diagonal by striding
            # through the flattened array with step x.size + 1.
            result = activation_derivative(x)
            result = result.ravel()[0:result.size:(x.size + 1)]
            result = result.reshape(x.shape)

            return result
def test_linear_zeros(input_placeholder, output_size):
    # Basic sanity check with zero weights and random inputs.
    x = np.random.random(input_placeholder.axes.lengths)
    layer = Linear(nout=output_size, init=UniformInit(0.0, 0.0))

    with ExecutorFactory() as ex:
        if ex.transformer.transformer_name == 'hetr':
            pytest.xfail("hetr fork-safe issue on mac")
        comp = ex.executor(layer(input_placeholder), input_placeholder)
        output_values = comp(x)

    assert np.min(output_values) == 0.0 and np.max(output_values) == 0.0
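

# The concatenate_variables fixture used by the next test is defined
# elsewhere in the original suite; below is a minimal sketch, assuming it
# yields variables that share every axis except the (same-named) axis being
# concatenated, together with their numpy values and the concat position:
@pytest.fixture
def concatenate_variables():
    pos = 0
    ax0 = ng.make_axis(length=3, name='concat')
    ax1 = ng.make_axis(length=2, name='concat')
    common = ng.make_axis(length=4, name='common')
    np_list = [np.random.uniform(size=(ax.length, common.length))
               for ax in (ax0, ax1)]
    x_list = [ng.variable([ax, common], initial_value=v)
              for ax, v in zip((ax0, ax1), np_list)]
    return x_list, np_list, pos
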
def test_concatenate(concatenate_variables):
    x_list, np_list, pos = concatenate_variables

    with ExecutorFactory() as ex:
        v = ng.concat_along_axis(x_list, x_list[0].axes[pos])
        d = ng.deriv(v,
                     x_list[0],
                     error=ng.constant(np.ones(v.axes.lengths), axes=v.axes))
        f = ex.executor([v, d])
        e_v, e_d = f()
        np_v = np.concatenate(np_list, axis=pos)
        ng.testing.assert_allclose(e_v.copy(), np_v)
        ng.testing.assert_allclose(e_d.copy(), np.ones(x_list[0].axes.lengths))
def test_concatenate():
    with ExecutorFactory() as ex:
        A = ng.make_axis(name='A', length=3)
        B = ng.make_axis(name='B', length=4)
        np_shape = (A.length, B.length)
        x0_np = -np.ones(np_shape)
        x1_np = np.ones(np_shape)
        x0_ng = ng.persistent_tensor([A, B], initial_value=x0_np).named('x0')
        x1_ng = ng.persistent_tensor([A, B], initial_value=x1_np).named('x1')
        j_np = np.concatenate([x0_np, x1_np], axis=0)
        j_ng = ng.concat_along_axis([x0_ng, x1_ng], A)
        f = ex.executor(j_ng)
        j_val = f()
        ng.testing.assert_allclose(j_val, j_np)
def test_sequential_reduce(M):
    x = ng.variable([M], initial_value=1)
    x0 = x + x
    x1 = ng.sum(x0, out_axes=())
    x2 = ng.sum(x0, out_axes=()) + x0
    p = ng.sequential([x0, x1, x2])

    with ExecutorFactory() as ex:
        x0_val, x1_val, x2_val, p_val, x_val = ex.executor(
            [x0, x1, x2, p, x])()
        x0_np = x_val + x_val
        x1_np = np.sum(x0_np)
        x2_np = x1_np + x0_np
        assert np.allclose(x0_val, x0_np)
        assert np.allclose(x1_val, x1_np)
        assert np.allclose(x2_val, x2_np)
        assert np.allclose(p_val, x2_np)
def test_slice_deriv():
    C = ng.make_axis(length=2)
    D = ng.make_axis(length=3)

    x_np = np.array([[10, 20, 30], [1, 2, 3]], dtype='float32')
    x = ng.placeholder([C, D]).named('x')

    x_slice = x[0, :] + x[1, :]

    with ExecutorFactory() as ex:
        sym_deriv_fun = ex.derivative(x_slice, x)
        val_ng = sym_deriv_fun(x_np)
        val_np = np.zeros((D.length, C.length, D.length))
        for i in range(D.length):
            for j in range(C.length):
                val_np[i, j, i] = 1
        ng.testing.assert_allclose(val_ng, val_np)
def test_linear_ones(input_size, input_placeholder, output_size):
    # Basic sanity check with all ones on the inputs and weights: each row in
    # the output should be the sum of the weights for that output. This
    # confirms that the correct number of operations is being run.
    x = np.ones(input_placeholder.axes.lengths)
    layer = Linear(nout=output_size, init=UniformInit(1.0, 1.0))

    with ExecutorFactory() as ex:
        if ex.transformer.transformer_name == 'hetr':
            pytest.xfail("hetr fork-safe issue on mac")
        out = layer(input_placeholder)
        comp = ex.executor([out, layer.W], input_placeholder)
        output_values, w = comp(x)

    ng.testing.assert_allclose(np.ones(out.axes.lengths) * input_size,
                               output_values,
                               atol=0.0,
                               rtol=0.0)
def test_concat_different_axis_lengths():
    ax1 = ng.make_axis(length=3, name="concat")
    ax2 = ng.make_axis(length=2, name="concat")
    ax3 = ng.make_axis(length=10, name="other")

    x = ng.placeholder(axes=[ax1, ax3])
    y = ng.placeholder(axes=[ax2, ax3])

    np_x = np.zeros(x.axes.lengths)
    np_y = np.zeros(y.axes.lengths)

    # ax1 and ax2 have same name, so this should work
    v = ng.concat_along_axis([x, y], ax1)
    with ExecutorFactory() as ex:
        f = ex.executor(v, x, y)
        e_v = f(np_x, np_y)
        np_v = np.concatenate([np_x, np_y], axis=0)
        ng.testing.assert_allclose(e_v.copy(), np_v)
def test_idempotent_axes_c():
    """
    Test test axes transformations with autodiff, case c, with broadcast,
    slice, cast and dim-shuffle
    """
    with ExecutorFactory() as ex:
        axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])
        result_axes = [ng.make_axis(length=axis.length) for axis in axes]

        # variable
        w = ng.variable(axes, initial_value=np.ones((3, 1)))

        # broadcast l / r, introducing dummy length 1 axes
        l = ng.broadcast(w, axes)
        r = ng.broadcast(w, axes)

        # slice
        axes_slice = [slice(None, None, None), slice(None, None, None)]
        l_sliced = ng.tensor_slice(l, axes_slice)
        r_sliced = ng.tensor_slice(r, axes_slice)

        # cast r
        r_sliced_casted = ng.cast_axes(r_sliced, axes)

        # perform add
        result = ng.add(l_sliced, r_sliced_casted)

        # cast / dimshuffle
        result = ng.cast_axes(result, result_axes)
        result = ng.axes_with_order(result, result_axes)

        # cost and grad
        cost = ng.sum(result, reduction_axes=result.axes)
        grad = ng.deriv(cost, w)

        grad_comp = ex.executor(grad)
        cost_comp = ex.executor(cost)

        cost_comp_ng = cost_comp()
        grad_comp_ng = grad_comp()
        grad_comp_np = np.ones((3, 1)) * 2.
        assert cost_comp_ng == 6.0
        assert np.array_equal(grad_comp_ng, grad_comp_np)
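# NumpyLogreg is a reference helper defined elsewhere in the original test
# module; below is a minimal sketch, assuming plain-numpy gradient descent on
# the negative log-likelihood (the class name and optimize() signature are
# inferred from the usage in test_logreg):
class NumpyLogreg(object):
    def __init__(self, xs, ys, thetas):
        self.xs = xs
        self.ys = ys
        self.thetas = thetas.copy()

    def optimize(self, alpha):
        # forward pass: sigmoid of the linear model theta . x
        ys_pred = 1.0 / (1.0 + np.exp(-np.dot(self.thetas, self.xs)))
        log_likelihoods = (np.log(ys_pred) * self.ys +
                           np.log(1 - ys_pred) * (1 - self.ys))
        loss = -np.sum(log_likelihoods)
        # analytic gradient of the loss w.r.t. thetas: xs . (pred - y)
        grad = np.dot(self.xs, ys_pred - self.ys)
        self.thetas = self.thetas - alpha * grad
        return grad, loss, self.thetas

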
def test_logreg():
    # xs: (C, N), y: (N,)
    xs = np.array([[0.52, 0.88, 0.52, 0.74], [1.12, -1.08, 0.06, -2.49],
                   [0.77, 0.15, -1.3, 1.39]])
    ys = np.array([1, 1, 0, 1])
    max_iter = 10
    alpha = 0.1
    thetas = np.array([0., 0., 0.])

    np_logreg = NumpyLogreg(xs, ys, thetas)

    C, N = ng.make_axis(length=3), ng.make_axis(length=4)

    # input tensors
    xs_v = ng.placeholder((C, N))
    ys_v = ng.placeholder([N])
    alpha_v = ng.placeholder(())
    thetas_var = ng.variable([C], initial_value=thetas)

    # define ops
    ys_pred = ng.sigmoid(ng.dot(thetas_var, xs_v))
    log_likelihoods = ng.log(ys_pred) * ys_v + ng.log(1 - ys_pred) * (1 - ys_v)
    loss = -ng.sum(log_likelihoods, reduction_axes=[N])
    grad_comp = ng.deriv(loss, thetas_var)
    weight_update = ng.sequential(
        [ng.assign(thetas_var, thetas_var - alpha_v * grad_comp), thetas_var])

    # transformer
    with ExecutorFactory() as ex:
        train_eval_func = ex.executor([grad_comp, loss, weight_update], xs_v,
                                      ys_v, alpha_v)

        # evaluate
        for i in range(max_iter):
            grad_np, loss_np, thetas_np = np_logreg.optimize(alpha)
            grad_ng, loss_ng, thetas_ng = train_eval_func(xs, ys, alpha)
            ng.testing.assert_allclose(loss_np, loss_ng, rtol=1e-05,
                                       atol=1e-05, transformer_overwrite=False)
            ng.testing.assert_allclose(grad_np, grad_ng, rtol=1e-05,
                                       atol=1e-05, transformer_overwrite=False)
            ng.testing.assert_allclose(thetas_np, thetas_ng, rtol=1e-05,
                                       atol=1e-05, transformer_overwrite=False)
def test_specific_slice_deriv():
    with ExecutorFactory() as ex:
        A = ng.make_axis(name='A', length=3)
        B = ng.make_axis(name='B', length=4)
        np_shape = (A.length, B.length)
        x_np = np.empty(np_shape, dtype=np.float32)
        for i in range(A.length):
            for j in range(B.length):
                x_np[i, j] = 10 * i + j
        x_ng = ng.persistent_tensor([A, B], initial_value=x_np)
        for i in range(A.length):
            for j in range(B.length):
                # avoid shadowing the built-in slice
                elem = ng.tensor_slice(x_ng, (i, j))
                dslice_dx = ng.deriv(elem, x_ng)
                dslice_dx_fun = ex.executor(dslice_dx)
                dslice_dx_val = dslice_dx_fun()
                dslice_dx_np = np.zeros_like(x_np)
                dslice_dx_np[i, j] = 1
                ng.testing.assert_allclose(dslice_dx_val, dslice_dx_np)
def test_squared_L2():
    H = ng.make_axis(2)
    W = ng.make_axis(3)
    N = ng.make_axis(5, name='N')

    axes = ng.make_axes([H, W, N])
    a = ng.constant(np.ones(axes.lengths), axes=axes)

    with ExecutorFactory() as factory:
        l2_samples_fun = factory.executor(ng.squared_L2(a))
        l2_samples_val = np.ones([N.length]) * H.length * W.length
        l2_all_fun = factory.executor(ng.squared_L2(a, out_axes=[]))
        l2_all_val = np.ones([]) * W.length * H.length * N.length
        l2_W_fun = factory.executor(ng.squared_L2(a, reduction_axes=[H, N]))
        l2_W_val = np.ones([W.length]) * H.length * N.length
        l2_samples_result = l2_samples_fun()
        l2_all_result = l2_all_fun()
        l2_W_result = l2_W_fun()
        ng.testing.assert_allclose(l2_samples_val, l2_samples_result)
        ng.testing.assert_allclose(l2_all_val, l2_all_result)
        ng.testing.assert_allclose(l2_W_val, l2_W_result)