Example #1
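All of these examples are excerpted from the ngraph test suite and assume a shared module-level preamble. A plausible sketch follows; the exact import paths, the RandomTensorGenerator seed, and the tolerance values are assumptions (Example #17 sets delta, rtol, and atol locally to the same values used here):

import numpy as np
import ngraph as ng
from ngraph.util.utils import ExecutorFactory, RandomTensorGenerator

# shared random generator used as `rng` throughout the examples
rng = RandomTensorGenerator(0, np.float32)

# step size and tolerances used by the derivative checks
delta = 1e-3
rtol = atol = 1e-2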
def test_softmax(transformer_factory):
    """TODO."""
    N = ng.make_axis(name='N', batch=True)
    W = ng.make_axis(name='W')

    W.length = 128
    N.length = 10
    axes = ng.make_axes([W, N])

    # set up some distributions
    u = rng.uniform(0, 1, ng.make_axes([W, N]))
    u = u / np.sum(u, axis=0).reshape(1, N.length)

    # Put them in pre-softmax form
    x = np.log(u) + rng.uniform(-5000, 5000,
                                ng.make_axes([N])).reshape(1, N.length)
    p_x = ng.placeholder(axes)

    ex = ExecutorFactory()
    smax_w_fun = ex.executor(ng.softmax(p_x, softmax_axes=ng.make_axes([W])), p_x)
    smax_fun = ex.executor(ng.softmax(p_x), p_x)

    s = smax_w_fun(x)
    np.testing.assert_allclose(s, u, atol=1e-6, rtol=1e-3)

    x = rng.uniform(-5000, 5000, ng.make_axes([W, N]))
    u = np_softmax(x, 0)
    s = smax_w_fun(x)
    np.testing.assert_allclose(s, u, atol=1e-6, rtol=1e-3)

    # Test with the softmax_axes default
    s = smax_fun(x)
    np.testing.assert_allclose(s, u, atol=1e-6, rtol=1e-3)
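Example #1 also depends on a module-level np_softmax reference. A minimal sketch consistent with its use above, assuming the second argument names the reduction axis (the max subtraction keeps np.exp finite for the ±5000 inputs fed in above):

def np_softmax(x, axis):
    # subtract the per-slice max for numerical stability, then normalize
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)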
Example #2
def test_setting():
    ex = ExecutorFactory()
    X = ng.make_axis(name='X', length=3)
    axes = ng.make_axes([X])

    np_x = np.array([1, 2, 3], dtype=np.float32)
    np_y = np.array([1, 3, 5], dtype=np.float32)

    x = ng.constant(np_x, axes)
    y = ng.constant(np_y, axes)

    v = ng.variable(axes, initial_value=x)

    f_v = ex.executor(v)

    with ng.Op.saved_user_deps():
        ng.assign(v, v + y)
        f_v1 = ex.executor(v)

    f_v2 = ex.executor(v)

    e_v = f_v().copy()
    assert np.allclose(e_v, np_x)
    e_v1 = f_v1().copy()
    assert np.allclose(e_v1, np_x + np_y)
    e_v2 = f_v2().copy()
    assert np.allclose(e_v2, np_x + np_y)
Example #3
def compare_f_at_x(f_be, x_be, f_np, x, **kwargs):
    """
    Compare op_graph implementation of a function with numpy implementation

    Arguments:
        f_be: op_graph function
        x_be: argument to op_graph
        f_np: numpy function
        x: value to pass in to both implementations of f
        kwargs: used to pass rtol/atol on to assert_allclose
    """
    # op_graph
    ex = ExecutorFactory()

    # if x_be and x are not tuples or lists, put them in lists with length 1
    if isinstance(x_be, (tuple, list)):
        assert len(x_be) == len(x)
    else:
        x_be = [x_be]
        x = [x]

    # numpy
    val_np = f_np(*x)

    val_be = ex.executor(f_be, *x_be)(*x)

    # compare numpy and op_graph
    np.testing.assert_allclose(val_np, val_be, **kwargs)
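A hypothetical call, checking ng.tanh against np.tanh at a random point; the axis, placeholder, and value names below are illustrative, not from the original suite:

X = ng.make_axis(name='X', length=8)
p_x = ng.placeholder(ng.make_axes([X]))
x_val = rng.uniform(-1.0, 1.0, p_x.axes)
compare_f_at_x(ng.tanh(p_x), p_x, np.tanh, x_val, rtol=1e-5)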
Example #4
def test_cast_axes(transformer_factory):
    C = ng.make_axis(name='C')
    D = ng.make_axis(name='D')

    ex = ExecutorFactory()

    C.length = 2
    D.length = 3

    x = ng.placeholder((C, D))

    x_slice = x[1, :]
    # Cast back to known axes
    x_cast = ng.cast_axes(x_slice, [D])

    # Verify that the sliced tensor broadcasts along C when added back to x
    y = x + x_cast
    y_fun = ex.executor(y, x)
    num_deriv_fun = ex.numeric_derivative(y, x, delta)
    sym_deriv_fun = ex.derivative(y, x)

    x_np = np.array([[10, 20, 30], [1, 2, 3]], dtype='float32')
    assert np.allclose(y_fun(x_np),
                       np.array([[11, 22, 33], [2, 4, 6]], dtype='float32'))

    assert np.allclose(num_deriv_fun(x_np),
                       sym_deriv_fun(x_np),
                       rtol=rtol,
                       atol=atol)
Example #5
def test_variable_init(transformer_factory):
    C = ng.make_axis("C")
    C.length = 200

    w_init = np.random.rand(C.length)
    W = ng.variable(ng.make_axes([C]), initial_value=w_init)

    ex = ExecutorFactory()
    result = ex.executor(W)()
    np.testing.assert_allclose(result, w_init)
Example #6
def test_scalar_broadcast():
    """
    Test broadcasting a scalar into a tensor
    """
    ex = ExecutorFactory()
    x_axes = ng.make_axes()
    broadcast_axes = ng.make_axes([ng.make_axis(2), ng.make_axis(3)])
    x = ng.constant(1., axes=x_axes)
    z = ng.broadcast(x, axes=broadcast_axes)
    z_comp = ex.executor(z)
    assert np.array_equal(z_comp(), np.ones(broadcast_axes.lengths))
Example #7
def compare_tensors(func, outputs, targets, expected_result, tol=0.):
    """Compare the cost op built by func(y, t) against its expected numpy value."""
    ex = ExecutorFactory()
    N = ng.make_axis("N")
    N.length = outputs.shape[0]
    y = ng.placeholder([N])
    t = ng.placeholder([N])

    costfunc = ex.executor(func(y, t), y, t)
    np.testing.assert_allclose(costfunc(outputs, targets),
                               expected_result,
                               rtol=tol)
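A hypothetical use with a squared-error cost object built from ngraph ops; the SquaredError class here is illustrative, not part of the excerpted suite:

class SquaredError(object):
    def __call__(self, y, t):
        d = y - t
        return ng.sum(d * d, out_axes=())

outputs = np.array([1.0, 2.0], dtype=np.float32)
targets = np.array([1.5, 1.0], dtype=np.float32)
# (1.0 - 1.5)**2 + (2.0 - 1.0)**2 == 1.25
compare_tensors(SquaredError(), outputs, targets, expected_result=1.25, tol=1e-6)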
Example #8
def test_placeholder(transformer_factory):
    W = ng.make_axis(name='W')
    H = ng.make_axis(name='H')

    # Pass array through a placeholder
    W.length = 10
    H.length = 20
    aaxes = ng.make_axes([W, H])
    ashape = aaxes.lengths
    asize = aaxes.size
    aval = np.arange(asize, dtype=np.float32).reshape(ashape)

    x = ng.placeholder([W, H])
    d = 2 * x
    d2 = ng.squared_L2(x)

    ex = ExecutorFactory()
    # Return placeholder, param is placeholder
    placeholder_fun = ex.executor(x, x)
    prod_fun = ex.executor([d, d2], x)

    cval = placeholder_fun(aval)
    np.testing.assert_allclose(cval, aval)

    # Pass a different array through
    u = rng.uniform(-1.0, 1.0, aaxes)
    cval = placeholder_fun(u)
    np.testing.assert_allclose(cval, u)

    cval, s = prod_fun(aval)
    np.testing.assert_allclose(cval, aval * 2)
    np.testing.assert_allclose(s[()], np.dot(aval.flatten(), aval.flatten()))

    cval, s = prod_fun(u)
    u2 = u * 2
    np.testing.assert_allclose(cval, u2)
    np.testing.assert_allclose(s[()], np.dot(u.flatten(), u.flatten()))
Example #9
def test_reciprocal(transformer_factory):
    """TODO."""
    N = ng.make_axis(name='N')
    W = ng.make_axis(name='W')

    W.length = 20
    N.length = 128
    axes = ng.make_axes([W, N])
    p_u = ng.placeholder(axes)
    u = rng.uniform(.1, 5.0, p_u.axes)

    rec_u_np = np.reciprocal(u)
    rec_u = ng.reciprocal(p_u)

    ex = ExecutorFactory()
    rec_u_graph = ex.executor(rec_u, p_u)(u)
    np.testing.assert_allclose(rec_u_np, rec_u_graph)
Example #10
def test_idempotent_axes_a():
    """
    Test axes transformations with autodiff, case a, reference test
    """
    ex = ExecutorFactory()
    axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])

    w = ng.variable(axes, initial_value=np.ones((3, 1)))
    result = w + w

    result = ng.cast_axes(result, axes)
    cost = ng.sum(result, reduction_axes=axes)
    grad = ng.deriv(cost, w)

    grad_comp = ex.executor(grad)
    cost_comp = ex.executor(cost)

    assert cost_comp() == 6.0
    assert np.array_equal(grad_comp(), np.ones((3, 1)) * 2.)
Example #11
def test_idempotent_axes_c():
    """
    Test axes transformations with autodiff, case c, with broadcast,
    slice, cast and dim-shuffle
    """
    ex = ExecutorFactory()
    axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])
    result_axes = [ng.make_axis(length=axis.length) for axis in axes]

    # variable
    w = ng.variable(axes, initial_value=np.ones((3, 1)))
    l = w
    r = w

    # broadcast l / r, introducing dummy length 1 axes
    l = ng.broadcast(l, axes)
    r = ng.broadcast(r, axes)

    # slice
    axes_slice = [slice(None, None, None), slice(None, None, None)]
    l_sliced = ng.Slice(l, axes_slice)
    r_sliced = ng.Slice(r, axes_slice)

    # cast r
    r_sliced_casted = ng.cast_axes(r_sliced, axes)

    # perform add
    result = ng.add(l_sliced, r_sliced_casted)

    # cast / dimshuffle
    result = ng.cast_axes(result, result_axes)
    result = ng.axes_with_order(result, result_axes)

    # cost and grad
    cost = ng.sum(result, reduction_axes=result.axes)
    grad = ng.deriv(cost, w)

    grad_comp = ex.executor(grad)
    cost_comp = ex.executor(cost)

    assert cost_comp() == 6.0
    assert np.array_equal(grad_comp(), np.ones((3, 1)) * 2.)
Example #12
def test_clip(transformer_factory):
    W = ng.make_axis(name='W')
    H = ng.make_axis(name='H')
    W.length = 4
    H.length = 5
    axes = ng.make_axes([W, H])

    p_x = ng.placeholder(axes)
    x = (2 * rng.uniform(0, 1, axes) - 1) * 20
    clip_value = 10

    clip_func = ng.minimum(ng.maximum(p_x, -abs(clip_value)), abs(clip_value))

    # numpy results as expected results
    expected_result = np.clip(x, -abs(clip_value), abs(clip_value))

    ex = ExecutorFactory()
    costfunc = ex.executor(clip_func, p_x)
    result = costfunc(x)
    np.testing.assert_allclose(result, expected_result)
Example #13
def test_shuffled_deriv():
    # This exercises generate_add_delta with the delta's axes in a different
    # order than the axes of the value being updated
    ax = ng.make_name_scope("ax")
    ax.C = ng.make_axis(3)
    ax.T = ng.make_axis(1)
    ax.R = ng.make_axis(5)
    ax.S = ng.make_axis(5)

    axes = [ax.R, ax.S, ax.C]
    v = ng.variable([ng.make_axis(_.length) for _ in axes])
    rsc = ng.cast_axes(v, axes)
    trsc = ng.expand_dims(rsc, ax.T, 0)
    ctrs = ng.axes_with_order(trsc, axes=[ax.C, ax.T, ax.R, ax.S])
    cost = ng.sum(ctrs, out_axes=None)
    grad = ng.deriv(cost, v)

    ex = ExecutorFactory()
    d_fun = ex.executor(grad)
    d_fun()
Example #14
def compare_tensors(func, inputs, expected_result, deriv=False, tol=0.):
    """Compare func(x), or its elementwise derivative, against expected numpy values."""
    ex = ExecutorFactory()
    C = ng.make_axis(name='C')
    N = ng.make_axis(name='N', batch=True)
    C.length, N.length = inputs.shape
    x = ng.placeholder([C, N])

    if deriv is False:
        costfunc = ex.executor(func(x), x)
        result = costfunc(inputs)
    else:
        costfunc = ex.derivative(func(x), x)

        result = costfunc(inputs)

        # costfunc returns the full Jacobian; stepping through the flattened
        # array with stride n + 1 (n = C.length * N.length) extracts its
        # diagonal, i.e. the elementwise derivatives
        result = result.ravel()
        result = result[0:result.size:(C.length * N.length + 1)]
        result = result.reshape(inputs.shape)

    np.testing.assert_allclose(result, expected_result, rtol=tol)
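The diagonal extraction above can be restated with np.diagonal; a sketch using hypothetical names (jacobian standing in for the raw output of costfunc):

n = C.length * N.length
elementwise = np.diagonal(jacobian.reshape(n, n)).reshape(inputs.shape)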
Example #15
def test_elementwise_unary_ops_matched_args(transformer_factory):
    """TODO."""
    delta = .001
    axes = ng.make_axes([ng.make_axis(20), ng.make_axis(20)])

    for np_op, be_op in ELEMENTWISE_UNARY_OPS:
        p_u = ng.placeholder(axes)
        u = rng.uniform(1.0, 2.0, p_u.axes)
        u_np = np_op(u)
        result_op = be_op(p_u)

        ex = ExecutorFactory()
        fun = ex.executor(result_op, p_u)
        dudunum_fun = ex.numeric_derivative(result_op, p_u, delta)
        dudut_fun = ex.derivative(result_op, p_u)

        u_t = fun(u)
        np.testing.assert_allclose(u_np, u_t, atol=1e-4, rtol=1e-4)
        dudunum = dudunum_fun(u)
        dudut = dudut_fun(u)
        np.testing.assert_allclose(dudunum, dudut, atol=1e-3, rtol=1e-3)
Example #16
def test_idempotent_axes_b():
    """
    Test axes transformations with autodiff, case b, with broadcast applied
    to the same tensor
    """
    ex = ExecutorFactory()
    axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])

    w = ng.variable(axes, initial_value=np.ones((3, 1)))
    l = ng.broadcast(w, axes)
    r = ng.broadcast(w, axes)
    result = ng.add(l, r)

    result = ng.cast_axes(result, axes)
    cost = ng.sum(result, reduction_axes=axes)
    grad = ng.deriv(cost, w)

    grad_comp = ex.executor(grad)
    cost_comp = ex.executor(cost)

    assert cost_comp() == 6.0
    assert np.array_equal(grad_comp(), np.ones((3, 1)) * 2.)
Example #17
def test_dot_sum_backprop(transformer_factory):
    delta = 1e-3
    rtol = atol = 1e-2

    C = ng.make_axis(name='C', length=2)
    N = ng.make_axis(name='N', length=3, batch=True)

    x_axes = ng.make_axes((C - 1, N))
    y_axes = ng.make_axes((C, ))
    x_np = np.random.random(x_axes.lengths).astype('float32')
    y_np = np.random.random(y_axes.lengths).astype('float32')

    # x_np[...] = [[1.0, 0.0,1.0], [2.0, 0.0, 3.0]]
    # y_np[...] = [-1.0, 1.0]

    x = ng.placeholder(x_axes)
    y = ng.placeholder(y_axes)
    d = ng.dot(x, y)
    s = ng.sum(d, out_axes=())

    ex = ExecutorFactory()
    s_fun = ex.executor(s, x, y)
    d_fun = ex.executor(d, x, y)

    dd_dx_fun_num = ex.numeric_derivative(d, x, delta, y)
    dd_dx_fun_sym = ex.derivative(d, x, y)

    dd_dy_fun_num = ex.numeric_derivative(d, y, delta, x)
    dd_dy_fun_sym = ex.derivative(d, y, x)

    ds_dx_fun_num = ex.numeric_derivative(s, x, delta, y)
    ds_dx_fun_sym = ex.derivative(s, x, y)

    ds_dy_fun_num = ex.numeric_derivative(s, y, delta, x)
    ds_dy_fun_sym = ex.derivative(s, y, x)

    # assert outputs are equal
    d_np = x_np.T.dot(y_np)
    d_val = d_fun(x_np, y_np)
    np.testing.assert_allclose(d_np, d_val, rtol=rtol, atol=atol)

    dd_dx_val_num = dd_dx_fun_num(x_np, y_np)
    dd_dx_val_sym = dd_dx_fun_sym(x_np, y_np)
    np.testing.assert_allclose(dd_dx_val_num,
                               dd_dx_val_sym,
                               rtol=rtol,
                               atol=atol)

    dd_dy_val_num = dd_dy_fun_num(y_np, x_np)
    dd_dy_val_sym = dd_dy_fun_sym(y_np, x_np)
    np.testing.assert_allclose(dd_dy_val_num,
                               dd_dy_val_sym,
                               rtol=rtol,
                               atol=atol)

    s_np = np.sum(d_np)
    s_val = s_fun(x_np, y_np)
    np.testing.assert_allclose(s_val, s_np, rtol=rtol, atol=atol)

    # assert derivative wrt to both tensors is the same when computed
    # symbolically by ngraph and numerically
    ds_dx_val_num = ds_dx_fun_num(x_np, y_np)
    ds_dx_val_sym = ds_dx_fun_sym(x_np, y_np)
    np.testing.assert_allclose(ds_dx_val_num,
                               ds_dx_val_sym,
                               rtol=rtol,
                               atol=atol)

    ds_dy_val_num = ds_dy_fun_num(y_np, x_np)
    ds_dy_val_sym = ds_dy_fun_sym(y_np, x_np)
    np.testing.assert_allclose(ds_dy_val_num,
                               ds_dy_val_sym,
                               rtol=rtol,
                               atol=atol)
Example #18
def test_tensor_dot_tensor(transformer_factory):
    """TODO."""
    C = ng.make_axis(name='C')
    D = ng.make_axis(name='D')
    H = ng.make_axis(name='H')
    N = ng.make_axis(name='N')

    tests = [{
        'tensor1': [[1, 2], [4, 5], [3, 4]],
        'tensor1_axes': (C, D - 1),
        'tensor2': [2, 5],
        'tensor2_axes': (D, ),
        'expected_output': [12, 33, 26],
        'axes_lengths': {
            C: 3,
            D: 2
        }
    }, {
        'tensor1': [[1, 4, 3], [2, 5, 4]],
        'tensor1_axes': (D - 1, C),
        'tensor2': [2, 5],
        'tensor2_axes': (D, ),
        'expected_output': [12, 33, 26],
        'axes_lengths': {
            C: 3,
            D: 2
        }
    }, {
        'tensor1': [[[1, 4], [2, 5]], [[7, 12], [13, 2]]],
        'tensor1_axes': (N, D - 1, C - 1),
        'tensor2': [[[3, 6], [7, 2]], [[9, 8], [10, 4]]],
        'tensor2_axes': (H, D, C),
        'expected_output': [[51, 81], [188, 297]],
        'axes_lengths': {
            N: 2,
            D: 2,
            C: 2,
            H: 2
        }
    }, {
        'tensor1': [1, 2],
        'tensor1_axes': (C, ),
        'tensor2': [7, 11, 13],
        'tensor2_axes': (D, ),
        'expected_output': [[7, 11, 13], [14, 22, 26]],
        'axes_lengths': {
            C: 2,
            D: 3
        }
    }, {
        'tensor1': [[1, 4], [6, 2]],
        'tensor1_axes': (C - 1, D - 1),
        'tensor2': [[1, 4], [6, 2]],
        'tensor2_axes': (C, D),
        'expected_output': 57,
        'axes_lengths': {
            C: 2,
            D: 2
        }
    }]

    for test in tests:
        # set up axis
        for axis, length in test['axes_lengths'].items():
            axis.length = length

        # set up tensors
        tensor1 = ng.placeholder(test['tensor1_axes'])
        value1 = np.array(test['tensor1'], dtype=np.float32)

        tensor2 = ng.placeholder(test['tensor2_axes'])
        value2 = np.array(test['tensor2'], dtype=np.float32)

        # compute outputs
        expected_output = np.array(test['expected_output'], dtype=np.float32)

        ex = ExecutorFactory()
        dot = ng.dot(tensor1, tensor2)
        evaluated_fun = ex.executor(dot, tensor1, tensor2)

        deriv1_fun_num = ex.numeric_derivative(dot, tensor1, 1e-3, tensor2)
        deriv1_fun_sym = ex.derivative(dot, tensor1, tensor2)

        deriv2_fun_num = ex.numeric_derivative(dot, tensor2, 1e-3, tensor1)
        deriv2_fun_sym = ex.derivative(dot, tensor2, tensor1)

        # assert outputs are equal
        evaluated = evaluated_fun(value1, value2)
        np.testing.assert_equal(evaluated, expected_output)

        # assert derivative wrt to both tensors is the same when computed
        # symbolically by ngraph and numerically
        deriv1_val_num = deriv1_fun_num(value1, value2)
        deriv1_val_sym = deriv1_fun_sym(value1, value2)
        np.testing.assert_allclose(deriv1_val_num,
                                   deriv1_val_sym,
                                   rtol=1e-2,
                                   atol=1e-2)

        deriv2_val_num = deriv2_fun_num(value2, value1)
        deriv2_val_sym = deriv2_fun_sym(value2, value1)
        np.testing.assert_allclose(deriv2_val_num,
                                   deriv2_val_sym,
                                   rtol=1e-2,
                                   atol=1e-2)
Example #19
def check_rnn(seq_len,
              input_size,
              hidden_size,
              batch_size,
              init_func,
              return_seq=True):
    # init_func is the initializer for the model params
    assert batch_size == 1, "the recurrent reference implementation only supports batch size 1"

    # ========== neon model ==========
    Cin = ng.make_axis(input_size)
    REC = ng.make_axis(seq_len, recurrent=True)
    N = ng.make_axis(batch_size, batch=True)
    H = ng.make_axis(hidden_size)
    ax_s = ng.make_axes([H, N])

    ex = ExecutorFactory()
    np.random.seed(0)

    rnn_ng = Recurrent(hidden_size,
                       init_func,
                       activation=Tanh(),
                       reset_cells=True,
                       return_sequence=return_seq)

    inp_ng = ng.placeholder([Cin, REC, N])
    init_state_ng = ng.placeholder(ax_s)

    # fprop graph
    out_ng = rnn_ng.train_outputs(inp_ng, init_state=init_state_ng)
    out_ng.input = True

    rnn_W_input = rnn_ng.W_input
    rnn_W_input.input = True
    rnn_W_recur = rnn_ng.W_recur
    rnn_W_recur.input = True
    rnn_b = rnn_ng.b
    rnn_b.input = True

    fprop_neon_fun = ex.executor(out_ng, inp_ng, init_state_ng)

    dWrecur_s_fun = ex.derivative(out_ng, rnn_W_recur, inp_ng, rnn_W_input,
                                  rnn_b)
    dWrecur_n_fun = ex.numeric_derivative(out_ng, rnn_W_recur, delta, inp_ng,
                                          rnn_W_input, rnn_b)
    dWinput_s_fun = ex.derivative(out_ng, rnn_W_input, inp_ng, rnn_W_recur,
                                  rnn_b)
    dWinput_n_fun = ex.numeric_derivative(out_ng, rnn_W_input, delta, inp_ng,
                                          rnn_W_recur, rnn_b)
    dWb_s_fun = ex.derivative(out_ng, rnn_b, inp_ng, rnn_W_input, rnn_W_recur)
    dWb_n_fun = ex.numeric_derivative(out_ng, rnn_b, delta, inp_ng,
                                      rnn_W_input, rnn_W_recur)

    # fprop on random inputs
    input_value = rng.uniform(-1, 1, inp_ng.axes)
    init_state_value = rng.uniform(-1, 1, init_state_ng.axes)
    fprop_neon = fprop_neon_fun(input_value, init_state_value).copy()

    # After the rnn graph has been executed, we can read back the W values.
    # Take copies so shared buffers don't confuse the derivative checks.
    Wxh_neon = rnn_ng.W_input.value.get(None).copy()
    Whh_neon = rnn_ng.W_recur.value.get(None).copy()
    bh_neon = rnn_ng.b.value.get(None).copy()

    # bprop derivs
    dWrecur_s = dWrecur_s_fun(Whh_neon, input_value, Wxh_neon, bh_neon)
    dWrecur_n = dWrecur_n_fun(Whh_neon, input_value, Wxh_neon, bh_neon)
    np.testing.assert_allclose(dWrecur_s, dWrecur_n, rtol=rtol, atol=atol)

    dWb_s = dWb_s_fun(bh_neon, input_value, Wxh_neon, Whh_neon)
    dWb_n = dWb_n_fun(bh_neon, input_value, Wxh_neon, Whh_neon)
    np.testing.assert_allclose(dWb_s, dWb_n, rtol=rtol, atol=atol)

    dWinput_s = dWinput_s_fun(Wxh_neon, input_value, Whh_neon, bh_neon)
    dWinput_n = dWinput_n_fun(Wxh_neon, input_value, Whh_neon, bh_neon)
    np.testing.assert_allclose(dWinput_s, dWinput_n, rtol=rtol, atol=atol)

    # ========= reference model ==========
    output_shape = (hidden_size, seq_len * batch_size)

    # generate random deltas tensor
    deltas = np.random.randn(*output_shape)

    # the reference code expects these shapes:
    # input_shape: (seq_len, input_size, batch_size)
    # output_shape: (seq_len, hidden_size, batch_size)
    deltas_ref = deltas.copy().T.reshape(seq_len, batch_size,
                                         hidden_size).swapaxes(1, 2)

    inp_ref = input_value.transpose([1, 0, 2])

    # reference numpy RNN
    rnn_ref = RefRecurrent(input_size, hidden_size)
    rnn_ref.Wxh[:] = Wxh_neon
    rnn_ref.Whh[:] = Whh_neon
    rnn_ref.bh[:] = bh_neon.reshape(rnn_ref.bh.shape)

    (dWxh_ref, dWhh_ref, db_ref, h_ref_list, dh_ref_list,
     d_out_ref) = rnn_ref.lossFun(inp_ref,
                                  deltas_ref,
                                  init_states=init_state_value)

    # comparing outputs
    if return_seq is False:
        h_ref_list = h_ref_list[:, -1].reshape(-1, 1)
    else:
        fprop_neon = fprop_neon[:, :, 0]
    np.testing.assert_allclose(fprop_neon, h_ref_list, rtol=0.0, atol=1.0e-5)

    return
Example #20
def test_elementwise_ops_unmatched_args(transformer_factory):
    """TODO."""
    # delta = .001
    N = ng.make_axis(name='N')
    H = ng.make_axis(name='H')
    W = ng.make_axis(name='W')

    W.length = 5
    H.length = 5
    N.length = 32
    sample_axes = [W, H]
    batch_axes = [W, H, N]
    broadcast_dims = (W.length, H.length, 1)

    for np_op, be_op in ELEMENTWISE_BINARY_OPS:
        # Unmatched shapes: u carries only the sample axes, v adds the batch axis
        p_u = ng.placeholder(sample_axes)
        p_v = ng.placeholder(batch_axes)
        u = rng.uniform(1.0, 2.0, p_u.axes)
        v = rng.uniform(1.0, 2.0, p_v.axes)

        # u op v
        uv_np = np_op(u.reshape(broadcast_dims), v)
        uv_op = be_op(p_u, p_v)

        ex = ExecutorFactory()

        # fun(u, v)
        uv_fun = ex.executor(uv_op, p_u, p_v)
        duvdunum_fun = ex.numeric_derivative(uv_op, p_u, delta, p_v)
        duvdut_fun = ex.derivative(uv_op, p_u, p_v)
        duvdvnum_fun = ex.numeric_derivative(uv_op, p_v, delta, p_u)
        duvdvt_fun = ex.derivative(uv_op, p_v, p_u)

        # fun(v, u)
        vu_np = np_op(v, u.reshape(broadcast_dims))
        vu_op = be_op(p_v, p_u)

        vu_fun = ex.executor(vu_op, p_u, p_v)
        dvudunum_fun = ex.numeric_derivative(vu_op, p_u, delta, p_v)
        dvudut_fun = ex.derivative(vu_op, p_u, p_v)
        dvudvnum_fun = ex.numeric_derivative(vu_op, p_v, delta, p_u)
        dvudvt_fun = ex.derivative(vu_op, p_v, p_u)

        # u op v
        result_be = uv_fun(u, v)
        np.testing.assert_allclose(uv_np, result_be, atol=1e-4, rtol=1e-4)
        duvdunum = duvdunum_fun(u, v)
        duvdut = duvdut_fun(u, v)
        np.testing.assert_allclose(duvdunum, duvdut, atol=1e-3, rtol=1e-3)

        duvdvnum = duvdvnum_fun(v, u)
        duvdvt = duvdvt_fun(v, u)
        np.testing.assert_allclose(duvdvnum, duvdvt, atol=1e-3, rtol=1e-3)

        # v op u
        result_be = vu_fun(u, v)
        np.testing.assert_allclose(vu_np, result_be, atol=1e-4, rtol=1e-4)
        dvudunum = dvudunum_fun(u, v)
        dvudut = dvudut_fun(u, v)
        np.testing.assert_allclose(dvudunum, dvudut, atol=1e-3, rtol=1e-3)

        dvudvnum = dvudvnum_fun(v, u)
        dvudvt = dvudvt_fun(v, u)
        np.testing.assert_allclose(dvudvnum, dvudvt, atol=1e-3, rtol=1e-3)
Example #21
def test_slice(transformer_factory):
    """TODO."""

    C = ng.make_axis(name='C')
    D = ng.make_axis(name='D')

    tests = [{
        'tensor': [[1, 3], [2, 5]],
        'tensor_axes': (C, D),
        'slice': [0, 1],
        'sliced_axes': (),
        'axes_lengths': {
            C: 2,
            D: 2
        },
        'expected': 3
    }, {
        'tensor': [[1, 3], [2, 5]],
        'tensor_axes': (C, D),
        'slice': [slice(None), 0],
        'sliced_axes': (C, ),
        'axes_lengths': {
            C: 2,
            D: 2
        },
        'expected': [1, 2]
    }, {
        'tensor': [[1, 3], [2, 5]],
        'tensor_axes': (C, D),
        'slice': [1, slice(None)],
        'sliced_axes': (D, ),
        'axes_lengths': {
            C: 2,
            D: 2
        },
        'expected': [2, 5]
    }, {
        'tensor': [[1, 4, 5], [2, 5, 6]],
        'tensor_axes': (C, D),
        'slice': [1, slice(1, 3)],
        'sliced_axes': None,
        'axes_lengths': {
            C: 2,
            D: 3
        },
        'expected': [5, 6]
    }, {
        'tensor': [[1, 4, 5], [2, 5, 6]],
        'tensor_axes': (C, D),
        'slice': [1, slice(None, None, -1)],
        'sliced_axes': None,
        'axes_lengths': {
            C: 2,
            D: 3
        },
        'expected': [6, 5, 2]
    }, {
        'tensor': [[1, 4, 5], [2, 5, 6]],
        'tensor_axes': (C, D),
        'slice': [slice(None, None, -1),
                  slice(None, None, -1)],
        'sliced_axes': None,
        'axes_lengths': {
            C: 2,
            D: 3
        },
        'expected': [[6, 5, 2], [5, 4, 1]]
    }]

    for test in tests:
        ex = ExecutorFactory()
        for axis, length in test['axes_lengths'].items():
            axis.length = length
        tensor_axes = test['tensor_axes']

        tensor_np = np.array(test['tensor'], dtype='float32')
        tensor = ng.placeholder(tensor_axes)
        expected = np.array(test['expected'], dtype='float32')

        s = test['slice']
        s_axes = test['sliced_axes']

        sliced = ng.Slice(tensor, s, s_axes)
        sliced_val_fun = ex.executor(sliced, tensor)

        num_deriv_fun = ex.numeric_derivative(sliced, tensor, delta)
        # Test backpropagation
        sym_deriv_fun = ex.derivative(sliced, tensor)

        sliced_val = sliced_val_fun(tensor_np)
        assert np.array_equal(sliced_val, expected)

        numeric_deriv = num_deriv_fun(tensor_np)
        sym_deriv = sym_deriv_fun(tensor_np)

        assert np.allclose(numeric_deriv, sym_deriv, rtol=rtol, atol=atol)
Example #22
def test_padding(transformer_factory):
    """TODO."""
    C = ng.make_axis(name='C')
    D = ng.make_axis(name='D')
    M = ng.make_axis(name='M')
    N = ng.make_axis(name='N')

    tests = [{
        'tensor': [[1, 3], [2, 5]],
        'tensor_axes': (C, D),
        'padding': [(0, 1), (1, 0)],
        'padded_axes': (M, N),
        'axes_lengths': {
            C: 2,
            D: 2,
            M: 3,
            N: 3
        }
    }, {
        'tensor': [[1, 4, 5], [1, 4, 6]],
        'tensor_axes': (C, D),
        'padding': [(0, 1), 1],
        'padded_axes': None,
        'axes_lengths': {
            C: 2,
            D: 3
        }
    }]

    for test in tests:
        ex = ExecutorFactory()
        for axis, length in test['axes_lengths'].items():
            axis.length = length
        tensor_axes = test['tensor_axes']
        tensor_np = np.array(test['tensor'], dtype='float32')
        tensor = ng.placeholder(tensor_axes)
        padding = test['padding']
        padded_axes = test['padded_axes']
        padded = ng.pad(tensor, padding, padded_axes)
        computed_val_fun = ex.executor(padded, tensor)

        # Test backpropagation
        numeric_deriv_fun = ex.numeric_derivative(padded, tensor, delta)
        sym_deriv_fun = ex.derivative(padded, tensor)

        def to_tuple(p):
            """
            Normalize a padding entry: an int p becomes the symmetric pair
            (p, p); tuples pass through unchanged.
            """
            return (p, p) if isinstance(p, int) else p

        np_padding = tuple(to_tuple(p) for p in padding)
        expected_val = np.pad(tensor_np, np_padding, mode='constant')

        computed_val = computed_val_fun(tensor_np)
        assert np.array_equal(expected_val, computed_val)

        numeric_deriv = numeric_deriv_fun(tensor_np)
        sym_deriv = sym_deriv_fun(tensor_np)

        assert np.allclose(numeric_deriv, sym_deriv, rtol=rtol, atol=atol)
Example #23
def test_expand_dims(transformer_factory):
    """TODO."""
    C = ng.make_axis(name='C')
    D = ng.make_axis(name='D')
    N = ng.make_axis(name='N')

    max_new_axis_length = 4

    tests = [{
        'tensor': [[2, 5], [13, 5]],
        'tensor_axes': (N, D),
        'tensor_axes_lengths': (2, 2),
        'new_axis': C,
    }, {
        'tensor': 2,
        'tensor_axes': (),
        'tensor_axes_lengths': (),
        'new_axis': D
    }]

    for test in tests:
        for new_axis_length in range(1, max_new_axis_length + 1):
            tensor_axes = test['tensor_axes']
            tensor_axes_lengths = test['tensor_axes_lengths']

            for dim in range(len(tensor_axes) + 1):
                ex = ExecutorFactory()
                for axis, length in zip(tensor_axes, tensor_axes_lengths):
                    axis.length = length

                new_axis = test['new_axis']
                new_axis.length = new_axis_length

                tensor_np = np.array(test['tensor'], dtype=np.float32)
                tensor = ng.placeholder(tensor_axes)

                expanded = ng.ExpandDims(tensor, new_axis, dim)
                expander_fun = ex.executor(expanded, tensor)

                num_deriv_fun = ex.numeric_derivative(expanded, tensor, delta)
                sym_deriv_fun = ex.derivative(expanded, tensor)

                expanded_shape = tensor_np.shape[:dim] \
                    + (new_axis.length,) + tensor_np.shape[dim:]
                expanded_strides = tensor_np.strides[:dim] \
                    + (0,) + tensor_np.strides[dim:]
                expanded_np = np.ndarray(buffer=tensor_np,
                                         shape=expanded_shape,
                                         strides=expanded_strides,
                                         dtype=tensor_np.dtype)

                expanded_result = expander_fun(tensor_np)
                assert np.array_equal(expanded_np, expanded_result)

                # Test backpropagation
                numeric_deriv = num_deriv_fun(tensor_np)
                sym_deriv = sym_deriv_fun(tensor_np)
                assert np.allclose(numeric_deriv,
                                   sym_deriv,
                                   rtol=rtol,
                                   atol=atol)
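For reference, the zero-stride ndarray constructed above is simply a broadcast view; the same expected value can be produced with public numpy APIs (a one-line sketch, not from the original test):

expanded_np = np.broadcast_to(np.expand_dims(tensor_np, dim), expanded_shape)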