Example #1
def test_execute_non_placeholder():
    """
    Expect a failure if a non-input (Variable) is used as an argument to
    executor.
    """
    N = ng.make_axis(1)

    x = ng.variable([N])
    y = ng.variable([N])

    with pytest.raises(ValueError):
        executor(x + y, x, y)
Example #2
def test_variance_wgrad(transformer_factory):
    ax = ng.name_scope('x')
    ax.N = ng.make_axis(128, batch=True)
    ax.Y = ng.make_axis(100)

    inputs = ng.placeholder([ax.Y, ax.N])
    targets = ng.placeholder([ax.Y, ax.N])

    inp_stat = ng.variance(inputs, reduction_axes=inputs.axes.batch_axes())
    err = ng.sum(inp_stat - targets, out_axes=())
    d_inputs = ng.deriv(err, inputs)
    comp_func = executor([err, d_inputs], inputs, targets)

    input_value = rng.uniform(-0.1, 0.1, inputs.axes)
    target_value = rng.uniform(-0.1, 0.1, targets.axes)
    ng_f_res, ng_b_res = comp_func(input_value, target_value)

    np_f_res = np.sum(
        np.var(input_value, axis=1, keepdims=True) - target_value)

    np.testing.assert_allclose(np_f_res, ng_f_res, atol=1e-4, rtol=1e-4)

    np_b_res = 2 * (input_value - np.mean(input_value, axis=1, keepdims=True))

    np.testing.assert_allclose(np_b_res, ng_b_res, atol=1e-4, rtol=1e-4)
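A note on the NumPy reference above: err broadcasts (inp_stat - targets) over the batch axis before summing, so the usual 1/N factor in the variance derivative cancels. Sketching the derivation, with mu_j the batch mean of row j and N the batch size:

\[
\frac{\partial \operatorname{var}_j}{\partial x_{jk}} = \frac{2}{N}\,(x_{jk} - \mu_j),
\qquad
\frac{\partial\,\mathrm{err}}{\partial x_{jk}} = N \cdot \frac{2}{N}\,(x_{jk} - \mu_j) = 2\,(x_{jk} - \mu_j),
\]

which is exactly the np_b_res expression, with no 1/N factor.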
Example #3
def test_reduction(transformer_factory):
    C = ng.make_axis(name='C')
    W = ng.make_axis(name='W')
    H = ng.make_axis(name='H')

    C.length = 4
    W.length = 4
    H.length = 4
    axes = ng.make_axes([C, W, H])

    u = rng.uniform(-1.0, 1.0, axes)

    for npred, bered, red in [(np.sum, ng.sum, 'sum'),
                              (np.max, ng.max, 'max'),
                              (np.min, ng.min, 'min')]:
        for reduction_axes in [[C],
                               [W],
                               [H],
                               [C, W],
                               [W, H]]:
            p_u = ng.placeholder(axes)
            dims = tuple(axes.index(axis) for axis in reduction_axes)
            npval = npred(u, dims)
            graph_reduce = bered(p_u, reduction_axes=reduction_axes)
            graph_val = executor(graph_reduce, p_u)(u)
            np.testing.assert_allclose(
                npval, graph_val, rtol=1e-5,
                err_msg='red:{red}, axes:{axes}'.format(
                    red=red, axes=reduction_axes))
Example #4
def test_linear_ones(basic_linargs, transformer_factory):

    # Basic sanity check with all ones for the inputs and weights:
    # each row of the output should be the sum of the weights for that
    # output unit. This confirms that the correct number of operations
    # is being run.
    nin, nout, batch_size = basic_linargs

    # set inputs
    N = ng.make_axis(batch_size, name="N", batch=True)
    F = ng.make_axis(nin, name="F")

    inp = ng.placeholder([F, N])
    layer = Linear(nout=nout, init=UniformInit(1.0, 1.0))
    fprop = layer.train_outputs(inp)

    # create data
    x = np.ones((nin, batch_size))

    # evaluate
    ngt.make_transformer()
    out, w = executor([fprop, layer.W], inp)(x)
    sums = np.sum(w, 1).reshape((nout, 1)) * np.ones((1, batch_size))

    assert np.allclose(sums, out, atol=0.0,
                       rtol=0.0), '%e' % np.max(np.abs(out - sums))
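Spelling out the check as a worked equation (not part of the test): with all-ones inputs and weights,

\[
\mathrm{out}_{k,n} = \sum_{f} W_{k,f}\, x_{f,n} = \sum_{f} W_{k,f},
\]

so every column of the output equals the vector of row sums of W, which is exactly what sums constructs via np.sum(w, 1).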
Example #5
def test_evaluation_twice(transformer_factory):
    """Test executing a computation graph twice on a one layer MLP."""
    C = ng.make_axis(name='C')
    W = ng.make_axis(name='W')
    D = ng.make_axis(name='D')

    C.length = 2
    D.length = 2
    W.length = 1

    x = ng.constant(np.array([[1, 2], [3, 4]], dtype='float32'),
                    ng.make_axes([C, D]))

    hidden1_weights = ng.constant(np.array([[1], [1]], dtype='float32'),
                                  ng.make_axes([C - 1, W]))

    hidden1_biases = ng.constant(np.array([[2], [2]], dtype='float32'),
                                 ng.make_axes([D, W]))

    hidden1 = ng.dot(hidden1_weights, x) + hidden1_biases

    comp = executor(hidden1)

    result_1 = comp()
    result_2 = comp()
    assert np.array_equal(result_1, result_2)
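For concreteness, a hand computation (not part of the test): the dot contracts the C axis, so w^T x = [1, 1] applied to [[1, 2], [3, 4]] gives [1 + 3, 2 + 4] = [4, 6], and the bias of 2 brings the values to 6 and 8; both result_1 and result_2 should hold these values, up to axis ordering.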
Example #6
def test_variance_sqrt_inverse(transformer_factory):
    ax = ng.name_scope('x')
    ax.N = ng.make_axis(128, batch=True)
    ax.Y = ng.make_axis(100)

    inputs = ng.placeholder([ax.Y, ax.N])
    targets = ng.placeholder([ax.Y, ax.N])

    epsilon = 1e-3

    inp_stat = ng.reciprocal(
        ng.sqrt(
            ng.variance(inputs, reduction_axes=inputs.axes.batch_axes()) + epsilon
        )
    )
    err = ng.sum(inp_stat - targets, out_axes=())
    d_inputs = ng.deriv(err, inputs)
    comp_func = executor([err, d_inputs], inputs, targets)

    input_value = rng.uniform(-1, 1, inputs.axes)
    target_value = rng.uniform(-1, 1, targets.axes)
    ng_f_res, ng_b_res = comp_func(input_value, target_value)

    npv = np.var(input_value, axis=1, keepdims=True) + epsilon
    np_f_res = 1.0 / np.sqrt(npv)

    npv_delta = 2 * (input_value - np.mean(input_value, axis=1, keepdims=True))

    np_b_res = - 0.5 * np_f_res / npv * npv_delta

    np_f_res = np.sum(np_f_res - target_value)

    np.testing.assert_allclose(np_f_res, ng_f_res, atol=1e-4, rtol=1e-4)
    np.testing.assert_allclose(np_b_res, ng_b_res, atol=1e-4, rtol=1e-4)
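As in test_variance_wgrad, the broadcast over the batch axis cancels the 1/N from the variance derivative. With f_j = (var_j + eps)^{-1/2}:

\[
\frac{\partial f_j}{\partial x_{jk}} = -\tfrac{1}{2}\,(\operatorname{var}_j + \epsilon)^{-3/2}\,\frac{\partial \operatorname{var}_j}{\partial x_{jk}},
\qquad
\frac{\partial\,\mathrm{err}}{\partial x_{jk}} = -\tfrac{1}{2}\,\frac{f_j}{\operatorname{var}_j + \epsilon}\cdot 2\,(x_{jk} - \mu_j),
\]

matching np_b_res, where npv_delta carries the 2(x - mu) factor.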
Example #7
def test_scalar(transformer_factory):
    """TODO."""
    # Simple evaluation of a scalar
    val = 5
    x = ng.constant(val)

    cval = executor(x)()
    assert cval.shape == ()
    np.testing.assert_allclose(cval, val)
Example #8
def test_conv_flatten_deriv(transformer_factory):
    """
    Test deriv of conv followed by flatten
    """
    # set shape
    C, D, H, W, N = (3, 1, 28, 28, 8)
    C, T, R, S, K = (3, 1, 5, 5, 32)

    # i, f, o axes
    ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
    ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
    ax_o = ng.make_axes([
        ng.make_axis(32, roles=[ar.Channel]),
        ng.make_axis(1, roles=[ar.Depth]),
        ng.make_axis(24, roles=[ar.Height]),
        ng.make_axis(24, roles=[ar.Width]), ax.N
    ])
    ax_i.set_shape((C, D, H, W, N))
    ax_f.set_shape((C, T, R, S, K))
    params = dict(pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1)
    axes_rsck = ng.make_axes([ax.R, ax.S, ax.C, ax.K])
    axes_rsck_prime = ng.make_axes(
        [ng.make_axis(l) for l in axes_rsck.lengths])

    # broadcast input / filter axes
    image = ng.constant(np.ones(ax_i.lengths), ax_i)
    filter = ng.variable(axes_rsck_prime, initial_value=np.ones((R, S, C, K)))
    filter_casted = ng.cast_axes(filter, axes_rsck)
    filter_casted = ng.expand_dims(filter_casted, ax.T, 0)
    filter_casted = ng.axes_with_order(filter_casted, axes=ax_f)

    # convolution
    output = ng.convolution(params, image, filter_casted, axes=ax_o)
    oC, oD, oH, oW, oN = output.axes
    output = ng.axes_with_order(output,
                                axes=ng.make_axes([oN, oD, oH, oW, oC]))

    # slice away the oD
    out_slicing = [slice(None), 0, slice(None), slice(None), slice(None)]
    conv = ng.Slice(output, out_slicing)
    flatten = ng.flatten_at(conv, idx=1)

    # cost and grad
    cost = ng.sum(flatten, reduction_axes=flatten.axes)
    grad = ng.deriv(cost, filter)

    # compute
    conv_grad_comp = executor([conv, grad])
    conv_val, grad_val = conv_grad_comp()

    assert np.allclose(conv_val, np.zeros_like(conv_val) + 75.)
    assert np.allclose(grad_val, np.zeros_like(grad_val) + 4608.)
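The expected constants check out by hand: with an all-ones image and filter, every element of the valid convolution equals C * T * R * S = 3 * 1 * 5 * 5 = 75, and since the cost sums every output element, each filter weight receives a gradient equal to the number of output positions it touches, N * oD * oH * oW = 8 * 1 * 24 * 24 = 4608 (with oH = oW = 28 - 5 + 1 = 24).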
Example #9
def test_missing_arguments_to_execute():
    """
    Expect a failure if the wrong number of arguments are passed to a
    computation.
    """
    N = ng.make_axis(1)

    x = ng.placeholder([N])
    y = ng.placeholder([N])

    f = executor(x + y, x, y)
    with pytest.raises(ValueError):
        f(1)
Example #10
def test_uniform_range_posneg(transformer_factory):
    """TODO."""
    M = ng.make_axis(5, name='M')
    N = ng.make_axis(8, name='N')

    ng_a = ng.persistent_tensor([M, N], initial_value=10.0)
    ng_a = ng.uniform(ng_a, low=-0.5, high=0.5)

    result = executor(ng_a)()
    print(result)

    assert np.all(result < 0.5)
    assert np.all(result >= -0.5)
    assert not np.all(result >= 0.0)
Example #11
def test_tensor_sum_single_reduction_axes(transformer_factory):
    """TODO."""
    Y = ng.make_axis(name='Y')
    N = ng.make_axis(name='N')

    N.length = 2
    Y.length = 2

    a = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [N, Y])

    b = ng.sum(a, reduction_axes=Y)

    result = executor(b)()
    np.testing.assert_allclose(result, [2.0, 2.0])
Example #12
def test_constant_tensor_multiply(transformer_factory):
    Y = ng.make_axis(name='Y')
    N = ng.make_axis(name='N')

    Y.length = 2
    N.length = 2

    a = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])
    b = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])

    c = ng.multiply(a, b)

    result = executor(c)()
    np.testing.assert_allclose(result, [[1.0, 1.0], [1.0, 1.0]])
Example #13
def test_elementwise_fp16_out(transformer_factory):
    Y = ng.make_axis(name='Y')
    N = ng.make_axis(name='N')

    Y.length = 2
    N.length = 2

    a = ng.constant(np.array([[1.0, 2.0], [4.0, 12.0]], dtype='float32'), [Y, N])
    b = ng.constant(np.array([[1.0, 2.0], [6.0, 12.0]], dtype='float32'), [Y, N])

    c = ng.multiply(a, b, dtype=np.dtype(np.float16))

    result = executor(c)()
    np.testing.assert_allclose(result, [[1.0, 4.0], [24.0, 144.0]])
Example #14
def test_constant_multiply(transformer_factory):
    # TODO: better error message when missing axes length in cases where it
    # is needed
    Y = ng.make_axis(name='Y')
    Y.length = 1

    # TODO: don't require axes
    a = ng.constant(np.array([4.0], dtype='float32'), [Y])
    b = ng.constant(np.array([2.0], dtype='float32'), [Y])

    c = ng.multiply(a, b)

    result = executor(c)()
    np.testing.assert_allclose(result, [8])
Example #15
def test_tensor_constant(transformer_factory):
    W = ng.make_axis(name='W')
    H = ng.make_axis(name='H')

    # Pass a NumPy array through as a constant
    W.length = 10
    H.length = 20
    aaxes = ng.make_axes([W, H])
    ashape = aaxes.lengths
    asize = aaxes.size
    aval = np.arange(asize, dtype=np.float32).reshape(ashape)

    x = ng.constant(aval, aaxes)
    cval = executor(x)()
    np.testing.assert_allclose(cval, aval)
Example #16
def ngraph_l2_norm(np_array):
    """
    TODO.

    Arguments:
      np_array: TODO

    Returns:
      TODO
    """
    axes = ()
    for i, l in enumerate(np_array.shape):
        axes += (ng.make_axis(name='axis%s' % i, length=l), )

    np_tensor = ng.constant(np_array, axes)
    var = ng.variable(axes, initial_value=np_tensor)
    return executor(ng.sqrt(ng.squared_L2(var)))()
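A minimal usage sketch (not from the original source; it assumes ng.squared_L2 here reduces over all of the variable's axes):

import numpy as np

a = np.arange(6, dtype=np.float32).reshape(2, 3)
# Should agree with the flattened 2-norm computed by NumPy.
assert np.allclose(ngraph_l2_norm(a), np.linalg.norm(a))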
Example #17
def one_hot_comparison(hot_axes, axes, C):
    """
    TODO.

    Arguments:
      hot_axes: TODO
      axes: TODO
    """
    u = rng.random_integers(0, C.length - 1, axes, dtype=np.int8)
    u_p = ng.placeholder(axes, dtype=u.dtype)
    v = np.zeros(hot_axes.lengths, dtype=np.float32)
    udxiter = np.nditer(u, flags=['multi_index'])
    for uiter in udxiter:
        vindex = [int(uiter)]
        vindex.extend(udxiter.multi_index)
        v[tuple(vindex)] = 1

    v_t = executor(ng.one_hot(u_p, axis=C), u_p)(u)
    np.testing.assert_allclose(v_t, v)
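Incidentally, the nditer loop builds the same tensor as this vectorized NumPy sketch (a hypothetical equivalent, with the one-hot axis moved to the front to match hot_axes):

import numpy as np

u = np.array([[0, 2], [1, 0]], dtype=np.int8)  # class labels
n_classes = 3
# np.eye(n)[u] has shape u.shape + (n,); move the one-hot axis first.
v = np.moveaxis(np.eye(n_classes, dtype=np.float32)[u], -1, 0)
assert v.shape == (3, 2, 2) and v.sum() == u.size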
Example #18
def test_cross_entropy_binary_logistic_shortcut(transformer_factory):
    """TODO."""
    N = ng.make_axis(name='N')
    W = ng.make_axis(name='W')

    W.length = 20
    N.length = 128
    axes = ng.make_axes([W, N])
    p_u = ng.placeholder(axes)
    u = rng.uniform(-3.0, 3.0, p_u.axes)
    p_v = ng.placeholder(axes)
    v = np_softmax(rng.uniform(-3.0, 3.0, p_u.axes), 0)

    cel = cross_entropy_binary_logistic(u, v)
    cel_shortcut = cross_entropy_binary_logistic_shortcut(u, v)
    np.testing.assert_allclose(cel, cel_shortcut, rtol=1e-5)

    cel_graph = executor(ng.cross_entropy_binary_inner(ng.sigmoid(p_u), p_v), p_u, p_v)(u, v)
    np.testing.assert_allclose(cel, cel_graph, rtol=1e-5)
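The shortcut under test is presumably the standard rewriting of the binary cross entropy on logits, which never forms the sigmoid explicitly; it follows from log sigma(u) = -log(1 + e^{-u}) and log(1 - sigma(u)) = -u - log(1 + e^{-u}):

\[
-v \log \sigma(u) - (1 - v)\,\log(1 - \sigma(u)) = (1 - v)\,u + \log\!\left(1 + e^{-u}\right).
\]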
Example #19
def test_dimshuffle_fprop(transformer_factory):
    """
    dimshuffle a 2d array and make sure fprop works
    """
    A = ng.make_axis(2)
    B = ng.make_axis(3)

    x = ng.placeholder(ng.make_axes([A, B]))

    # compute dimshuffle with graph
    output = ng.Dimshuffle(x, axes=ng.make_axes([B, A]))

    assert output.axes == ng.make_axes([B, A])

    # randomly initialize
    x_value = rng.uniform(-1, 1, x.axes)

    result = executor(output, x)(x_value)

    np.testing.assert_allclose(result, x_value.T)
Example #20
def test_linear_zeros(basic_linargs, transformer_factory):
    # basic sanity check with zero weights and random inputs
    nin, nout, batch_size = basic_linargs

    # set inputs
    N = ng.make_axis(batch_size, name="N", batch=True)
    F = ng.make_axis(nin, name="F")

    inp = ng.placeholder([F, N])
    layer = Linear(nout=nout, init=UniformInit(0.0, 0.0))
    fprop = layer.train_outputs(inp)

    # create data
    x = np.random.random((nin, batch_size))

    # evaluate
    ngt.make_transformer()
    out = executor(fprop, inp)(x)

    assert np.min(out) == 0.0 and np.max(out) == 0.0
Example #21
def test_print_op_fprop(capfd):
    """
    Ensure fprop of PrintOp makes no change to input, and also prints to
    stdout.
    """

    A = ng.make_axis(1, name='A')

    x = ng.placeholder(ng.make_axes([A]))

    # hardcode the value so there is no rounding to worry about in the
    # string comparison in the final assert
    x_value = np.array([1])

    output = ng.PrintOp(x, 'prefix')
    result = executor(output, x)(x_value)

    np.testing.assert_allclose(result, x_value)

    out, err = capfd.readouterr()
    assert str(x_value[0]) in out
    assert 'prefix' in out
Example #22
def test_convolution(transformer_factory):
    """
    test convolution forward and backward path
    """
    N = 128
    C, K = 3, 8
    D, T = 1, 1
    H = W = 32
    R = S = 2

    padding = dict(pad_d=0, pad_h=0, pad_w=0)
    strides = dict(str_d=1, str_h=1, str_w=1)
    conv_params = padding.copy()
    conv_params.update(strides)

    ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
    ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
    ax_i.set_shape((C, D, H, W, N))
    ax_f.set_shape((C, T, R, S, K))
    ax_o = ng.make_axes([
        ng.make_axis(ax_f.role_axes(ar.Channelout)[0].length,
                     name='C',
                     roles=[ar.Channel]),
        spatial_axis(ax_i,
                     ax_f,
                     padding['pad_d'],
                     strides['str_d'],
                     role=ar.Depth),
        spatial_axis(ax_i,
                     ax_f,
                     padding['pad_h'],
                     strides['str_h'],
                     role=ar.Height),
        spatial_axis(ax_i,
                     ax_f,
                     padding['pad_w'],
                     strides['str_w'],
                     role=ar.Width), ax.N
    ])

    inputs = ng.placeholder(axes=ax_i)
    filters = ng.placeholder(axes=ax_f)

    # randomly initialize
    input_value = rng.uniform(-1, 1, ax_i)
    filter_value = rng.uniform(-1, 1, ax_f)

    assert input_value.shape == ax_i.lengths
    assert filter_value.shape == ax_f.lengths

    output = ng.convolution(conv_params, inputs, filters, axes=ax_o)
    targets = ng.placeholder(axes=output.axes)

    costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)
    error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)
    d_inputs = ng.deriv(error, inputs)
    d_filters = ng.deriv(error, filters)

    targets_value = rng.uniform(.1, 0.9, output.axes)

    conv_executor = executor([output, error, d_inputs, d_filters], inputs,
                             filters, targets)
    result_ng, err_ng, gradI_ng, gradF_ng = conv_executor(
        input_value, filter_value, targets_value)

    # Now compute reference values via NEON
    NervanaObject.be.bsz = N
    neon_layer = Convolution(fshape=(R, S, K),
                             padding=padding,
                             strides=strides)

    inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))
    neon_layer.W = neon_layer.be.array(filter_value.reshape(C * R * S * T, K))
    neon_layer.dW = neon_layer.be.empty_like(neon_layer.W)
    neon_layer.configure((C, H, W))
    neon_layer.prev_layer = True
    neon_layer.allocate()
    neon_layer.set_deltas(DummyDeltaBuffers())

    result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)

    act_result_ne = 1. / (1.0 + np.exp(-result_ne))
    err = neon_layer.be.array(
        (act_result_ne - targets_value).reshape(-1, N) / float(N))
    gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)
    gradF_ne = neon_layer.dW.get().reshape(ax_f.lengths)

    # Compare fprop
    np.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)

    # Compare bprop
    np.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)

    # Compare update
    np.testing.assert_allclose(gradF_ng, gradF_ne, rtol=0, atol=1e-4)
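Assuming spatial_axis implements the usual output-size formula, the shapes here work out to

\[
H_{\mathrm{out}} = \frac{H + 2\,\mathrm{pad} - R}{\mathrm{stride}} + 1 = \frac{32 + 0 - 2}{1} + 1 = 31,
\]

and likewise W_out = 31 and D_out = 1, so the output has shape K x D_out x H_out x W_out x N = 8 x 1 x 31 x 31 x 128.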
Example #23
def test_pooling():
    """
    test pooling forward and backward path
    """
    N = 128
    C = 3
    D = 1
    H = W = 32

    J = T = 1
    R = S = 2
    ngt.make_transformer()

    padding = dict(pad_d=0, pad_h=0, pad_w=0, pad_c=0)
    strides = dict(str_d=1, str_h=1, str_w=1, str_c=1)
    fshape = dict(J=J, T=T, R=R, S=S)

    pool_params = dict(op='max')
    pool_params.update(padding)
    pool_params.update(strides)
    pool_params.update(fshape)

    ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
    ax_i.set_shape((C, D, H, W, N))
    inputs = ng.placeholder(axes=ax_i)

    ax_o = ng.make_axes([
        spatial_axis(ax_i, J, padding['pad_c'], strides['str_c'], role=ar.Channel),
        spatial_axis(ax_i, T, padding['pad_d'], strides['str_d'], role=ar.Depth),
        spatial_axis(ax_i, R, padding['pad_h'], strides['str_h'], role=ar.Height),
        spatial_axis(ax_i, S, padding['pad_w'], strides['str_w'], role=ar.Width),
        ax.N
    ])

    # randomly initialize
    input_value = rng.uniform(-1, 1, ax_i)

    assert input_value.shape == ax_i.lengths

    # compute pooling with graph
    output = ng.pooling(pool_params, inputs, axes=ax_o)
    targets = ng.placeholder(axes=ax_o)

    costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)
    error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)
    d_inputs = ng.deriv(error, inputs)

    targets_value = rng.uniform(.1, 0.9, output.axes)

    conv_executor = executor([output, error, d_inputs], inputs, targets)
    result_ng, err_ng, gradI_ng = conv_executor(input_value, targets_value)

    # Now compute reference values via NEON
    NervanaObject.be.bsz = N
    neon_layer = Pooling(fshape=fshape, padding=padding, strides=strides, op="max")

    inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))
    neon_layer.configure((C, H, W))
    neon_layer.prev_layer = True
    neon_layer.allocate()
    neon_layer.set_deltas(DummyDeltaBuffers())

    result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)

    act_result_ne = 1. / (1.0 + np.exp(-result_ne))
    err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))
    gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)

    # Compare fprop
    np.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)

    # Compare bprop
    np.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)
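The same output-size arithmetic applies to the pooling window (again assuming the standard formula inside spatial_axis): the 2x2 window with stride 1 gives (32 - 2)/1 + 1 = 31 along H and W, while J = 1 and T = 1 leave the channel and depth extents at C = 3 and D = 1.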