Example #1
def test_constant_add():
    """TODO."""
    a = ng.constant(1)
    b = ng.constant(2)
    c = a + b

    with executor(c) as ex:
        result = ex()
    assert result == 3
Example #2
    def __call__(self, *args, **kwargs):
        if len(self.ops) == 0:
            self.beta_1 = ng.constant(self.beta_1, dtype=np.float32)
            self.beta_2 = ng.constant(self.beta_2, dtype=np.float32)
            self.t = ng.persistent_tensor(axes=(), initial_value=0)

        self.t = ng.sequential([ng.assign(self.t, self.t + 1), self.t])
        self.ell = self.lrate * ng.sqrt(1 - self.beta_2 ** self.t) / (1 - self.beta_1 ** self.t)

        return super(Adam, self).__call__(*args, **kwargs)
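The ell term above is Adam's bias-corrected step size, ell_t = lrate * sqrt(1 - beta_2**t) / (1 - beta_1**t). A standalone NumPy sketch of how it evolves; the values beta_1=0.9, beta_2=0.999 below are the common defaults and an assumption, not taken from this code:

import numpy as np

# Sketch of Adam's bias-corrected step size with assumed default betas.
lrate, beta_1, beta_2 = 0.001, 0.9, 0.999
for t in (1, 10, 100, 1000):
    ell = lrate * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
    print(t, ell)  # approaches lrate as t grows, since both corrections tend to 1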
Example #3
def test_constant_init():
    """TODO."""
    a = ng.constant(5)
    with executor(a) as ex:
        result = ex()
    assert result == 5

    nparray = np.array(range(5))
    a = ng.constant(nparray)
    with executor(a) as ex:
        result = ex()
    ng.testing.assert_allclose(result, nparray)
Example #4
def test_cputensor_add_constant():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_c = np.add(np_a, 2)

    a = ng.constant(np_a, [M, N])
    b = ng.constant(2)
    c = ng.add(a, b)
    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, np_c)
Example #5
def test_exit_condition(transformer_factory):
    bsz = 16
    class_num = 10

    # Limit the maximum absolute value of tensor elements to 7.9.
    #
    # np.random.randn fills the tensors with normally distributed values. In principle it can
    # return any value, but values above 5 are highly improbable and appear very rarely.
    # A limit of 7.9 therefore almost never modifies the tested tensor, yet it prevents
    # occasional random failures when the test runs in a continuous-integration environment.
    # 7.9 is the approximate upper bound of the range [4, 8); all numbers in this range can be
    # represented by flexpoint values with the same DEC.
    # Why not 15.9, the approximate upper bound of the range [8, 16)?
    # Values above 8 are highly improbable, and when they do occur they reduce the accuracy of
    # every number in the tensor, causing random failures. Most values drawn from a normal
    # distribution are close to 0.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    N, Y = ng.make_axis(bsz), ng.make_axis(class_num)
    y_val = rng.randn_abs_clip(ng.make_axes([N, Y]), clip_max=clip_val)
    y = ng.constant(y_val, ng.make_axes([N, Y]))

    likelihood = ng.log(ng.softmax(y, normalization_axes=y.axes[1]))

    with ExecutorFactory() as ex:
        comp = ex.executor(likelihood)

        val1 = comp()
        val2 = comp()
        ng.testing.assert_allclose(val1, val2, atol=0, rtol=0)
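The rarity claims in the comment are easy to verify. A minimal SciPy sketch (independent of ngraph) of the two-sided normal tail probabilities:

from scipy.stats import norm

# P(|X| > t) for a standard normal is 2 * (1 - Phi(t)).
for t in (5.0, 7.9):
    print(t, 2 * norm.sf(t))
# |X| > 5 occurs with probability ~5.7e-7 and |X| > 7.9 with ~2.8e-15,
# so clipping at 7.9 almost never alters the generated tensor.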
Example #6
def test_4d_chained(transformer_factory, input_axes):

    # Limit the maximum absolute value of tensor elements to 7.9.
    # See the description in test_exit_condition above.

    # Limit the minimum absolute value of elements fed to the reciprocal operation to 1/7.9.
    #
    # This follows from the limit above and from flexpoint accuracy.
    # Very small numbers have poor absolute accuracy, and taking their reciprocal yields very
    # large results with even worse accuracy. If arbitrarily small inputs to reciprocal were
    # allowed, the maximum absolute value of the result would be unbounded, and so would the
    # required absolute tolerance. To be able to set an atol the test can pass with, the
    # minimum element of the tensor feeding the reciprocal must be bounded away from zero.

    is_flex = is_flex_factory(transformer_factory)
    clip_val_max = 7.9 if is_flex else 0
    clip_val_min = 1.0 / 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_min=clip_val_min, clip_max=clip_val_max)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val_max)
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    im = ng.reciprocal(x)
    out = ng.sum(ng.add(im, y), reduction_axes=input_axes[0])

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.sum(np.add(np.reciprocal(x_val), y_val), 0)

    # atol_multiplier = 15 * x_val.shape[0]
    #
    # x_val.shape[0] is the number of elements added together by
    # ng.sum(X, reduction_axes=input_axes[0]).
    #
    # 15 is derived as follows:
    #
    # The input tensor holds values in the range [1/7.9, 7.9].
    # For DEC=12 the absolute error is 0.5 * 2^-12 = 0.000122.
    # With this error, 1/7.9 = 0.126582 becomes 0.126704.
    # The reciprocal of 1/7.9 is 7.9.
    # The reciprocal of 1/7.9 + err is 7.892389.
    # The absolute difference is 0.007611, which is 15.2 times larger
    # than the atol limit of 5e-4 from the Argon transformer.
    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4, atol_multiplier=15 * x_val.shape[0])
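The arithmetic behind the factor 15 can be reproduced directly. A minimal sketch in plain Python:

# Reproduce the error-propagation estimate from the comment above.
err = 0.5 * 2 ** -12           # DEC=12 absolute error: ~0.000122
x = 1 / 7.9                    # smallest allowed reciprocal input: ~0.126582
diff = abs(1 / (x + err) - 1 / x)
print(diff)                    # ~0.00761
print(diff / 5e-4)             # ~15.2, the ratio quoted against Argon's atol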
Example #7
def test_cputensor_fusion():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[3, 2, 1]], dtype=np.float32)
    np_d = np.multiply(np_b, np.add(np_a, 2))

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = ng.constant(2)
    d = ng.multiply(b, ng.add(a, c))

    with executor(d) as ex:
        result = ex()
    assert np.array_equal(result, np_d)
Example #8
def test_cputensor_dot():
    Y = ng.make_axis(length=2)
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[1, 2], [2, 3], [3, 4]], dtype=np.float32)
    np_c = np.dot(np_a, np_b)

    a = ng.constant(np_a, [M, N]).named('a')
    b = ng.constant(np_b, [N, Y]).named('b')
    c = ng.dot(a, b)

    with executor(c) as ex:
        result = ex()

    assert np.array_equal(result, np_c)
Example #9
def test_initial_value():
    # Test work-around for issue #1138
    w = [3, 4, 5]
    x = ng.constant(w)
    y = ng.variable([ng.make_axis(length=len(w))], initial_value=x)
    with ExecutorFactory() as ex:
        result = ex.executor(y)()
    ng.testing.assert_allclose(result, np.asarray(w, dtype=np.float32))
Example #10
def test_dot():
    H = ng.make_axis(length=1)
    W = ng.make_axis(length=4)
    np_a = np.array([[1, 2, 3, 4]], dtype=np.float32)
    np_b = np.array(3, dtype=np.float32)

    a = ng.constant(np_a, [H, W])
    b = ng.constant(np_b, [])
    c = ng.dot(a, b)

    with executor(c) as _dot:
        _dot_val = _dot()

        # compute reference
        _dot_val_ref = np.dot(np_a, np_b)

        # This checks the dot product between a scalar and a vector, which is
        # equivalent to elementwise multiplication of the vector by the scalar.
        assert np.allclose(_dot_val, _dot_val_ref)
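As the comment says, a dot product with a scalar reduces to elementwise scaling. A short NumPy cross-check, independent of the test above:

import numpy as np

a = np.array([[1, 2, 3, 4]], dtype=np.float32)
# np.dot treats a 0-d operand as a scalar multiplier.
assert np.array_equal(np.dot(a, np.array(3, dtype=np.float32)), a * 3)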
Example #11
def test_cputensor_mlp():
    """TODO."""
    D = ng.make_axis(length=3)
    H = ng.make_axis(length=2)
    N = ng.make_axis(length=1)

    np_x = np.array([[1, 2, 3]], dtype=np.float32)
    np_w = np.array([[1, 1], [1, 1], [1, 1]], dtype=np.float32)
    np_b = np.array([1, 2], dtype=np.float32)
    np_c = np.dot(np_x, np_w) + np_b

    x = ng.constant(np_x, [N, D])
    w = ng.constant(np_w, [D, H])
    b = ng.constant(np_b, [H])
    wx = ng.dot(x, w)
    c = wx + b
    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, np_c)
Example #12
def test_scalar_broadcast():
    """
    Test broadcasting a scalar into a tensor
    """
    with ExecutorFactory() as ex:
        x_axes = ng.make_axes()
        broadcast_axes = ng.make_axes([ng.make_axis(2), ng.make_axis(3)])
        x = ng.constant(1., axes=x_axes)
        z = ng.broadcast(x, axes=broadcast_axes)
        z_comp = ex.executor(z)
        assert np.array_equal(z_comp(), np.ones(broadcast_axes.lengths))
Example #13
def test_evaluation_twice():
    """Test executing a computation graph twice on a one layer MLP."""
    C = ng.make_axis(length=2)
    D = ng.make_axis(length=2)
    W = ng.make_axis(length=1)

    x = ng.constant(np.array([[1, 2], [3, 4]], dtype='float32'),
                    ng.make_axes([C, D]))

    hidden1_weights = ng.constant(np.array([[1], [1]], dtype='float32'),
                                  ng.make_axes([C, W]))

    hidden1_biases = ng.constant(np.array([[2], [2]], dtype='float32'),
                                 ng.make_axes([D, W]))

    hidden1 = ng.dot(hidden1_weights, x) + hidden1_biases

    with executor(hidden1) as comp:
        result_1 = comp()
        result_2 = comp()
    assert np.array_equal(result_1, result_2)
Example #14
def test_4d_elementwise(transformer_factory, input_axes):

    # Limit the maximum absolute value of tensor elements to 7.9.
    # See the description in test_exit_condition above.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    out = ng.add(x, y)

    with executor(out) as ex:
        graph_val = ex()

    np_val = np.add(x_val, y_val)

    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
Example #15
def test_concatenate(concatenate_variables):
    x_list, np_list, pos = concatenate_variables

    with ExecutorFactory() as ex:
        v = ng.concat_along_axis(x_list, x_list[0].axes[pos])
        d = ng.deriv(v,
                     x_list[0],
                     error=ng.constant(np.ones(v.axes.lengths), axes=v.axes))
        f = ex.executor([v, d])
        e_v, e_d = f()
        np_v = np.concatenate(np_list, axis=pos)
        ng.testing.assert_allclose(e_v.copy(), np_v)
        ng.testing.assert_allclose(e_d.copy(), np.ones(x_list[0].axes.lengths))
Example #16
def test_cputensor_add():
    """TODO."""
    Y = ng.make_axis(length=2)
    M = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    b = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    c = a + b
    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, [6, 10])

    np_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_b = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_c = np_a + np_b

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = a + b
    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, np_c)
Example #17
def concatenate_variables(request):
    num_vars, num_axes, concat_pos = request.param
    common_axes = [ng.make_axis(length=2) for _ in range(num_axes - 1)]
    x_list = list()
    np_list = list()
    ax = ng.make_axis(length=np.random.randint(3, 10))
    axes = ng.make_axes(common_axes[:concat_pos] + [ax] +
                        common_axes[concat_pos:])
    for _ in range(num_vars):
        var = np.random.uniform(0, 1, axes.full_lengths)
        np_list.append(var)
        x_list.append(ng.constant(var, axes=axes))

    return x_list, np_list, concat_pos
Example #18
def test_broadcast():
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=4)

    np_a = np.array([[1, 2, 3, 4]], dtype=np.float32)
    np_c = np.add(np_a, 2)

    a = ng.constant(np_a, [M, N])
    c = ng.add(a, 2)

    with executor(c) as _add:
        result = _add()

        assert np.allclose(result, np_c)
Example #19
def test_sum(num_units, sequence_length, batch_size):
    """
    This tests for a non-deterministic error that arose in ng.sum following
    a dot product using the gpu transformer.
    """
    shape = (num_units, sequence_length, batch_size)
    np_inp = np.random.uniform(-1, 1, shape)
    # Use an identity weight matrix on top of it
    np_w = np.eye(shape[0])

    # Create ngraph versions
    inp = ng.constant(np_inp)
    reduction_axes = inp.axes[:-2]
    other_axes = inp.axes[-2:]
    new_axis = ng.make_axis(length=shape[0])
    w_axes = ng.make_axes(new_axis) | reduction_axes
    w = ng.constant(np_w, axes=w_axes)

    # Reshape to perform the equivalent dot product in numpy
    inp_reshape = np.reshape(
        np_inp, (np.prod(reduction_axes.lengths), np.prod(other_axes.lengths)))
    w_reshape = np.reshape(np_w, (new_axis.length, inp_reshape.shape[0]))

    # Reduce dimensions with identity weight matrix
    np_x = np.dot(w_reshape, inp_reshape)
    x = ng.dot(w, inp)

    # Sum over all but the first axis
    output_axes = ng.make_axes(x.axes[0])
    y = ng.sum(x, out_axes=output_axes)
    np_y = np.sum(np_x, axis=1)

    with executor([y, x]) as f:
        y_val, x_val = f()

        assert_allclose(x_val.ravel(), np_x.ravel(), atol=1e-1)
        assert_allclose(y_val, np_y, atol=1e-1)
Example #20
def test_flatten_deriv_simplified():
    """
    Test derivative with dot and flatten
    """
    ax_N = ng.make_axis(length=3)
    ax_Y = ng.make_axis(length=2)

    x = ng.placeholder(ng.make_axes([ax_N]))
    w = ng.constant([5, 2], axes=ng.make_axes([ax_Y]))
    logits = ng.dot(x, w)
    cost = ng.sum(logits, reduction_axes=logits.axes)

    delta = 0.001
    u = rng.uniform(.1, 5.0, x.axes)
    check_derivative(cost, x, delta, u, atol=1e-2, rtol=1e-2)
Example #21
def ngraph_l2_norm(np_array):
    """
    TODO.

    Arguments:
      np_array: TODO

    Returns:
      TODO
    """
    # Build one ngraph axis per array dimension.
    axes = ()
    for i, l in enumerate(np_array.shape):
        axes += (ng.make_axis(length=l).named('axis%s' % i),)

    np_tensor = ng.constant(np_array, axes)
    var = ng.variable(axes, initial_value=np_tensor)
    with executor(ng.sqrt(ng.squared_L2(var))) as ex:
        return ex()
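A usage sketch: since the axes built above include no batch axis, sqrt(squared_L2(x)) reduces over everything and should equal the flat L2 norm. This check is an assumption about the helper, not part of the original source:

import numpy as np

x = np.random.uniform(-1, 1, (3, 4)).astype(np.float32)
# ngraph_l2_norm(x) should agree with sqrt(sum(x**2)).
np.testing.assert_allclose(ngraph_l2_norm(x),
                           np.linalg.norm(x.ravel()),
                           rtol=1e-5)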
Example #22
def test_4d_reduction(transformer_factory, input_axes):

    # Limit the maximum absolute value of tensor elements to 7.9.
    # See the description in test_exit_condition above.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    x = ng.constant(x_val, input_axes)

    out1 = ng.sum(x, reduction_axes=input_axes[1])
    out2 = ng.sum(x, reduction_axes=input_axes[3])

    with executor([out1, out2]) as ex:
        graph_val1, graph_val2 = ex()
        np_val1 = np.sum(x_val, 1)
        np_val2 = np.sum(x_val, 3)
        ng.testing.assert_allclose(graph_val1, np_val1, rtol=1e-4, atol_multiplier=x_val.shape[1])
        ng.testing.assert_allclose(graph_val2, np_val2, rtol=1e-4, atol_multiplier=x_val.shape[3])
Example #23
def test_squared_L2():
    H = ng.make_axis(2)
    W = ng.make_axis(3)
    N = ng.make_axis(5, name='N')

    axes = ng.make_axes([H, W, N])
    a = ng.constant(np.ones(axes.lengths), axes=axes)

    with ExecutorFactory() as factory:
        l2_samples_fun = factory.executor(ng.squared_L2(a))
        l2_samples_val = np.ones([N.length]) * H.length * W.length
        l2_all_fun = factory.executor(ng.squared_L2(a, out_axes=[]))
        l2_all_val = np.ones([]) * W.length * H.length * N.length
        l2_W_fun = factory.executor(ng.squared_L2(a, reduction_axes=[H, N]))
        l2_W_val = np.ones([W.length]) * H.length * N.length
        l2_samples_result = l2_samples_fun()
        l2_all_result = l2_all_fun()
        l2_W_result = l2_W_fun()
        ng.testing.assert_allclose(l2_samples_val, l2_samples_result)
        ng.testing.assert_allclose(l2_all_val, l2_all_result)
        ng.testing.assert_allclose(l2_W_val, l2_W_result)
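The expected values follow from summing squares of ones. A quick NumPy cross-check of the same arithmetic, independent of ngraph:

import numpy as np

ones = np.ones((2, 3, 5))  # H, W, N
# Reducing over H and W leaves H*W = 6 per sample.
assert np.array_equal(np.sum(ones ** 2, axis=(0, 1)), np.full(5, 6.0))
# Reducing over every axis gives H*W*N = 30.
assert np.sum(ones ** 2) == 30.0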
Example #24
def test_metadata_capture():
    layer = Dummy()
    x = ng.constant(2)
    ret = layer.configure(x)
    assert ret.metadata['layer_type'] == 'convolution'
Example #25
def cifar_mean_subtract(x):
    bgr_mean = ng.constant(
        const=np.array([104., 119., 127.]),
        axes=[x.axes.channel_axis()])

    return (x - bgr_mean) / 255.
Example #26
    def __init__(self, params):
        lr_policy.__init__(self, params['name'], params['base_lr'])
        self.gamma = ng.constant(axes=(), const=params['gamma'])
        self.step = ng.constant(axes=(),
                                const=params['step'],
                                dtype=uint_dtype)
Example #27
def i1k_mean_subtract(x):
    bgr_mean = ng.constant(
        axes=[x.axes.channel_axis()],
        const=np.array([127.0, 119.0, 104.0]))
    return (x - bgr_mean)
Example #28
    def __init__(self, params):
        lr_policy.__init__(self, params['name'], params['base_lr'])
        self.max_iter = ng.constant(axes=(), const=params['max_iter'])
        self.power = ng.constant(axes=(), const=params['power'])
Example #29
    def __init__(self, name, base_lr):
        self.name = name
        self.base_lr = ng.constant(axes=(), const=base_lr)
Example #30
    def __init__(self, params):
        lr_policy.__init__(self, params['name'], params['base_lr'])
        self.gamma = ng.constant(axes=(), const=params['gamma'])
        self.step_size = ng.constant(axes=(), const=params['step_size'])
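These __init__ snippets capture only the policy constants; the schedule math itself is not shown. As a hedged NumPy sketch of what a step policy with gamma and step_size typically computes (the step_policy_lr name and formula here are assumptions, not the library's confirmed implementation):

import numpy as np

# Assumed step schedule: lr = base_lr * gamma ** floor(iteration / step_size).
def step_policy_lr(base_lr, gamma, step_size, iteration):
    return base_lr * gamma ** np.floor(iteration / step_size)

# With base_lr=0.1, gamma=0.1, step_size=100 the rate drops 10x every 100 iterations.
print([step_policy_lr(0.1, 0.1, 100, t) for t in (0, 99, 100, 250)])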