def test_exit_condition(transformer_factory):
    bsz = 16
    class_num = 10

    # Limit the maximum absolute value of tensor elements to 7.9.
    #
    # The tensors are filled with np.random.randn, which can return any value,
    # but values above 5 are highly improbable and appear very rarely. A limit
    # of 7.9 will almost never modify the tested tensor, yet it prevents
    # occasional random failures when the test runs in a continuous
    # integration environment. The limit is the approximate upper bound of the
    # range [4, 8); all numbers in this range can be expressed by flexpoint
    # numbers with the same dec.
    # Why not 15.9, the approximate limit of the [8, 16) range? Values above 8
    # are highly improbable, and when they do appear they can cause random
    # failures by reducing the accuracy of every number in the tensor; most
    # numbers drawn from a normal distribution are close to 0.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    N, Y = ng.make_axis(bsz), ng.make_axis(class_num)
    y_val = rng.randn_abs_clip(ng.make_axes([N, Y]), clip_max=clip_val)
    y = ng.constant(y_val, ng.make_axes([N, Y]))

    likelihood = ng.log(ng.softmax(y, normalization_axes=y.axes[1]))

    with ExecutorFactory() as ex:
        comp = ex.executor(likelihood)

        val1 = comp()
        val2 = comp()
        ng.testing.assert_allclose(val1, val2, atol=0, rtol=0)
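

# A minimal numpy-only sketch of the clipping helper used above.
# rng.randn_abs_clip itself is not shown in this file; the behavior below
# (|N(0, 1)| samples clipped at clip_max, with clip_max == 0 meaning "no
# clipping") is an assumption based on how the tests call it.
def _randn_abs_clip_sketch(shape, clip_max):
    # Draw absolute values of standard-normal samples.
    vals = np.abs(np.random.randn(*shape))
    # Cap rare tail values so they cannot degrade the flexpoint scale.
    if clip_max > 0:
        vals = np.minimum(vals, clip_max)
    return vals
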
def test_kernel_cache(transformer_factory):
    X = ng.make_axis(32)
    Y = ng.make_axis(32)
    C = ng.make_axis(16384)
    axes = ng.make_axes([
        X,
        Y
    ])
    bcast_axes = ng.make_axes([
        X,
        Y,
        C
    ])

    # Limit the maximum absolute value of tensor elements to 7.9.
    # See the explanation in test_exit_condition above.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(axes, clip_max=clip_val)
    y_val = rng.randn_abs_clip(bcast_axes, clip_max=clip_val)
    z_val = rng.randn_abs_clip(bcast_axes, clip_max=clip_val)

    x = ng.constant(x_val, axes)
    y = ng.constant(y_val, bcast_axes)
    z = ng.constant(z_val, bcast_axes)

    out = ng.add(ng.add(x, y), z)

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.add(np.add(x_val.reshape(32, 32, 1), y_val), z_val)
    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4, atol_multiplier=2)
def make_weights(input_placeholder,
                 hidden_size,
                 weight_initializer,
                 bias_initializer,
                 init_state=False):
    in_feature_axes = tuple(
        input_placeholder.axes)[:-2]  # input axis + any extra axes of length 1
    out_feature_axes = ng.make_axes([ng.make_axis(hidden_size)])
    batch_axis = input_placeholder.axes.batch_axis()
    hidden_axis = ng.make_axis(hidden_size)

    w_in_axes = ng.make_axes(hidden_axis) + in_feature_axes
    w_rec_axes = ng.make_axes(hidden_axis) + out_feature_axes

    W_in = weight_initializer(w_in_axes)
    W_rec = weight_initializer(w_rec_axes)
    b = bias_initializer(hidden_axis)

    if init_state:
        ax_s = ng.make_axes([hidden_axis, batch_axis])
        init_state = ng.placeholder(ax_s)
        init_state_value = rng.uniform(-1, 1, ax_s)
    else:
        init_state = None
        init_state_value = None

    return W_in, W_rec, b, init_state, init_state_value
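
# Hedged usage sketch for make_weights. The initializer arguments are
# assumptions: any callable mapping Axes to an array of that shape works
# (e.g. the module's rng); hidden_size=4 is an arbitrary illustration, and
# make_placeholder is the helper defined later in this file.
#
#   input_placeholder, _ = make_placeholder(input_size=6, sequence_length=5,
#                                           batch_size=2)
#   W_in, W_rec, b, h0, h0_val = make_weights(
#       input_placeholder, hidden_size=4,
#       weight_initializer=lambda axes: rng.uniform(-1, 1, axes),
#       bias_initializer=lambda axes: rng.uniform(-1, 1, axes),
#       init_state=True)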
def test_flat_tensor_dot_tensor():
    """
    Ensure that a flattened argument axis is not unflattend in the result.

    """
    H = ng.make_axis(2)
    W = ng.make_axis(7)
    C = ng.make_axis(3)
    K = ng.make_axis(11)

    axes_a = ng.make_axes([H, W, C])
    a = ng.constant(np.ones(axes_a.lengths), axes=axes_a)
    flat_a = ng.flatten_at(a, 2)

    axes_b = ng.make_axes([C, K])
    b = ng.constant(np.ones(axes_b.lengths), axes=axes_b)

    result = ng.dot(b, flat_a)

    with ExecutorFactory() as factory:
        result_fun = factory.executor(result)
        result_val = result_fun()

    result_correct = np.ones_like(result_val) * C.length
    ng.testing.assert_allclose(result_val, result_correct)
def test_duplicate_axis_different_length():
    a = ng.make_axis(1, name='N')
    b = ng.make_axis(2, name='N')
    with pytest.raises(ValueError) as e:
        ng.make_axes([a, b])

    # ensure the name of the axis appears in the exception
    assert 'N' in str(e.value)
def test_change_recurrent_axis_length(recurrent_layer_cls, batch_size,
                                      sequence_length, input_size,
                                      hidden_size):
    """
    Recurrent layer support for changing REC axis length
    (needed by seq2seq inference)
    """
    # create three identical recurrent layers with same weights
    W_input_val = np.random.normal(size=(hidden_size, input_size))
    W_recur_val = np.random.normal(size=(hidden_size, hidden_size))
    rec1 = recurrent_layer_cls(nout=hidden_size,
                               init=ConstantInit(W_input_val),
                               init_inner=ConstantInit(W_recur_val),
                               activation=Tanh())
    rec2 = recurrent_layer_cls(nout=hidden_size,
                               init=ConstantInit(W_input_val),
                               init_inner=ConstantInit(W_recur_val),
                               activation=Tanh())
    rec3 = recurrent_layer_cls(nout=hidden_size,
                               init=ConstantInit(W_input_val),
                               init_inner=ConstantInit(W_recur_val),
                               activation=Tanh())

    # create input placeholders and values
    # sequence length greater than 1
    N = ng.make_axis(length=batch_size, name='N')
    REC = ng.make_axis(length=sequence_length, name='REC')
    M = ng.make_axis(length=input_size, name='M')
    xn_axes = ng.make_axes([M, REC, N])
    xn = ng.placeholder(axes=xn_axes)
    xn_val = np.random.normal(size=(input_size, sequence_length, batch_size))
    # sequence length 1
    REC1 = ng.make_axis(length=1, name='REC')
    x1_axes = ng.make_axes([M, REC1, N])
    x1 = ng.placeholder(axes=x1_axes)
    x1_val = np.random.normal(size=(input_size, 1, batch_size))

    # computations that reuse a single layer while switching the REC axis of
    # its input
    y1_n = rec1(xn)
    y1_1 = rec1(x1)

    # check against not switching
    y2_n = rec2(xn)
    y3_1 = rec3(x1)

    with ExecutorFactory() as ex:

        y1_n_comp = ex.executor(y1_n, xn)
        y1_1_comp = ex.executor(y1_1, x1)
        y2_n_comp = ex.executor(y2_n, xn)
        y3_1_comp = ex.executor(y3_1, x1)

        ng.testing.assert_allclose(y1_n_comp(xn_val), y2_n_comp(xn_val))
        ng.testing.assert_allclose(y1_1_comp(x1_val), y3_1_comp(x1_val))
def test_scalar_broadcast():
    """
    Test broadcasting a scalar into a tensor
    """
    with ExecutorFactory() as ex:
        x_axes = ng.make_axes()
        broadcast_axes = ng.make_axes([ng.make_axis(2), ng.make_axis(3)])
        x = ng.constant(1., axes=x_axes)
        z = ng.broadcast(x, axes=broadcast_axes)
        z_comp = ex.executor(z)
        assert np.array_equal(z_comp(), np.ones(broadcast_axes.lengths))
    def make_placeholders(self):
        batch_axis = ng.make_axis(length=self.batch_size, name="N")
        time_axis = ng.make_axis(length=self.time_steps, name="REC")
        feature_axis = ng.make_axis(length=self.nfeatures, name="feature_axis")

        placeholders = {}
        for k in self.data_arrays.keys():
            if k in ('inp_txt', 'teacher_tgt'):
                p_axes = ng.make_axes([batch_axis, time_axis, feature_axis])
            else:
                p_axes = ng.make_axes([batch_axis, time_axis])
            placeholders[k] = ng.placeholder(p_axes)

        return placeholders
def test_axes_map():
    """
    map from Axes([aaa, bbb]) to Axes([zzz, bbb]) via AxesMap {aaa: zzz}
    """
    a = ng.make_axis(1, name='aaa')
    b = ng.make_axis(2, name='bbb')
    z = ng.make_axis(1, name='zzz')

    axes_before = ng.make_axes([a, b])
    axes_after = ng.make_axes([z, b])

    axes_map = AxesMap({a.name: z.name})

    assert axes_after == axes_map.map_axes(axes_before)
def test_flatten_deriv_simplified():
    """
    Test derivative with dot and flatten
    """
    ax_N = ng.make_axis(length=3)
    ax_Y = ng.make_axis(length=2)

    x = ng.placeholder(ng.make_axes([ax_N]))
    w = ng.constant([5, 2], axes=ng.make_axes([ax_Y]))
    logits = ng.dot(x, w)
    cost = ng.sum(logits, reduction_axes=logits.axes)

    delta = 0.001
    u = rng.uniform(.1, 5.0, x.axes)
    check_derivative(cost, x, delta, u, atol=1e-2, rtol=1e-2)
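
# A minimal numpy-only sketch of what check_derivative verifies above: the
# symbolic gradient should match a central finite difference. For
# cost = sum(outer(x, w)), the exact gradient w.r.t. each x_i is sum(w).
# Names and sizes below are illustrative, not taken from check_derivative.
def _finite_difference_sketch():
    w_np = np.array([5.0, 2.0])
    x_np = np.random.uniform(0.1, 5.0, size=3)
    delta = 0.001
    cost = lambda x: np.outer(x, w_np).sum()
    # Central difference along each coordinate direction e.
    numeric = np.array([(cost(x_np + delta * e) - cost(x_np - delta * e))
                        / (2 * delta) for e in np.eye(3)])
    # d/dx_i sum_ij x_i * w_j = sum_j w_j, a constant gradient.
    exact = np.full(3, w_np.sum())
    assert np.allclose(numeric, exact, atol=1e-2, rtol=1e-2)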
def input_axes(request):
    return ng.make_axes([
        ng.make_axis(length=request.param[0]),
        ng.make_axis(length=request.param[1]),
        ng.make_axis(length=request.param[2]),
        ng.make_axis(length=request.param[3])
    ])
def test_stack():
    W = ng.make_axis(length=4)
    H = ng.make_axis(length=5)
    I = ng.make_axis(length=3)

    axes = ng.make_axes([W, H])

    rng = RandomTensorGenerator(0, np.float32)

    a_v = [rng.uniform(0, 1, axes) for _ in range(I.length)]

    for pos in range(len(axes) + 1):
        a = [ng.placeholder(axes, initial_value=p) for p in a_v]

        s = ng.stack(a, I, pos)

        with ExecutorFactory() as ex:
            num_funs = [
                ex.numeric_derivative(s, p, delta,
                                      *(q for q in a if q is not p))
                for p in a
            ]
            sym_funs = [
                ex.derivative(s, p, *(q for q in a if q is not p))
                for p in a
            ]

            for n_fun, s_fun, a_i in zip(num_funs, sym_funs, a_v):
                na_is = list(na_i for na_i in a_v if na_i is not a_i)
                d_n = n_fun(a_i, *na_is)
                d_s = s_fun(a_i, *na_is)
                ng.testing.assert_allclose(d_n, d_s, rtol=rtol, atol=atol)
def test_variable_init(C):
    w_init = np.random.rand(C.length)
    W = ng.variable(ng.make_axes([C]), initial_value=w_init)

    with ExecutorFactory() as ex:
        result = ex.executor(W)()
    ng.testing.assert_allclose(result, w_init)
def test_variable():
    input_axes = ng.make_axes([ng.make_axis(10), ng.make_axis(3)])
    var = ng.variable(axes=input_axes)
    assign_val = np.random.rand(10, 3)
    var_assign = ng.AssignOp(tensor=var, val=assign_val)
    var_seq = ng.sequential([var_assign, var])
    var_comp = ng.computation(var_seq, "all")
    results = dict()
    weight_saver = Saver()
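    # First transformer: run the assignment once, then persist the variable
    # with Saver so a later transformer can restore it.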
    with closing(ngt.make_transformer()) as transformer:
        var_func = transformer.add_computation(var_comp)
        weight_saver.setup_save(transformer=transformer, computation=var_comp)
        results['saved'] = var_func().copy()
        weight_saver.save(filename="test_variable")

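    # Second transformer: overwrite the variable, then restore the saved
    # value and compare all three snapshots below.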
    reassign_val = np.random.rand(10, 3)
    var_reassign = ng.AssignOp(tensor=var, val=reassign_val)

    var_recomp = ng.computation(var_reassign, "all")
    var_read = ng.computation(var, "all")
    with closing(ngt.make_transformer()) as restore_transformer:
        var_recompfunc = restore_transformer.add_computation(var_recomp)
        weight_saver.setup_restore(transformer=restore_transformer,
                                   computation=var_recomp,
                                   filename="test_variable")
        var_readfunc = restore_transformer.add_computation(var_read)
        var_recompfunc()
        results['reassigned'] = var_readfunc().copy()
        weight_saver.restore()
        results['restored'] = var_readfunc().copy()
    os.remove("test_variable.npz")
    assert np.allclose(results['saved'], assign_val, atol=0)
    assert np.allclose(results['reassigned'], reassign_val, atol=0)
    assert np.allclose(results['saved'], results['restored'], atol=0)
def make_placeholder(input_size, sequence_length, batch_size, extra_axes=0):

    input_axis = ng.make_axis(name='features')
    recurrent_axis = ng.make_axis(name='REC')
    batch_axis = ng.make_axis(name='N')

    input_axes = ng.make_axes([input_axis, recurrent_axis, batch_axis])
    input_axes.set_shape((input_size, sequence_length, batch_size))
    input_axes = ng.make_axes([
        ng.make_axis(length=1, name='features_' + str(i))
        for i in range(extra_axes)
    ]) + input_axes

    input_placeholder = ng.placeholder(input_axes)
    input_value = rng.uniform(-0.01, 0.01, input_axes)

    return input_placeholder, input_value
def CDHWN():
    return ng.make_axes([
        ng.make_axis(3, name='C'),
        ng.make_axis(4, name='D'),
        ng.make_axis(5, name='H'),
        ng.make_axis(6, name='W'),
        ng.make_axis(8, name='N')
    ])
def test_linear_keep_batch_axis():
    feature_axis = ng.make_axis(1, name='A')
    batch_axis = ng.make_axis(2, name='N')

    x = ng.placeholder([batch_axis])
    linear = Linear(axes=feature_axis,
                    keep_axes=[batch_axis],
                    init=UniformInit(1.0, 1.0))(x)

    assert linear.axes == ng.make_axes([feature_axis, batch_axis])
def test_axes_ops(test_cases):
    # unpack test case
    axes_op_str, lhs_axes, rhs_axes, expected_res = test_cases
    lhs_axes = ng.make_axes(lhs_axes)
    rhs_axes = ng.make_axes(rhs_axes)
    if isinstance(expected_res, list):
        expected_res = ng.make_axes(expected_res)

    # check results against expected_res
    if expected_res is ValueError:
        with pytest.raises(ValueError):
            getattr(lhs_axes, axes_op_str)(rhs_axes)
    else:
        res = getattr(lhs_axes, axes_op_str)(rhs_axes)
        if res != expected_res:
            raise ValueError(
                "%s operation with %s and %s: "
                "expected result %s, but actually got %s" %
                (axes_op_str, lhs_axes, rhs_axes, expected_res, res))
def _input_output_axes(w_axes):
    """
    Given the axes from a tensor of weights, provides the axes corresponding to inputs
    (often called 'fan-in') and the axes corresponding to outputs (often called 'fan-out').

    Args:
        w_axes (Axes): Axes of weight tensor

    Returns:
        axes_i (Axes): Fan-in axes
        axes_o (Axes): Fan-out axes

    Note:
        Assumes that output axes are shadow axes
    """

    return (
        ng.make_axes([axis for axis in w_axes if not is_shadow_axis(axis)]),
        ng.make_axes([axis for axis in w_axes if is_shadow_axis(axis)]),
    )
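
# Hedged usage sketch for _input_output_axes. The shadow-axis convention is
# assumed from the Note above: output (fan-out) axes are shadow axes, so a
# weight tensor's axes split cleanly into the two groups. Axis names here are
# illustrative only.
#
#   w_axes = ng.make_axes([shadow_out_axis, in_feature_axis])
#   fan_in, fan_out = _input_output_axes(w_axes)
#   # fan_in  == ng.make_axes([in_feature_axis])
#   # fan_out == ng.make_axes([shadow_out_axis])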
def test_evaluation_twice():
    """Test executing a computation graph twice on a one layer MLP."""
    C = ng.make_axis(length=2)
    D = ng.make_axis(length=2)
    W = ng.make_axis(length=1)

    x = ng.constant(np.array([[1, 2], [3, 4]], dtype='float32'),
                    ng.make_axes([C, D]))

    hidden1_weights = ng.constant(np.array([[1], [1]], dtype='float32'),
                                  ng.make_axes([C, W]))

    hidden1_biases = ng.constant(np.array([[2], [2]], dtype='float32'),
                                 ng.make_axes([D, W]))

    hidden1 = ng.dot(hidden1_weights, x) + hidden1_biases

    with executor(hidden1) as comp:
        result_1 = comp()
        result_2 = comp()
    assert np.array_equal(result_1, result_2)
def test_dilated_conv(dilation):
    """Test that the dilated convolution layer output matches expected. This test compares
    the maximum output value to an expected max output value. The expected value is computed
    based on the dilation parameter. The test also checks that the output size matches the
    expected size based on the dilaton parameter value."""
    image_size = 3
    batch_size = 1
    init_val = 0.1
    conv_size = 3
    pad = 3
    N_filters = 1
    image_channels = 3
    model = Sequential([
        Convolution((conv_size, conv_size, N_filters),
                    filter_init=ConstantInit(val=init_val),
                    padding=pad,
                    dilation=dilation)
    ])
    X = np.ones(shape=(batch_size, image_channels, image_size,
                       image_size))  # Create a dummy image
    data = {'image': X, 'iteration': 1}
    data_size = OrderedDict([('N', batch_size), ('C', image_channels),
                             ('H', image_size), ('W', image_size)])
    ax = [
        ng.make_axis(length=data_size[k], name=k)
        for k in list(data_size.keys())
    ]
    p_axes = ng.make_axes(ax)
    named_inputs = {'image': ng.placeholder(p_axes)}
    outputs = model(named_inputs['image'])
    named_outputs = {outputs.name: outputs}
    with closing(ngt.make_transformer()) as transformer:
        m = make_bound_computation(transformer, named_outputs, named_inputs)
    output_dict = m(data)
    output = output_dict[list(output_dict.keys())[0]]
    filter_size = dilation * (conv_size -
                              1) + 1  # Compute expected filter size
    # Compute the expected output size based on convolution parameters
    out_size = (image_size + 2 * pad - filter_size) + 1
    filt_tmp = np.zeros(filter_size)
    filt_tmp[0::dilation] = 1
    # max overlap between dilated filter and image (in 1-d)
    max_overlap = int(np.min([filter_size, image_size]))
    exp_max_output = init_val * image_channels * (np.sum(
        filt_tmp[0:max_overlap]))**2
    # The expected max output changes with the dilation parameter value
    assert int(10 * np.max(output)) == int(10 * exp_max_output), \
        ("Dilated conv max outputs do not match expected: "
         "{} != {}").format(np.max(output), exp_max_output)
    assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \
        ("Dilated conv output is not expected size: "
         "{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size))
def test_tensor_slice():
    """
    slicing a tensor should work like numpy
    """
    input_axes = ng.make_axes(
        [ng.make_axis(10), ng.make_axis(20),
         ng.make_axis(5)])

    x = ng.placeholder(axes=input_axes)

    assert x[:5].axes.full_lengths == (5, 20, 5)
    assert x[:, 2:7].axes.full_lengths == (10, 5, 5)
    assert x[:5, :, :-1].axes.full_lengths == (5, 20, 4)
def test_pad_mixed():
    """
    mix 0 padding with non-0 padding
    """
    input_axes = ng.make_axes([ng.make_axis(1), ng.make_axis(1)])
    x = ng.variable(input_axes)

    pad = ng.pad(x, [0, 1])

    assert pad.axes[0].name == x.axes[0].name
    assert pad.axes[1].name == x.axes[1].name
    assert pad.axes[0].length == x.axes[0].length
    assert pad.axes[1].length != x.axes[1].length
def test_rng_repetition():
    """
    Tests rng ops, to make sure they run every execution and not just initialization
    """
    axes = ng.make_axes([ng.make_axis(2), ng.make_axis(2)])
    y = ng.uniform(axes)
    mysum = ng.sum(y)
    trans = ng.transformers.make_transformer()
    rand_comp = trans.computation(mysum)
    val1 = rand_comp().copy()
    val2 = rand_comp().copy()
    assert val1 != val2
    trans.close()
def test_tensor_size():
    n, m = 3, 4

    N = ng.make_axis(length=n)
    M = ng.make_axis(length=m)

    aaxes = ng.make_axes([N, M])
    x = ng.placeholder(aaxes)

    size_fun = ng.tensor_size(x)
    nptensor = np.arange(n * m).reshape(n, m)

    with executor(size_fun, x) as ex:
        assert ex(nptensor) == n * m
def concatenate_variables(request):
    num_vars, num_axes, concat_pos = request.param
    common_axes = [ng.make_axis(length=2) for _ in range(num_axes - 1)]
    x_list = list()
    np_list = list()
    ax = ng.make_axis(length=np.random.randint(3, 10))
    axes = ng.make_axes(common_axes[:concat_pos] + [ax] +
                        common_axes[concat_pos:])
    for _ in range(num_vars):
        var = np.random.uniform(0, 1, axes.full_lengths)
        np_list.append(var)
        x_list.append(ng.constant(var, axes=axes))

    return x_list, np_list, concat_pos
def test_slice_nop():
    """
    slicing an axis shouldn't change the name
    """
    input_axes = ng.make_axes([ng.make_axis(1), ng.make_axis(1)])
    x = ng.variable(input_axes)

    s = ng.tensor_slice(x, [
        slice(None, None, None),
        slice(None, None, 1),
    ])

    assert s.axes[0] == x.axes[0]
    assert s.axes[1] == x.axes[1]
def test_sum(num_units, sequence_length, batch_size):
    """
    This tests for a non-deterministic error that arose in ng.sum following
    a dot product using the gpu transformer.
    """
    shape = (num_units, sequence_length, batch_size)
    np_inp = np.random.uniform(-1, 1, shape)
    # Use an identity weight matrix on top of it
    np_w = np.eye(shape[0])

    # Create ngraph versions
    inp = ng.constant(np_inp)
    reduction_axes = inp.axes[:-2]
    other_axes = inp.axes[-2:]
    new_axis = ng.make_axis(length=shape[0])
    w_axes = ng.make_axes(new_axis) | reduction_axes
    w = ng.constant(np_w, axes=w_axes)

    # Reshape to do similar dot in numpy
    inp_reshape = np.reshape(
        np_inp, (np.prod(reduction_axes.lengths), np.prod(other_axes.lengths)))
    w_reshape = np.reshape(np_w, (new_axis.length, inp_reshape.shape[0]))

    # Reduce dimensions with identity weight matrix
    np_x = np.dot(w_reshape, inp_reshape)
    x = ng.dot(w, inp)

    # Sum over all but the first axis
    output_axes = ng.make_axes(x.axes[0])
    y = ng.sum(x, out_axes=output_axes)
    np_y = np.sum(np_x, axis=1)

    with executor([y, x]) as f:
        y_val, x_val = f()

        assert_allclose(x_val.ravel(), np_x.ravel(), atol=1e-1)
        assert_allclose(y_val, np_y, atol=1e-1)
    def make_placeholders(self, include_iteration=False):
        placeholders = {}
        batch_axis = ng.make_axis(self._dataloader.batch_size, name="N")
        for placeholder_name, axis_info in self._dataloader.axes_info:
            p_axes = ng.make_axes([batch_axis])
            for nm, sz in axis_info:
                if placeholder_name == 'label':
                    continue
                if nm in NAME_MAP:
                    nm = NAME_MAP[nm]
                p_axes += ng.make_axis(name=nm, length=sz)
            placeholders[placeholder_name] = ng.placeholder(p_axes)
        if include_iteration:
            placeholders['iteration'] = ng.placeholder(axes=())
        return placeholders
    def __init__(self, input_placeholder, output_size, RNN, bn_params):

        # Set up axes
        F, T, N = tuple(input_placeholder.axes)
        H = ng.make_axis(length=output_size, name="hidden")
        H2 = ng.make_axis(length=output_size, name="hidden_tmp")

        self.input_placeholder = input_placeholder

        # Make reference placeholder
        self.reference_input = ng.placeholder(axes=[H, T, N])

        # Create weight matrices
        w_rec_axes = ng.make_axes([H, H2])
        w_in_axes = ng.make_axes([H, F])
        self.W_rec = rng.uniform(-1, 1, w_rec_axes)
        self.W_in = rng.uniform(-1, 1, w_in_axes)
        self.W_id = np.eye(output_size).astype("float32")

        self.rnn_args = dict(nout=output_size,
                             init_inner=self.W_rec,
                             return_sequence=True,
                             activation=Tanh())

        self.reference_rnn = RNN(init=self.W_id, **self.rnn_args)
        self.rnn = RNN(init=self.W_in, batch_norm=True, **self.rnn_args)

        if self.has_gates:
            self.batch_norm_dict = self.rnn.batch_norm
        else:
            self.batch_norm_dict = {'gate': self.rnn.batch_norm}

        self.default_gate = list(self.batch_norm_dict.keys())[0]

        for bn in self.batch_norm_dict.values():
            bn.__dict__.update(bn_params)